Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'ib-mfd-gpio-4.9', 'ib-mfd-gpio-regulator-4.9', 'ib-mfd-input-4.9', 'ib-mfd-regulator-4.9', 'ib-mfd-regulator-4.9.1', 'ib-mfd-regulator-rtc-4.9', 'ib-mfd-regulator-rtc-4.9-1' and 'ib-mfd-rtc-4.9' into ibs-for-mfd-merged

+5831 -1426
+18
Documentation/block/queue-sysfs.txt
··· 14 14 This file allows to turn off the disk entropy contribution. Default 15 15 value of this file is '1'(on). 16 16 17 + dax (RO) 18 + -------- 19 + This file indicates whether the device supports Direct Access (DAX), 20 + used by CPU-addressable storage to bypass the pagecache. It shows '1' 21 + if true, '0' if not. 22 + 17 23 discard_granularity (RO) 18 24 ----------------------- 19 25 This shows the size of internal allocation of the device in bytes, if ··· 51 45 hw_sector_size (RO) 52 46 ------------------- 53 47 This is the hardware sector size of the device, in bytes. 48 + 49 + io_poll (RW) 50 + ------------ 51 + When read, this file shows the total number of block IO polls and how 52 + many returned success. Writing '0' to this file will disable polling 53 + for this device. Writing any non-zero value will enable this feature. 54 54 55 55 iostats (RW) 56 56 ------------- ··· 162 150 device state. This means that it might not be safe to toggle the 163 151 setting from "write back" to "write through", since that will also 164 152 eliminate cache flushes issued by the kernel. 153 + 154 + write_same_max_bytes (RO) 155 + ------------------------- 156 + This is the number of bytes the device can write in a single write-same 157 + command. A value of '0' means write-same is not supported by this 158 + device. 165 159 166 160 167 161 Jens Axboe <jens.axboe@oracle.com>, February 2009
+54
Documentation/devicetree/bindings/mfd/ac100.txt
··· 1 + X-Powers AC100 Codec/RTC IC Device Tree bindings 2 + 3 + AC100 is a audio codec and RTC subsystem combo IC. The 2 parts are 4 + separated, including power supplies and interrupt lines, but share 5 + a common register address space and host interface. 6 + 7 + Required properties: 8 + - compatible: "x-powers,ac100" 9 + - reg: The I2C slave address or RSB hardware address for the chip 10 + - sub-nodes: 11 + - codec 12 + - compatible: "x-powers,ac100-codec" 13 + - interrupt-parent: The parent interrupt controller 14 + - interrupts: SoC NMI / GPIO interrupt connected to the 15 + IRQ_AUDIO pin 16 + - #clock-cells: Shall be 0 17 + - clock-output-names: "4M_adda" 18 + 19 + - see clock/clock-bindings.txt for common clock bindings 20 + 21 + - rtc 22 + - compatible: "x-powers,ac100-rtc" 23 + - interrupt-parent: The parent interrupt controller 24 + - interrupts: SoC NMI / GPIO interrupt connected to the 25 + IRQ_RTC pin 26 + - clocks: A phandle to the codec's "4M_adda" clock 27 + - #clock-cells: Shall be 1 28 + - clock-output-names: "cko1_rtc", "cko2_rtc", "cko3_rtc" 29 + 30 + - see clock/clock-bindings.txt for common clock bindings 31 + 32 + Example: 33 + 34 + ac100: codec@e89 { 35 + compatible = "x-powers,ac100"; 36 + reg = <0xe89>; 37 + 38 + ac100_codec: codec { 39 + compatible = "x-powers,ac100-codec"; 40 + interrupt-parent = <&r_pio>; 41 + interrupts = <0 9 IRQ_TYPE_LEVEL_LOW>; /* PL9 */ 42 + #clock-cells = <0>; 43 + clock-output-names = "4M_adda"; 44 + }; 45 + 46 + ac100_rtc: rtc { 47 + compatible = "x-powers,ac100-rtc"; 48 + interrupt-parent = <&nmi_intc>; 49 + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 50 + clocks = <&ac100_codec>; 51 + #clock-cells = <1>; 52 + clock-output-names = "cko1_rtc", "cko2_rtc", "cko3_rtc"; 53 + }; 54 + };
+26 -2
Documentation/devicetree/bindings/mfd/axp20x.txt
··· 10 10 11 11 Required properties: 12 12 - compatible: "x-powers,axp152", "x-powers,axp202", "x-powers,axp209", 13 - "x-powers,axp221", "x-powers,axp223", "x-powers,axp809" 13 + "x-powers,axp221", "x-powers,axp223", "x-powers,axp806", 14 + "x-powers,axp809" 14 15 - reg: The I2C slave address or RSB hardware address for the AXP chip 15 16 - interrupt-parent: The parent interrupt controller 16 17 - interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin ··· 47 46 under light loads for lower output noise. This 48 47 probably makes sense for HiFi audio related 49 48 applications that aren't battery constrained. 50 - 51 49 52 50 AXP202/AXP209 regulators, type, and corresponding input supply names: 53 51 ··· 85 85 LDO_IO1 : LDO : ips-supply : GPIO 1 86 86 RTC_LDO : LDO : ips-supply : always on 87 87 DRIVEVBUS : Enable output : drivevbus-supply : external regulator 88 + 89 + AXP806 regulators, type, and corresponding input supply names: 90 + 91 + Regulator Type Supply Name Notes 92 + --------- ---- ----------- ----- 93 + DCDCA : DC-DC buck : vina-supply : poly-phase capable 94 + DCDCB : DC-DC buck : vinb-supply : poly-phase capable 95 + DCDCC : DC-DC buck : vinc-supply : poly-phase capable 96 + DCDCD : DC-DC buck : vind-supply : poly-phase capable 97 + DCDCE : DC-DC buck : vine-supply : poly-phase capable 98 + ALDO1 : LDO : aldoin-supply : shared supply 99 + ALDO2 : LDO : aldoin-supply : shared supply 100 + ALDO3 : LDO : aldoin-supply : shared supply 101 + BLDO1 : LDO : bldoin-supply : shared supply 102 + BLDO2 : LDO : bldoin-supply : shared supply 103 + BLDO3 : LDO : bldoin-supply : shared supply 104 + BLDO4 : LDO : bldoin-supply : shared supply 105 + CLDO1 : LDO : cldoin-supply : shared supply 106 + CLDO2 : LDO : cldoin-supply : shared supply 107 + CLDO3 : LDO : cldoin-supply : shared supply 108 + SW : On/Off Switch : swin-supply 109 + 110 + Additionally, the AXP806 DC-DC regulators support poly-phase arrangements 111 + for higher output current. 
The possible groupings are: A+B, A+B+C, D+E. 88 112 89 113 AXP809 regulators, type, and corresponding input supply names: 90 114
+1
Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
··· 62 62 "qcom,pm8058-rtc" 63 63 "qcom,pm8921-rtc" 64 64 "qcom,pm8941-rtc" 65 + "qcom,pm8018-rtc" 65 66 66 67 - reg: 67 68 Usage: required
+15
Documentation/devicetree/bindings/mfd/qcom-rpm.txt
··· 13 13 "qcom,rpm-msm8660" 14 14 "qcom,rpm-msm8960" 15 15 "qcom,rpm-ipq8064" 16 + "qcom,rpm-mdm9615" 16 17 17 18 - reg: 18 19 Usage: required ··· 60 59 "qcom,rpm-pm8058-regulators" 61 60 "qcom,rpm-pm8901-regulators" 62 61 "qcom,rpm-pm8921-regulators" 62 + "qcom,rpm-pm8018-regulators" 63 63 64 64 - vdd_l0_l1_lvs-supply: 65 65 - vdd_l2_l11_l12-supply: ··· 139 137 Definition: reference to regulator supplying the input pin, as 140 138 described in the data sheet 141 139 140 + - vin_lvs1-supply: 141 + - vdd_l7-supply: 142 + - vdd_l8-supply: 143 + - vdd_l9_l10_l11_l12-supply: 144 + Usage: optional (pm8018 only) 145 + Value type: <phandle> 146 + Definition: reference to regulator supplying the input pin, as 147 + described in the data sheet 148 + 142 149 The regulator node houses sub-nodes for each regulator within the device. Each 143 150 sub-node is identified using the node's name, with valid values listed for each 144 151 of the pmics below. ··· 166 155 l12, l14, l15, l16, l17, l18, l21, l22, l23, l24, l25, l26, l27, l28, 167 156 l29, lvs1, lvs2, lvs3, lvs4, lvs5, lvs6, lvs7, usb-switch, hdmi-switch, 168 157 ncp 158 + 159 + pm8018: 160 + s1, s2, s3, s4, s5, , l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, 161 + l12, l14, lvs1 169 162 170 163 The content of each sub-node is defined by the standard binding for regulators - 171 164 see regulator.txt - with additional custom properties described below:
+34 -3
Documentation/devicetree/bindings/mfd/rk808.txt
··· 1 - RK808 Power Management Integrated Circuit 1 + RK8XX Power Management Integrated Circuit 2 + 3 + The rk8xx family current members: 4 + rk808 5 + rk818 2 6 3 7 Required properties: 4 - - compatible: "rockchip,rk808" 8 + - compatible: "rockchip,rk808", "rockchip,rk818" 5 9 - reg: I2C slave address 6 10 - interrupt-parent: The parent interrupt controller. 7 11 - interrupts: the interrupt outputs of the controller. ··· 17 13 default output clock name 18 14 - rockchip,system-power-controller: Telling whether or not this pmic is controlling 19 15 the system power. 16 + 17 + Optional RK808 properties: 20 18 - vcc1-supply: The input supply for DCDC_REG1 21 19 - vcc2-supply: The input supply for DCDC_REG2 22 20 - vcc3-supply: The input supply for DCDC_REG3 ··· 35 29 the gpio controller. If DVS GPIOs aren't present, voltage changes will happen 36 30 very quickly with no slow ramp time. 37 31 38 - Regulators: All the regulators of RK808 to be instantiated shall be 32 + Optional RK818 properties: 33 + - vcc1-supply: The input supply for DCDC_REG1 34 + - vcc2-supply: The input supply for DCDC_REG2 35 + - vcc3-supply: The input supply for DCDC_REG3 36 + - vcc4-supply: The input supply for DCDC_REG4 37 + - boost-supply: The input supply for DCDC_BOOST 38 + - vcc6-supply: The input supply for LDO_REG1 and LDO_REG2 39 + - vcc7-supply: The input supply for LDO_REG3, LDO_REG5 and LDO_REG7 40 + - vcc8-supply: The input supply for LDO_REG4, LDO_REG6 and LDO_REG8 41 + - vcc9-supply: The input supply for LDO_REG9 and SWITCH_REG 42 + - h_5v-supply: The input supply for HDMI_SWITCH 43 + - usb-supply: The input supply for OTG_SWITCH 44 + 45 + Regulators: All the regulators of RK8XX to be instantiated shall be 39 46 listed in a child node named 'regulators'. Each regulator is represented 40 47 by a child node of the 'regulators' node. 41 48 ··· 66 47 - valid values for n are 1 to 8. 
67 48 - SWITCH_REGn 68 49 - valid values for n are 1 to 2 50 + 51 + Following regulators of the RK818 PMIC block are supported. Note that 52 + the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO 53 + number as described in RK818 datasheet. 54 + 55 + - DCDC_REGn 56 + - valid values for n are 1 to 4. 57 + - LDO_REGn 58 + - valid values for n are 1 to 9. 59 + - SWITCH_REG 60 + - HDMI_SWITCH 61 + - OTG_SWITCH 69 62 70 63 Standard regulator bindings are used inside regulator subnodes. Check 71 64 Documentation/devicetree/bindings/regulator/regulator.txt
+1
MAINTAINERS
··· 1004 1004 ARM/Annapurna Labs ALPINE ARCHITECTURE 1005 1005 M: Tsahee Zidenberg <tsahee@annapurnalabs.com> 1006 1006 M: Antoine Tenart <antoine.tenart@free-electrons.com> 1007 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1007 1008 S: Maintained 1008 1009 F: arch/arm/mach-alpine/ 1009 1010 F: arch/arm/boot/dts/alpine*
+1 -8
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 8 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc1 4 + EXTRAVERSION = -rc2 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION* ··· 634 634 635 635 # Tell gcc to never replace conditional load with a non-conditional one 636 636 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) 637 - 638 - PHONY += gcc-plugins 639 - gcc-plugins: scripts_basic 640 - ifdef CONFIG_GCC_PLUGINS 641 - $(Q)$(MAKE) $(build)=scripts/gcc-plugins 642 - endif 643 - @: 644 637 645 638 include scripts/Makefile.gcc-plugins 646 639
+9
arch/Kconfig
··· 461 461 462 462 endchoice 463 463 464 + config HAVE_ARCH_WITHIN_STACK_FRAMES 465 + bool 466 + help 467 + An architecture should select this if it can walk the kernel stack 468 + frames to determine if an object is part of either the arguments 469 + or local variables (i.e. that it excludes saved return addresses, 470 + and similar) by implementing an inline arch_within_stack_frames(), 471 + which is used by CONFIG_HARDENED_USERCOPY. 472 + 464 473 config HAVE_CONTEXT_TRACKING 465 474 bool 466 475 help
+1
arch/arm/Kconfig
··· 35 35 select HARDIRQS_SW_RESEND 36 36 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) 37 37 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 38 + select HAVE_ARCH_HARDENED_USERCOPY 38 39 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU 39 40 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU 40 41 select HAVE_ARCH_MMAP_RND_BITS if MMU
+2
arch/arm/Makefile
··· 260 260 platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) 261 261 262 262 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) 263 + ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y) 263 264 ifeq ($(KBUILD_SRC),) 264 265 KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) 265 266 else 266 267 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) 268 + endif 267 269 endif 268 270 endif 269 271
+4 -5
arch/arm/boot/dts/arm-realview-pbx-a9.dts
··· 70 70 * associativity as these may be erroneously set 71 71 * up by boot loader(s). 72 72 */ 73 - cache-size = <1048576>; // 1MB 74 - cache-sets = <4096>; 73 + cache-size = <131072>; // 128KB 74 + cache-sets = <512>; 75 75 cache-line-size = <32>; 76 76 arm,parity-disable; 77 - arm,tag-latency = <1>; 78 - arm,data-latency = <1 1>; 79 - arm,dirty-latency = <1>; 77 + arm,tag-latency = <1 1 1>; 78 + arm,data-latency = <1 1 1>; 80 79 }; 81 80 82 81 scu: scu@1f000000 {
+1 -1
arch/arm/boot/dts/integratorap.dts
··· 42 42 }; 43 43 44 44 syscon { 45 - compatible = "arm,integrator-ap-syscon"; 45 + compatible = "arm,integrator-ap-syscon", "syscon"; 46 46 reg = <0x11000000 0x100>; 47 47 interrupt-parent = <&pic>; 48 48 /* These are the logical module IRQs */
+1 -1
arch/arm/boot/dts/integratorcp.dts
··· 94 94 }; 95 95 96 96 syscon { 97 - compatible = "arm,integrator-cp-syscon"; 97 + compatible = "arm,integrator-cp-syscon", "syscon"; 98 98 reg = <0xcb000000 0x100>; 99 99 }; 100 100
-8
arch/arm/boot/dts/keystone.dtsi
··· 70 70 cpu_on = <0x84000003>; 71 71 }; 72 72 73 - psci { 74 - compatible = "arm,psci"; 75 - method = "smc"; 76 - cpu_suspend = <0x84000001>; 77 - cpu_off = <0x84000002>; 78 - cpu_on = <0x84000003>; 79 - }; 80 - 81 73 soc { 82 74 #address-cells = <1>; 83 75 #size-cells = <1>;
+2 -2
arch/arm/boot/dts/tegra124-jetson-tk1.dts
··· 1382 1382 * Pin 41: BR_UART1_TXD 1383 1383 * Pin 44: BR_UART1_RXD 1384 1384 */ 1385 - serial@70006000 { 1385 + serial@0,70006000 { 1386 1386 compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; 1387 1387 status = "okay"; 1388 1388 }; ··· 1394 1394 * Pin 71: UART2_CTS_L 1395 1395 * Pin 74: UART2_RTS_L 1396 1396 */ 1397 - serial@70006040 { 1397 + serial@0,70006040 { 1398 1398 compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart"; 1399 1399 status = "okay"; 1400 1400 };
+1 -1
arch/arm/configs/aspeed_g4_defconfig
··· 58 58 # CONFIG_IOMMU_SUPPORT is not set 59 59 CONFIG_FIRMWARE_MEMMAP=y 60 60 CONFIG_FANOTIFY=y 61 - CONFIG_PRINTK_TIME=1 61 + CONFIG_PRINTK_TIME=y 62 62 CONFIG_DYNAMIC_DEBUG=y 63 63 CONFIG_STRIP_ASM_SYMS=y 64 64 CONFIG_PAGE_POISONING=y
+1 -1
arch/arm/configs/aspeed_g5_defconfig
··· 59 59 # CONFIG_IOMMU_SUPPORT is not set 60 60 CONFIG_FIRMWARE_MEMMAP=y 61 61 CONFIG_FANOTIFY=y 62 - CONFIG_PRINTK_TIME=1 62 + CONFIG_PRINTK_TIME=y 63 63 CONFIG_DYNAMIC_DEBUG=y 64 64 CONFIG_STRIP_ASM_SYMS=y 65 65 CONFIG_PAGE_POISONING=y
+9 -2
arch/arm/include/asm/uaccess.h
··· 480 480 static inline unsigned long __must_check 481 481 __copy_from_user(void *to, const void __user *from, unsigned long n) 482 482 { 483 - unsigned int __ua_flags = uaccess_save_and_enable(); 483 + unsigned int __ua_flags; 484 + 485 + check_object_size(to, n, false); 486 + __ua_flags = uaccess_save_and_enable(); 484 487 n = arm_copy_from_user(to, from, n); 485 488 uaccess_restore(__ua_flags); 486 489 return n; ··· 498 495 __copy_to_user(void __user *to, const void *from, unsigned long n) 499 496 { 500 497 #ifndef CONFIG_UACCESS_WITH_MEMCPY 501 - unsigned int __ua_flags = uaccess_save_and_enable(); 498 + unsigned int __ua_flags; 499 + 500 + check_object_size(from, n, true); 501 + __ua_flags = uaccess_save_and_enable(); 502 502 n = arm_copy_to_user(to, from, n); 503 503 uaccess_restore(__ua_flags); 504 504 return n; 505 505 #else 506 + check_object_size(from, n, true); 506 507 return arm_copy_to_user(to, from, n); 507 508 #endif 508 509 }
+7 -1
arch/arm/kernel/sys_oabi-compat.c
··· 279 279 mm_segment_t fs; 280 280 long ret, err, i; 281 281 282 - if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) 282 + if (maxevents <= 0 || 283 + maxevents > (INT_MAX/sizeof(*kbuf)) || 284 + maxevents > (INT_MAX/sizeof(*events))) 283 285 return -EINVAL; 286 + if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) 287 + return -EFAULT; 284 288 kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); 285 289 if (!kbuf) 286 290 return -ENOMEM; ··· 321 317 322 318 if (nsops < 1 || nsops > SEMOPM) 323 319 return -EINVAL; 320 + if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) 321 + return -EFAULT; 324 322 sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); 325 323 if (!sops) 326 324 return -ENOMEM;
+5 -1
arch/arm/kvm/arm.c
··· 1009 1009 1010 1010 switch (ioctl) { 1011 1011 case KVM_CREATE_IRQCHIP: { 1012 + int ret; 1012 1013 if (!vgic_present) 1013 1014 return -ENXIO; 1014 - return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); 1015 + mutex_lock(&kvm->lock); 1016 + ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); 1017 + mutex_unlock(&kvm->lock); 1018 + return ret; 1015 1019 } 1016 1020 case KVM_ARM_SET_DEVICE_ADDR: { 1017 1021 struct kvm_arm_device_addr dev_addr;
+1 -1
arch/arm/mach-clps711x/Kconfig
··· 1 1 menuconfig ARCH_CLPS711X 2 2 bool "Cirrus Logic EP721x/EP731x-based" 3 3 depends on ARCH_MULTI_V4T 4 - select ARCH_REQUIRE_GPIOLIB 5 4 select AUTO_ZRELADDR 6 5 select CLKSRC_OF 7 6 select CLPS711X_TIMER 8 7 select COMMON_CLK 9 8 select CPU_ARM720T 10 9 select GENERIC_CLOCKEVENTS 10 + select GPIOLIB 11 11 select MFD_SYSCON 12 12 select OF_IRQ 13 13 select USE_OF
+1 -2
arch/arm/mach-mvebu/Makefile
··· 1 - ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 2 - -I$(srctree)/arch/arm/plat-orion/include 1 + ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-orion/include 3 2 4 3 AFLAGS_coherency_ll.o := -Wa,-march=armv7-a 5 4 CFLAGS_pmsu.o := -march=armv7-a
+2
arch/arm/mach-oxnas/Kconfig
··· 11 11 12 12 config MACH_OX810SE 13 13 bool "Support OX810SE Based Products" 14 + select ARCH_HAS_RESET_CONTROLLER 14 15 select COMMON_CLK_OXNAS 15 16 select CPU_ARM926T 16 17 select MFD_SYSCON 17 18 select OXNAS_RPS_TIMER 18 19 select PINCTRL_OXNAS 20 + select RESET_CONTROLLER 19 21 select RESET_OXNAS 20 22 select VERSATILE_FPGA_IRQ 21 23 help
+1
arch/arm/mach-pxa/corgi.c
··· 13 13 */ 14 14 15 15 #include <linux/kernel.h> 16 + #include <linux/module.h> /* symbol_get ; symbol_put */ 16 17 #include <linux/init.h> 17 18 #include <linux/platform_device.h> 18 19 #include <linux/major.h>
+1
arch/arm/mach-pxa/spitz.c
··· 13 13 */ 14 14 15 15 #include <linux/kernel.h> 16 + #include <linux/module.h> /* symbol_get ; symbol_put */ 16 17 #include <linux/platform_device.h> 17 18 #include <linux/delay.h> 18 19 #include <linux/gpio_keys.h>
+1 -2
arch/arm/mach-realview/Makefile
··· 1 1 # 2 2 # Makefile for the linux kernel. 3 3 # 4 - ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 5 - -I$(srctree)/arch/arm/plat-versatile/include 4 + ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/include 6 5 7 6 obj-y := core.o 8 7 obj-$(CONFIG_REALVIEW_DT) += realview-dt.o
+1 -1
arch/arm/mach-s5pv210/Makefile
··· 5 5 # 6 6 # Licensed under GPLv2 7 7 8 - ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)/arch/arm/plat-samsung/include 8 + ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/arch/arm/plat-samsung/include 9 9 10 10 # Core 11 11
+3
arch/arm/mach-shmobile/platsmp.c
··· 40 40 bool __init shmobile_smp_init_fallback_ops(void) 41 41 { 42 42 /* fallback on PSCI/smp_ops if no other DT based method is detected */ 43 + if (!IS_ENABLED(CONFIG_SMP)) 44 + return false; 45 + 43 46 return platform_can_secondary_boot() ? true : false; 44 47 }
+1
arch/arm64/Kconfig
··· 54 54 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 55 55 select HAVE_ARCH_AUDITSYSCALL 56 56 select HAVE_ARCH_BITREVERSE 57 + select HAVE_ARCH_HARDENED_USERCOPY 57 58 select HAVE_ARCH_HUGE_VMAP 58 59 select HAVE_ARCH_JUMP_LABEL 59 60 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+2 -2
arch/arm64/Kconfig.platforms
··· 8 8 9 9 config ARCH_ALPINE 10 10 bool "Annapurna Labs Alpine platform" 11 - select ALPINE_MSI 11 + select ALPINE_MSI if PCI 12 12 help 13 13 This enables support for the Annapurna Labs Alpine 14 14 Soc family. ··· 66 66 config ARCH_HISI 67 67 bool "Hisilicon SoC Family" 68 68 select ARM_TIMER_SP804 69 - select HISILICON_IRQ_MBIGEN 69 + select HISILICON_IRQ_MBIGEN if PCI 70 70 help 71 71 This enables support for Hisilicon ARMv8 SoC family 72 72
+3
arch/arm64/boot/dts/exynos/exynos7-espresso.dts
··· 12 12 /dts-v1/; 13 13 #include "exynos7.dtsi" 14 14 #include <dt-bindings/interrupt-controller/irq.h> 15 + #include <dt-bindings/clock/samsung,s2mps11.h> 15 16 16 17 / { 17 18 model = "Samsung Exynos7 Espresso board based on EXYNOS7"; ··· 44 43 45 44 &rtc { 46 45 status = "okay"; 46 + clocks = <&clock_ccore PCLK_RTC>, <&s2mps15_osc S2MPS11_CLK_AP>; 47 + clock-names = "rtc", "rtc_src"; 47 48 }; 48 49 49 50 &watchdog {
+46 -7
arch/arm64/configs/defconfig
··· 1 - # CONFIG_LOCALVERSION_AUTO is not set 2 1 CONFIG_SYSVIPC=y 3 2 CONFIG_POSIX_MQUEUE=y 4 3 CONFIG_AUDIT=y ··· 14 15 CONFIG_LOG_BUF_SHIFT=14 15 16 CONFIG_MEMCG=y 16 17 CONFIG_MEMCG_SWAP=y 18 + CONFIG_BLK_CGROUP=y 19 + CONFIG_CGROUP_PIDS=y 17 20 CONFIG_CGROUP_HUGETLB=y 18 - # CONFIG_UTS_NS is not set 19 - # CONFIG_IPC_NS is not set 20 - # CONFIG_NET_NS is not set 21 + CONFIG_CPUSETS=y 22 + CONFIG_CGROUP_DEVICE=y 23 + CONFIG_CGROUP_CPUACCT=y 24 + CONFIG_CGROUP_PERF=y 25 + CONFIG_USER_NS=y 21 26 CONFIG_SCHED_AUTOGROUP=y 22 27 CONFIG_BLK_DEV_INITRD=y 23 28 CONFIG_KALLSYMS_ALL=y ··· 74 71 CONFIG_KSM=y 75 72 CONFIG_TRANSPARENT_HUGEPAGE=y 76 73 CONFIG_CMA=y 74 + CONFIG_SECCOMP=y 77 75 CONFIG_XEN=y 78 76 CONFIG_KEXEC=y 79 77 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ··· 88 84 CONFIG_PACKET=y 89 85 CONFIG_UNIX=y 90 86 CONFIG_INET=y 87 + CONFIG_IP_MULTICAST=y 91 88 CONFIG_IP_PNP=y 92 89 CONFIG_IP_PNP_DHCP=y 93 90 CONFIG_IP_PNP_BOOTP=y 94 - # CONFIG_IPV6 is not set 91 + CONFIG_IPV6=m 92 + CONFIG_NETFILTER=y 93 + CONFIG_NF_CONNTRACK=m 94 + CONFIG_NF_CONNTRACK_EVENTS=y 95 + CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 96 + CONFIG_NETFILTER_XT_TARGET_LOG=m 97 + CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m 98 + CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 99 + CONFIG_NF_CONNTRACK_IPV4=m 100 + CONFIG_IP_NF_IPTABLES=m 101 + CONFIG_IP_NF_FILTER=m 102 + CONFIG_IP_NF_TARGET_REJECT=m 103 + CONFIG_IP_NF_NAT=m 104 + CONFIG_IP_NF_TARGET_MASQUERADE=m 105 + CONFIG_IP_NF_MANGLE=m 106 + CONFIG_NF_CONNTRACK_IPV6=m 107 + CONFIG_IP6_NF_IPTABLES=m 108 + CONFIG_IP6_NF_FILTER=m 109 + CONFIG_IP6_NF_TARGET_REJECT=m 110 + CONFIG_IP6_NF_MANGLE=m 111 + CONFIG_IP6_NF_NAT=m 112 + CONFIG_IP6_NF_TARGET_MASQUERADE=m 113 + CONFIG_BRIDGE=m 114 + CONFIG_BRIDGE_VLAN_FILTERING=y 115 + CONFIG_VLAN_8021Q=m 116 + CONFIG_VLAN_8021Q_GVRP=y 117 + CONFIG_VLAN_8021Q_MVRP=y 95 118 CONFIG_BPF_JIT=y 96 119 CONFIG_CFG80211=m 97 120 CONFIG_MAC80211=m ··· 134 103 CONFIG_MTD_M25P80=y 135 104 CONFIG_MTD_SPI_NOR=y 136 105 
CONFIG_BLK_DEV_LOOP=y 106 + CONFIG_BLK_DEV_NBD=m 137 107 CONFIG_VIRTIO_BLK=y 138 108 CONFIG_SRAM=y 139 109 # CONFIG_SCSI_PROC_FS is not set ··· 152 120 CONFIG_PATA_PLATFORM=y 153 121 CONFIG_PATA_OF_PLATFORM=y 154 122 CONFIG_NETDEVICES=y 123 + CONFIG_MACVLAN=m 124 + CONFIG_MACVTAP=m 155 125 CONFIG_TUN=y 126 + CONFIG_VETH=m 156 127 CONFIG_VIRTIO_NET=y 157 128 CONFIG_AMD_XGBE=y 158 129 CONFIG_NET_XGENE=y ··· 385 350 CONFIG_PWM_SAMSUNG=y 386 351 CONFIG_EXT2_FS=y 387 352 CONFIG_EXT3_FS=y 353 + CONFIG_EXT4_FS_POSIX_ACL=y 354 + CONFIG_BTRFS_FS=m 355 + CONFIG_BTRFS_FS_POSIX_ACL=y 388 356 CONFIG_FANOTIFY=y 389 357 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 390 358 CONFIG_QUOTA=y 391 359 CONFIG_AUTOFS4_FS=y 392 - CONFIG_FUSE_FS=y 393 - CONFIG_CUSE=y 360 + CONFIG_FUSE_FS=m 361 + CONFIG_CUSE=m 362 + CONFIG_OVERLAY_FS=m 394 363 CONFIG_VFAT_FS=y 395 364 CONFIG_TMPFS=y 396 365 CONFIG_HUGETLBFS=y
-2
arch/arm64/include/asm/kprobes.h
··· 22 22 23 23 #define __ARCH_WANT_KPROBES_INSN_SLOT 24 24 #define MAX_INSN_SIZE 1 25 - #define MAX_STACK_SIZE 128 26 25 27 26 #define flush_insn_slot(p) do { } while (0) 28 27 #define kretprobe_blacklist_size 0 ··· 46 47 struct prev_kprobe prev_kprobe; 47 48 struct kprobe_step_ctx ss_ctx; 48 49 struct pt_regs jprobe_saved_regs; 49 - char jprobes_stack[MAX_STACK_SIZE]; 50 50 }; 51 51 52 52 void arch_remove_kprobe(struct kprobe *);
+10 -5
arch/arm64/include/asm/uaccess.h
··· 265 265 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) 266 266 { 267 267 kasan_check_write(to, n); 268 - return __arch_copy_from_user(to, from, n); 268 + check_object_size(to, n, false); 269 + return __arch_copy_from_user(to, from, n); 269 270 } 270 271 271 272 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) 272 273 { 273 274 kasan_check_read(from, n); 274 - return __arch_copy_to_user(to, from, n); 275 + check_object_size(from, n, true); 276 + return __arch_copy_to_user(to, from, n); 275 277 } 276 278 277 279 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 278 280 { 279 281 kasan_check_write(to, n); 280 282 281 - if (access_ok(VERIFY_READ, from, n)) 283 + if (access_ok(VERIFY_READ, from, n)) { 284 + check_object_size(to, n, false); 282 285 n = __arch_copy_from_user(to, from, n); 283 - else /* security hole - plug it */ 286 + } else /* security hole - plug it */ 284 287 memset(to, 0, n); 285 288 return n; 286 289 } ··· 292 289 { 293 290 kasan_check_read(from, n); 294 291 295 - if (access_ok(VERIFY_WRITE, to, n)) 292 + if (access_ok(VERIFY_WRITE, to, n)) { 293 + check_object_size(from, n, true); 296 294 n = __arch_copy_to_user(to, from, n); 295 + } 297 296 return n; 298 297 } 299 298
+7
arch/arm64/kernel/entry.S
··· 353 353 lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class 354 354 cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1 355 355 b.eq el1_da 356 + cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1 357 + b.eq el1_ia 356 358 cmp x24, #ESR_ELx_EC_SYS64 // configurable trap 357 359 b.eq el1_undef 358 360 cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception ··· 366 364 cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1 367 365 b.ge el1_dbg 368 366 b el1_inv 367 + 368 + el1_ia: 369 + /* 370 + * Fall through to the Data abort case 371 + */ 369 372 el1_da: 370 373 /* 371 374 * Data abort handling
+49 -33
arch/arm64/kernel/hibernate.c
··· 35 35 #include <asm/sections.h> 36 36 #include <asm/smp.h> 37 37 #include <asm/suspend.h> 38 + #include <asm/sysreg.h> 38 39 #include <asm/virt.h> 39 40 40 41 /* ··· 218 217 set_pte(pte, __pte(virt_to_phys((void *)dst) | 219 218 pgprot_val(PAGE_KERNEL_EXEC))); 220 219 221 - /* Load our new page tables */ 222 - asm volatile("msr ttbr0_el1, %0;" 223 - "isb;" 224 - "tlbi vmalle1is;" 225 - "dsb ish;" 226 - "isb" : : "r"(virt_to_phys(pgd))); 220 + /* 221 + * Load our new page tables. A strict BBM approach requires that we 222 + * ensure that TLBs are free of any entries that may overlap with the 223 + * global mappings we are about to install. 224 + * 225 + * For a real hibernate/resume cycle TTBR0 currently points to a zero 226 + * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI 227 + * runtime services), while for a userspace-driven test_resume cycle it 228 + * points to userspace page tables (and we must point it at a zero page 229 + * ourselves). Elsewhere we only (un)install the idmap with preemption 230 + * disabled, so T0SZ should be as required regardless. 231 + */ 232 + cpu_set_reserved_ttbr0(); 233 + local_flush_tlb_all(); 234 + write_sysreg(virt_to_phys(pgd), ttbr0_el1); 235 + isb(); 227 236 228 237 *phys_dst_addr = virt_to_phys((void *)dst); 229 238 ··· 405 394 void *, phys_addr_t, phys_addr_t); 406 395 407 396 /* 397 + * Restoring the memory image will overwrite the ttbr1 page tables. 398 + * Create a second copy of just the linear map, and use this when 399 + * restoring. 400 + */ 401 + tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); 402 + if (!tmp_pg_dir) { 403 + pr_err("Failed to allocate memory for temporary page tables."); 404 + rc = -ENOMEM; 405 + goto out; 406 + } 407 + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); 408 + if (rc) 409 + goto out; 410 + 411 + /* 412 + * Since we only copied the linear map, we need to find restore_pblist's 413 + * linear map address. 
414 + */ 415 + lm_restore_pblist = LMADDR(restore_pblist); 416 + 417 + /* 418 + * We need a zero page that is zero before & after resume in order to 419 + * to break before make on the ttbr1 page tables. 420 + */ 421 + zero_page = (void *)get_safe_page(GFP_ATOMIC); 422 + if (!zero_page) { 423 + pr_err("Failed to allocate zero page."); 424 + rc = -ENOMEM; 425 + goto out; 426 + } 427 + 428 + /* 408 429 * Locate the exit code in the bottom-but-one page, so that *NULL 409 430 * still has disastrous affects. 410 431 */ ··· 462 419 __flush_dcache_area(hibernate_exit, exit_size); 463 420 464 421 /* 465 - * Restoring the memory image will overwrite the ttbr1 page tables. 466 - * Create a second copy of just the linear map, and use this when 467 - * restoring. 468 - */ 469 - tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); 470 - if (!tmp_pg_dir) { 471 - pr_err("Failed to allocate memory for temporary page tables."); 472 - rc = -ENOMEM; 473 - goto out; 474 - } 475 - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); 476 - if (rc) 477 - goto out; 478 - 479 - /* 480 - * Since we only copied the linear map, we need to find restore_pblist's 481 - * linear map address. 482 - */ 483 - lm_restore_pblist = LMADDR(restore_pblist); 484 - 485 - /* 486 422 * KASLR will cause the el2 vectors to be in a different location in 487 423 * the resumed kernel. Load hibernate's temporary copy into el2. 488 424 * ··· 474 452 475 453 __hyp_set_vectors(el2_vectors); 476 454 } 477 - 478 - /* 479 - * We need a zero page that is zero before & after resume in order to 480 - * to break before make on the ttbr1 page tables. 481 - */ 482 - zero_page = (void *)get_safe_page(GFP_ATOMIC); 483 455 484 456 hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, 485 457 resume_hdr.reenter_kernel, lm_restore_pblist,
+5 -26
arch/arm64/kernel/probes/kprobes.c
··· 41 41 static void __kprobes 42 42 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); 43 43 44 - static inline unsigned long min_stack_size(unsigned long addr) 45 - { 46 - unsigned long size; 47 - 48 - if (on_irq_stack(addr, raw_smp_processor_id())) 49 - size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr; 50 - else 51 - size = (unsigned long)current_thread_info() + THREAD_START_SP - addr; 52 - 53 - return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack)); 54 - } 55 - 56 44 static void __kprobes arch_prepare_ss_slot(struct kprobe *p) 57 45 { 58 46 /* prepare insn slot */ ··· 477 489 { 478 490 struct jprobe *jp = container_of(p, struct jprobe, kp); 479 491 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 480 - long stack_ptr = kernel_stack_pointer(regs); 481 492 482 493 kcb->jprobe_saved_regs = *regs; 483 494 /* 484 - * As Linus pointed out, gcc assumes that the callee 485 - * owns the argument space and could overwrite it, e.g. 486 - * tailcall optimization. So, to be absolutely safe 487 - * we also save and restore enough stack bytes to cover 488 - * the argument area. 495 + * Since we can't be sure where in the stack frame "stacked" 496 + * pass-by-value arguments are stored we just don't try to 497 + * duplicate any of the stack. Do not use jprobes on functions that 498 + * use more than 64 bytes (after padding each to an 8 byte boundary) 499 + * of arguments, or pass individual arguments larger than 16 bytes. 
489 500 */ 490 - kasan_disable_current(); 491 - memcpy(kcb->jprobes_stack, (void *)stack_ptr, 492 - min_stack_size(stack_ptr)); 493 - kasan_enable_current(); 494 501 495 502 instruction_pointer_set(regs, (unsigned long) jp->entry); 496 503 preempt_disable(); ··· 537 554 } 538 555 unpause_graph_tracing(); 539 556 *regs = kcb->jprobe_saved_regs; 540 - kasan_disable_current(); 541 - memcpy((void *)stack_addr, kcb->jprobes_stack, 542 - min_stack_size(stack_addr)); 543 - kasan_enable_current(); 544 557 preempt_enable_no_resched(); 545 558 return 1; 546 559 }
+4 -4
arch/arm64/kernel/smp.c
··· 661 661 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 662 662 acpi_parse_gic_cpu_interface, 0); 663 663 664 - if (cpu_count > NR_CPUS) 665 - pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n", 666 - cpu_count, NR_CPUS); 664 + if (cpu_count > nr_cpu_ids) 665 + pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n", 666 + cpu_count, nr_cpu_ids); 667 667 668 668 if (!bootcpu_valid) { 669 669 pr_err("missing boot CPU MPIDR, not enabling secondaries\n"); ··· 677 677 * with entries in cpu_logical_map while initializing the cpus. 678 678 * If the cpu set-up fails, invalidate the cpu_logical_map entry. 679 679 */ 680 - for (i = 1; i < NR_CPUS; i++) { 680 + for (i = 1; i < nr_cpu_ids; i++) { 681 681 if (cpu_logical_map(i) != INVALID_HWID) { 682 682 if (smp_cpu_setup(i)) 683 683 cpu_logical_map(i) = INVALID_HWID;
+12 -2
arch/arm64/mm/fault.c
··· 153 153 } 154 154 #endif 155 155 156 + static bool is_el1_instruction_abort(unsigned int esr) 157 + { 158 + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; 159 + } 160 + 156 161 /* 157 162 * The kernel tried to access some page that wasn't present. 158 163 */ ··· 166 161 { 167 162 /* 168 163 * Are we prepared to handle this kernel fault? 164 + * We are almost certainly not prepared to handle instruction faults. 169 165 */ 170 - if (fixup_exception(regs)) 166 + if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) 171 167 return; 172 168 173 169 /* ··· 273 267 unsigned int ec = ESR_ELx_EC(esr); 274 268 unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; 275 269 276 - return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM); 270 + return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) || 271 + (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM); 277 272 } 278 273 279 274 static bool is_el0_instruction_abort(unsigned int esr) ··· 318 311 /* regs->orig_addr_limit may be 0 if we entered from EL0 */ 319 312 if (regs->orig_addr_limit == KERNEL_DS) 320 313 die("Accessing user space memory with fs=KERNEL_DS", regs, esr); 314 + 315 + if (is_el1_instruction_abort(esr)) 316 + die("Attempting to execute userspace memory", regs, esr); 321 317 322 318 if (!search_exception_tables(regs->pc)) 323 319 die("Accessing user space memory outside uaccess.h routines", regs, esr);
+2
arch/h8300/include/asm/io.h
··· 3 3 4 4 #ifdef __KERNEL__ 5 5 6 + #include <linux/types.h> 7 + 6 8 /* H8/300 internal I/O functions */ 7 9 8 10 #define __raw_readb __raw_readb
+1
arch/ia64/Kconfig
··· 52 52 select MODULES_USE_ELF_RELA 53 53 select ARCH_USE_CMPXCHG_LOCKREF 54 54 select HAVE_ARCH_AUDITSYSCALL 55 + select HAVE_ARCH_HARDENED_USERCOPY 55 56 default y 56 57 help 57 58 The Itanium Processor Family is Intel's 64-bit successor to
+15 -3
arch/ia64/include/asm/uaccess.h
··· 241 241 static inline unsigned long 242 242 __copy_to_user (void __user *to, const void *from, unsigned long count) 243 243 { 244 + if (!__builtin_constant_p(count)) 245 + check_object_size(from, count, true); 246 + 244 247 return __copy_user(to, (__force void __user *) from, count); 245 248 } 246 249 247 250 static inline unsigned long 248 251 __copy_from_user (void *to, const void __user *from, unsigned long count) 249 252 { 253 + if (!__builtin_constant_p(count)) 254 + check_object_size(to, count, false); 255 + 250 256 return __copy_user((__force void __user *) to, from, count); 251 257 } 252 258 ··· 264 258 const void *__cu_from = (from); \ 265 259 long __cu_len = (n); \ 266 260 \ 267 - if (__access_ok(__cu_to, __cu_len, get_fs())) \ 268 - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ 261 + if (__access_ok(__cu_to, __cu_len, get_fs())) { \ 262 + if (!__builtin_constant_p(n)) \ 263 + check_object_size(__cu_from, __cu_len, true); \ 264 + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ 265 + } \ 269 266 __cu_len; \ 270 267 }) 271 268 ··· 279 270 long __cu_len = (n); \ 280 271 \ 281 272 __chk_user_ptr(__cu_from); \ 282 - if (__access_ok(__cu_from, __cu_len, get_fs())) \ 273 + if (__access_ok(__cu_from, __cu_len, get_fs())) { \ 274 + if (!__builtin_constant_p(n)) \ 275 + check_object_size(__cu_to, __cu_len, false); \ 283 276 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ 277 + } \ 284 278 __cu_len; \ 285 279 }) 286 280
-1
arch/m68k/kernel/signal.c
··· 213 213 214 214 static inline void adjustformat(struct pt_regs *regs) 215 215 { 216 - ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data; 217 216 /* 218 217 * set format byte to make stack appear modulo 4, which it will 219 218 * be when doing the rte
-1
arch/metag/mm/init.c
··· 390 390 391 391 free_all_bootmem(); 392 392 mem_init_print_info(NULL); 393 - show_mem(0); 394 393 } 395 394 396 395 void free_initmem(void)
+26 -9
arch/mips/kvm/emulate.c
··· 1642 1642 1643 1643 preempt_disable(); 1644 1644 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { 1645 - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) 1646 - kvm_mips_handle_kseg0_tlb_fault(va, vcpu); 1645 + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && 1646 + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { 1647 + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", 1648 + __func__, va, vcpu, read_c0_entryhi()); 1649 + er = EMULATE_FAIL; 1650 + preempt_enable(); 1651 + goto done; 1652 + } 1647 1653 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || 1648 1654 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { 1649 1655 int index; ··· 1686 1680 run, vcpu); 1687 1681 preempt_enable(); 1688 1682 goto dont_update_pc; 1689 - } else { 1690 - /* 1691 - * We fault an entry from the guest tlb to the 1692 - * shadow host TLB 1693 - */ 1694 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 1683 + } 1684 + /* 1685 + * We fault an entry from the guest tlb to the 1686 + * shadow host TLB 1687 + */ 1688 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { 1689 + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", 1690 + __func__, va, index, vcpu, 1691 + read_c0_entryhi()); 1692 + er = EMULATE_FAIL; 1693 + preempt_enable(); 1694 + goto done; 1695 1695 } 1696 1696 } 1697 1697 } else { ··· 2671 2659 * OK we have a Guest TLB entry, now inject it into the 2672 2660 * shadow host TLB 2673 2661 */ 2674 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb); 2662 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) { 2663 + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", 2664 + __func__, va, index, vcpu, 2665 + read_c0_entryhi()); 2666 + er = EMULATE_FAIL; 2667 + } 2675 2668 } 2676 2669 } 2677 2670
+42 -22
arch/mips/kvm/mmu.c
··· 99 99 } 100 100 101 101 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); 102 - if (gfn >= kvm->arch.guest_pmap_npages) { 102 + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { 103 103 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, 104 104 gfn, badvaddr); 105 105 kvm_mips_dump_host_tlbs(); ··· 138 138 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; 139 139 struct kvm *kvm = vcpu->kvm; 140 140 kvm_pfn_t pfn0, pfn1; 141 + gfn_t gfn0, gfn1; 142 + long tlb_lo[2]; 141 143 int ret; 142 144 143 - if ((tlb->tlb_hi & VPN2_MASK) == 0) { 144 - pfn0 = 0; 145 - pfn1 = 0; 146 - } else { 147 - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) 148 - >> PAGE_SHIFT) < 0) 149 - return -1; 145 + tlb_lo[0] = tlb->tlb_lo[0]; 146 + tlb_lo[1] = tlb->tlb_lo[1]; 150 147 151 - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) 152 - >> PAGE_SHIFT) < 0) 153 - return -1; 148 + /* 149 + * The commpage address must not be mapped to anything else if the guest 150 + * TLB contains entries nearby, or commpage accesses will break. 
151 + */ 152 + if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & 153 + VPN2_MASK & (PAGE_MASK << 1))) 154 + tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; 154 155 155 - pfn0 = kvm->arch.guest_pmap[ 156 - mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT]; 157 - pfn1 = kvm->arch.guest_pmap[ 158 - mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT]; 156 + gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; 157 + gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; 158 + if (gfn0 >= kvm->arch.guest_pmap_npages || 159 + gfn1 >= kvm->arch.guest_pmap_npages) { 160 + kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", 161 + __func__, gfn0, gfn1, tlb->tlb_hi); 162 + kvm_mips_dump_guest_tlbs(vcpu); 163 + return -1; 159 164 } 165 + 166 + if (kvm_mips_map_page(kvm, gfn0) < 0) 167 + return -1; 168 + 169 + if (kvm_mips_map_page(kvm, gfn1) < 0) 170 + return -1; 171 + 172 + pfn0 = kvm->arch.guest_pmap[gfn0]; 173 + pfn1 = kvm->arch.guest_pmap[gfn1]; 160 174 161 175 /* Get attributes from the Guest TLB */ 162 176 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | 163 177 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 164 - (tlb->tlb_lo[0] & ENTRYLO_D) | 165 - (tlb->tlb_lo[0] & ENTRYLO_V); 178 + (tlb_lo[0] & ENTRYLO_D) | 179 + (tlb_lo[0] & ENTRYLO_V); 166 180 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | 167 181 ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) | 168 - (tlb->tlb_lo[1] & ENTRYLO_D) | 169 - (tlb->tlb_lo[1] & ENTRYLO_V); 182 + (tlb_lo[1] & ENTRYLO_D) | 183 + (tlb_lo[1] & ENTRYLO_V); 170 184 171 185 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 172 186 tlb->tlb_lo[0], tlb->tlb_lo[1]); ··· 368 354 local_irq_restore(flags); 369 355 return KVM_INVALID_INST; 370 356 } 371 - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, 372 - &vcpu->arch. 
373 - guest_tlb[index]); 357 + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, 358 + &vcpu->arch.guest_tlb[index])) { 359 + kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", 360 + __func__, opc, index, vcpu, 361 + read_c0_entryhi()); 362 + kvm_mips_dump_guest_tlbs(vcpu); 363 + local_irq_restore(flags); 364 + return KVM_INVALID_INST; 365 + } 374 366 inst = *(opc); 375 367 } 376 368 local_irq_restore(flags);
+1
arch/powerpc/Kconfig
··· 166 166 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 167 167 select GENERIC_CPU_AUTOPROBE 168 168 select HAVE_VIRT_CPU_ACCOUNTING 169 + select HAVE_ARCH_HARDENED_USERCOPY 169 170 170 171 config GENERIC_CSUM 171 172 def_bool CPU_LITTLE_ENDIAN
+12 -10
arch/powerpc/Makefile
··· 66 66 UTS_MACHINE := $(OLDARCH) 67 67 68 68 ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) 69 - override CC += -mlittle-endian 70 - ifneq ($(cc-name),clang) 71 - override CC += -mno-strict-align 72 - endif 73 - override AS += -mlittle-endian 74 69 override LD += -EL 75 - override CROSS32CC += -mlittle-endian 76 70 override CROSS32AS += -mlittle-endian 77 71 LDEMULATION := lppc 78 72 GNUTARGET := powerpcle 79 73 MULTIPLEWORD := -mno-multiple 80 74 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect) 81 75 else 82 - ifeq ($(call cc-option-yn,-mbig-endian),y) 83 - override CC += -mbig-endian 84 - override AS += -mbig-endian 85 - endif 86 76 override LD += -EB 87 77 LDEMULATION := ppc 88 78 GNUTARGET := powerpc 89 79 MULTIPLEWORD := -mmultiple 90 80 endif 81 + 82 + cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 83 + cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian 84 + ifneq ($(cc-name),clang) 85 + cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align 86 + endif 87 + 88 + aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 89 + aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian 91 90 92 91 ifeq ($(HAS_BIARCH),y) 93 92 override AS += -a$(CONFIG_WORD_SIZE) ··· 230 231 231 232 KBUILD_AFLAGS += $(cpu-as-y) 232 233 KBUILD_CFLAGS += $(cpu-as-y) 234 + 235 + KBUILD_AFLAGS += $(aflags-y) 236 + KBUILD_CFLAGS += $(cflags-y) 233 237 234 238 head-y := arch/powerpc/kernel/head_$(CONFIG_WORD_SIZE).o 235 239 head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
+2 -1
arch/powerpc/crypto/crc32c-vpmsum_glue.c
··· 4 4 #include <linux/module.h> 5 5 #include <linux/string.h> 6 6 #include <linux/kernel.h> 7 + #include <linux/cpufeature.h> 7 8 #include <asm/switch_to.h> 8 9 9 10 #define CHKSUM_BLOCK_SIZE 1 ··· 158 157 crypto_unregister_shash(&alg); 159 158 } 160 159 161 - module_init(crc32c_vpmsum_mod_init); 160 + module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init); 162 161 module_exit(crc32c_vpmsum_mod_fini); 163 162 164 163 MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
+13
arch/powerpc/include/asm/cpuidle.h
··· 19 19 20 20 #endif 21 21 22 + /* Idle state entry routines */ 23 + #ifdef CONFIG_PPC_P7_NAP 24 + #define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ 25 + /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ 26 + std r0,0(r1); \ 27 + ptesync; \ 28 + ld r0,0(r1); \ 29 + 1: cmp cr0,r0,r0; \ 30 + bne 1b; \ 31 + IDLE_INST; \ 32 + b . 33 + #endif /* CONFIG_PPC_P7_NAP */ 34 + 22 35 #endif
+1
arch/powerpc/include/asm/feature-fixups.h
··· 186 186 187 187 #ifndef __ASSEMBLY__ 188 188 void apply_feature_fixups(void); 189 + void setup_feature_keys(void); 189 190 #endif 190 191 191 192 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
-8
arch/powerpc/include/asm/switch_to.h
··· 75 75 static inline void __giveup_spe(struct task_struct *t) { } 76 76 #endif 77 77 78 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 79 - extern void flush_tmregs_to_thread(struct task_struct *); 80 - #else 81 - static inline void flush_tmregs_to_thread(struct task_struct *t) 82 - { 83 - } 84 - #endif 85 - 86 78 static inline void clear_task_ebb(struct task_struct *t) 87 79 { 88 80 #ifdef CONFIG_PPC_BOOK3S_64
+19 -2
arch/powerpc/include/asm/uaccess.h
··· 310 310 { 311 311 unsigned long over; 312 312 313 - if (access_ok(VERIFY_READ, from, n)) 313 + if (access_ok(VERIFY_READ, from, n)) { 314 + if (!__builtin_constant_p(n)) 315 + check_object_size(to, n, false); 314 316 return __copy_tofrom_user((__force void __user *)to, from, n); 317 + } 315 318 if ((unsigned long)from < TASK_SIZE) { 316 319 over = (unsigned long)from + n - TASK_SIZE; 320 + if (!__builtin_constant_p(n - over)) 321 + check_object_size(to, n - over, false); 317 322 return __copy_tofrom_user((__force void __user *)to, from, 318 323 n - over) + over; 319 324 } ··· 330 325 { 331 326 unsigned long over; 332 327 333 - if (access_ok(VERIFY_WRITE, to, n)) 328 + if (access_ok(VERIFY_WRITE, to, n)) { 329 + if (!__builtin_constant_p(n)) 330 + check_object_size(from, n, true); 334 331 return __copy_tofrom_user(to, (__force void __user *)from, n); 332 + } 335 333 if ((unsigned long)to < TASK_SIZE) { 336 334 over = (unsigned long)to + n - TASK_SIZE; 335 + if (!__builtin_constant_p(n)) 336 + check_object_size(from, n - over, true); 337 337 return __copy_tofrom_user(to, (__force void __user *)from, 338 338 n - over) + over; 339 339 } ··· 382 372 if (ret == 0) 383 373 return 0; 384 374 } 375 + 376 + if (!__builtin_constant_p(n)) 377 + check_object_size(to, n, false); 378 + 385 379 return __copy_tofrom_user((__force void __user *)to, from, n); 386 380 } 387 381 ··· 412 398 if (ret == 0) 413 399 return 0; 414 400 } 401 + if (!__builtin_constant_p(n)) 402 + check_object_size(from, n, true); 403 + 415 404 return __copy_tofrom_user(to, (__force const void __user *)from, n); 416 405 } 417 406
+2
arch/powerpc/include/asm/xics.h
··· 159 159 extern void xics_kexec_teardown_cpu(int secondary); 160 160 extern void xics_migrate_irqs_away(void); 161 161 extern void icp_native_eoi(struct irq_data *d); 162 + extern int xics_set_irq_type(struct irq_data *d, unsigned int flow_type); 163 + extern int xics_retrigger(struct irq_data *data); 162 164 #ifdef CONFIG_SMP 163 165 extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, 164 166 unsigned int strict_check);
+2 -2
arch/powerpc/kernel/eeh.c
··· 168 168 int n = 0, l = 0; 169 169 char buffer[128]; 170 170 171 - n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", 171 + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", 172 172 edev->phb->global_number, pdn->busno, 173 173 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); 174 - pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", 174 + pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n", 175 175 edev->phb->global_number, pdn->busno, 176 176 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); 177 177
+40 -29
arch/powerpc/kernel/exceptions-64s.S
··· 144 144 * vector 145 145 */ 146 146 SET_SCRATCH0(r13) /* save r13 */ 147 - #ifdef CONFIG_PPC_P7_NAP 148 - BEGIN_FTR_SECTION 149 - /* Running native on arch 2.06 or later, check if we are 150 - * waking up from nap. We only handle no state loss and 151 - * supervisor state loss. We do -not- handle hypervisor 152 - * state loss at this time. 147 + /* 148 + * Running native on arch 2.06 or later, we may wakeup from winkle 149 + * inside machine check. If yes, then last bit of HSPGR0 would be set 150 + * to 1. Hence clear it unconditionally. 153 151 */ 154 - mfspr r13,SPRN_SRR1 155 - rlwinm. r13,r13,47-31,30,31 156 - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) 157 - beq 9f 158 - 159 - mfspr r13,SPRN_SRR1 160 - rlwinm. r13,r13,47-31,30,31 161 - /* waking up from powersave (nap) state */ 162 - cmpwi cr1,r13,2 163 - /* Total loss of HV state is fatal. let's just stay stuck here */ 164 - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) 165 - bgt cr1,. 166 - 9: 167 - OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) 168 - END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) 169 - #endif /* CONFIG_PPC_P7_NAP */ 152 + GET_PACA(r13) 153 + clrrdi r13,r13,1 154 + SET_PACA(r13) 170 155 EXCEPTION_PROLOG_0(PACA_EXMC) 171 156 BEGIN_FTR_SECTION 172 157 b machine_check_powernv_early ··· 1258 1273 * Check if thread was in power saving mode. We come here when any 1259 1274 * of the following is true: 1260 1275 * a. thread wasn't in power saving mode 1261 - * b. thread was in power saving mode with no state loss or 1262 - * supervisor state loss 1276 + * b. thread was in power saving mode with no state loss, 1277 + * supervisor state loss or hypervisor state loss. 1263 1278 * 1264 - * Go back to nap again if (b) is true. 1279 + * Go back to nap/sleep/winkle mode again if (b) is true. 1265 1280 */ 1266 1281 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ 1267 1282 beq 4f /* No, it wasn;t */ 1268 1283 /* Thread was in power saving mode. Go back to nap again. 
*/ 1269 1284 cmpwi r11,2 1270 - bne 3f 1271 - /* Supervisor state loss */ 1285 + blt 3f 1286 + /* Supervisor/Hypervisor state loss */ 1272 1287 li r0,1 1273 1288 stb r0,PACA_NAPSTATELOST(r13) 1274 1289 3: bl machine_check_queue_event 1275 1290 MACHINE_CHECK_HANDLER_WINDUP 1276 1291 GET_PACA(r13) 1277 1292 ld r1,PACAR1(r13) 1278 - li r3,PNV_THREAD_NAP 1279 - b pnv_enter_arch207_idle_mode 1293 + /* 1294 + * Check what idle state this CPU was in and go back to same mode 1295 + * again. 1296 + */ 1297 + lbz r3,PACA_THREAD_IDLE_STATE(r13) 1298 + cmpwi r3,PNV_THREAD_NAP 1299 + bgt 10f 1300 + IDLE_STATE_ENTER_SEQ(PPC_NAP) 1301 + /* No return */ 1302 + 10: 1303 + cmpwi r3,PNV_THREAD_SLEEP 1304 + bgt 2f 1305 + IDLE_STATE_ENTER_SEQ(PPC_SLEEP) 1306 + /* No return */ 1307 + 1308 + 2: 1309 + /* 1310 + * Go back to winkle. Please note that this thread was woken up in 1311 + * machine check from winkle and have not restored the per-subcore 1312 + * state. Hence before going back to winkle, set last bit of HSPGR0 1313 + * to 1. This will make sure that if this thread gets woken up 1314 + * again at reset vector 0x100 then it will get chance to restore 1315 + * the subcore state. 1316 + */ 1317 + ori r13,r13,1 1318 + SET_PACA(r13) 1319 + IDLE_STATE_ENTER_SEQ(PPC_WINKLE) 1320 + /* No return */ 1280 1321 4: 1281 1322 #endif 1282 1323 /*
+4 -13
arch/powerpc/kernel/idle_book3s.S
··· 44 44 PSSCR_PSLL_MASK | PSSCR_TR_MASK | \ 45 45 PSSCR_MTL_MASK 46 46 47 - /* Idle state entry routines */ 48 - 49 - #define IDLE_STATE_ENTER_SEQ(IDLE_INST) \ 50 - /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ 51 - std r0,0(r1); \ 52 - ptesync; \ 53 - ld r0,0(r1); \ 54 - 1: cmp cr0,r0,r0; \ 55 - bne 1b; \ 56 - IDLE_INST; \ 57 - b . 58 - 59 47 .text 60 48 61 49 /* ··· 351 363 * cr3 - set to gt if waking up with partial/complete hypervisor state loss 352 364 */ 353 365 _GLOBAL(pnv_restore_hyp_resource) 354 - ld r2,PACATOC(r13); 355 366 BEGIN_FTR_SECTION 367 + ld r2,PACATOC(r13); 356 368 /* 357 369 * POWER ISA 3. Use PSSCR to determine if we 358 370 * are waking up from deep idle state ··· 383 395 */ 384 396 clrldi r5,r13,63 385 397 clrrdi r13,r13,1 398 + 399 + /* Now that we are sure r13 is corrected, load TOC */ 400 + ld r2,PACATOC(r13); 386 401 cmpwi cr4,r5,1 387 402 mtspr SPRN_HSPRG0,r13 388 403
+2 -1
arch/powerpc/kernel/mce.c
··· 92 92 mce->in_use = 1; 93 93 94 94 mce->initiator = MCE_INITIATOR_CPU; 95 - if (handled) 95 + /* Mark it recovered if we have handled it and MSR(RI=1). */ 96 + if (handled && (regs->msr & MSR_RI)) 96 97 mce->disposition = MCE_DISPOSITION_RECOVERED; 97 98 else 98 99 mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
+5 -2
arch/powerpc/kernel/pci-common.c
··· 78 78 static int get_phb_number(struct device_node *dn) 79 79 { 80 80 int ret, phb_id = -1; 81 + u32 prop_32; 81 82 u64 prop; 82 83 83 84 /* ··· 87 86 * reading "ibm,opal-phbid", only present in OPAL environment. 88 87 */ 89 88 ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop); 90 - if (ret) 91 - ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop); 89 + if (ret) { 90 + ret = of_property_read_u32_index(dn, "reg", 1, &prop_32); 91 + prop = prop_32; 92 + } 92 93 93 94 if (!ret) 94 95 phb_id = (int)(prop & (MAX_PHBS - 1));
-20
arch/powerpc/kernel/process.c
··· 1074 1074 #endif 1075 1075 } 1076 1076 1077 - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1078 - void flush_tmregs_to_thread(struct task_struct *tsk) 1079 - { 1080 - /* 1081 - * Process self tracing is not yet supported through 1082 - * ptrace interface. Ptrace generic code should have 1083 - * prevented this from happening in the first place. 1084 - * Warn once here with the message, if some how it 1085 - * is attempted. 1086 - */ 1087 - WARN_ONCE(tsk == current, 1088 - "Not expecting ptrace on self: TM regs may be incorrect\n"); 1089 - 1090 - /* 1091 - * If task is not current, it should have been flushed 1092 - * already to it's thread_struct during __switch_to(). 1093 - */ 1094 - } 1095 - #endif 1096 - 1097 1077 struct task_struct *__switch_to(struct task_struct *prev, 1098 1078 struct task_struct *new) 1099 1079 {
+1 -1
arch/powerpc/kernel/prom_init.c
··· 2940 2940 2941 2941 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 2942 2942 if (of_platform != PLATFORM_OPAL) { 2943 - prom_printf("Booting Linux via __start() ...\n"); 2943 + prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); 2944 2944 prom_debug("->dt_header_start=0x%x\n", hdr); 2945 2945 } 2946 2946
+19
arch/powerpc/kernel/ptrace.c
··· 38 38 #include <asm/page.h> 39 39 #include <asm/pgtable.h> 40 40 #include <asm/switch_to.h> 41 + #include <asm/tm.h> 41 42 42 43 #define CREATE_TRACE_POINTS 43 44 #include <trace/events/syscalls.h> ··· 118 117 REG_OFFSET_NAME(dsisr), 119 118 REG_OFFSET_END, 120 119 }; 120 + 121 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 122 + static void flush_tmregs_to_thread(struct task_struct *tsk) 123 + { 124 + /* 125 + * If task is not current, it will have been flushed already to 126 + * it's thread_struct during __switch_to(). 127 + * 128 + * A reclaim flushes ALL the state. 129 + */ 130 + 131 + if (tsk == current && MSR_TM_SUSPENDED(mfmsr())) 132 + tm_reclaim_current(TM_CAUSE_SIGNAL); 133 + 134 + } 135 + #else 136 + static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } 137 + #endif 121 138 122 139 /** 123 140 * regs_query_register_offset() - query register offset from its name
+5 -4
arch/powerpc/kernel/setup_32.c
··· 93 93 * and we are running with enough of the MMU enabled to have our 94 94 * proper kernel virtual addresses 95 95 * 96 - * Find out what kind of machine we're on and save any data we need 97 - * from the early boot process (devtree is copied on pmac by prom_init()). 98 - * This is called very early on the boot process, after a minimal 99 - * MMU environment has been set up but before MMU_init is called. 96 + * We do the initial parsing of the flat device-tree and prepares 97 + * for the MMU to be fully initialized. 100 98 */ 101 99 extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */ 102 100 103 101 notrace void __init machine_init(u64 dt_ptr) 104 102 { 103 + /* Configure static keys first, now that we're relocated. */ 104 + setup_feature_keys(); 105 + 105 106 /* Enable early debugging if any specified (see udbg.h) */ 106 107 udbg_early_init(); 107 108
+1
arch/powerpc/kernel/setup_64.c
··· 300 300 301 301 /* Apply all the dynamic patching */ 302 302 apply_feature_fixups(); 303 + setup_feature_keys(); 303 304 304 305 /* Initialize the hash table or TLB handling */ 305 306 early_init_mmu();
+1
arch/powerpc/kernel/vdso.c
··· 22 22 #include <linux/security.h> 23 23 #include <linux/memblock.h> 24 24 25 + #include <asm/cpu_has_feature.h> 25 26 #include <asm/pgtable.h> 26 27 #include <asm/processor.h> 27 28 #include <asm/mmu.h>
+3 -3
arch/powerpc/kernel/vdso32/Makefile
··· 30 30 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 31 31 32 32 # link rule for the .so file, .lds has to be first 33 - $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) 33 + $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE 34 34 $(call if_changed,vdso32ld) 35 35 36 36 # strip rule for the .so file ··· 39 39 $(call if_changed,objcopy) 40 40 41 41 # assembly rules for the .S files 42 - $(obj-vdso32): %.o: %.S 42 + $(obj-vdso32): %.o: %.S FORCE 43 43 $(call if_changed_dep,vdso32as) 44 44 45 45 # actual build commands 46 46 quiet_cmd_vdso32ld = VDSO32L $@ 47 - cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ 47 + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) 48 48 quiet_cmd_vdso32as = VDSO32A $@ 49 49 cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< 50 50
+3 -3
arch/powerpc/kernel/vdso64/Makefile
··· 23 23 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 24 24 25 25 # link rule for the .so file, .lds has to be first 26 - $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) 26 + $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE 27 27 $(call if_changed,vdso64ld) 28 28 29 29 # strip rule for the .so file ··· 32 32 $(call if_changed,objcopy) 33 33 34 34 # assembly rules for the .S files 35 - $(obj-vdso64): %.o: %.S 35 + $(obj-vdso64): %.o: %.S FORCE 36 36 $(call if_changed_dep,vdso64as) 37 37 38 38 # actual build commands 39 39 quiet_cmd_vdso64ld = VDSO64L $@ 40 - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ 40 + cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) 41 41 quiet_cmd_vdso64as = VDSO64A $@ 42 42 cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< 43 43
+8 -4
arch/powerpc/kvm/book3s_xics.c
··· 1329 1329 xics->kvm = kvm; 1330 1330 1331 1331 /* Already there ? */ 1332 - mutex_lock(&kvm->lock); 1333 1332 if (kvm->arch.xics) 1334 1333 ret = -EEXIST; 1335 1334 else 1336 1335 kvm->arch.xics = xics; 1337 - mutex_unlock(&kvm->lock); 1338 1336 1339 1337 if (ret) { 1340 1338 kfree(xics); 1341 1339 return ret; 1342 1340 } 1343 - 1344 - xics_debugfs_init(xics); 1345 1341 1346 1342 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 1347 1343 if (cpu_has_feature(CPU_FTR_ARCH_206)) { ··· 1350 1354 return 0; 1351 1355 } 1352 1356 1357 + static void kvmppc_xics_init(struct kvm_device *dev) 1358 + { 1359 + struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private; 1360 + 1361 + xics_debugfs_init(xics); 1362 + } 1363 + 1353 1364 struct kvm_device_ops kvm_xics_ops = { 1354 1365 .name = "kvm-xics", 1355 1366 .create = kvmppc_xics_create, 1367 + .init = kvmppc_xics_init, 1356 1368 .destroy = kvmppc_xics_free, 1357 1369 .set_attr = xics_set_attr, 1358 1370 .get_attr = xics_get_attr,
+4 -3
arch/powerpc/lib/checksum_32.S
··· 127 127 stw r7,12(r1) 128 128 stw r8,8(r1) 129 129 130 - andi. r0,r4,1 /* is destination address even ? */ 131 - cmplwi cr7,r0,0 130 + rlwinm r0,r4,3,0x8 131 + rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */ 132 + cmplwi cr7,r0,0 /* is destination address even ? */ 132 133 addic r12,r6,0 133 134 addi r6,r4,-4 134 135 neg r0,r4 ··· 238 237 66: addze r3,r12 239 238 addi r1,r1,16 240 239 beqlr+ cr7 241 - rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */ 240 + rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */ 242 241 blr 243 242 244 243 /* read fault */
+3
arch/powerpc/lib/feature-fixups.c
··· 188 188 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 189 189 #endif 190 190 do_final_fixups(); 191 + } 191 192 193 + void __init setup_feature_keys(void) 194 + { 192 195 /* 193 196 * Initialise jump label. This causes all the cpu/mmu_has_feature() 194 197 * checks to take on their correct polarity based on the current set of
+3 -1
arch/powerpc/platforms/cell/spufs/inode.c
··· 496 496 gang = alloc_spu_gang(); 497 497 SPUFS_I(inode)->i_ctx = NULL; 498 498 SPUFS_I(inode)->i_gang = gang; 499 - if (!gang) 499 + if (!gang) { 500 + ret = -ENOMEM; 500 501 goto out_iput; 502 + } 501 503 502 504 inode->i_op = &simple_dir_inode_operations; 503 505 inode->i_fop = &simple_dir_operations;
+5
arch/powerpc/platforms/pasemi/iommu.c
··· 187 187 if (dev->vendor == 0x1959 && dev->device == 0xa007 && 188 188 !firmware_has_feature(FW_FEATURE_LPAR)) { 189 189 dev->dev.archdata.dma_ops = &dma_direct_ops; 190 + /* 191 + * Set the coherent DMA mask to prevent the iommu 192 + * being used unnecessarily 193 + */ 194 + dev->dev.coherent_dma_mask = DMA_BIT_MASK(44); 190 195 return; 191 196 } 192 197 #endif
+2 -1
arch/powerpc/platforms/powernv/opal-irqchip.c
··· 228 228 } 229 229 230 230 /* Install interrupt handler */ 231 - rc = request_irq(virq, opal_interrupt, 0, "opal", NULL); 231 + rc = request_irq(virq, opal_interrupt, IRQF_TRIGGER_LOW, 232 + "opal", NULL); 232 233 if (rc) { 233 234 irq_dispose_mapping(virq); 234 235 pr_warn("Error %d requesting irq %d (0x%x)\n",
+1
arch/powerpc/platforms/powernv/opal.c
··· 399 399 400 400 if (!(regs->msr & MSR_RI)) { 401 401 /* If MSR_RI isn't set, we cannot recover */ 402 + pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); 402 403 recovered = 0; 403 404 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { 404 405 /* Platform corrected itself */
+21 -11
arch/powerpc/platforms/powernv/pci-ioda.c
··· 111 111 } 112 112 early_param("iommu", iommu_setup); 113 113 114 - static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) 114 + static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r) 115 115 { 116 - return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == 117 - (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); 116 + /* 117 + * WARNING: We cannot rely on the resource flags. The Linux PCI 118 + * allocation code sometimes decides to put a 64-bit prefetchable 119 + * BAR in the 32-bit window, so we have to compare the addresses. 120 + * 121 + * For simplicity we only test resource start. 122 + */ 123 + return (r->start >= phb->ioda.m64_base && 124 + r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); 118 125 } 119 126 120 127 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) ··· 236 229 sgsz = phb->ioda.m64_segsize; 237 230 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 238 231 r = &pdev->resource[i]; 239 - if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags)) 232 + if (!r->parent || !pnv_pci_is_m64(phb, r)) 240 233 continue; 241 234 242 235 start = _ALIGN_DOWN(r->start - base, sgsz); ··· 1884 1877 unsigned shift, unsigned long index, 1885 1878 unsigned long npages) 1886 1879 { 1887 - __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false); 1880 + __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm); 1888 1881 unsigned long start, end, inc; 1889 1882 1890 1883 /* We'll invalidate DMA address in PE scope */ ··· 2870 2863 res = &pdev->resource[i + PCI_IOV_RESOURCES]; 2871 2864 if (!res->flags || res->parent) 2872 2865 continue; 2873 - if (!pnv_pci_is_mem_pref_64(res->flags)) { 2866 + if (!pnv_pci_is_m64(phb, res)) { 2874 2867 dev_warn(&pdev->dev, "Don't support SR-IOV with" 2875 2868 " non M64 VF BAR%d: %pR. 
\n", 2876 2869 i, res); ··· 2965 2958 index++; 2966 2959 } 2967 2960 } else if ((res->flags & IORESOURCE_MEM) && 2968 - !pnv_pci_is_mem_pref_64(res->flags)) { 2961 + !pnv_pci_is_m64(phb, res)) { 2969 2962 region.start = res->start - 2970 2963 phb->hose->mem_offset[0] - 2971 2964 phb->ioda.m32_pci_base; ··· 3090 3083 bridge = bridge->bus->self; 3091 3084 } 3092 3085 3093 - /* We fail back to M32 if M64 isn't supported */ 3094 - if (phb->ioda.m64_segsize && 3095 - pnv_pci_is_mem_pref_64(type)) 3086 + /* 3087 + * We fall back to M32 if M64 isn't supported. We enforce the M64 3088 + * alignment for any 64-bit resource, PCIe doesn't care and 3089 + * bridges only do 64-bit prefetchable anyway. 3090 + */ 3091 + if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64)) 3096 3092 return phb->ioda.m64_segsize; 3097 3093 if (type & IORESOURCE_MEM) 3098 3094 return phb->ioda.m32_segsize; ··· 3135 3125 w = NULL; 3136 3126 if (r->flags & type & IORESOURCE_IO) 3137 3127 w = &hose->io_resource; 3138 - else if (pnv_pci_is_mem_pref_64(r->flags) && 3128 + else if (pnv_pci_is_m64(phb, r) && 3139 3129 (type & IORESOURCE_PREFETCH) && 3140 3130 phb->ioda.m64_segsize) 3141 3131 w = &hose->mem_resources[1];
+13 -13
arch/powerpc/platforms/pseries/hotplug-memory.c
··· 320 320 return dlpar_update_device_tree_lmb(lmb); 321 321 } 322 322 323 - static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) 324 - { 325 - unsigned long section_nr; 326 - struct mem_section *mem_sect; 327 - struct memory_block *mem_block; 328 - 329 - section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); 330 - mem_sect = __nr_to_section(section_nr); 331 - 332 - mem_block = find_memory_block(mem_sect); 333 - return mem_block; 334 - } 335 - 336 323 #ifdef CONFIG_MEMORY_HOTREMOVE 337 324 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) 338 325 { ··· 406 419 } 407 420 408 421 static int dlpar_add_lmb(struct of_drconf_cell *); 422 + 423 + static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb) 424 + { 425 + unsigned long section_nr; 426 + struct mem_section *mem_sect; 427 + struct memory_block *mem_block; 428 + 429 + section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr)); 430 + mem_sect = __nr_to_section(section_nr); 431 + 432 + mem_block = find_memory_block(mem_sect); 433 + return mem_block; 434 + } 409 435 410 436 static int dlpar_remove_lmb(struct of_drconf_cell *lmb) 411 437 {
+1
arch/powerpc/sysdev/xics/Kconfig
··· 1 1 config PPC_XICS 2 2 def_bool n 3 3 select PPC_SMP_MUXED_IPI 4 + select HARDIRQS_SW_RESEND 4 5 5 6 config PPC_ICP_NATIVE 6 7 def_bool n
+3 -1
arch/powerpc/sysdev/xics/ics-opal.c
··· 156 156 .irq_mask = ics_opal_mask_irq, 157 157 .irq_unmask = ics_opal_unmask_irq, 158 158 .irq_eoi = NULL, /* Patched at init time */ 159 - .irq_set_affinity = ics_opal_set_affinity 159 + .irq_set_affinity = ics_opal_set_affinity, 160 + .irq_set_type = xics_set_irq_type, 161 + .irq_retrigger = xics_retrigger, 160 162 }; 161 163 162 164 static int ics_opal_map(struct ics *ics, unsigned int virq);
+3 -1
arch/powerpc/sysdev/xics/ics-rtas.c
··· 163 163 .irq_mask = ics_rtas_mask_irq, 164 164 .irq_unmask = ics_rtas_unmask_irq, 165 165 .irq_eoi = NULL, /* Patched at init time */ 166 - .irq_set_affinity = ics_rtas_set_affinity 166 + .irq_set_affinity = ics_rtas_set_affinity, 167 + .irq_set_type = xics_set_irq_type, 168 + .irq_retrigger = xics_retrigger, 167 169 }; 168 170 169 171 static int ics_rtas_map(struct ics *ics, unsigned int virq)
+52 -7
arch/powerpc/sysdev/xics/xics-common.c
··· 328 328 329 329 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); 330 330 331 - /* They aren't all level sensitive but we just don't really know */ 332 - irq_set_status_flags(virq, IRQ_LEVEL); 331 + /* 332 + * Mark interrupts as edge sensitive by default so that resend 333 + * actually works. The device-tree parsing will turn the LSIs 334 + * back to level. 335 + */ 336 + irq_clear_status_flags(virq, IRQ_LEVEL); 333 337 334 338 /* Don't call into ICS for IPIs */ 335 339 if (hw == XICS_IPI) { ··· 355 351 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 356 352 357 353 { 358 - /* Current xics implementation translates everything 359 - * to level. It is not technically right for MSIs but this 360 - * is irrelevant at this point. We might get smarter in the future 361 - */ 362 354 *out_hwirq = intspec[0]; 363 - *out_flags = IRQ_TYPE_LEVEL_LOW; 364 355 356 + /* 357 + * If intsize is at least 2, we look for the type in the second cell, 358 + * we assume the LSB indicates a level interrupt. 359 + */ 360 + if (intsize > 1) { 361 + if (intspec[1] & 1) 362 + *out_flags = IRQ_TYPE_LEVEL_LOW; 363 + else 364 + *out_flags = IRQ_TYPE_EDGE_RISING; 365 + } else 366 + *out_flags = IRQ_TYPE_LEVEL_LOW; 367 + 368 + return 0; 369 + } 370 + 371 + int xics_set_irq_type(struct irq_data *d, unsigned int flow_type) 372 + { 373 + /* 374 + * We only support these. This has really no effect other than setting 375 + * the corresponding descriptor bits mind you but those will in turn 376 + * affect the resend function when re-enabling an edge interrupt. 377 + * 378 + * Set set the default to edge as explained in map(). 
379 + */ 380 + if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) 381 + flow_type = IRQ_TYPE_EDGE_RISING; 382 + 383 + if (flow_type != IRQ_TYPE_EDGE_RISING && 384 + flow_type != IRQ_TYPE_LEVEL_LOW) 385 + return -EINVAL; 386 + 387 + irqd_set_trigger_type(d, flow_type); 388 + 389 + return IRQ_SET_MASK_OK_NOCOPY; 390 + } 391 + 392 + int xics_retrigger(struct irq_data *data) 393 + { 394 + /* 395 + * We need to push a dummy CPPR when retriggering, since the subsequent 396 + * EOI will try to pop it. Passing 0 works, as the function hard codes 397 + * the priority value anyway. 398 + */ 399 + xics_push_cppr(0); 400 + 401 + /* Tell the core to do a soft retrigger */ 365 402 return 0; 366 403 } 367 404
+14
arch/s390/Kconfig
··· 123 123 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 124 124 select HAVE_ARCH_AUDITSYSCALL 125 125 select HAVE_ARCH_EARLY_PFN_TO_NID 126 + select HAVE_ARCH_HARDENED_USERCOPY 126 127 select HAVE_ARCH_JUMP_LABEL 127 128 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 128 129 select HAVE_ARCH_SECCOMP_FILTER ··· 871 870 872 871 Select this option if you want to run the kernel as a guest under 873 872 the KVM hypervisor. 873 + 874 + config S390_GUEST_OLD_TRANSPORT 875 + def_bool y 876 + prompt "Guest support for old s390 virtio transport (DEPRECATED)" 877 + depends on S390_GUEST 878 + help 879 + Enable this option to add support for the old s390-virtio 880 + transport (i.e. virtio devices NOT based on virtio-ccw). This 881 + type of virtio devices is only available on the experimental 882 + kuli userspace or with old (< 2.6) qemu. If you are running 883 + with a modern version of qemu (which supports virtio-ccw since 884 + 1.4 and uses it by default since version 2.4), you probably won't 885 + need this. 874 886 875 887 endmenu
+4 -1
arch/s390/kvm/kvm-s390.c
··· 1672 1672 KVM_SYNC_CRS | 1673 1673 KVM_SYNC_ARCH0 | 1674 1674 KVM_SYNC_PFAULT; 1675 + kvm_s390_set_prefix(vcpu, 0); 1675 1676 if (test_kvm_facility(vcpu->kvm, 64)) 1676 1677 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; 1677 1678 /* fprs can be synchronized via vrs, even if the guest has no vx. With ··· 2362 2361 rc = gmap_mprotect_notify(vcpu->arch.gmap, 2363 2362 kvm_s390_get_prefix(vcpu), 2364 2363 PAGE_SIZE * 2, PROT_WRITE); 2365 - if (rc) 2364 + if (rc) { 2365 + kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 2366 2366 return rc; 2367 + } 2367 2368 goto retry; 2368 2369 } 2369 2370
+2
arch/s390/lib/uaccess.c
··· 104 104 105 105 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) 106 106 { 107 + check_object_size(to, n, false); 107 108 if (static_branch_likely(&have_mvcos)) 108 109 return copy_from_user_mvcos(to, from, n); 109 110 return copy_from_user_mvcp(to, from, n); ··· 178 177 179 178 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 180 179 { 180 + check_object_size(from, n, true); 181 181 if (static_branch_likely(&have_mvcos)) 182 182 return copy_to_user_mvcos(to, from, n); 183 183 return copy_to_user_mvcs(to, from, n);
+1
arch/sparc/Kconfig
··· 43 43 select OLD_SIGSUSPEND 44 44 select ARCH_HAS_SG_CHAIN 45 45 select CPU_NO_EFFICIENT_FFS 46 + select HAVE_ARCH_HARDENED_USERCOPY 46 47 47 48 config SPARC32 48 49 def_bool !64BIT
+10 -4
arch/sparc/include/asm/uaccess_32.h
··· 248 248 249 249 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) 250 250 { 251 - if (n && __access_ok((unsigned long) to, n)) 251 + if (n && __access_ok((unsigned long) to, n)) { 252 + if (!__builtin_constant_p(n)) 253 + check_object_size(from, n, true); 252 254 return __copy_user(to, (__force void __user *) from, n); 253 - else 255 + } else 254 256 return n; 255 257 } 256 258 257 259 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 258 260 { 261 + if (!__builtin_constant_p(n)) 262 + check_object_size(from, n, true); 259 263 return __copy_user(to, (__force void __user *) from, n); 260 264 } 261 265 262 266 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 263 267 { 264 - if (n && __access_ok((unsigned long) from, n)) 268 + if (n && __access_ok((unsigned long) from, n)) { 269 + if (!__builtin_constant_p(n)) 270 + check_object_size(to, n, false); 265 271 return __copy_user((__force void __user *) to, from, n); 266 - else 272 + } else 267 273 return n; 268 274 } 269 275
+9 -2
arch/sparc/include/asm/uaccess_64.h
··· 210 210 static inline unsigned long __must_check 211 211 copy_from_user(void *to, const void __user *from, unsigned long size) 212 212 { 213 - unsigned long ret = ___copy_from_user(to, from, size); 213 + unsigned long ret; 214 214 215 + if (!__builtin_constant_p(size)) 216 + check_object_size(to, size, false); 217 + 218 + ret = ___copy_from_user(to, from, size); 215 219 if (unlikely(ret)) 216 220 ret = copy_from_user_fixup(to, from, size); 217 221 ··· 231 227 static inline unsigned long __must_check 232 228 copy_to_user(void __user *to, const void *from, unsigned long size) 233 229 { 234 - unsigned long ret = ___copy_to_user(to, from, size); 230 + unsigned long ret; 235 231 232 + if (!__builtin_constant_p(size)) 233 + check_object_size(from, size, true); 234 + ret = ___copy_to_user(to, from, size); 236 235 if (unlikely(ret)) 237 236 ret = copy_to_user_fixup(to, from, size); 238 237 return ret;
+1 -1
arch/unicore32/include/asm/mmu_context.h
··· 98 98 } 99 99 100 100 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, 101 - bool write, bool foreign) 101 + bool write, bool execute, bool foreign) 102 102 { 103 103 /* by default, allow everything */ 104 104 return true;
+2
arch/x86/Kconfig
··· 80 80 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 81 81 select HAVE_AOUT if X86_32 82 82 select HAVE_ARCH_AUDITSYSCALL 83 + select HAVE_ARCH_HARDENED_USERCOPY 83 84 select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE 84 85 select HAVE_ARCH_JUMP_LABEL 85 86 select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP ··· 92 91 select HAVE_ARCH_SOFT_DIRTY if X86_64 93 92 select HAVE_ARCH_TRACEHOOK 94 93 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 94 + select HAVE_ARCH_WITHIN_STACK_FRAMES 95 95 select HAVE_EBPF_JIT if X86_64 96 96 select HAVE_CC_STACKPROTECTOR 97 97 select HAVE_CMPXCHG_DOUBLE
+2
arch/x86/entry/Makefile
··· 5 5 OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y 6 6 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y 7 7 8 + CFLAGS_syscall_64.o += -Wno-override-init 9 + CFLAGS_syscall_32.o += -Wno-override-init 8 10 obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o 9 11 obj-y += common.o 10 12
+20 -5
arch/x86/entry/entry_64.S
··· 288 288 jne opportunistic_sysret_failed 289 289 290 290 /* 291 - * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, 292 - * restoring TF results in a trap from userspace immediately after 293 - * SYSRET. This would cause an infinite loop whenever #DB happens 294 - * with register state that satisfies the opportunistic SYSRET 295 - * conditions. For example, single-stepping this user code: 291 + * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot 292 + * restore RF properly. If the slowpath sets it for whatever reason, we 293 + * need to restore it correctly. 294 + * 295 + * SYSRET can restore TF, but unlike IRET, restoring TF results in a 296 + * trap from userspace immediately after SYSRET. This would cause an 297 + * infinite loop whenever #DB happens with register state that satisfies 298 + * the opportunistic SYSRET conditions. For example, single-stepping 299 + * this user code: 296 300 * 297 301 * movq $stuck_here, %rcx 298 302 * pushfq ··· 605 601 .endm 606 602 #endif 607 603 604 + /* Make sure APIC interrupt handlers end up in the irqentry section: */ 605 + #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN) 606 + # define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" 607 + # define POP_SECTION_IRQENTRY .popsection 608 + #else 609 + # define PUSH_SECTION_IRQENTRY 610 + # define POP_SECTION_IRQENTRY 611 + #endif 612 + 608 613 .macro apicinterrupt num sym do_sym 614 + PUSH_SECTION_IRQENTRY 609 615 apicinterrupt3 \num \sym \do_sym 610 616 trace_apicinterrupt \num \sym 617 + POP_SECTION_IRQENTRY 611 618 .endm 612 619 613 620 #ifdef CONFIG_SMP
+14
arch/x86/events/intel/uncore_snb.c
··· 100 100 } 101 101 } 102 102 103 + static void snb_uncore_msr_enable_box(struct intel_uncore_box *box) 104 + { 105 + wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 106 + SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL); 107 + } 108 + 103 109 static void snb_uncore_msr_exit_box(struct intel_uncore_box *box) 104 110 { 105 111 if (box->pmu->pmu_idx == 0) ··· 133 127 134 128 static struct intel_uncore_ops snb_uncore_msr_ops = { 135 129 .init_box = snb_uncore_msr_init_box, 130 + .enable_box = snb_uncore_msr_enable_box, 136 131 .exit_box = snb_uncore_msr_exit_box, 137 132 .disable_event = snb_uncore_msr_disable_event, 138 133 .enable_event = snb_uncore_msr_enable_event, ··· 199 192 } 200 193 } 201 194 195 + static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) 196 + { 197 + wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 198 + SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); 199 + } 200 + 202 201 static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) 203 202 { 204 203 if (box->pmu->pmu_idx == 0) ··· 213 200 214 201 static struct intel_uncore_ops skl_uncore_msr_ops = { 215 202 .init_box = skl_uncore_msr_init_box, 203 + .enable_box = skl_uncore_msr_enable_box, 216 204 .exit_box = skl_uncore_msr_exit_box, 217 205 .disable_event = snb_uncore_msr_disable_event, 218 206 .enable_event = snb_uncore_msr_enable_event,
+5 -5
arch/x86/events/intel/uncore_snbep.c
··· 2626 2626 2627 2627 static struct intel_uncore_type hswep_uncore_ha = { 2628 2628 .name = "ha", 2629 - .num_counters = 5, 2629 + .num_counters = 4, 2630 2630 .num_boxes = 2, 2631 2631 .perf_ctr_bits = 48, 2632 2632 SNBEP_UNCORE_PCI_COMMON_INIT(), ··· 2645 2645 2646 2646 static struct intel_uncore_type hswep_uncore_imc = { 2647 2647 .name = "imc", 2648 - .num_counters = 5, 2648 + .num_counters = 4, 2649 2649 .num_boxes = 8, 2650 2650 .perf_ctr_bits = 48, 2651 2651 .fixed_ctr_bits = 48, ··· 2691 2691 2692 2692 static struct intel_uncore_type hswep_uncore_qpi = { 2693 2693 .name = "qpi", 2694 - .num_counters = 5, 2694 + .num_counters = 4, 2695 2695 .num_boxes = 3, 2696 2696 .perf_ctr_bits = 48, 2697 2697 .perf_ctr = SNBEP_PCI_PMON_CTR0, ··· 2773 2773 2774 2774 static struct intel_uncore_type hswep_uncore_r3qpi = { 2775 2775 .name = "r3qpi", 2776 - .num_counters = 4, 2776 + .num_counters = 3, 2777 2777 .num_boxes = 3, 2778 2778 .perf_ctr_bits = 44, 2779 2779 .constraints = hswep_uncore_r3qpi_constraints, ··· 2972 2972 2973 2973 static struct intel_uncore_type bdx_uncore_imc = { 2974 2974 .name = "imc", 2975 - .num_counters = 5, 2975 + .num_counters = 4, 2976 2976 .num_boxes = 8, 2977 2977 .perf_ctr_bits = 48, 2978 2978 .fixed_ctr_bits = 48,
+2
arch/x86/include/asm/apic.h
··· 135 135 void register_lapic_address(unsigned long address); 136 136 extern void setup_boot_APIC_clock(void); 137 137 extern void setup_secondary_APIC_clock(void); 138 + extern void lapic_update_tsc_freq(void); 138 139 extern int APIC_init_uniprocessor(void); 139 140 140 141 #ifdef CONFIG_X86_64 ··· 171 170 static inline void disable_local_APIC(void) { } 172 171 # define setup_boot_APIC_clock x86_init_noop 173 172 # define setup_secondary_APIC_clock x86_init_noop 173 + static inline void lapic_update_tsc_freq(void) { } 174 174 #endif /* !CONFIG_X86_LOCAL_APIC */ 175 175 176 176 #ifdef CONFIG_X86_X2APIC
-4
arch/x86/include/asm/hardirq.h
··· 22 22 #ifdef CONFIG_SMP 23 23 unsigned int irq_resched_count; 24 24 unsigned int irq_call_count; 25 - /* 26 - * irq_tlb_count is double-counted in irq_call_count, so it must be 27 - * subtracted from irq_call_count when displaying irq_call_count 28 - */ 29 25 unsigned int irq_tlb_count; 30 26 #endif 31 27 #ifdef CONFIG_X86_THERMAL_VECTOR
+2 -2
arch/x86/include/asm/init.h
··· 5 5 void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ 6 6 void *context; /* context for alloc_pgt_page */ 7 7 unsigned long pmd_flag; /* page flag for PMD entry */ 8 - bool kernel_mapping; /* kernel mapping or ident mapping */ 8 + unsigned long offset; /* ident mapping offset */ 9 9 }; 10 10 11 11 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 12 - unsigned long addr, unsigned long end); 12 + unsigned long pstart, unsigned long pend); 13 13 14 14 #endif /* _ASM_X86_INIT_H */
+2 -2
arch/x86/include/asm/pgtable_64.h
··· 145 145 * 146 146 * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number 147 147 * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names 148 - * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry 148 + * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry 149 149 * 150 150 * G (8) is aliased and used as a PROT_NONE indicator for 151 151 * !present ptes. We need to start storing swap entries above ··· 156 156 #define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) 157 157 #define SWP_TYPE_BITS 5 158 158 /* Place the offset above the type: */ 159 - #define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1) 159 + #define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) 160 160 161 161 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) 162 162
+9 -1
arch/x86/include/asm/realmode.h
··· 58 58 extern unsigned char secondary_startup_64[]; 59 59 #endif 60 60 61 + static inline size_t real_mode_size_needed(void) 62 + { 63 + if (real_mode_header) 64 + return 0; /* already allocated. */ 65 + 66 + return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); 67 + } 68 + 69 + void set_real_mode_mem(phys_addr_t mem, size_t size); 61 70 void reserve_real_mode(void); 62 - void setup_real_mode(void); 63 71 64 72 #endif /* _ARCH_X86_REALMODE_H */
+44
arch/x86/include/asm/thread_info.h
··· 176 176 return sp; 177 177 } 178 178 179 + /* 180 + * Walks up the stack frames to make sure that the specified object is 181 + * entirely contained by a single stack frame. 182 + * 183 + * Returns: 184 + * 1 if within a frame 185 + * -1 if placed across a frame boundary (or outside stack) 186 + * 0 unable to determine (no frame pointers, etc) 187 + */ 188 + static inline int arch_within_stack_frames(const void * const stack, 189 + const void * const stackend, 190 + const void *obj, unsigned long len) 191 + { 192 + #if defined(CONFIG_FRAME_POINTER) 193 + const void *frame = NULL; 194 + const void *oldframe; 195 + 196 + oldframe = __builtin_frame_address(1); 197 + if (oldframe) 198 + frame = __builtin_frame_address(2); 199 + /* 200 + * low ----------------------------------------------> high 201 + * [saved bp][saved ip][args][local vars][saved bp][saved ip] 202 + * ^----------------^ 203 + * allow copies only within here 204 + */ 205 + while (stack <= frame && frame < stackend) { 206 + /* 207 + * If obj + len extends past the last frame, this 208 + * check won't pass and the next frame will be 0, 209 + * causing us to bail out and correctly report 210 + * the copy as invalid. 211 + */ 212 + if (obj + len <= frame) 213 + return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1; 214 + oldframe = frame; 215 + frame = *(const void * const *)frame; 216 + } 217 + return -1; 218 + #else 219 + return 0; 220 + #endif 221 + } 222 + 179 223 #else /* !__ASSEMBLY__ */ 180 224 181 225 #ifdef CONFIG_X86_64
+7
arch/x86/include/asm/tlbflush.h
··· 135 135 136 136 static inline void __native_flush_tlb(void) 137 137 { 138 + /* 139 + * If current->mm == NULL then we borrow a mm which may change during a 140 + * task switch and therefore we must not be preempted while we write CR3 141 + * back: 142 + */ 143 + preempt_disable(); 138 144 native_write_cr3(native_read_cr3()); 145 + preempt_enable(); 139 146 } 140 147 141 148 static inline void __native_flush_tlb_global_irq_disabled(void)
+14 -12
arch/x86/include/asm/uaccess.h
··· 761 761 * case, and do only runtime checking for non-constant sizes. 762 762 */ 763 763 764 - if (likely(sz < 0 || sz >= n)) 764 + if (likely(sz < 0 || sz >= n)) { 765 + check_object_size(to, n, false); 765 766 n = _copy_from_user(to, from, n); 766 - else if(__builtin_constant_p(n)) 767 + } else if (__builtin_constant_p(n)) 767 768 copy_from_user_overflow(); 768 769 else 769 770 __copy_from_user_overflow(sz, n); ··· 782 781 might_fault(); 783 782 784 783 /* See the comment in copy_from_user() above. */ 785 - if (likely(sz < 0 || sz >= n)) 784 + if (likely(sz < 0 || sz >= n)) { 785 + check_object_size(from, n, true); 786 786 n = _copy_to_user(to, from, n); 787 - else if(__builtin_constant_p(n)) 787 + } else if (__builtin_constant_p(n)) 788 788 copy_to_user_overflow(); 789 789 else 790 790 __copy_to_user_overflow(sz, n); ··· 814 812 #define user_access_begin() __uaccess_begin() 815 813 #define user_access_end() __uaccess_end() 816 814 817 - #define unsafe_put_user(x, ptr) \ 818 - ({ \ 815 + #define unsafe_put_user(x, ptr, err_label) \ 816 + do { \ 819 817 int __pu_err; \ 820 818 __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \ 821 - __builtin_expect(__pu_err, 0); \ 822 - }) 819 + if (unlikely(__pu_err)) goto err_label; \ 820 + } while (0) 823 821 824 - #define unsafe_get_user(x, ptr) \ 825 - ({ \ 822 + #define unsafe_get_user(x, ptr, err_label) \ 823 + do { \ 826 824 int __gu_err; \ 827 825 unsigned long __gu_val; \ 828 826 __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \ 829 827 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 830 - __builtin_expect(__gu_err, 0); \ 831 - }) 828 + if (unlikely(__gu_err)) goto err_label; \ 829 + } while (0) 832 830 833 831 #endif /* _ASM_X86_UACCESS_H */ 834 832
+2
arch/x86/include/asm/uaccess_32.h
··· 37 37 static __always_inline unsigned long __must_check 38 38 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 39 39 { 40 + check_object_size(from, n, true); 40 41 return __copy_to_user_ll(to, from, n); 41 42 } 42 43 ··· 96 95 __copy_from_user(void *to, const void __user *from, unsigned long n) 97 96 { 98 97 might_fault(); 98 + check_object_size(to, n, false); 99 99 if (__builtin_constant_p(n)) { 100 100 unsigned long ret; 101 101
+2
arch/x86/include/asm/uaccess_64.h
··· 54 54 { 55 55 int ret = 0; 56 56 57 + check_object_size(dst, size, false); 57 58 if (!__builtin_constant_p(size)) 58 59 return copy_user_generic(dst, (__force void *)src, size); 59 60 switch (size) { ··· 120 119 { 121 120 int ret = 0; 122 121 122 + check_object_size(src, size, true); 123 123 if (!__builtin_constant_p(size)) 124 124 return copy_user_generic((__force void *)dst, src, size); 125 125 switch (size) {
+3 -2
arch/x86/include/asm/uv/bios.h
··· 79 79 u16 nasid; /* HNasid */ 80 80 u16 sockid; /* Socket ID, high bits of APIC ID */ 81 81 u16 pnode; /* Index to MMR and GRU spaces */ 82 - u32 pxm; /* ACPI proximity domain number */ 82 + u32 unused2; 83 83 u32 limit; /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */ 84 84 }; 85 85 ··· 88 88 #define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */ 89 89 #define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */ 90 90 #define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */ 91 - #define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_2 91 + #define UV_SYSTAB_VERSION_UV4_3 0x403 /* - GAM Range PXM Value */ 92 + #define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_3 92 93 93 94 #define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */ 94 95 #define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
+26 -2
arch/x86/kernel/apic/apic.c
··· 313 313 314 314 /* Clock divisor */ 315 315 #define APIC_DIVISOR 16 316 - #define TSC_DIVISOR 32 316 + #define TSC_DIVISOR 8 317 317 318 318 /* 319 319 * This function sets up the local APIC timer, with a timeout of ··· 565 565 CLOCK_EVT_FEAT_DUMMY); 566 566 levt->set_next_event = lapic_next_deadline; 567 567 clockevents_config_and_register(levt, 568 - (tsc_khz / TSC_DIVISOR) * 1000, 568 + tsc_khz * (1000 / TSC_DIVISOR), 569 569 0xF, ~0UL); 570 570 } else 571 571 clockevents_register_device(levt); 572 + } 573 + 574 + /* 575 + * Install the updated TSC frequency from recalibration at the TSC 576 + * deadline clockevent devices. 577 + */ 578 + static void __lapic_update_tsc_freq(void *info) 579 + { 580 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); 581 + 582 + if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) 583 + return; 584 + 585 + clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR)); 586 + } 587 + 588 + void lapic_update_tsc_freq(void) 589 + { 590 + /* 591 + * The clockevent device's ->mult and ->shift can both be 592 + * changed. In order to avoid races, schedule the frequency 593 + * update code on each CPU. 594 + */ 595 + on_each_cpu(__lapic_update_tsc_freq, NULL, 0); 572 596 } 573 597 574 598 /*
+9 -4
arch/x86/kernel/apic/x2apic_cluster.c
··· 155 155 /* 156 156 * At CPU state changes, update the x2apic cluster sibling info. 157 157 */ 158 - int x2apic_prepare_cpu(unsigned int cpu) 158 + static int x2apic_prepare_cpu(unsigned int cpu) 159 159 { 160 160 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL)) 161 161 return -ENOMEM; ··· 168 168 return 0; 169 169 } 170 170 171 - int x2apic_dead_cpu(unsigned int this_cpu) 171 + static int x2apic_dead_cpu(unsigned int this_cpu) 172 172 { 173 173 int cpu; 174 174 ··· 186 186 static int x2apic_cluster_probe(void) 187 187 { 188 188 int cpu = smp_processor_id(); 189 + int ret; 189 190 190 191 if (!x2apic_mode) 191 192 return 0; 192 193 194 + ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", 195 + x2apic_prepare_cpu, x2apic_dead_cpu); 196 + if (ret < 0) { 197 + pr_err("Failed to register X2APIC_PREPARE\n"); 198 + return 0; 199 + } 193 200 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); 194 - cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", 195 - x2apic_prepare_cpu, x2apic_dead_cpu); 196 201 return 1; 197 202 } 198 203
+20 -22
arch/x86/kernel/apic/x2apic_uv_x.c
··· 223 223 if (strncmp(oem_id, "SGI", 3) != 0) 224 224 return 0; 225 225 226 + if (numa_off) { 227 + pr_err("UV: NUMA is off, disabling UV support\n"); 228 + return 0; 229 + } 230 + 226 231 /* Setup early hub type field in uv_hub_info for Node 0 */ 227 232 uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; 228 233 ··· 330 325 struct uv_gam_range_entry *gre = uv_gre_table; 331 326 struct uv_gam_range_s *grt; 332 327 unsigned long last_limit = 0, ram_limit = 0; 333 - int bytes, i, sid, lsid = -1; 328 + int bytes, i, sid, lsid = -1, indx = 0, lindx = -1; 334 329 335 330 if (!gre) 336 331 return; ··· 361 356 } 362 357 sid = gre->sockid - _min_socket; 363 358 if (lsid < sid) { /* new range */ 364 - grt = &_gr_table[sid]; 365 - grt->base = lsid; 359 + grt = &_gr_table[indx]; 360 + grt->base = lindx; 366 361 grt->nasid = gre->nasid; 367 362 grt->limit = last_limit = gre->limit; 368 363 lsid = sid; 364 + lindx = indx++; 369 365 continue; 370 366 } 371 367 if (lsid == sid && !ram_limit) { /* update range */ ··· 377 371 } 378 372 if (!ram_limit) { /* non-contiguous ram range */ 379 373 grt++; 380 - grt->base = sid - 1; 374 + grt->base = lindx; 381 375 grt->nasid = gre->nasid; 382 376 grt->limit = last_limit = gre->limit; 383 377 continue; ··· 1161 1155 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1162 1156 if (!index) { 1163 1157 pr_info("UV: GAM Range Table...\n"); 1164 - pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s %3s\n", 1158 + pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", 1165 1159 "Range", "", "Size", "Type", "NASID", 1166 - "SID", "PN", "PXM"); 1160 + "SID", "PN"); 1167 1161 } 1168 1162 pr_info( 1169 - "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x %3d\n", 1163 + "UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", 1170 1164 index++, 1171 1165 (unsigned long)lgre << UV_GAM_RANGE_SHFT, 1172 1166 (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, 1173 1167 ((unsigned long)(gre->limit - lgre)) >> 1174 1168 (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ 1175 
- gre->type, gre->nasid, gre->sockid, 1176 - gre->pnode, gre->pxm); 1169 + gre->type, gre->nasid, gre->sockid, gre->pnode); 1177 1170 1178 1171 lgre = gre->limit; 1179 1172 if (sock_min > gre->sockid) ··· 1291 1286 _pnode_to_socket[i] = SOCK_EMPTY; 1292 1287 1293 1288 /* fill in pnode/node/addr conversion list values */ 1294 - pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n"); 1289 + pr_info("UV: GAM Building socket/pnode conversion tables\n"); 1295 1290 for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { 1296 1291 if (gre->type == UV_GAM_RANGE_TYPE_HOLE) 1297 1292 continue; ··· 1299 1294 if (_socket_to_pnode[i] != SOCK_EMPTY) 1300 1295 continue; /* duplicate */ 1301 1296 _socket_to_pnode[i] = gre->pnode; 1302 - _socket_to_node[i] = gre->pxm; 1303 1297 1304 1298 i = gre->pnode - minpnode; 1305 1299 _pnode_to_socket[i] = gre->sockid; 1306 1300 1307 1301 pr_info( 1308 - "UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n", 1302 + "UV: sid:%02x type:%d nasid:%04x pn:%02x pn2s:%2x\n", 1309 1303 gre->sockid, gre->type, gre->nasid, 1310 1304 _socket_to_pnode[gre->sockid - minsock], 1311 - _socket_to_node[gre->sockid - minsock], 1312 1305 _pnode_to_socket[gre->pnode - minpnode]); 1313 1306 } 1314 1307 1315 - /* check socket -> node values */ 1308 + /* Set socket -> node values */ 1316 1309 lnid = -1; 1317 1310 for_each_present_cpu(cpu) { 1318 1311 int nid = cpu_to_node(cpu); ··· 1321 1318 lnid = nid; 1322 1319 apicid = per_cpu(x86_cpu_to_apicid, cpu); 1323 1320 sockid = apicid >> uv_cpuid.socketid_shift; 1324 - i = sockid - minsock; 1325 - 1326 - if (nid != _socket_to_node[i]) { 1327 - pr_warn( 1328 - "UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n", 1329 - i, sockid, gre->type, _socket_to_node[i], nid); 1330 - _socket_to_node[i] = nid; 1331 - } 1321 + _socket_to_node[sockid - minsock] = nid; 1322 + pr_info("UV: sid:%02x: apicid:%04x node:%2d\n", 1323 + sockid, apicid, nid); 1332 1324 } 1333 1325 1334 1326 /* Setup physical blade to pnode 
translation from GAM Range Table */
+17 -121
arch/x86/kernel/fpu/xstate.c
··· 866 866 return get_xsave_addr(&fpu->state.xsave, xsave_state); 867 867 } 868 868 869 - 870 - /* 871 - * Set xfeatures (aka XSTATE_BV) bit for a feature that we want 872 - * to take out of its "init state". This will ensure that an 873 - * XRSTOR actually restores the state. 874 - */ 875 - static void fpu__xfeature_set_non_init(struct xregs_state *xsave, 876 - int xstate_feature_mask) 877 - { 878 - xsave->header.xfeatures |= xstate_feature_mask; 879 - } 880 - 881 - /* 882 - * This function is safe to call whether the FPU is in use or not. 883 - * 884 - * Note that this only works on the current task. 885 - * 886 - * Inputs: 887 - * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP, 888 - * XFEATURE_MASK_SSE, etc...) 889 - * @xsave_state_ptr: a pointer to a copy of the state that you would 890 - * like written in to the current task's FPU xsave state. This pointer 891 - * must not be located in the current tasks's xsave area. 892 - * Output: 893 - * address of the state in the xsave area or NULL if the state 894 - * is not present or is in its 'init state'. 895 - */ 896 - static void fpu__xfeature_set_state(int xstate_feature_mask, 897 - void *xstate_feature_src, size_t len) 898 - { 899 - struct xregs_state *xsave = &current->thread.fpu.state.xsave; 900 - struct fpu *fpu = &current->thread.fpu; 901 - void *dst; 902 - 903 - if (!boot_cpu_has(X86_FEATURE_XSAVE)) { 904 - WARN_ONCE(1, "%s() attempted with no xsave support", __func__); 905 - return; 906 - } 907 - 908 - /* 909 - * Tell the FPU code that we need the FPU state to be in 910 - * 'fpu' (not in the registers), and that we need it to 911 - * be stable while we write to it. 912 - */ 913 - fpu__current_fpstate_write_begin(); 914 - 915 - /* 916 - * This method *WILL* *NOT* work for compact-format 917 - * buffers. If the 'xstate_feature_mask' is unset in 918 - * xcomp_bv then we may need to move other feature state 919 - * "up" in the buffer. 
920 - */ 921 - if (xsave->header.xcomp_bv & xstate_feature_mask) { 922 - WARN_ON_ONCE(1); 923 - goto out; 924 - } 925 - 926 - /* find the location in the xsave buffer of the desired state */ 927 - dst = __raw_xsave_addr(&fpu->state.xsave, xstate_feature_mask); 928 - 929 - /* 930 - * Make sure that the pointer being passed in did not 931 - * come from the xsave buffer itself. 932 - */ 933 - WARN_ONCE(xstate_feature_src == dst, "set from xsave buffer itself"); 934 - 935 - /* put the caller-provided data in the location */ 936 - memcpy(dst, xstate_feature_src, len); 937 - 938 - /* 939 - * Mark the xfeature so that the CPU knows there is state 940 - * in the buffer now. 941 - */ 942 - fpu__xfeature_set_non_init(xsave, xstate_feature_mask); 943 - out: 944 - /* 945 - * We are done writing to the 'fpu'. Reenable preeption 946 - * and (possibly) move the fpstate back in to the fpregs. 947 - */ 948 - fpu__current_fpstate_write_end(); 949 - } 950 - 951 869 #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) 952 870 #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) 953 871 954 872 /* 955 - * This will go out and modify the XSAVE buffer so that PKRU is 956 - * set to a particular state for access to 'pkey'. 957 - * 958 - * PKRU state does affect kernel access to user memory. We do 959 - * not modfiy PKRU *itself* here, only the XSAVE state that will 960 - * be restored in to PKRU when we return back to userspace. 873 + * This will go out and modify PKRU register to set the access 874 + * rights for @pkey to @init_val. 
961 875 */ 962 876 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, 963 877 unsigned long init_val) 964 878 { 965 - struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; 966 - struct pkru_state *old_pkru_state; 967 - struct pkru_state new_pkru_state; 879 + u32 old_pkru; 968 880 int pkey_shift = (pkey * PKRU_BITS_PER_PKEY); 969 881 u32 new_pkru_bits = 0; 970 882 ··· 886 974 */ 887 975 if (!boot_cpu_has(X86_FEATURE_OSPKE)) 888 976 return -EINVAL; 977 + /* 978 + * For most XSAVE components, this would be an arduous task: 979 + * brining fpstate up to date with fpregs, updating fpstate, 980 + * then re-populating fpregs. But, for components that are 981 + * never lazily managed, we can just access the fpregs 982 + * directly. PKRU is never managed lazily, so we can just 983 + * manipulate it directly. Make sure it stays that way. 984 + */ 985 + WARN_ON_ONCE(!use_eager_fpu()); 889 986 890 987 /* Set the bits we need in PKRU: */ 891 988 if (init_val & PKEY_DISABLE_ACCESS) ··· 905 984 /* Shift the bits in to the correct place in PKRU for pkey: */ 906 985 new_pkru_bits <<= pkey_shift; 907 986 908 - /* Locate old copy of the state in the xsave buffer: */ 909 - old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU); 987 + /* Get old PKRU and mask off any old bits in place: */ 988 + old_pkru = read_pkru(); 989 + old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); 910 990 911 - /* 912 - * When state is not in the buffer, it is in the init 913 - * state, set it manually. Otherwise, copy out the old 914 - * state. 915 - */ 916 - if (!old_pkru_state) 917 - new_pkru_state.pkru = 0; 918 - else 919 - new_pkru_state.pkru = old_pkru_state->pkru; 920 - 921 - /* Mask off any old bits in place: */ 922 - new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); 923 - 924 - /* Set the newly-requested bits: */ 925 - new_pkru_state.pkru |= new_pkru_bits; 926 - 927 - /* 928 - * We could theoretically live without zeroing pkru.pad. 
929 - * The current XSAVE feature state definition says that 930 - * only bytes 0->3 are used. But we do not want to 931 - * chance leaking kernel stack out to userspace in case a 932 - * memcpy() of the whole xsave buffer was done. 933 - * 934 - * They're in the same cacheline anyway. 935 - */ 936 - new_pkru_state.pad = 0; 937 - 938 - fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state)); 991 + /* Write old part along with new part: */ 992 + write_pkru(old_pkru | new_pkru_bits); 939 993 940 994 return 0; 941 995 }
-2
arch/x86/kernel/head32.c
··· 25 25 /* Initialize 32bit specific setup functions */ 26 26 x86_init.resources.reserve_resources = i386_reserve_resources; 27 27 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; 28 - 29 - reserve_bios_regions(); 30 28 } 31 29 32 30 asmlinkage __visible void __init i386_start_kernel(void)
-1
arch/x86/kernel/head64.c
··· 183 183 copy_bootdata(__va(real_mode_data)); 184 184 185 185 x86_early_init_platform_quirks(); 186 - reserve_bios_regions(); 187 186 188 187 switch (boot_params.hdr.hardware_subarch) { 189 188 case X86_SUBARCH_INTEL_MID:
+1 -1
arch/x86/kernel/hpet.c
··· 1242 1242 memset(&curr_time, 0, sizeof(struct rtc_time)); 1243 1243 1244 1244 if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) 1245 - mc146818_set_time(&curr_time); 1245 + mc146818_get_time(&curr_time); 1246 1246 1247 1247 if (hpet_rtc_flags & RTC_UIE && 1248 1248 curr_time.tm_sec != hpet_prev_update_sec) {
+1 -2
arch/x86/kernel/irq.c
··· 102 102 seq_puts(p, " Rescheduling interrupts\n"); 103 103 seq_printf(p, "%*s: ", prec, "CAL"); 104 104 for_each_online_cpu(j) 105 - seq_printf(p, "%10u ", irq_stats(j)->irq_call_count - 106 - irq_stats(j)->irq_tlb_count); 105 + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 107 106 seq_puts(p, " Function call interrupts\n"); 108 107 seq_printf(p, "%*s: ", prec, "TLB"); 109 108 for_each_online_cpu(j)
+17 -10
arch/x86/kernel/setup.c
··· 936 936 937 937 x86_init.oem.arch_setup(); 938 938 939 - kernel_randomize_memory(); 940 - 941 939 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 942 940 setup_memory_map(); 943 941 parse_setup_data(); ··· 1053 1055 1054 1056 max_possible_pfn = max_pfn; 1055 1057 1058 + /* 1059 + * Define random base addresses for memory sections after max_pfn is 1060 + * defined and before each memory section base is used. 1061 + */ 1062 + kernel_randomize_memory(); 1063 + 1056 1064 #ifdef CONFIG_X86_32 1057 1065 /* max_low_pfn get updated here */ 1058 1066 find_low_pfn_range(); ··· 1101 1097 efi_find_mirror(); 1102 1098 } 1103 1099 1100 + reserve_bios_regions(); 1101 + 1104 1102 /* 1105 1103 * The EFI specification says that boot service code won't be called 1106 1104 * after ExitBootServices(). This is, in fact, a lie. ··· 1131 1125 1132 1126 early_trap_pf_init(); 1133 1127 1134 - setup_real_mode(); 1128 + /* 1129 + * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features) 1130 + * with the current CR4 value. This may not be necessary, but 1131 + * auditing all the early-boot CR4 manipulation would be needed to 1132 + * rule it out. 1133 + */ 1134 + if (boot_cpu_data.cpuid_level >= 0) 1135 + /* A CPU has %cr4 if and only if it has CPUID. */ 1136 + mmu_cr4_features = __read_cr4(); 1135 1137 1136 1138 memblock_set_current_limit(get_max_mapped()); 1137 1139 ··· 1187 1173 x86_init.paging.pagetable_init(); 1188 1174 1189 1175 kasan_init(); 1190 - 1191 - if (boot_cpu_data.cpuid_level >= 0) { 1192 - /* A CPU has %cr4 if and only if it has CPUID */ 1193 - mmu_cr4_features = __read_cr4(); 1194 - if (trampoline_cr4_features) 1195 - *trampoline_cr4_features = mmu_cr4_features; 1196 - } 1197 1176 1198 1177 #ifdef CONFIG_X86_32 1199 1178 /* sync back kernel address range */
+4
arch/x86/kernel/tsc.c
··· 22 22 #include <asm/nmi.h> 23 23 #include <asm/x86_init.h> 24 24 #include <asm/geode.h> 25 + #include <asm/apic.h> 25 26 26 27 unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ 27 28 EXPORT_SYMBOL(cpu_khz); ··· 1249 1248 pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n", 1250 1249 (unsigned long)tsc_khz / 1000, 1251 1250 (unsigned long)tsc_khz % 1000); 1251 + 1252 + /* Inform the TSC deadline clockevent devices about the recalibration */ 1253 + lapic_update_tsc_freq(); 1252 1254 1253 1255 out: 1254 1256 if (boot_cpu_has(X86_FEATURE_ART))
+11 -11
arch/x86/kernel/uprobes.c
··· 357 357 *cursor &= 0xfe; 358 358 } 359 359 /* 360 - * Similar treatment for VEX3 prefix. 361 - * TODO: add XOP/EVEX treatment when insn decoder supports them 360 + * Similar treatment for VEX3/EVEX prefix. 361 + * TODO: add XOP treatment when insn decoder supports them 362 362 */ 363 - if (insn->vex_prefix.nbytes == 3) { 363 + if (insn->vex_prefix.nbytes >= 3) { 364 364 /* 365 365 * vex2: c5 rvvvvLpp (has no b bit) 366 366 * vex3/xop: c4/8f rxbmmmmm wvvvvLpp 367 367 * evex: 62 rxbR00mm wvvvv1pp zllBVaaa 368 - * (evex will need setting of both b and x since 369 - * in non-sib encoding evex.x is 4th bit of MODRM.rm) 370 - * Setting VEX3.b (setting because it has inverted meaning): 368 + * Setting VEX3.b (setting because it has inverted meaning). 369 + * Setting EVEX.x since (in non-SIB encoding) EVEX.x 370 + * is the 4th bit of MODRM.rm, and needs the same treatment. 371 + * For VEX3-encoded insns, VEX3.x value has no effect in 372 + * non-SIB encoding, the change is superfluous but harmless. 371 373 */ 372 374 cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; 373 - *cursor |= 0x20; 375 + *cursor |= 0x60; 374 376 } 375 377 376 378 /* ··· 417 415 418 416 reg = MODRM_REG(insn); /* Fetch modrm.reg */ 419 417 reg2 = 0xff; /* Fetch vex.vvvv */ 420 - if (insn->vex_prefix.nbytes == 2) 421 - reg2 = insn->vex_prefix.bytes[1]; 422 - else if (insn->vex_prefix.nbytes == 3) 418 + if (insn->vex_prefix.nbytes) 423 419 reg2 = insn->vex_prefix.bytes[2]; 424 420 /* 425 - * TODO: add XOP, EXEV vvvv reading. 421 + * TODO: add XOP vvvv reading. 426 422 * 427 423 * vex.vvvv field is in bits 6-3, bits are inverted. 428 424 * But in 32-bit mode, high-order bit may be ignored.
+2
arch/x86/lib/hweight.S
··· 35 35 36 36 ENTRY(__sw_hweight64) 37 37 #ifdef CONFIG_X86_64 38 + pushq %rdi 38 39 pushq %rdx 39 40 40 41 movq %rdi, %rdx # w -> t ··· 61 60 shrq $56, %rax # w = w_tmp >> 56 62 61 63 62 popq %rdx 63 + popq %rdi 64 64 ret 65 65 #else /* CONFIG_X86_32 */ 66 66 /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
+1 -1
arch/x86/lib/kaslr.c
··· 19 19 #include <asm/cpufeature.h> 20 20 #include <asm/setup.h> 21 21 22 - #define debug_putstr(v) early_printk(v) 22 + #define debug_putstr(v) early_printk("%s", v) 23 23 #define has_cpuflag(f) boot_cpu_has(f) 24 24 #define get_boot_seed() kaslr_offset() 25 25 #endif
+11 -8
arch/x86/mm/ident_map.c
··· 3 3 * included by both the compressed kernel and the regular kernel. 4 4 */ 5 5 6 - static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page, 6 + static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page, 7 7 unsigned long addr, unsigned long end) 8 8 { 9 9 addr &= PMD_MASK; 10 10 for (; addr < end; addr += PMD_SIZE) { 11 11 pmd_t *pmd = pmd_page + pmd_index(addr); 12 12 13 - if (!pmd_present(*pmd)) 14 - set_pmd(pmd, __pmd(addr | pmd_flag)); 13 + if (pmd_present(*pmd)) 14 + continue; 15 + 16 + set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag)); 15 17 } 16 18 } 17 19 ··· 32 30 33 31 if (pud_present(*pud)) { 34 32 pmd = pmd_offset(pud, 0); 35 - ident_pmd_init(info->pmd_flag, pmd, addr, next); 33 + ident_pmd_init(info, pmd, addr, next); 36 34 continue; 37 35 } 38 36 pmd = (pmd_t *)info->alloc_pgt_page(info->context); 39 37 if (!pmd) 40 38 return -ENOMEM; 41 - ident_pmd_init(info->pmd_flag, pmd, addr, next); 39 + ident_pmd_init(info, pmd, addr, next); 42 40 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 43 41 } 44 42 ··· 46 44 } 47 45 48 46 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 49 - unsigned long addr, unsigned long end) 47 + unsigned long pstart, unsigned long pend) 50 48 { 49 + unsigned long addr = pstart + info->offset; 50 + unsigned long end = pend + info->offset; 51 51 unsigned long next; 52 52 int result; 53 - int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0; 54 53 55 54 for (; addr < end; addr = next) { 56 - pgd_t *pgd = pgd_page + pgd_index(addr) + off; 55 + pgd_t *pgd = pgd_page + pgd_index(addr); 57 56 pud_t *pud; 58 57 59 58 next = (addr & PGDIR_MASK) + PGDIR_SIZE;
+12 -2
arch/x86/mm/init.c
··· 122 122 return __va(pfn << PAGE_SHIFT); 123 123 } 124 124 125 - /* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ 126 - #define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) 125 + /* 126 + * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS. 127 + * With KASLR memory randomization, depending on the machine e820 memory 128 + * and the PUD alignment. We may need twice more pages when KASLR memory 129 + * randomization is enabled. 130 + */ 131 + #ifndef CONFIG_RANDOMIZE_MEMORY 132 + #define INIT_PGD_PAGE_COUNT 6 133 + #else 134 + #define INIT_PGD_PAGE_COUNT 12 135 + #endif 136 + #define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE) 127 137 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); 128 138 void __init early_alloc_pgt_buf(void) 129 139 {
+1 -1
arch/x86/mm/kaslr.c
··· 97 97 * add padding if needed (especially for memory hotplug support). 98 98 */ 99 99 BUG_ON(kaslr_regions[0].base != &page_offset_base); 100 - memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) + 100 + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + 101 101 CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; 102 102 103 103 /* Adapt phyiscal memory region size based on available memory */
+21
arch/x86/platform/efi/quirks.c
··· 254 254 for_each_efi_memory_desc(md) { 255 255 unsigned long long start = md->phys_addr; 256 256 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; 257 + size_t rm_size; 257 258 258 259 if (md->type != EFI_BOOT_SERVICES_CODE && 259 260 md->type != EFI_BOOT_SERVICES_DATA) ··· 263 262 /* Do not free, someone else owns it: */ 264 263 if (md->attribute & EFI_MEMORY_RUNTIME) 265 264 continue; 265 + 266 + /* 267 + * Nasty quirk: if all sub-1MB memory is used for boot 268 + * services, we can get here without having allocated the 269 + * real mode trampoline. It's too late to hand boot services 270 + * memory back to the memblock allocator, so instead 271 + * try to manually allocate the trampoline if needed. 272 + * 273 + * I've seen this on a Dell XPS 13 9350 with firmware 274 + * 1.4.4 with SGX enabled booting Linux via Fedora 24's 275 + * grub2-efi on a hard disk. (And no, I don't know why 276 + * this happened, but Linux should still try to boot rather 277 + * panicing early.) 278 + */ 279 + rm_size = real_mode_size_needed(); 280 + if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { 281 + set_real_mode_mem(start, rm_size); 282 + start += rm_size; 283 + size -= rm_size; 284 + } 266 285 267 286 free_bootmem_late(start, size); 268 287 }
+7 -4
arch/x86/platform/uv/bios_uv.c
··· 187 187 void uv_bios_init(void) 188 188 { 189 189 uv_systab = NULL; 190 - if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) { 190 + if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || 191 + !efi.uv_systab || efi_runtime_disabled()) { 191 192 pr_crit("UV: UVsystab: missing\n"); 192 193 return; 193 194 } ··· 200 199 return; 201 200 } 202 201 202 + /* Starting with UV4 the UV systab size is variable */ 203 203 if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) { 204 + int size = uv_systab->size; 205 + 204 206 iounmap(uv_systab); 205 - uv_systab = ioremap(efi.uv_systab, uv_systab->size); 207 + uv_systab = ioremap(efi.uv_systab, size); 206 208 if (!uv_systab) { 207 - pr_err("UV: UVsystab: ioremap(%d) failed!\n", 208 - uv_systab->size); 209 + pr_err("UV: UVsystab: ioremap(%d) failed!\n", size); 209 210 return; 210 211 } 211 212 }
+1 -1
arch/x86/power/hibernate_64.c
··· 87 87 struct x86_mapping_info info = { 88 88 .alloc_pgt_page = alloc_pgt_page, 89 89 .pmd_flag = __PAGE_KERNEL_LARGE_EXEC, 90 - .kernel_mapping = true, 90 + .offset = __PAGE_OFFSET, 91 91 }; 92 92 unsigned long mstart, mend; 93 93 pgd_t *pgd;
+38 -15
arch/x86/realmode/init.c
··· 1 1 #include <linux/io.h> 2 + #include <linux/slab.h> 2 3 #include <linux/memblock.h> 3 4 4 5 #include <asm/cacheflush.h> 5 6 #include <asm/pgtable.h> 6 7 #include <asm/realmode.h> 8 + #include <asm/tlbflush.h> 7 9 8 10 struct real_mode_header *real_mode_header; 9 11 u32 *trampoline_cr4_features; ··· 13 11 /* Hold the pgd entry used on booting additional CPUs */ 14 12 pgd_t trampoline_pgd_entry; 15 13 16 - void __init reserve_real_mode(void) 14 + void __init set_real_mode_mem(phys_addr_t mem, size_t size) 17 15 { 18 - phys_addr_t mem; 19 - unsigned char *base; 20 - size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); 16 + void *base = __va(mem); 21 17 22 - /* Has to be under 1M so we can execute real-mode AP code. */ 23 - mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); 24 - if (!mem) 25 - panic("Cannot allocate trampoline\n"); 26 - 27 - base = __va(mem); 28 - memblock_reserve(mem, size); 29 18 real_mode_header = (struct real_mode_header *) base; 30 19 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", 31 20 base, (unsigned long long)mem, size); 32 21 } 33 22 34 - void __init setup_real_mode(void) 23 + void __init reserve_real_mode(void) 24 + { 25 + phys_addr_t mem; 26 + size_t size = real_mode_size_needed(); 27 + 28 + if (!size) 29 + return; 30 + 31 + WARN_ON(slab_is_available()); 32 + 33 + /* Has to be under 1M so we can execute real-mode AP code. 
*/ 34 + mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); 35 + if (!mem) { 36 + pr_info("No sub-1M memory is available for the trampoline\n"); 37 + return; 38 + } 39 + 40 + memblock_reserve(mem, size); 41 + set_real_mode_mem(mem, size); 42 + } 43 + 44 + static void __init setup_real_mode(void) 35 45 { 36 46 u16 real_mode_seg; 37 47 const u32 *rel; ··· 98 84 99 85 trampoline_header->start = (u64) secondary_startup_64; 100 86 trampoline_cr4_features = &trampoline_header->cr4; 101 - *trampoline_cr4_features = __read_cr4(); 87 + *trampoline_cr4_features = mmu_cr4_features; 102 88 103 89 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); 104 90 trampoline_pgd[0] = trampoline_pgd_entry.pgd; ··· 114 100 * need to mark it executable at do_pre_smp_initcalls() at least, 115 101 * thus run it as a early_initcall(). 116 102 */ 117 - static int __init set_real_mode_permissions(void) 103 + static void __init set_real_mode_permissions(void) 118 104 { 119 105 unsigned char *base = (unsigned char *) real_mode_header; 120 106 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); ··· 133 119 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT); 134 120 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT); 135 121 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); 122 + } 123 + 124 + static int __init init_real_mode(void) 125 + { 126 + if (!real_mode_header) 127 + panic("Real mode trampoline was not allocated"); 128 + 129 + setup_real_mode(); 130 + set_real_mode_permissions(); 136 131 137 132 return 0; 138 133 } 139 - early_initcall(set_real_mode_permissions); 134 + early_initcall(init_real_mode);
+2 -1
drivers/acpi/nfit/core.c
··· 1527 1527 { 1528 1528 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1529 1529 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 1530 + const u32 STATUS_MASK = 0x80000037; 1530 1531 1531 1532 if (mmio->num_lines) 1532 1533 offset = to_interleave_offset(offset, mmio); 1533 1534 1534 - return readl(mmio->addr.base + offset); 1535 + return readl(mmio->addr.base + offset) & STATUS_MASK; 1535 1536 } 1536 1537 1537 1538 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
+1 -9
drivers/block/rbd.c
··· 3950 3950 bool need_put = !!rbd_dev->opts; 3951 3951 3952 3952 ceph_oid_destroy(&rbd_dev->header_oid); 3953 + ceph_oloc_destroy(&rbd_dev->header_oloc); 3953 3954 3954 3955 rbd_put_client(rbd_dev->rbd_client); 3955 3956 rbd_spec_put(rbd_dev->spec); ··· 5336 5335 goto err_out_client; 5337 5336 } 5338 5337 spec->pool_id = (u64)rc; 5339 - 5340 - /* The ceph file layout needs to fit pool id in 32 bits */ 5341 - 5342 - if (spec->pool_id > (u64)U32_MAX) { 5343 - rbd_warn(NULL, "pool id too large (%llu > %u)", 5344 - (unsigned long long)spec->pool_id, U32_MAX); 5345 - rc = -EIO; 5346 - goto err_out_client; 5347 - } 5348 5338 5349 5339 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); 5350 5340 if (!rbd_dev) {
+9 -19
drivers/block/virtio_blk.c
··· 391 391 num_vqs = 1; 392 392 393 393 vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); 394 - if (!vblk->vqs) { 394 + if (!vblk->vqs) 395 + return -ENOMEM; 396 + 397 + names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); 398 + callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); 399 + vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); 400 + if (!names || !callbacks || !vqs) { 395 401 err = -ENOMEM; 396 402 goto out; 397 403 } 398 - 399 - names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); 400 - if (!names) 401 - goto err_names; 402 - 403 - callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); 404 - if (!callbacks) 405 - goto err_callbacks; 406 - 407 - vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); 408 - if (!vqs) 409 - goto err_vqs; 410 404 411 405 for (i = 0; i < num_vqs; i++) { 412 406 callbacks[i] = virtblk_done; ··· 411 417 /* Discover virtqueues and write information to configuration. */ 412 418 err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); 413 419 if (err) 414 - goto err_find_vqs; 420 + goto out; 415 421 416 422 for (i = 0; i < num_vqs; i++) { 417 423 spin_lock_init(&vblk->vqs[i].lock); ··· 419 425 } 420 426 vblk->num_vqs = num_vqs; 421 427 422 - err_find_vqs: 428 + out: 423 429 kfree(vqs); 424 - err_vqs: 425 430 kfree(callbacks); 426 - err_callbacks: 427 431 kfree(names); 428 - err_names: 429 432 if (err) 430 433 kfree(vblk->vqs); 431 - out: 432 434 return err; 433 435 } 434 436
+23 -3
drivers/clocksource/arm_arch_timer.c
··· 8 8 * it under the terms of the GNU General Public License version 2 as 9 9 * published by the Free Software Foundation. 10 10 */ 11 + 12 + #define pr_fmt(fmt) "arm_arch_timer: " fmt 13 + 11 14 #include <linux/init.h> 12 15 #include <linux/kernel.h> 13 16 #include <linux/device.h> ··· 373 370 arch_timer_ppi[PHYS_NONSECURE_PPI]); 374 371 } 375 372 373 + static u32 check_ppi_trigger(int irq) 374 + { 375 + u32 flags = irq_get_trigger_type(irq); 376 + 377 + if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) { 378 + pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq); 379 + pr_warn("WARNING: Please fix your firmware\n"); 380 + flags = IRQF_TRIGGER_LOW; 381 + } 382 + 383 + return flags; 384 + } 385 + 376 386 static int arch_timer_starting_cpu(unsigned int cpu) 377 387 { 378 388 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 389 + u32 flags; 379 390 380 391 __arch_timer_setup(ARCH_CP15_TIMER, clk); 381 392 382 - enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); 393 + flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); 394 + enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); 383 395 384 - if (arch_timer_has_nonsecure_ppi()) 385 - enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); 396 + if (arch_timer_has_nonsecure_ppi()) { 397 + flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]); 398 + enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags); 399 + } 386 400 387 401 arch_counter_set_user_access(); 388 402 if (evtstrm_enable)
+20 -1
drivers/cpufreq/powernv-cpufreq.c
··· 145 145 /* Use following macros for conversions between pstate_id and index */ 146 146 static inline int idx_to_pstate(unsigned int i) 147 147 { 148 + if (unlikely(i >= powernv_pstate_info.nr_pstates)) { 149 + pr_warn_once("index %u is out of bound\n", i); 150 + return powernv_freqs[powernv_pstate_info.nominal].driver_data; 151 + } 152 + 148 153 return powernv_freqs[i].driver_data; 149 154 } 150 155 151 156 static inline unsigned int pstate_to_idx(int pstate) 152 157 { 158 + int min = powernv_freqs[powernv_pstate_info.min].driver_data; 159 + int max = powernv_freqs[powernv_pstate_info.max].driver_data; 160 + 161 + if (min > 0) { 162 + if (unlikely((pstate < max) || (pstate > min))) { 163 + pr_warn_once("pstate %d is out of bound\n", pstate); 164 + return powernv_pstate_info.nominal; 165 + } 166 + } else { 167 + if (unlikely((pstate > max) || (pstate < min))) { 168 + pr_warn_once("pstate %d is out of bound\n", pstate); 169 + return powernv_pstate_info.nominal; 170 + } 171 + } 153 172 /* 154 173 * abs() is deliberately used so that is works with 155 174 * both monotonically increasing and decreasing ··· 612 593 } else { 613 594 gpstate_idx = calc_global_pstate(gpstates->elapsed_time, 614 595 gpstates->highest_lpstate_idx, 615 - freq_data.pstate_id); 596 + gpstates->last_lpstate_idx); 616 597 } 617 598 618 599 /*
+5 -3
drivers/firmware/efi/capsule-loader.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/mutex.h> 18 18 #include <linux/efi.h> 19 + #include <linux/vmalloc.h> 19 20 20 21 #define NO_FURTHER_WRITE_ACTION -1 21 22 ··· 109 108 int ret; 110 109 void *cap_hdr_temp; 111 110 112 - cap_hdr_temp = kmap(cap_info->pages[0]); 111 + cap_hdr_temp = vmap(cap_info->pages, cap_info->index, 112 + VM_MAP, PAGE_KERNEL); 113 113 if (!cap_hdr_temp) { 114 - pr_debug("%s: kmap() failed\n", __func__); 114 + pr_debug("%s: vmap() failed\n", __func__); 115 115 return -EFAULT; 116 116 } 117 117 118 118 ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); 119 - kunmap(cap_info->pages[0]); 119 + vunmap(cap_hdr_temp); 120 120 if (ret) { 121 121 pr_err("%s: efi_capsule_update() failed\n", __func__); 122 122 return ret;
+3 -3
drivers/firmware/efi/capsule.c
··· 190 190 * map the capsule described by @capsule with its data in @pages and 191 191 * send it to the firmware via the UpdateCapsule() runtime service. 192 192 * 193 - * @capsule must be a virtual mapping of the first page in @pages 194 - * (@pages[0]) in the kernel address space. That is, a 195 - * capsule_header_t that describes the entire contents of the capsule 193 + * @capsule must be a virtual mapping of the complete capsule update in the 194 + * kernel address space, as the capsule can be consumed immediately. 195 + * A capsule_header_t that describes the entire contents of the capsule 196 196 * must be at the start of the first data page. 197 197 * 198 198 * Even though this function will validate that the firmware supports
+10
drivers/gpio/Kconfig
··· 874 874 LP3943 can be used as a GPIO expander which provides up to 16 GPIOs. 875 875 Open drain outputs are required for this usage. 876 876 877 + config GPIO_LP873X 878 + tristate "TI LP873X GPO" 879 + depends on MFD_TI_LP873X 880 + help 881 + This driver supports the GPO on TI Lp873x PMICs. 2 GPOs are present 882 + on LP873X PMICs. 883 + 884 + This driver can also be built as a module. If so, the module will be 885 + called gpio-lp873x. 886 + 877 887 config GPIO_MAX77620 878 888 tristate "GPIO support for PMIC MAX77620 and MAX20024" 879 889 depends on MFD_MAX77620
+1
drivers/gpio/Makefile
··· 56 56 obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o 57 57 obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o 58 58 obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o 59 + obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o 59 60 obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o 60 61 obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o 61 62 obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
+193
drivers/gpio/gpio-lp873x.c
··· 1 + /* 2 + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ 3 + * Keerthy <j-keerthy@ti.com> 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 + * kind, whether expressed or implied; without even the implied warranty 11 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License version 2 for more details. 13 + * 14 + * Based on the TPS65218 driver 15 + */ 16 + 17 + #include <linux/gpio.h> 18 + #include <linux/module.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/regmap.h> 21 + 22 + #include <linux/mfd/lp873x.h> 23 + 24 + #define BITS_PER_GPO 0x4 25 + #define LP873X_GPO_CTRL_OD 0x2 26 + 27 + struct lp873x_gpio { 28 + struct gpio_chip chip; 29 + struct lp873x *lp873; 30 + }; 31 + 32 + static int lp873x_gpio_get_direction(struct gpio_chip *chip, 33 + unsigned int offset) 34 + { 35 + /* This device is output only */ 36 + return 0; 37 + } 38 + 39 + static int lp873x_gpio_direction_input(struct gpio_chip *chip, 40 + unsigned int offset) 41 + { 42 + /* This device is output only */ 43 + return -EINVAL; 44 + } 45 + 46 + static int lp873x_gpio_direction_output(struct gpio_chip *chip, 47 + unsigned int offset, int value) 48 + { 49 + struct lp873x_gpio *gpio = gpiochip_get_data(chip); 50 + 51 + /* Set the initial value */ 52 + return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL, 53 + BIT(offset * BITS_PER_GPO), 54 + value ? 
BIT(offset * BITS_PER_GPO) : 0); 55 + } 56 + 57 + static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset) 58 + { 59 + struct lp873x_gpio *gpio = gpiochip_get_data(chip); 60 + int ret, val; 61 + 62 + ret = regmap_read(gpio->lp873->regmap, LP873X_REG_GPO_CTRL, &val); 63 + if (ret < 0) 64 + return ret; 65 + 66 + return val & BIT(offset * BITS_PER_GPO); 67 + } 68 + 69 + static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset, 70 + int value) 71 + { 72 + struct lp873x_gpio *gpio = gpiochip_get_data(chip); 73 + 74 + regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL, 75 + BIT(offset * BITS_PER_GPO), 76 + value ? BIT(offset * BITS_PER_GPO) : 0); 77 + } 78 + 79 + static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset) 80 + { 81 + struct lp873x_gpio *gpio = gpiochip_get_data(gc); 82 + int ret; 83 + 84 + switch (offset) { 85 + case 0: 86 + /* No MUX Set up Needed for GPO */ 87 + break; 88 + case 1: 89 + /* Setup the CLKIN_PIN_SEL MUX to GPO2 */ 90 + ret = regmap_update_bits(gpio->lp873->regmap, LP873X_REG_CONFIG, 91 + LP873X_CONFIG_CLKIN_PIN_SEL, 0); 92 + if (ret) 93 + return ret; 94 + 95 + break; 96 + default: 97 + return -EINVAL; 98 + } 99 + 100 + return 0; 101 + } 102 + 103 + static int lp873x_gpio_set_single_ended(struct gpio_chip *gc, 104 + unsigned int offset, 105 + enum single_ended_mode mode) 106 + { 107 + struct lp873x_gpio *gpio = gpiochip_get_data(gc); 108 + 109 + switch (mode) { 110 + case LINE_MODE_OPEN_DRAIN: 111 + return regmap_update_bits(gpio->lp873->regmap, 112 + LP873X_REG_GPO_CTRL, 113 + BIT(offset * BITS_PER_GPO + 114 + LP873X_GPO_CTRL_OD), 115 + BIT(offset * BITS_PER_GPO + 116 + LP873X_GPO_CTRL_OD)); 117 + case LINE_MODE_PUSH_PULL: 118 + return regmap_update_bits(gpio->lp873->regmap, 119 + LP873X_REG_GPO_CTRL, 120 + BIT(offset * BITS_PER_GPO + 121 + LP873X_GPO_CTRL_OD), 0); 122 + default: 123 + return -ENOTSUPP; 124 + } 125 + } 126 + 127 + static struct gpio_chip template_chip = { 128 + .label = 
"lp873x-gpio", 129 + .owner = THIS_MODULE, 130 + .request = lp873x_gpio_request, 131 + .get_direction = lp873x_gpio_get_direction, 132 + .direction_input = lp873x_gpio_direction_input, 133 + .direction_output = lp873x_gpio_direction_output, 134 + .get = lp873x_gpio_get, 135 + .set = lp873x_gpio_set, 136 + .set_single_ended = lp873x_gpio_set_single_ended, 137 + .base = -1, 138 + .ngpio = 2, 139 + .can_sleep = true, 140 + }; 141 + 142 + static int lp873x_gpio_probe(struct platform_device *pdev) 143 + { 144 + struct lp873x_gpio *gpio; 145 + int ret; 146 + 147 + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); 148 + if (!gpio) 149 + return -ENOMEM; 150 + 151 + platform_set_drvdata(pdev, gpio); 152 + 153 + gpio->lp873 = dev_get_drvdata(pdev->dev.parent); 154 + gpio->chip = template_chip; 155 + gpio->chip.parent = gpio->lp873->dev; 156 + 157 + ret = gpiochip_add_data(&gpio->chip, gpio); 158 + if (ret < 0) { 159 + dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret); 160 + return ret; 161 + } 162 + 163 + return 0; 164 + } 165 + 166 + static int lp873x_gpio_remove(struct platform_device *pdev) 167 + { 168 + struct lp873x_gpio *gpio = platform_get_drvdata(pdev); 169 + 170 + gpiochip_remove(&gpio->chip); 171 + 172 + return 0; 173 + } 174 + 175 + static const struct platform_device_id lp873x_gpio_id_table[] = { 176 + { "lp873x-gpio", }, 177 + { /* sentinel */ } 178 + }; 179 + MODULE_DEVICE_TABLE(platform, lp873x_gpio_id_table); 180 + 181 + static struct platform_driver lp873x_gpio_driver = { 182 + .driver = { 183 + .name = "lp873x-gpio", 184 + }, 185 + .probe = lp873x_gpio_probe, 186 + .remove = lp873x_gpio_remove, 187 + .id_table = lp873x_gpio_id_table, 188 + }; 189 + module_platform_driver(lp873x_gpio_driver); 190 + 191 + MODULE_AUTHOR("Keerthy <j-keerthy@ti.com>"); 192 + MODULE_DESCRIPTION("LP873X GPIO driver"); 193 + MODULE_LICENSE("GPL v2");
+2 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 305 305 struct drm_device *ddev = dev_get_drvdata(dev); 306 306 struct amdgpu_device *adev = ddev->dev_private; 307 307 char *table = NULL; 308 - int size, i; 308 + int size; 309 309 310 310 if (adev->pp_enabled) 311 311 size = amdgpu_dpm_get_pp_table(adev, &table); ··· 315 315 if (size >= PAGE_SIZE) 316 316 size = PAGE_SIZE - 1; 317 317 318 - for (i = 0; i < size; i++) { 319 - sprintf(buf + i, "%02x", table[i]); 320 - } 321 - sprintf(buf + i, "\n"); 318 + memcpy(buf, table, size); 322 319 323 320 return size; 324 321 }
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 335 335 if (unlikely(r)) { 336 336 goto out_cleanup; 337 337 } 338 - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 338 + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); 339 339 out_cleanup: 340 340 ttm_bo_mem_put(bo, &tmp_mem); 341 341 return r; ··· 368 368 if (unlikely(r)) { 369 369 return r; 370 370 } 371 - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 371 + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); 372 372 if (unlikely(r)) { 373 373 goto out_cleanup; 374 374 }
+1
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
··· 5779 5779 break; 5780 5780 case CHIP_KAVERI: 5781 5781 case CHIP_KABINI: 5782 + case CHIP_MULLINS: 5782 5783 default: BUG(); 5783 5784 } 5784 5785
+8 -4
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 270 270 271 271 static const u32 golden_settings_polaris11_a11[] = 272 272 { 273 - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, 273 + mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208, 274 + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, 274 275 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 275 276 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 276 277 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, ··· 280 279 mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, 281 280 mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, 282 281 mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, 283 - mmSQ_CONFIG, 0x07f80000, 0x07180000, 282 + mmSQ_CONFIG, 0x07f80000, 0x01180000, 284 283 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, 285 284 mmTCC_CTRL, 0x00100000, 0xf31fff7f, 286 285 mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, ··· 302 301 static const u32 golden_settings_polaris10_a11[] = 303 302 { 304 303 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, 305 - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, 306 - mmCB_HW_CONTROL_2, 0, 0x0f000000, 304 + mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208, 305 + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, 307 306 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 308 307 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 309 308 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, ··· 410 409 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, 411 410 mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002, 412 411 mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, 412 + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, 413 413 mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, 414 414 mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, 415 415 mmTCC_CTRL, 0x00100000, 0xf31fff7f, ··· 507 505 mmGB_GPU_ID, 0x0000000f, 0x00000000, 508 506 mmPA_SC_ENHANCE, 0xffffffff, 0x00000001, 509 507 mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, 508 + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, 510 509 mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, 511 510 mmTA_CNTL_AUX, 0x000f000f, 0x00010000, 511 + mmTCC_CTRL, 0x00100000, 0xf31fff7f, 512 512 mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, 513 
513 mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3, 514 514 mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
+1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 144 144 break; 145 145 case CHIP_KAVERI: 146 146 case CHIP_KABINI: 147 + case CHIP_MULLINS: 147 148 return 0; 148 149 default: BUG(); 149 150 }
+8
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 103 103 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 104 104 }; 105 105 106 + static const u32 golden_settings_stoney_common[] = 107 + { 108 + mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004, 109 + mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000 110 + }; 106 111 107 112 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) 108 113 { ··· 147 142 amdgpu_program_register_sequence(adev, 148 143 stoney_mgcg_cgcg_init, 149 144 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init)); 145 + amdgpu_program_register_sequence(adev, 146 + golden_settings_stoney_common, 147 + (const u32)ARRAY_SIZE(golden_settings_stoney_common)); 150 148 break; 151 149 default: 152 150 break;
+11 -2
drivers/gpu/drm/cirrus/cirrus_main.c
··· 185 185 goto out; 186 186 } 187 187 188 + /* 189 + * cirrus_modeset_init() is initializing/registering the emulated fbdev 190 + * and DRM internals can access/test some of the fields in 191 + * mode_config->funcs as part of the fbdev registration process. 192 + * Make sure dev->mode_config.funcs is properly set to avoid 193 + * dereferencing a NULL pointer. 194 + * FIXME: mode_config.funcs assignment should probably be done in 195 + * cirrus_modeset_init() (that's a common pattern seen in other DRM 196 + * drivers). 197 + */ 198 + dev->mode_config.funcs = &cirrus_mode_funcs; 188 199 r = cirrus_modeset_init(cdev); 189 200 if (r) { 190 201 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); 191 202 goto out; 192 203 } 193 - 194 - dev->mode_config.funcs = (void *)&cirrus_mode_funcs; 195 204 196 205 return 0; 197 206 out:
+3 -5
drivers/gpu/drm/drm_crtc.c
··· 1121 1121 struct drm_connector *connector; 1122 1122 int ret; 1123 1123 1124 - mutex_lock(&dev->mode_config.mutex); 1125 - 1126 - drm_for_each_connector(connector, dev) { 1124 + /* FIXME: taking the mode config mutex ends up in a clash with 1125 + * fbcon/backlight registration */ 1126 + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1127 1127 ret = drm_connector_register(connector); 1128 1128 if (ret) 1129 1129 goto err; 1130 1130 } 1131 - 1132 - mutex_unlock(&dev->mode_config.mutex); 1133 1131 1134 1132 return 0; 1135 1133
+22
drivers/gpu/drm/drm_edid.c
··· 74 74 #define EDID_QUIRK_FORCE_8BPC (1 << 8) 75 75 /* Force 12bpc */ 76 76 #define EDID_QUIRK_FORCE_12BPC (1 << 9) 77 + /* Force 6bpc */ 78 + #define EDID_QUIRK_FORCE_6BPC (1 << 10) 77 79 78 80 struct detailed_mode_closure { 79 81 struct drm_connector *connector; ··· 101 99 { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 }, 102 100 /* Unknown Acer */ 103 101 { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, 102 + 103 + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ 104 + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, 104 105 105 106 /* Belinea 10 15 55 */ 106 107 { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, ··· 3867 3862 /* HDMI deep color modes supported? Assign to info, if so */ 3868 3863 drm_assign_hdmi_deep_color_info(edid, info, connector); 3869 3864 3865 + /* 3866 + * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3? 3867 + * 3868 + * For such displays, the DFP spec 1.0, section 3.10 "EDID support" 3869 + * tells us to assume 8 bpc color depth if the EDID doesn't have 3870 + * extensions which tell otherwise. 3871 + */ 3872 + if ((info->bpc == 0) && (edid->revision < 4) && 3873 + (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) { 3874 + info->bpc = 8; 3875 + DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n", 3876 + connector->name, info->bpc); 3877 + } 3878 + 3870 3879 /* Only defined for 1.4 with digital displays */ 3871 3880 if (edid->revision < 4) 3872 3881 return; ··· 4100 4081 edid_fixup_preferred(connector, quirks); 4101 4082 4102 4083 drm_add_display_info(edid, &connector->display_info, connector); 4084 + 4085 + if (quirks & EDID_QUIRK_FORCE_6BPC) 4086 + connector->display_info.bpc = 6; 4103 4087 4104 4088 if (quirks & EDID_QUIRK_FORCE_8BPC) 4105 4089 connector->display_info.bpc = 8;
+6 -24
drivers/gpu/drm/i915/intel_display.c
··· 5691 5691 5692 5692 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) 5693 5693 { 5694 - unsigned int i; 5695 - 5696 - for (i = 0; i < 15; i++) { 5697 - if (skl_cdclk_pcu_ready(dev_priv)) 5698 - return true; 5699 - udelay(10); 5700 - } 5701 - 5702 - return false; 5694 + return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; 5703 5695 } 5704 5696 5705 5697 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) ··· 12106 12114 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 12107 12115 } 12108 12116 12109 - /* Clamp bpp to default limit on screens without EDID 1.4 */ 12110 - if (connector->base.display_info.bpc == 0) { 12111 - int type = connector->base.connector_type; 12112 - int clamp_bpp = 24; 12113 - 12114 - /* Fall back to 18 bpp when DP sink capability is unknown. */ 12115 - if (type == DRM_MODE_CONNECTOR_DisplayPort || 12116 - type == DRM_MODE_CONNECTOR_eDP) 12117 - clamp_bpp = 18; 12118 - 12119 - if (bpp > clamp_bpp) { 12120 - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", 12121 - bpp, clamp_bpp); 12122 - pipe_config->pipe_bpp = clamp_bpp; 12123 - } 12117 + /* Clamp bpp to 8 on screens without EDID 1.4 */ 12118 + if (connector->base.display_info.bpc == 0 && bpp > 24) { 12119 + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 12120 + bpp); 12121 + pipe_config->pipe_bpp = 24; 12124 12122 } 12125 12123 } 12126 12124
+11 -14
drivers/gpu/drm/i915/intel_fbdev.c
··· 782 782 struct intel_fbdev *ifbdev = dev_priv->fbdev; 783 783 struct fb_info *info; 784 784 785 - if (!ifbdev) 785 + if (!ifbdev || !ifbdev->fb) 786 786 return; 787 787 788 788 info = ifbdev->helper.fbdev; ··· 827 827 828 828 void intel_fbdev_output_poll_changed(struct drm_device *dev) 829 829 { 830 - struct drm_i915_private *dev_priv = to_i915(dev); 831 - if (dev_priv->fbdev) 832 - drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 830 + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 831 + 832 + if (ifbdev && ifbdev->fb) 833 + drm_fb_helper_hotplug_event(&ifbdev->helper); 833 834 } 834 835 835 836 void intel_fbdev_restore_mode(struct drm_device *dev) 836 837 { 837 - int ret; 838 - struct drm_i915_private *dev_priv = to_i915(dev); 839 - struct intel_fbdev *ifbdev = dev_priv->fbdev; 840 - struct drm_fb_helper *fb_helper; 838 + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 841 839 842 840 if (!ifbdev) 843 841 return; 844 842 845 843 intel_fbdev_sync(ifbdev); 844 + if (!ifbdev->fb) 845 + return; 846 846 847 - fb_helper = &ifbdev->helper; 848 - 849 - ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); 850 - if (ret) { 847 + if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) { 851 848 DRM_DEBUG("failed to restore crtc mode\n"); 852 849 } else { 853 - mutex_lock(&fb_helper->dev->struct_mutex); 850 + mutex_lock(&dev->struct_mutex); 854 851 intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); 855 - mutex_unlock(&fb_helper->dev->struct_mutex); 852 + mutex_unlock(&dev->struct_mutex); 856 853 } 857 854 }
+2 -1
drivers/gpu/drm/i915/intel_pm.c
··· 4892 4892 else 4893 4893 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 4894 4894 dev_priv->rps.last_adj = 0; 4895 - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4895 + I915_WRITE(GEN6_PMINTRMSK, 4896 + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 4896 4897 } 4897 4898 mutex_unlock(&dev_priv->rps.hw_lock); 4898 4899
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1151 1151 if (ret) 1152 1152 goto out; 1153 1153 1154 - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 1154 + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem); 1155 1155 out: 1156 1156 ttm_bo_mem_put(bo, &tmp_mem); 1157 1157 return ret; ··· 1179 1179 if (ret) 1180 1180 return ret; 1181 1181 1182 - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 1182 + ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem); 1183 1183 if (ret) 1184 1184 goto out; 1185 1185
+2 -2
drivers/gpu/drm/radeon/radeon_ttm.c
··· 346 346 if (unlikely(r)) { 347 347 goto out_cleanup; 348 348 } 349 - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 349 + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); 350 350 out_cleanup: 351 351 ttm_bo_mem_put(bo, &tmp_mem); 352 352 return r; ··· 379 379 if (unlikely(r)) { 380 380 return r; 381 381 } 382 - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 382 + r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); 383 383 if (unlikely(r)) { 384 384 goto out_cleanup; 385 385 }
+1
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
··· 125 125 126 126 /* Link drm_bridge to encoder */ 127 127 bridge->encoder = encoder; 128 + encoder->bridge = bridge; 128 129 129 130 ret = drm_bridge_attach(rcdu->ddev, bridge); 130 131 if (ret) {
+2 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 354 354 355 355 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 356 356 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 357 - ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); 357 + ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu, 358 + mem); 358 359 else if (bdev->driver->move) 359 360 ret = bdev->driver->move(bo, evict, interruptible, 360 361 no_wait_gpu, mem);
+9 -1
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 45 45 } 46 46 47 47 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 48 - bool evict, 48 + bool evict, bool interruptible, 49 49 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 50 50 { 51 51 struct ttm_tt *ttm = bo->ttm; ··· 53 53 int ret; 54 54 55 55 if (old_mem->mem_type != TTM_PL_SYSTEM) { 56 + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); 57 + 58 + if (unlikely(ret != 0)) { 59 + if (ret != -ERESTARTSYS) 60 + pr_err("Failed to expire sync object before unbinding TTM\n"); 61 + return ret; 62 + } 63 + 56 64 ttm_tt_unbind(ttm); 57 65 ttm_bo_free_old_node(bo); 58 66 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+32 -105
drivers/input/keyboard/cros_ec_keyb.c
··· 27 27 #include <linux/input.h> 28 28 #include <linux/interrupt.h> 29 29 #include <linux/kernel.h> 30 + #include <linux/notifier.h> 30 31 #include <linux/platform_device.h> 31 32 #include <linux/slab.h> 32 33 #include <linux/input/matrix_keypad.h> ··· 45 44 * @dev: Device pointer 46 45 * @idev: Input device 47 46 * @ec: Top level ChromeOS device to use to talk to EC 47 + * @notifier: interrupt event notifier for transport devices 48 48 */ 49 49 struct cros_ec_keyb { 50 50 unsigned int rows; ··· 59 57 struct device *dev; 60 58 struct input_dev *idev; 61 59 struct cros_ec_device *ec; 60 + struct notifier_block notifier; 62 61 }; 63 62 64 63 ··· 149 146 input_sync(ckdev->idev); 150 147 } 151 148 152 - static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state) 153 - { 154 - int ret = 0; 155 - struct cros_ec_command *msg; 156 - 157 - msg = kmalloc(sizeof(*msg) + ckdev->cols, GFP_KERNEL); 158 - if (!msg) 159 - return -ENOMEM; 160 - 161 - msg->version = 0; 162 - msg->command = EC_CMD_MKBP_STATE; 163 - msg->insize = ckdev->cols; 164 - msg->outsize = 0; 165 - 166 - ret = cros_ec_cmd_xfer(ckdev->ec, msg); 167 - if (ret < 0) { 168 - dev_err(ckdev->dev, "Error transferring EC message %d\n", ret); 169 - goto exit; 170 - } 171 - 172 - memcpy(kb_state, msg->data, ckdev->cols); 173 - exit: 174 - kfree(msg); 175 - return ret; 176 - } 177 - 178 - static irqreturn_t cros_ec_keyb_irq(int irq, void *data) 179 - { 180 - struct cros_ec_keyb *ckdev = data; 181 - struct cros_ec_device *ec = ckdev->ec; 182 - int ret; 183 - uint8_t kb_state[ckdev->cols]; 184 - 185 - if (device_may_wakeup(ec->dev)) 186 - pm_wakeup_event(ec->dev, 0); 187 - 188 - ret = cros_ec_keyb_get_state(ckdev, kb_state); 189 - if (ret >= 0) 190 - cros_ec_keyb_process(ckdev, kb_state, ret); 191 - else 192 - dev_err(ckdev->dev, "failed to get keyboard state: %d\n", ret); 193 - 194 - return IRQ_HANDLED; 195 - } 196 - 197 149 static int cros_ec_keyb_open(struct input_dev *dev) 198 150 { 199 151 struct 
cros_ec_keyb *ckdev = input_get_drvdata(dev); 200 - struct cros_ec_device *ec = ckdev->ec; 201 152 202 - return request_threaded_irq(ec->irq, NULL, cros_ec_keyb_irq, 203 - IRQF_TRIGGER_LOW | IRQF_ONESHOT, 204 - "cros_ec_keyb", ckdev); 153 + return blocking_notifier_chain_register(&ckdev->ec->event_notifier, 154 + &ckdev->notifier); 205 155 } 206 156 207 157 static void cros_ec_keyb_close(struct input_dev *dev) 208 158 { 209 159 struct cros_ec_keyb *ckdev = input_get_drvdata(dev); 210 - struct cros_ec_device *ec = ckdev->ec; 211 160 212 - free_irq(ec->irq, ckdev); 161 + blocking_notifier_chain_unregister(&ckdev->ec->event_notifier, 162 + &ckdev->notifier); 163 + } 164 + 165 + static int cros_ec_keyb_work(struct notifier_block *nb, 166 + unsigned long queued_during_suspend, void *_notify) 167 + { 168 + struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb, 169 + notifier); 170 + 171 + if (ckdev->ec->event_data.event_type != EC_MKBP_EVENT_KEY_MATRIX) 172 + return NOTIFY_DONE; 173 + /* 174 + * If EC is not the wake source, discard key state changes during 175 + * suspend. 
176 + */ 177 + if (queued_during_suspend) 178 + return NOTIFY_OK; 179 + if (ckdev->ec->event_size != ckdev->cols) { 180 + dev_err(ckdev->dev, 181 + "Discarded incomplete key matrix event.\n"); 182 + return NOTIFY_OK; 183 + } 184 + cros_ec_keyb_process(ckdev, ckdev->ec->event_data.data.key_matrix, 185 + ckdev->ec->event_size); 186 + return NOTIFY_OK; 213 187 } 214 188 215 189 /* ··· 245 265 if (!idev) 246 266 return -ENOMEM; 247 267 248 - if (!ec->irq) { 249 - dev_err(dev, "no EC IRQ specified\n"); 250 - return -EINVAL; 251 - } 252 - 253 268 ckdev->ec = ec; 269 + ckdev->notifier.notifier_call = cros_ec_keyb_work; 254 270 ckdev->dev = dev; 255 271 dev_set_drvdata(dev, ckdev); 256 272 ··· 287 311 return 0; 288 312 } 289 313 290 - #ifdef CONFIG_PM_SLEEP 291 - /* Clear any keys in the buffer */ 292 - static void cros_ec_keyb_clear_keyboard(struct cros_ec_keyb *ckdev) 293 - { 294 - uint8_t old_state[ckdev->cols]; 295 - uint8_t new_state[ckdev->cols]; 296 - unsigned long duration; 297 - int i, ret; 298 - 299 - /* 300 - * Keep reading until we see that the scan state does not change. 301 - * That indicates that we are done. 302 - * 303 - * Assume that the EC keyscan buffer is at most 32 deep. 304 - */ 305 - duration = jiffies; 306 - ret = cros_ec_keyb_get_state(ckdev, new_state); 307 - for (i = 1; !ret && i < 32; i++) { 308 - memcpy(old_state, new_state, sizeof(old_state)); 309 - ret = cros_ec_keyb_get_state(ckdev, new_state); 310 - if (0 == memcmp(old_state, new_state, sizeof(old_state))) 311 - break; 312 - } 313 - duration = jiffies - duration; 314 - dev_info(ckdev->dev, "Discarded %d keyscan(s) in %dus\n", i, 315 - jiffies_to_usecs(duration)); 316 - } 317 - 318 - static int cros_ec_keyb_resume(struct device *dev) 319 - { 320 - struct cros_ec_keyb *ckdev = dev_get_drvdata(dev); 321 - 322 - /* 323 - * When the EC is not a wake source, then it could not have caused the 324 - * resume, so we clear the EC's key scan buffer. If the EC was a 325 - * wake source (e.g. 
the lid is open and the user might press a key to 326 - * wake) then the key scan buffer should be preserved. 327 - */ 328 - if (!ckdev->ec->was_wake_device) 329 - cros_ec_keyb_clear_keyboard(ckdev); 330 - 331 - return 0; 332 - } 333 - 334 - #endif 335 - 336 - static SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume); 337 - 338 314 #ifdef CONFIG_OF 339 315 static const struct of_device_id cros_ec_keyb_of_match[] = { 340 316 { .compatible = "google,cros-ec-keyb" }, ··· 300 372 .driver = { 301 373 .name = "cros-ec-keyb", 302 374 .of_match_table = of_match_ptr(cros_ec_keyb_of_match), 303 - .pm = &cros_ec_keyb_pm_ops, 304 375 }, 305 376 }; 306 377
+26 -2
drivers/mfd/Kconfig
··· 112 112 help 113 113 Support for the BCM590xx PMUs from Broadcom 114 114 115 + config MFD_AC100 116 + tristate "X-Powers AC100" 117 + select MFD_CORE 118 + depends on SUNXI_RSB 119 + help 120 + If you say Y here you get support for the X-Powers AC100 audio codec 121 + IC. 122 + This driver include only the core APIs. You have to select individual 123 + components like codecs or RTC under the corresponding menus. 124 + 115 125 config MFD_AXP20X 116 126 tristate 117 127 select MFD_CORE ··· 862 852 different functionality of the device. 863 853 864 854 config MFD_RK808 865 - tristate "Rockchip RK808 Power Management chip" 855 + tristate "Rockchip RK808/RK818 Power Management Chip" 866 856 depends on I2C && OF 867 857 select MFD_CORE 868 858 select REGMAP_I2C 869 859 select REGMAP_IRQ 870 860 help 871 - If you say yes here you get support for the RK808 861 + If you say yes here you get support for the RK808 and RK818 872 862 Power Management chips. 873 863 This driver provides common support for accessing the device 874 864 through I2C interface. The device supports multiple sub-devices ··· 1233 1223 1234 1224 This driver can also be built as a module. If so, the module 1235 1225 will be called tps65217. 1226 + 1227 + config MFD_TI_LP873X 1228 + tristate "TI LP873X Power Management IC" 1229 + depends on I2C 1230 + select MFD_CORE 1231 + select REGMAP_I2C 1232 + help 1233 + If you say yes here then you get support for the LP873X series of 1234 + Power Management Integrated Circuits (PMIC). 1235 + These include voltage regulators, thermal protection, configurable 1236 + General Purpose Outputs (GPO) that are used in portable devices. 1237 + 1238 + This driver can also be built as a module. If so, the module 1239 + will be called lp873x. 1236 1240 1237 1241 config MFD_TPS65218 1238 1242 tristate "TI TPS65218 Power Management chips"
+4
drivers/mfd/Makefile
··· 22 22 obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o 23 23 obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o 24 24 25 + obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o 26 + 25 27 obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o 26 28 obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o 27 29 obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o ··· 115 113 obj-$(CONFIG_PMIC_DA9052) += da9052-core.o 116 114 obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o 117 115 obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o 116 + 117 + obj-$(CONFIG_MFD_AC100) += ac100.o 118 118 obj-$(CONFIG_MFD_AXP20X) += axp20x.o 119 119 obj-$(CONFIG_MFD_AXP20X_I2C) += axp20x-i2c.o 120 120 obj-$(CONFIG_MFD_AXP20X_RSB) += axp20x-rsb.o
+137
drivers/mfd/ac100.c
··· 1 + /* 2 + * MFD core driver for X-Powers' AC100 Audio Codec IC 3 + * 4 + * The AC100 is a highly integrated audio codec and RTC subsystem designed 5 + * for mobile applications. It has 3 I2S/PCM interfaces, a 2 channel DAC, 6 + * a 2 channel ADC with 5 inputs and a builtin mixer. The RTC subsystem has 7 + * 3 clock outputs. 8 + * 9 + * The audio codec and RTC parts are completely separate, sharing only the 10 + * host interface for access to its registers. 11 + * 12 + * Copyright (2016) Chen-Yu Tsai 13 + * 14 + * Author: Chen-Yu Tsai <wens@csie.org> 15 + * 16 + * This program is free software; you can redistribute it and/or modify 17 + * it under the terms of the GNU General Public License version 2 as 18 + * published by the Free Software Foundation. 19 + */ 20 + 21 + #include <linux/interrupt.h> 22 + #include <linux/kernel.h> 23 + #include <linux/mfd/core.h> 24 + #include <linux/mfd/ac100.h> 25 + #include <linux/module.h> 26 + #include <linux/of.h> 27 + #include <linux/regmap.h> 28 + #include <linux/sunxi-rsb.h> 29 + 30 + static const struct regmap_range ac100_writeable_ranges[] = { 31 + regmap_reg_range(AC100_CHIP_AUDIO_RST, AC100_I2S_SR_CTRL), 32 + regmap_reg_range(AC100_I2S1_CLK_CTRL, AC100_I2S1_MXR_GAIN), 33 + regmap_reg_range(AC100_I2S2_CLK_CTRL, AC100_I2S2_MXR_GAIN), 34 + regmap_reg_range(AC100_I2S3_CLK_CTRL, AC100_I2S3_SIG_PATH_CTRL), 35 + regmap_reg_range(AC100_ADC_DIG_CTRL, AC100_ADC_VOL_CTRL), 36 + regmap_reg_range(AC100_HMIC_CTRL1, AC100_HMIC_STATUS), 37 + regmap_reg_range(AC100_DAC_DIG_CTRL, AC100_DAC_MXR_GAIN), 38 + regmap_reg_range(AC100_ADC_APC_CTRL, AC100_LINEOUT_CTRL), 39 + regmap_reg_range(AC100_ADC_DAP_L_CTRL, AC100_ADC_DAP_OPT), 40 + regmap_reg_range(AC100_DAC_DAP_CTRL, AC100_DAC_DAP_OPT), 41 + regmap_reg_range(AC100_ADC_DAP_ENA, AC100_DAC_DAP_ENA), 42 + regmap_reg_range(AC100_SRC1_CTRL1, AC100_SRC1_CTRL2), 43 + regmap_reg_range(AC100_SRC2_CTRL1, AC100_SRC2_CTRL2), 44 + regmap_reg_range(AC100_CLK32K_ANALOG_CTRL, AC100_CLKOUT_CTRL3), 45 + 
regmap_reg_range(AC100_RTC_RST, AC100_RTC_UPD), 46 + regmap_reg_range(AC100_ALM_INT_ENA, AC100_ALM_INT_STA), 47 + regmap_reg_range(AC100_ALM_SEC, AC100_RTC_GP(15)), 48 + }; 49 + 50 + static const struct regmap_range ac100_volatile_ranges[] = { 51 + regmap_reg_range(AC100_CHIP_AUDIO_RST, AC100_PLL_CTRL2), 52 + regmap_reg_range(AC100_HMIC_STATUS, AC100_HMIC_STATUS), 53 + regmap_reg_range(AC100_ADC_DAP_L_STA, AC100_ADC_DAP_L_STA), 54 + regmap_reg_range(AC100_SRC1_CTRL1, AC100_SRC1_CTRL1), 55 + regmap_reg_range(AC100_SRC1_CTRL3, AC100_SRC2_CTRL1), 56 + regmap_reg_range(AC100_SRC2_CTRL3, AC100_SRC2_CTRL4), 57 + regmap_reg_range(AC100_RTC_RST, AC100_RTC_RST), 58 + regmap_reg_range(AC100_RTC_SEC, AC100_ALM_INT_STA), 59 + regmap_reg_range(AC100_ALM_SEC, AC100_ALM_UPD), 60 + }; 61 + 62 + static const struct regmap_access_table ac100_writeable_table = { 63 + .yes_ranges = ac100_writeable_ranges, 64 + .n_yes_ranges = ARRAY_SIZE(ac100_writeable_ranges), 65 + }; 66 + 67 + static const struct regmap_access_table ac100_volatile_table = { 68 + .yes_ranges = ac100_volatile_ranges, 69 + .n_yes_ranges = ARRAY_SIZE(ac100_volatile_ranges), 70 + }; 71 + 72 + static const struct regmap_config ac100_regmap_config = { 73 + .reg_bits = 8, 74 + .val_bits = 16, 75 + .wr_table = &ac100_writeable_table, 76 + .volatile_table = &ac100_volatile_table, 77 + .max_register = AC100_RTC_GP(15), 78 + .cache_type = REGCACHE_RBTREE, 79 + }; 80 + 81 + static struct mfd_cell ac100_cells[] = { 82 + { 83 + .name = "ac100-codec", 84 + .of_compatible = "x-powers,ac100-codec", 85 + }, { 86 + .name = "ac100-rtc", 87 + .of_compatible = "x-powers,ac100-rtc", 88 + }, 89 + }; 90 + 91 + static int ac100_rsb_probe(struct sunxi_rsb_device *rdev) 92 + { 93 + struct ac100_dev *ac100; 94 + int ret; 95 + 96 + ac100 = devm_kzalloc(&rdev->dev, sizeof(*ac100), GFP_KERNEL); 97 + if (!ac100) 98 + return -ENOMEM; 99 + 100 + ac100->dev = &rdev->dev; 101 + sunxi_rsb_device_set_drvdata(rdev, ac100); 102 + 103 + ac100->regmap = 
devm_regmap_init_sunxi_rsb(rdev, &ac100_regmap_config); 104 + if (IS_ERR(ac100->regmap)) { 105 + ret = PTR_ERR(ac100->regmap); 106 + dev_err(ac100->dev, "regmap init failed: %d\n", ret); 107 + return ret; 108 + } 109 + 110 + ret = devm_mfd_add_devices(ac100->dev, PLATFORM_DEVID_NONE, ac100_cells, 111 + ARRAY_SIZE(ac100_cells), NULL, 0, NULL); 112 + if (ret) { 113 + dev_err(ac100->dev, "failed to add MFD devices: %d\n", ret); 114 + return ret; 115 + } 116 + 117 + return 0; 118 + } 119 + 120 + static const struct of_device_id ac100_of_match[] = { 121 + { .compatible = "x-powers,ac100" }, 122 + { }, 123 + }; 124 + MODULE_DEVICE_TABLE(of, ac100_of_match); 125 + 126 + static struct sunxi_rsb_driver ac100_rsb_driver = { 127 + .driver = { 128 + .name = "ac100", 129 + .of_match_table = of_match_ptr(ac100_of_match), 130 + }, 131 + .probe = ac100_rsb_probe, 132 + }; 133 + module_sunxi_rsb_driver(ac100_rsb_driver); 134 + 135 + MODULE_DESCRIPTION("Audio codec MFD core driver for AC100"); 136 + MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); 137 + MODULE_LICENSE("GPL v2");
+28 -2
drivers/mfd/arizona-core.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/clk.h> 13 14 #include <linux/delay.h> 14 15 #include <linux/err.h> 15 16 #include <linux/gpio.h> ··· 50 49 case ARIZONA_32KZ_MCLK1: 51 50 ret = pm_runtime_get_sync(arizona->dev); 52 51 if (ret != 0) 53 - goto out; 52 + goto err_ref; 53 + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); 54 + if (ret != 0) 55 + goto err_pm; 56 + break; 57 + case ARIZONA_32KZ_MCLK2: 58 + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]); 59 + if (ret != 0) 60 + goto err_ref; 54 61 break; 55 62 } 56 63 ··· 67 58 ARIZONA_CLK_32K_ENA); 68 59 } 69 60 70 - out: 61 + err_pm: 62 + pm_runtime_put_sync(arizona->dev); 63 + err_ref: 71 64 if (ret != 0) 72 65 arizona->clk32k_ref--; 73 66 ··· 94 83 switch (arizona->pdata.clk32k_src) { 95 84 case ARIZONA_32KZ_MCLK1: 96 85 pm_runtime_put_sync(arizona->dev); 86 + clk_disable_unprepare(arizona->mclk[ARIZONA_MCLK1]); 87 + break; 88 + case ARIZONA_32KZ_MCLK2: 89 + clk_disable_unprepare(arizona->mclk[ARIZONA_MCLK2]); 97 90 break; 98 91 } 99 92 } ··· 1015 1000 1016 1001 int arizona_dev_init(struct arizona *arizona) 1017 1002 { 1003 + const char * const mclk_name[] = { "mclk1", "mclk2" }; 1018 1004 struct device *dev = arizona->dev; 1019 1005 const char *type_name = NULL; 1020 1006 unsigned int reg, val, mask; ··· 1031 1015 sizeof(arizona->pdata)); 1032 1016 else 1033 1017 arizona_of_get_core_pdata(arizona); 1018 + 1019 + BUILD_BUG_ON(ARRAY_SIZE(arizona->mclk) != ARRAY_SIZE(mclk_name)); 1020 + for (i = 0; i < ARRAY_SIZE(arizona->mclk); i++) { 1021 + arizona->mclk[i] = devm_clk_get(arizona->dev, mclk_name[i]); 1022 + if (IS_ERR(arizona->mclk[i])) { 1023 + dev_info(arizona->dev, "Failed to get %s: %ld\n", 1024 + mclk_name[i], PTR_ERR(arizona->mclk[i])); 1025 + arizona->mclk[i] = NULL; 1026 + } 1027 + } 1034 1028 1035 1029 regcache_cache_only(arizona->regmap, true); 1036 1030
+1
drivers/mfd/axp20x-rsb.c
··· 61 61 62 62 static const struct of_device_id axp20x_rsb_of_match[] = { 63 63 { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID }, 64 + { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, 64 65 { .compatible = "x-powers,axp809", .data = (void *)AXP809_ID }, 65 66 { }, 66 67 };
+72
drivers/mfd/axp20x.c
··· 38 38 "AXP221", 39 39 "AXP223", 40 40 "AXP288", 41 + "AXP806", 41 42 "AXP809", 42 43 }; 43 44 ··· 128 127 static const struct regmap_access_table axp288_volatile_table = { 129 128 .yes_ranges = axp288_volatile_ranges, 130 129 .n_yes_ranges = ARRAY_SIZE(axp288_volatile_ranges), 130 + }; 131 + 132 + static const struct regmap_range axp806_writeable_ranges[] = { 133 + regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_DATACACHE(3)), 134 + regmap_reg_range(AXP806_PWR_OUT_CTRL1, AXP806_CLDO3_V_CTRL), 135 + regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ2_EN), 136 + regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE), 137 + }; 138 + 139 + static const struct regmap_range axp806_volatile_ranges[] = { 140 + regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE), 141 + }; 142 + 143 + static const struct regmap_access_table axp806_writeable_table = { 144 + .yes_ranges = axp806_writeable_ranges, 145 + .n_yes_ranges = ARRAY_SIZE(axp806_writeable_ranges), 146 + }; 147 + 148 + static const struct regmap_access_table axp806_volatile_table = { 149 + .yes_ranges = axp806_volatile_ranges, 150 + .n_yes_ranges = ARRAY_SIZE(axp806_volatile_ranges), 131 151 }; 132 152 133 153 static struct resource axp152_pek_resources[] = { ··· 300 278 .cache_type = REGCACHE_RBTREE, 301 279 }; 302 280 281 + static const struct regmap_config axp806_regmap_config = { 282 + .reg_bits = 8, 283 + .val_bits = 8, 284 + .wr_table = &axp806_writeable_table, 285 + .volatile_table = &axp806_volatile_table, 286 + .max_register = AXP806_VREF_TEMP_WARN_L, 287 + .cache_type = REGCACHE_RBTREE, 288 + }; 289 + 303 290 #define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \ 304 291 [_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) } 305 292 ··· 440 409 INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1), 441 410 }; 442 411 412 + static const struct regmap_irq axp806_regmap_irqs[] = { 413 + INIT_REGMAP_IRQ(AXP806, DIE_TEMP_HIGH_LV1, 0, 0), 414 + INIT_REGMAP_IRQ(AXP806, DIE_TEMP_HIGH_LV2, 0, 1), 415 + 
INIT_REGMAP_IRQ(AXP806, DCDCA_V_LOW, 0, 3), 416 + INIT_REGMAP_IRQ(AXP806, DCDCB_V_LOW, 0, 4), 417 + INIT_REGMAP_IRQ(AXP806, DCDCC_V_LOW, 0, 5), 418 + INIT_REGMAP_IRQ(AXP806, DCDCD_V_LOW, 0, 6), 419 + INIT_REGMAP_IRQ(AXP806, DCDCE_V_LOW, 0, 7), 420 + INIT_REGMAP_IRQ(AXP806, PWROK_LONG, 1, 0), 421 + INIT_REGMAP_IRQ(AXP806, PWROK_SHORT, 1, 1), 422 + INIT_REGMAP_IRQ(AXP806, WAKEUP, 1, 4), 423 + INIT_REGMAP_IRQ(AXP806, PWROK_FALL, 1, 5), 424 + INIT_REGMAP_IRQ(AXP806, PWROK_RISE, 1, 6), 425 + }; 426 + 443 427 static const struct regmap_irq axp809_regmap_irqs[] = { 444 428 INIT_REGMAP_IRQ(AXP809, ACIN_OVER_V, 0, 7), 445 429 INIT_REGMAP_IRQ(AXP809, ACIN_PLUGIN, 0, 6), ··· 538 492 .num_irqs = ARRAY_SIZE(axp288_regmap_irqs), 539 493 .num_regs = 6, 540 494 495 + }; 496 + 497 + static const struct regmap_irq_chip axp806_regmap_irq_chip = { 498 + .name = "axp806", 499 + .status_base = AXP20X_IRQ1_STATE, 500 + .ack_base = AXP20X_IRQ1_STATE, 501 + .mask_base = AXP20X_IRQ1_EN, 502 + .mask_invert = true, 503 + .init_ack_masked = true, 504 + .irqs = axp806_regmap_irqs, 505 + .num_irqs = ARRAY_SIZE(axp806_regmap_irqs), 506 + .num_regs = 2, 541 507 }; 542 508 543 509 static const struct regmap_irq_chip axp809_regmap_irq_chip = { ··· 718 660 }, 719 661 }; 720 662 663 + static struct mfd_cell axp806_cells[] = { 664 + { 665 + .id = 2, 666 + .name = "axp20x-regulator", 667 + }, 668 + }; 669 + 721 670 static struct mfd_cell axp809_cells[] = { 722 671 { 723 672 .name = "axp20x-pek", 724 673 .num_resources = ARRAY_SIZE(axp809_pek_resources), 725 674 .resources = axp809_pek_resources, 726 675 }, { 676 + .id = 1, 727 677 .name = "axp20x-regulator", 728 678 }, 729 679 }; ··· 797 731 axp20x->nr_cells = ARRAY_SIZE(axp288_cells); 798 732 axp20x->regmap_cfg = &axp288_regmap_config; 799 733 axp20x->regmap_irq_chip = &axp288_regmap_irq_chip; 734 + break; 735 + case AXP806_ID: 736 + axp20x->nr_cells = ARRAY_SIZE(axp806_cells); 737 + axp20x->cells = axp806_cells; 738 + axp20x->regmap_cfg = 
&axp806_regmap_config; 739 + axp20x->regmap_irq_chip = &axp806_regmap_irq_chip; 800 740 break; 801 741 case AXP809_ID: 802 742 axp20x->nr_cells = ARRAY_SIZE(axp809_cells);
+55 -3
drivers/mfd/cros_ec.c
··· 23 23 #include <linux/module.h> 24 24 #include <linux/mfd/core.h> 25 25 #include <linux/mfd/cros_ec.h> 26 + #include <asm/unaligned.h> 26 27 27 28 #define CROS_EC_DEV_EC_INDEX 0 28 29 #define CROS_EC_DEV_PD_INDEX 1 ··· 50 49 .pdata_size = sizeof(pd_p), 51 50 }; 52 51 52 + static irqreturn_t ec_irq_thread(int irq, void *data) 53 + { 54 + struct cros_ec_device *ec_dev = data; 55 + int ret; 56 + 57 + if (device_may_wakeup(ec_dev->dev)) 58 + pm_wakeup_event(ec_dev->dev, 0); 59 + 60 + ret = cros_ec_get_next_event(ec_dev); 61 + if (ret > 0) 62 + blocking_notifier_call_chain(&ec_dev->event_notifier, 63 + 0, ec_dev); 64 + return IRQ_HANDLED; 65 + } 66 + 53 67 int cros_ec_register(struct cros_ec_device *ec_dev) 54 68 { 55 69 struct device *dev = ec_dev->dev; 56 70 int err = 0; 71 + 72 + BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier); 57 73 58 74 ec_dev->max_request = sizeof(struct ec_params_hello); 59 75 ec_dev->max_response = sizeof(struct ec_response_get_protocol_info); ··· 88 70 89 71 cros_ec_query_all(ec_dev); 90 72 73 + if (ec_dev->irq) { 74 + err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread, 75 + IRQF_TRIGGER_LOW | IRQF_ONESHOT, 76 + "chromeos-ec", ec_dev); 77 + if (err) { 78 + dev_err(dev, "Failed to request IRQ %d: %d", 79 + ec_dev->irq, err); 80 + return err; 81 + } 82 + } 83 + 91 84 err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1, 92 85 NULL, ec_dev->irq, NULL); 93 86 if (err) { 94 87 dev_err(dev, 95 88 "Failed to register Embedded Controller subdevice %d\n", 96 89 err); 97 - return err; 90 + goto fail_mfd; 98 91 } 99 92 100 93 if (ec_dev->max_passthru) { ··· 123 94 dev_err(dev, 124 95 "Failed to register Power Delivery subdevice %d\n", 125 96 err); 126 - return err; 97 + goto fail_mfd; 127 98 } 128 99 } 129 100 ··· 132 103 if (err) { 133 104 mfd_remove_devices(dev); 134 105 dev_err(dev, "Failed to register sub-devices\n"); 135 - return err; 106 + goto fail_mfd; 136 107 } 137 108 } 138 109 139 110 dev_info(dev, "Chrome EC 
device registered\n"); 140 111 141 112 return 0; 113 + 114 + fail_mfd: 115 + if (ec_dev->irq) 116 + free_irq(ec_dev->irq, ec_dev); 117 + return err; 142 118 } 143 119 EXPORT_SYMBOL(cros_ec_register); 144 120 ··· 170 136 } 171 137 EXPORT_SYMBOL(cros_ec_suspend); 172 138 139 + static void cros_ec_drain_events(struct cros_ec_device *ec_dev) 140 + { 141 + while (cros_ec_get_next_event(ec_dev) > 0) 142 + blocking_notifier_call_chain(&ec_dev->event_notifier, 143 + 1, ec_dev); 144 + } 145 + 173 146 int cros_ec_resume(struct cros_ec_device *ec_dev) 174 147 { 175 148 enable_irq(ec_dev->irq); 176 149 150 + /* 151 + * In some cases, we need to distinguish between events that occur 152 + * during suspend if the EC is not a wake source. For example, 153 + * keypresses during suspend should be discarded if it does not wake 154 + * the system. 155 + * 156 + * If the EC is not a wake source, drain the event queue and mark them 157 + * as "queued during suspend". 158 + */ 177 159 if (ec_dev->wake_enabled) { 178 160 disable_irq_wake(ec_dev->irq); 179 161 ec_dev->wake_enabled = 0; 162 + } else { 163 + cros_ec_drain_events(ec_dev); 180 164 } 181 165 182 166 return 0;
+99
drivers/mfd/lp873x.c
··· 1 + /* 2 + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ 3 + * 4 + * Author: Keerthy <j-keerthy@ti.com> 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License as 8 + * published by the Free Software Foundation version 2. 9 + * 10 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 + * kind, whether express or implied; without even the implied warranty 12 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + 16 + #include <linux/interrupt.h> 17 + #include <linux/mfd/core.h> 18 + #include <linux/module.h> 19 + #include <linux/of_device.h> 20 + #include <linux/regmap.h> 21 + 22 + #include <linux/mfd/lp873x.h> 23 + 24 + static const struct regmap_config lp873x_regmap_config = { 25 + .reg_bits = 8, 26 + .val_bits = 8, 27 + .max_register = LP873X_REG_MAX, 28 + }; 29 + 30 + static const struct mfd_cell lp873x_cells[] = { 31 + { .name = "lp873x-regulator", }, 32 + { .name = "lp873x-gpio", }, 33 + }; 34 + 35 + static int lp873x_probe(struct i2c_client *client, 36 + const struct i2c_device_id *ids) 37 + { 38 + struct lp873x *lp873; 39 + int ret; 40 + unsigned int otpid; 41 + 42 + lp873 = devm_kzalloc(&client->dev, sizeof(*lp873), GFP_KERNEL); 43 + if (!lp873) 44 + return -ENOMEM; 45 + 46 + lp873->dev = &client->dev; 47 + 48 + lp873->regmap = devm_regmap_init_i2c(client, &lp873x_regmap_config); 49 + if (IS_ERR(lp873->regmap)) { 50 + ret = PTR_ERR(lp873->regmap); 51 + dev_err(lp873->dev, 52 + "Failed to initialize register map: %d\n", ret); 53 + return ret; 54 + } 55 + 56 + mutex_init(&lp873->lock); 57 + 58 + ret = regmap_read(lp873->regmap, LP873X_REG_OTP_REV, &otpid); 59 + if (ret) { 60 + dev_err(lp873->dev, "Failed to read OTP ID\n"); 61 + return ret; 62 + } 63 + 64 + lp873->rev = otpid & LP873X_OTP_REV_OTP_ID; 65 + 66 + i2c_set_clientdata(client, lp873); 67 + 68 
+ ret = mfd_add_devices(lp873->dev, PLATFORM_DEVID_AUTO, lp873x_cells, 69 + ARRAY_SIZE(lp873x_cells), NULL, 0, NULL); 70 + 71 + return ret; 72 + } 73 + 74 + static const struct of_device_id of_lp873x_match_table[] = { 75 + { .compatible = "ti,lp8733", }, 76 + { .compatible = "ti,lp8732", }, 77 + {} 78 + }; 79 + MODULE_DEVICE_TABLE(of, of_lp873x_match_table); 80 + 81 + static const struct i2c_device_id lp873x_id_table[] = { 82 + { "lp873x", 0 }, 83 + { }, 84 + }; 85 + MODULE_DEVICE_TABLE(i2c, lp873x_id_table); 86 + 87 + static struct i2c_driver lp873x_driver = { 88 + .driver = { 89 + .name = "lp873x", 90 + .of_match_table = of_lp873x_match_table, 91 + }, 92 + .probe = lp873x_probe, 93 + .id_table = lp873x_id_table, 94 + }; 95 + module_i2c_driver(lp873x_driver); 96 + 97 + MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>"); 98 + MODULE_DESCRIPTION("LP873X chip family Multi-Function Device driver"); 99 + MODULE_LICENSE("GPL v2");
+1
drivers/mfd/pm8921-core.c
··· 309 309 }; 310 310 311 311 static const struct of_device_id pm8921_id_table[] = { 312 + { .compatible = "qcom,pm8018", }, 312 313 { .compatible = "qcom,pm8058", }, 313 314 { .compatible = "qcom,pm8921", }, 314 315 { }
+51
drivers/mfd/qcom_rpm.c
··· 388 388 .ack_sel_size = 7, 389 389 }; 390 390 391 + static const struct qcom_rpm_resource mdm9615_rpm_resource_table[] = { 392 + [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 }, 393 + [QCOM_RPM_SYS_FABRIC_CLK] = { 26, 10, 9, 1 }, 394 + [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 27, 11, 11, 1 }, 395 + [QCOM_RPM_SFPB_CLK] = { 28, 12, 12, 1 }, 396 + [QCOM_RPM_CFPB_CLK] = { 29, 13, 13, 1 }, 397 + [QCOM_RPM_EBI1_CLK] = { 30, 14, 16, 1 }, 398 + [QCOM_RPM_APPS_FABRIC_HALT] = { 31, 15, 22, 2 }, 399 + [QCOM_RPM_APPS_FABRIC_MODE] = { 33, 16, 23, 3 }, 400 + [QCOM_RPM_APPS_FABRIC_IOCTL] = { 36, 17, 24, 1 }, 401 + [QCOM_RPM_APPS_FABRIC_ARB] = { 37, 18, 25, 27 }, 402 + [QCOM_RPM_PM8018_SMPS1] = { 64, 19, 30, 2 }, 403 + [QCOM_RPM_PM8018_SMPS2] = { 66, 21, 31, 2 }, 404 + [QCOM_RPM_PM8018_SMPS3] = { 68, 23, 32, 2 }, 405 + [QCOM_RPM_PM8018_SMPS4] = { 70, 25, 33, 2 }, 406 + [QCOM_RPM_PM8018_SMPS5] = { 72, 27, 34, 2 }, 407 + [QCOM_RPM_PM8018_LDO1] = { 74, 29, 35, 2 }, 408 + [QCOM_RPM_PM8018_LDO2] = { 76, 31, 36, 2 }, 409 + [QCOM_RPM_PM8018_LDO3] = { 78, 33, 37, 2 }, 410 + [QCOM_RPM_PM8018_LDO4] = { 80, 35, 38, 2 }, 411 + [QCOM_RPM_PM8018_LDO5] = { 82, 37, 39, 2 }, 412 + [QCOM_RPM_PM8018_LDO6] = { 84, 39, 40, 2 }, 413 + [QCOM_RPM_PM8018_LDO7] = { 86, 41, 41, 2 }, 414 + [QCOM_RPM_PM8018_LDO8] = { 88, 43, 42, 2 }, 415 + [QCOM_RPM_PM8018_LDO9] = { 90, 45, 43, 2 }, 416 + [QCOM_RPM_PM8018_LDO10] = { 92, 47, 44, 2 }, 417 + [QCOM_RPM_PM8018_LDO11] = { 94, 49, 45, 2 }, 418 + [QCOM_RPM_PM8018_LDO12] = { 96, 51, 46, 2 }, 419 + [QCOM_RPM_PM8018_LDO13] = { 98, 53, 47, 2 }, 420 + [QCOM_RPM_PM8018_LDO14] = { 100, 55, 48, 2 }, 421 + [QCOM_RPM_PM8018_LVS1] = { 102, 57, 49, 1 }, 422 + [QCOM_RPM_PM8018_NCP] = { 103, 58, 80, 2 }, 423 + [QCOM_RPM_CXO_BUFFERS] = { 105, 60, 81, 1 }, 424 + [QCOM_RPM_USB_OTG_SWITCH] = { 106, 61, 82, 1 }, 425 + [QCOM_RPM_HDMI_SWITCH] = { 107, 62, 83, 1 }, 426 + [QCOM_RPM_VOLTAGE_CORNER] = { 109, 64, 87, 1 }, 427 + }; 428 + 429 + static const struct qcom_rpm_data mdm9615_template = { 430 + 
.version = 3, 431 + .resource_table = mdm9615_rpm_resource_table, 432 + .n_resources = ARRAY_SIZE(mdm9615_rpm_resource_table), 433 + .req_ctx_off = 3, 434 + .req_sel_off = 11, 435 + .ack_ctx_off = 15, 436 + .ack_sel_off = 23, 437 + .req_sel_size = 4, 438 + .ack_sel_size = 7, 439 + }; 440 + 391 441 static const struct of_device_id qcom_rpm_of_match[] = { 392 442 { .compatible = "qcom,rpm-apq8064", .data = &apq8064_template }, 393 443 { .compatible = "qcom,rpm-msm8660", .data = &msm8660_template }, 394 444 { .compatible = "qcom,rpm-msm8960", .data = &msm8960_template }, 395 445 { .compatible = "qcom,rpm-ipq8064", .data = &ipq806x_template }, 446 + { .compatible = "qcom,rpm-mdm9615", .data = &mdm9615_template }, 396 447 { } 397 448 }; 398 449 MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
+201 -37
drivers/mfd/rk808.c
··· 1 1 /* 2 - * MFD core driver for Rockchip RK808 2 + * MFD core driver for Rockchip RK808/RK818 3 3 * 4 4 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd 5 5 * 6 6 * Author: Chris Zhong <zyw@rock-chips.com> 7 7 * Author: Zhang Qing <zhangqing@rock-chips.com> 8 + * 9 + * Copyright (C) 2016 PHYTEC Messtechnik GmbH 10 + * 11 + * Author: Wadim Egorov <w.egorov@phytec.de> 8 12 * 9 13 * This program is free software; you can redistribute it and/or modify it 10 14 * under the terms and conditions of the GNU General Public License, ··· 25 21 #include <linux/mfd/rk808.h> 26 22 #include <linux/mfd/core.h> 27 23 #include <linux/module.h> 24 + #include <linux/of_device.h> 28 25 #include <linux/regmap.h> 29 26 30 27 struct rk808_reg_data { ··· 62 57 return false; 63 58 } 64 59 60 + static const struct regmap_config rk818_regmap_config = { 61 + .reg_bits = 8, 62 + .val_bits = 8, 63 + .max_register = RK818_USB_CTRL_REG, 64 + .cache_type = REGCACHE_RBTREE, 65 + .volatile_reg = rk808_is_volatile_reg, 66 + }; 67 + 65 68 static const struct regmap_config rk808_regmap_config = { 66 69 .reg_bits = 8, 67 70 .val_bits = 8, ··· 92 79 { 93 80 .name = "rk808-rtc", 94 81 .num_resources = ARRAY_SIZE(rtc_resources), 95 - .resources = &rtc_resources[0], 82 + .resources = rtc_resources, 96 83 }, 97 84 }; 98 85 99 - static const struct rk808_reg_data pre_init_reg[] = { 86 + static const struct mfd_cell rk818s[] = { 87 + { .name = "rk808-clkout", }, 88 + { .name = "rk808-regulator", }, 89 + { 90 + .name = "rk808-rtc", 91 + .num_resources = ARRAY_SIZE(rtc_resources), 92 + .resources = rtc_resources, 93 + }, 94 + }; 95 + 96 + static const struct rk808_reg_data rk808_pre_init_reg[] = { 100 97 { RK808_BUCK3_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_150MA }, 101 98 { RK808_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_200MA }, 102 99 { RK808_BOOST_CONFIG_REG, BOOST_ILMIN_MASK, BOOST_ILMIN_100MA }, ··· 114 91 { RK808_BUCK2_CONFIG_REG, BUCK2_RATE_MASK, BUCK_ILMIN_200MA }, 115 92 { 
RK808_DCDC_UV_ACT_REG, BUCK_UV_ACT_MASK, BUCK_UV_ACT_DISABLE}, 116 93 { RK808_VB_MON_REG, MASK_ALL, VB_LO_ACT | 94 + VB_LO_SEL_3500MV }, 95 + }; 96 + 97 + static const struct rk808_reg_data rk818_pre_init_reg[] = { 98 + /* improve efficiency */ 99 + { RK818_BUCK2_CONFIG_REG, BUCK2_RATE_MASK, BUCK_ILMIN_250MA }, 100 + { RK818_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_250MA }, 101 + { RK818_BOOST_CONFIG_REG, BOOST_ILMIN_MASK, BOOST_ILMIN_100MA }, 102 + { RK818_USB_CTRL_REG, RK818_USB_ILIM_SEL_MASK, 103 + RK818_USB_ILMIN_2000MA }, 104 + /* close charger when usb lower then 3.4V */ 105 + { RK818_USB_CTRL_REG, RK818_USB_CHG_SD_VSEL_MASK, 106 + (0x7 << 4) }, 107 + /* no action when vref */ 108 + { RK818_H5V_EN_REG, BIT(1), RK818_REF_RDY_CTRL }, 109 + /* enable HDMI 5V */ 110 + { RK818_H5V_EN_REG, BIT(0), RK818_H5V_EN }, 111 + { RK808_VB_MON_REG, MASK_ALL, VB_LO_ACT | 117 112 VB_LO_SEL_3500MV }, 118 113 }; 119 114 ··· 177 136 }, 178 137 }; 179 138 139 + static const struct regmap_irq rk818_irqs[] = { 140 + /* INT_STS */ 141 + [RK818_IRQ_VOUT_LO] = { 142 + .mask = RK818_IRQ_VOUT_LO_MSK, 143 + .reg_offset = 0, 144 + }, 145 + [RK818_IRQ_VB_LO] = { 146 + .mask = RK818_IRQ_VB_LO_MSK, 147 + .reg_offset = 0, 148 + }, 149 + [RK818_IRQ_PWRON] = { 150 + .mask = RK818_IRQ_PWRON_MSK, 151 + .reg_offset = 0, 152 + }, 153 + [RK818_IRQ_PWRON_LP] = { 154 + .mask = RK818_IRQ_PWRON_LP_MSK, 155 + .reg_offset = 0, 156 + }, 157 + [RK818_IRQ_HOTDIE] = { 158 + .mask = RK818_IRQ_HOTDIE_MSK, 159 + .reg_offset = 0, 160 + }, 161 + [RK818_IRQ_RTC_ALARM] = { 162 + .mask = RK818_IRQ_RTC_ALARM_MSK, 163 + .reg_offset = 0, 164 + }, 165 + [RK818_IRQ_RTC_PERIOD] = { 166 + .mask = RK818_IRQ_RTC_PERIOD_MSK, 167 + .reg_offset = 0, 168 + }, 169 + [RK818_IRQ_USB_OV] = { 170 + .mask = RK818_IRQ_USB_OV_MSK, 171 + .reg_offset = 0, 172 + }, 173 + 174 + /* INT_STS2 */ 175 + [RK818_IRQ_PLUG_IN] = { 176 + .mask = RK818_IRQ_PLUG_IN_MSK, 177 + .reg_offset = 1, 178 + }, 179 + [RK818_IRQ_PLUG_OUT] = { 180 + .mask = 
RK818_IRQ_PLUG_OUT_MSK, 181 + .reg_offset = 1, 182 + }, 183 + [RK818_IRQ_CHG_OK] = { 184 + .mask = RK818_IRQ_CHG_OK_MSK, 185 + .reg_offset = 1, 186 + }, 187 + [RK818_IRQ_CHG_TE] = { 188 + .mask = RK818_IRQ_CHG_TE_MSK, 189 + .reg_offset = 1, 190 + }, 191 + [RK818_IRQ_CHG_TS1] = { 192 + .mask = RK818_IRQ_CHG_TS1_MSK, 193 + .reg_offset = 1, 194 + }, 195 + [RK818_IRQ_TS2] = { 196 + .mask = RK818_IRQ_TS2_MSK, 197 + .reg_offset = 1, 198 + }, 199 + [RK818_IRQ_CHG_CVTLIM] = { 200 + .mask = RK818_IRQ_CHG_CVTLIM_MSK, 201 + .reg_offset = 1, 202 + }, 203 + [RK818_IRQ_DISCHG_ILIM] = { 204 + .mask = RK818_IRQ_DISCHG_ILIM_MSK, 205 + .reg_offset = 1, 206 + }, 207 + }; 208 + 180 209 static struct regmap_irq_chip rk808_irq_chip = { 181 210 .name = "rk808", 182 211 .irqs = rk808_irqs, ··· 256 145 .status_base = RK808_INT_STS_REG1, 257 146 .mask_base = RK808_INT_STS_MSK_REG1, 258 147 .ack_base = RK808_INT_STS_REG1, 148 + .init_ack_masked = true, 149 + }; 150 + 151 + static struct regmap_irq_chip rk818_irq_chip = { 152 + .name = "rk818", 153 + .irqs = rk818_irqs, 154 + .num_irqs = ARRAY_SIZE(rk818_irqs), 155 + .num_regs = 2, 156 + .irq_reg_stride = 2, 157 + .status_base = RK818_INT_STS_REG1, 158 + .mask_base = RK818_INT_STS_MSK_REG1, 159 + .ack_base = RK818_INT_STS_REG1, 259 160 .init_ack_masked = true, 260 161 }; 261 162 ··· 290 167 dev_err(&rk808_i2c_client->dev, "power off error!\n"); 291 168 } 292 169 170 + static const struct of_device_id rk808_of_match[] = { 171 + { .compatible = "rockchip,rk808" }, 172 + { .compatible = "rockchip,rk818" }, 173 + { }, 174 + }; 175 + MODULE_DEVICE_TABLE(of, rk808_of_match); 176 + 293 177 static int rk808_probe(struct i2c_client *client, 294 178 const struct i2c_device_id *id) 295 179 { 296 180 struct device_node *np = client->dev.of_node; 297 181 struct rk808 *rk808; 182 + const struct rk808_reg_data *pre_init_reg; 183 + const struct mfd_cell *cells; 184 + int nr_pre_init_regs; 185 + int nr_cells; 298 186 int pm_off = 0; 299 187 int ret; 300 188 
int i; 189 + 190 + rk808 = devm_kzalloc(&client->dev, sizeof(*rk808), GFP_KERNEL); 191 + if (!rk808) 192 + return -ENOMEM; 193 + 194 + rk808->variant = i2c_smbus_read_word_data(client, RK808_ID_MSB); 195 + if (rk808->variant < 0) { 196 + dev_err(&client->dev, "Failed to read the chip id at 0x%02x\n", 197 + RK808_ID_MSB); 198 + return rk808->variant; 199 + } 200 + 201 + dev_dbg(&client->dev, "Chip id: 0x%x\n", (unsigned int)rk808->variant); 202 + 203 + switch (rk808->variant) { 204 + case RK808_ID: 205 + rk808->regmap_cfg = &rk808_regmap_config; 206 + rk808->regmap_irq_chip = &rk808_irq_chip; 207 + pre_init_reg = rk808_pre_init_reg; 208 + nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg); 209 + cells = rk808s; 210 + nr_cells = ARRAY_SIZE(rk808s); 211 + break; 212 + case RK818_ID: 213 + rk808->regmap_cfg = &rk818_regmap_config; 214 + rk808->regmap_irq_chip = &rk818_irq_chip; 215 + pre_init_reg = rk818_pre_init_reg; 216 + nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg); 217 + cells = rk818s; 218 + nr_cells = ARRAY_SIZE(rk818s); 219 + break; 220 + default: 221 + dev_err(&client->dev, "Unsupported RK8XX ID %lu\n", 222 + rk808->variant); 223 + return -EINVAL; 224 + } 225 + 226 + rk808->i2c = client; 227 + i2c_set_clientdata(client, rk808); 228 + 229 + rk808->regmap = devm_regmap_init_i2c(client, rk808->regmap_cfg); 230 + if (IS_ERR(rk808->regmap)) { 231 + dev_err(&client->dev, "regmap initialization failed\n"); 232 + return PTR_ERR(rk808->regmap); 233 + } 301 234 302 235 if (!client->irq) { 303 236 dev_err(&client->dev, "No interrupt support, no core IRQ\n"); 304 237 return -EINVAL; 305 238 } 306 239 307 - rk808 = devm_kzalloc(&client->dev, sizeof(*rk808), GFP_KERNEL); 308 - if (!rk808) 309 - return -ENOMEM; 310 - 311 - rk808->regmap = devm_regmap_init_i2c(client, &rk808_regmap_config); 312 - if (IS_ERR(rk808->regmap)) { 313 - dev_err(&client->dev, "regmap initialization failed\n"); 314 - return PTR_ERR(rk808->regmap); 315 - } 316 - 317 - for (i = 0; i < 
ARRAY_SIZE(pre_init_reg); i++) { 318 - ret = regmap_update_bits(rk808->regmap, pre_init_reg[i].addr, 319 - pre_init_reg[i].mask, 320 - pre_init_reg[i].value); 321 - if (ret) { 322 - dev_err(&client->dev, 323 - "0x%x write err\n", pre_init_reg[i].addr); 324 - return ret; 325 - } 326 - } 327 - 328 240 ret = regmap_add_irq_chip(rk808->regmap, client->irq, 329 241 IRQF_ONESHOT, -1, 330 - &rk808_irq_chip, &rk808->irq_data); 242 + rk808->regmap_irq_chip, &rk808->irq_data); 331 243 if (ret) { 332 244 dev_err(&client->dev, "Failed to add irq_chip %d\n", ret); 333 245 return ret; 334 246 } 335 247 336 - rk808->i2c = client; 337 - i2c_set_clientdata(client, rk808); 248 + for (i = 0; i < nr_pre_init_regs; i++) { 249 + ret = regmap_update_bits(rk808->regmap, 250 + pre_init_reg[i].addr, 251 + pre_init_reg[i].mask, 252 + pre_init_reg[i].value); 253 + if (ret) { 254 + dev_err(&client->dev, 255 + "0x%x write err\n", 256 + pre_init_reg[i].addr); 257 + return ret; 258 + } 259 + } 338 260 339 - ret = devm_mfd_add_devices(&client->dev, -1, 340 - rk808s, ARRAY_SIZE(rk808s), NULL, 0, 341 - regmap_irq_get_domain(rk808->irq_data)); 261 + ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, 262 + cells, nr_cells, NULL, 0, 263 + regmap_irq_get_domain(rk808->irq_data)); 342 264 if (ret) { 343 265 dev_err(&client->dev, "failed to add MFD devices %d\n", ret); 344 266 goto err_irq; ··· 413 245 return 0; 414 246 } 415 247 416 - static const struct of_device_id rk808_of_match[] = { 417 - { .compatible = "rockchip,rk808" }, 418 - { }, 419 - }; 420 - MODULE_DEVICE_TABLE(of, rk808_of_match); 421 - 422 248 static const struct i2c_device_id rk808_ids[] = { 423 249 { "rk808" }, 250 + { "rk818" }, 424 251 { }, 425 252 }; 426 253 MODULE_DEVICE_TABLE(i2c, rk808_ids); ··· 435 272 MODULE_LICENSE("GPL"); 436 273 MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>"); 437 274 MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>"); 438 - MODULE_DESCRIPTION("RK808 PMIC driver"); 275 + MODULE_AUTHOR("Wadim 
Egorov <w.egorov@phytec.de>"); 276 + MODULE_DESCRIPTION("RK808/RK818 PMIC driver");
+2 -1
drivers/misc/Makefile
··· 69 69 OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ 70 70 --set-section-flags .text=alloc,readonly \ 71 71 --rename-section .text=.rodata 72 - $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o 72 + targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o 73 + $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE 73 74 $(call if_changed,objcopy)
+1 -2
drivers/misc/cxl/context.c
··· 90 90 */ 91 91 mutex_lock(&afu->contexts_lock); 92 92 idr_preload(GFP_KERNEL); 93 - i = idr_alloc(&ctx->afu->contexts_idr, ctx, 94 - ctx->afu->adapter->native->sl_ops->min_pe, 93 + i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe, 95 94 ctx->afu->num_procs, GFP_NOWAIT); 96 95 idr_preload_end(); 97 96 mutex_unlock(&afu->contexts_lock);
+1 -1
drivers/misc/cxl/cxl.h
··· 561 561 u64 (*timebase_read)(struct cxl *adapter); 562 562 int capi_mode; 563 563 bool needs_reset_before_disable; 564 - int min_pe; 565 564 }; 566 565 567 566 struct cxl_native { ··· 602 603 struct bin_attribute cxl_attr; 603 604 int adapter_num; 604 605 int user_irqs; 606 + int min_pe; 605 607 u64 ps_size; 606 608 u16 psl_rev; 607 609 u16 base_image;
+1 -1
drivers/misc/cxl/native.c
··· 924 924 return fail_psl_irq(afu, &irq_info); 925 925 } 926 926 927 - void native_irq_wait(struct cxl_context *ctx) 927 + static void native_irq_wait(struct cxl_context *ctx) 928 928 { 929 929 u64 dsisr; 930 930 int timeout = 1000;
+8 -4
drivers/misc/cxl/pci.c
··· 379 379 380 380 static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) 381 381 { 382 - u64 psl_dsnctl; 382 + u64 psl_dsnctl, psl_fircntl; 383 383 u64 chipid; 384 384 u64 capp_unit_id; 385 385 int rc; ··· 398 398 cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); 399 399 /* snoop write mask */ 400 400 cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); 401 - /* set fir_accum */ 402 - cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); 401 + /* set fir_cntl to recommended value for production env */ 402 + psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ 403 + psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ 404 + psl_fircntl |= 0x1ULL; /* ce_thresh */ 405 + cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl); 403 406 /* for debugging with trace arrays */ 404 407 cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); 405 408 ··· 1524 1521 .write_timebase_ctrl = write_timebase_ctrl_xsl, 1525 1522 .timebase_read = timebase_read_xsl, 1526 1523 .capi_mode = OPAL_PHB_CAPI_MODE_DMA, 1527 - .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ 1528 1524 }; 1529 1525 1530 1526 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) 1531 1527 { 1532 1528 if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { 1529 + /* Mellanox CX-4 */ 1533 1530 dev_info(&adapter->dev, "Device uses an XSL\n"); 1534 1531 adapter->native->sl_ops = &xsl_ops; 1532 + adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ 1535 1533 } else { 1536 1534 dev_info(&adapter->dev, "Device uses a PSL\n"); 1537 1535 adapter->native->sl_ops = &psl_ops;
+1 -1
drivers/misc/cxl/vphb.c
··· 221 221 /* Setup the PHB using arch provided callback */ 222 222 phb->ops = &cxl_pcie_pci_ops; 223 223 phb->cfg_addr = NULL; 224 - phb->cfg_data = 0; 224 + phb->cfg_data = NULL; 225 225 phb->private_data = afu; 226 226 phb->controller_ops = cxl_pci_controller_ops; 227 227
+1 -1
drivers/misc/lkdtm_usercopy.c
··· 49 49 50 50 /* This is a pointer to outside our current stack frame. */ 51 51 if (bad_frame) { 52 - bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack); 52 + bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack); 53 53 } else { 54 54 /* Put start address just inside stack. */ 55 55 bad_stack = task_stack_page(current) + THREAD_SIZE;
+1
drivers/nvdimm/btt.c
··· 1269 1269 } 1270 1270 } 1271 1271 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); 1272 + btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; 1272 1273 revalidate_disk(btt->btt_disk); 1273 1274 1274 1275 return 0;
+20
drivers/nvdimm/btt_devs.c
··· 140 140 } 141 141 static DEVICE_ATTR_RW(namespace); 142 142 143 + static ssize_t size_show(struct device *dev, 144 + struct device_attribute *attr, char *buf) 145 + { 146 + struct nd_btt *nd_btt = to_nd_btt(dev); 147 + ssize_t rc; 148 + 149 + device_lock(dev); 150 + if (dev->driver) 151 + rc = sprintf(buf, "%llu\n", nd_btt->size); 152 + else { 153 + /* no size to convey if the btt instance is disabled */ 154 + rc = -ENXIO; 155 + } 156 + device_unlock(dev); 157 + 158 + return rc; 159 + } 160 + static DEVICE_ATTR_RO(size); 161 + 143 162 static struct attribute *nd_btt_attributes[] = { 144 163 &dev_attr_sector_size.attr, 145 164 &dev_attr_namespace.attr, 146 165 &dev_attr_uuid.attr, 166 + &dev_attr_size.attr, 147 167 NULL, 148 168 }; 149 169
+1
drivers/nvdimm/nd.h
··· 143 143 struct nd_namespace_common *ndns; 144 144 struct btt *btt; 145 145 unsigned long lbasize; 146 + u64 size; 146 147 u8 *uuid; 147 148 int id; 148 149 };
+8 -12
drivers/nvme/host/pci.c
··· 1543 1543 reinit_completion(&dev->ioq_wait); 1544 1544 retry: 1545 1545 timeout = ADMIN_TIMEOUT; 1546 - for (; i > 0; i--) { 1547 - struct nvme_queue *nvmeq = dev->queues[i]; 1548 - 1549 - if (!pass) 1550 - nvme_suspend_queue(nvmeq); 1551 - if (nvme_delete_queue(nvmeq, opcode)) 1546 + for (; i > 0; i--, sent++) 1547 + if (nvme_delete_queue(dev->queues[i], opcode)) 1552 1548 break; 1553 - ++sent; 1554 - } 1549 + 1555 1550 while (sent--) { 1556 1551 timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); 1557 1552 if (timeout == 0) ··· 1688 1693 nvme_stop_queues(&dev->ctrl); 1689 1694 csts = readl(dev->bar + NVME_REG_CSTS); 1690 1695 } 1696 + 1697 + for (i = dev->queue_count - 1; i > 0; i--) 1698 + nvme_suspend_queue(dev->queues[i]); 1699 + 1691 1700 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 1692 - for (i = dev->queue_count - 1; i >= 0; i--) { 1693 - struct nvme_queue *nvmeq = dev->queues[i]; 1694 - nvme_suspend_queue(nvmeq); 1695 - } 1701 + nvme_suspend_queue(dev->queues[0]); 1696 1702 } else { 1697 1703 nvme_disable_io_queues(dev); 1698 1704 nvme_disable_admin_queue(dev, shutdown);
+47 -40
drivers/nvme/host/rdma.c
··· 12 12 * more details. 13 13 */ 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 - #include <linux/delay.h> 16 15 #include <linux/module.h> 17 16 #include <linux/init.h> 18 17 #include <linux/slab.h> 19 18 #include <linux/err.h> 20 19 #include <linux/string.h> 21 - #include <linux/jiffies.h> 22 20 #include <linux/atomic.h> 23 21 #include <linux/blk-mq.h> 24 22 #include <linux/types.h> ··· 24 26 #include <linux/mutex.h> 25 27 #include <linux/scatterlist.h> 26 28 #include <linux/nvme.h> 27 - #include <linux/t10-pi.h> 28 29 #include <asm/unaligned.h> 29 30 30 31 #include <rdma/ib_verbs.h> ··· 166 169 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, 167 170 struct rdma_cm_event *event); 168 171 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); 169 - static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl); 170 172 171 173 /* XXX: really should move to a generic header sooner or later.. */ 172 174 static inline void put_unaligned_le24(u32 val, u8 *p) ··· 683 687 list_del(&ctrl->list); 684 688 mutex_unlock(&nvme_rdma_ctrl_mutex); 685 689 686 - if (ctrl->ctrl.tagset) { 687 - blk_cleanup_queue(ctrl->ctrl.connect_q); 688 - blk_mq_free_tag_set(&ctrl->tag_set); 689 - nvme_rdma_dev_put(ctrl->device); 690 - } 691 690 kfree(ctrl->queues); 692 691 nvmf_free_options(nctrl->opts); 693 692 free_ctrl: ··· 739 748 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 740 749 WARN_ON_ONCE(!changed); 741 750 742 - if (ctrl->queue_count > 1) 751 + if (ctrl->queue_count > 1) { 743 752 nvme_start_queues(&ctrl->ctrl); 753 + nvme_queue_scan(&ctrl->ctrl); 754 + nvme_queue_async_events(&ctrl->ctrl); 755 + } 744 756 745 757 dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); 746 758 ··· 1263 1269 { 1264 1270 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1265 1271 struct rdma_conn_param param = { }; 1266 - struct nvme_rdma_cm_req priv; 1272 + struct nvme_rdma_cm_req priv = { }; 1267 1273 int ret; 1268 1274 1269 1275 param.qp_num = 
queue->qp->qp_num; ··· 1312 1318 * that caught the event. Since we hold the callout until the controller 1313 1319 * deletion is completed, we'll deadlock if the controller deletion will 1314 1320 * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership 1315 - * of destroying this queue before-hand, destroy the queue resources 1316 - * after the controller deletion completed with the exception of destroying 1317 - * the cm_id implicitely by returning a non-zero rc to the callout. 1321 + * of destroying this queue before-hand, destroy the queue resources, 1322 + * then queue the controller deletion which won't destroy this queue and 1323 + * we destroy the cm_id implicitely by returning a non-zero rc to the callout. 1318 1324 */ 1319 1325 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue) 1320 1326 { 1321 1327 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1322 - int ret, ctrl_deleted = 0; 1328 + int ret; 1323 1329 1324 - /* First disable the queue so ctrl delete won't free it */ 1325 - if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) 1326 - goto out; 1330 + /* Own the controller deletion */ 1331 + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) 1332 + return 0; 1327 1333 1328 - /* delete the controller */ 1329 - ret = __nvme_rdma_del_ctrl(ctrl); 1330 - if (!ret) { 1331 - dev_warn(ctrl->ctrl.device, 1332 - "Got rdma device removal event, deleting ctrl\n"); 1333 - flush_work(&ctrl->delete_work); 1334 + dev_warn(ctrl->ctrl.device, 1335 + "Got rdma device removal event, deleting ctrl\n"); 1336 + 1337 + /* Get rid of reconnect work if its running */ 1338 + cancel_delayed_work_sync(&ctrl->reconnect_work); 1339 + 1340 + /* Disable the queue so ctrl delete won't free it */ 1341 + if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) { 1342 + /* Free this queue ourselves */ 1343 + nvme_rdma_stop_queue(queue); 1344 + nvme_rdma_destroy_queue_ib(queue); 1334 1345 1335 1346 /* Return non-zero so the cm_id will destroy 
implicitly */ 1336 - ctrl_deleted = 1; 1337 - 1338 - /* Free this queue ourselves */ 1339 - rdma_disconnect(queue->cm_id); 1340 - ib_drain_qp(queue->qp); 1341 - nvme_rdma_destroy_queue_ib(queue); 1347 + ret = 1; 1342 1348 } 1343 1349 1344 - out: 1345 - return ctrl_deleted; 1350 + /* Queue controller deletion */ 1351 + queue_work(nvme_rdma_wq, &ctrl->delete_work); 1352 + flush_work(&ctrl->delete_work); 1353 + return ret; 1346 1354 } 1347 1355 1348 1356 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, ··· 1644 1648 nvme_rdma_free_io_queues(ctrl); 1645 1649 } 1646 1650 1647 - if (ctrl->ctrl.state == NVME_CTRL_LIVE) 1651 + if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags)) 1648 1652 nvme_shutdown_ctrl(&ctrl->ctrl); 1649 1653 1650 1654 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); ··· 1653 1657 nvme_rdma_destroy_admin_queue(ctrl); 1654 1658 } 1655 1659 1660 + static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) 1661 + { 1662 + nvme_uninit_ctrl(&ctrl->ctrl); 1663 + if (shutdown) 1664 + nvme_rdma_shutdown_ctrl(ctrl); 1665 + 1666 + if (ctrl->ctrl.tagset) { 1667 + blk_cleanup_queue(ctrl->ctrl.connect_q); 1668 + blk_mq_free_tag_set(&ctrl->tag_set); 1669 + nvme_rdma_dev_put(ctrl->device); 1670 + } 1671 + 1672 + nvme_put_ctrl(&ctrl->ctrl); 1673 + } 1674 + 1656 1675 static void nvme_rdma_del_ctrl_work(struct work_struct *work) 1657 1676 { 1658 1677 struct nvme_rdma_ctrl *ctrl = container_of(work, 1659 1678 struct nvme_rdma_ctrl, delete_work); 1660 1679 1661 - nvme_remove_namespaces(&ctrl->ctrl); 1662 - nvme_rdma_shutdown_ctrl(ctrl); 1663 - nvme_uninit_ctrl(&ctrl->ctrl); 1664 - nvme_put_ctrl(&ctrl->ctrl); 1680 + __nvme_rdma_remove_ctrl(ctrl, true); 1665 1681 } 1666 1682 1667 1683 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) ··· 1706 1698 struct nvme_rdma_ctrl *ctrl = container_of(work, 1707 1699 struct nvme_rdma_ctrl, delete_work); 1708 1700 1709 - nvme_remove_namespaces(&ctrl->ctrl); 1710 - 
nvme_uninit_ctrl(&ctrl->ctrl); 1711 - nvme_put_ctrl(&ctrl->ctrl); 1701 + __nvme_rdma_remove_ctrl(ctrl, false); 1712 1702 } 1713 1703 1714 1704 static void nvme_rdma_reset_ctrl_work(struct work_struct *work) ··· 1745 1739 if (ctrl->queue_count > 1) { 1746 1740 nvme_start_queues(&ctrl->ctrl); 1747 1741 nvme_queue_scan(&ctrl->ctrl); 1742 + nvme_queue_async_events(&ctrl->ctrl); 1748 1743 } 1749 1744 1750 1745 return;
+1 -5
drivers/nvme/target/admin-cmd.c
··· 13 13 */ 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 15 #include <linux/module.h> 16 - #include <linux/random.h> 17 16 #include <generated/utsrelease.h> 18 17 #include "nvmet.h" 19 18 ··· 82 83 { 83 84 struct nvmet_ctrl *ctrl = req->sq->ctrl; 84 85 struct nvme_id_ctrl *id; 85 - u64 serial; 86 86 u16 status = 0; 87 87 88 88 id = kzalloc(sizeof(*id), GFP_KERNEL); ··· 94 96 id->vid = 0; 95 97 id->ssvid = 0; 96 98 97 - /* generate a random serial number as our controllers are ephemeral: */ 98 - get_random_bytes(&serial, sizeof(serial)); 99 99 memset(id->sn, ' ', sizeof(id->sn)); 100 - snprintf(id->sn, sizeof(id->sn), "%llx", serial); 100 + snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial); 101 101 102 102 memset(id->mn, ' ', sizeof(id->mn)); 103 103 strncpy((char *)id->mn, "Linux", sizeof(id->mn));
+4
drivers/nvme/target/core.c
··· 13 13 */ 14 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 15 #include <linux/module.h> 16 + #include <linux/random.h> 16 17 #include "nvmet.h" 17 18 18 19 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; ··· 728 727 729 728 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); 730 729 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); 730 + 731 + /* generate a random serial number as our controllers are ephemeral: */ 732 + get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)); 731 733 732 734 kref_init(&ctrl->ref); 733 735 ctrl->subsys = subsys;
+1 -3
drivers/nvme/target/loop.c
··· 414 414 struct nvme_loop_ctrl *ctrl = container_of(work, 415 415 struct nvme_loop_ctrl, delete_work); 416 416 417 - nvme_remove_namespaces(&ctrl->ctrl); 418 - nvme_loop_shutdown_ctrl(ctrl); 419 417 nvme_uninit_ctrl(&ctrl->ctrl); 418 + nvme_loop_shutdown_ctrl(ctrl); 420 419 nvme_put_ctrl(&ctrl->ctrl); 421 420 } 422 421 ··· 500 501 nvme_loop_destroy_admin_queue(ctrl); 501 502 out_disable: 502 503 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 503 - nvme_remove_namespaces(&ctrl->ctrl); 504 504 nvme_uninit_ctrl(&ctrl->ctrl); 505 505 nvme_put_ctrl(&ctrl->ctrl); 506 506 }
+1
drivers/nvme/target/nvmet.h
··· 113 113 114 114 struct mutex lock; 115 115 u64 cap; 116 + u64 serial; 116 117 u32 cc; 117 118 u32 csts; 118 119
+74 -26
drivers/nvme/target/rdma.c
··· 77 77 NVMET_RDMA_Q_CONNECTING, 78 78 NVMET_RDMA_Q_LIVE, 79 79 NVMET_RDMA_Q_DISCONNECTING, 80 + NVMET_RDMA_IN_DEVICE_REMOVAL, 80 81 }; 81 82 82 83 struct nvmet_rdma_queue { ··· 616 615 if (!len) 617 616 return 0; 618 617 619 - /* use the already allocated data buffer if possible */ 620 - if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) { 621 - nvmet_rdma_use_inline_sg(rsp, len, 0); 622 - } else { 623 - status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, 624 - len); 625 - if (status) 626 - return status; 627 - } 618 + status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, 619 + len); 620 + if (status) 621 + return status; 628 622 629 623 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, 630 624 rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, ··· 980 984 struct nvmet_rdma_device *dev = queue->dev; 981 985 982 986 nvmet_rdma_free_queue(queue); 983 - rdma_destroy_id(cm_id); 987 + 988 + if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL) 989 + rdma_destroy_id(cm_id); 990 + 984 991 kref_put(&dev->ref, nvmet_rdma_free_dev); 985 992 } 986 993 ··· 1232 1233 switch (queue->state) { 1233 1234 case NVMET_RDMA_Q_CONNECTING: 1234 1235 case NVMET_RDMA_Q_LIVE: 1235 - disconnect = true; 1236 1236 queue->state = NVMET_RDMA_Q_DISCONNECTING; 1237 + case NVMET_RDMA_IN_DEVICE_REMOVAL: 1238 + disconnect = true; 1237 1239 break; 1238 1240 case NVMET_RDMA_Q_DISCONNECTING: 1239 1241 break; ··· 1272 1272 schedule_work(&queue->release_work); 1273 1273 } 1274 1274 1275 + /** 1276 + * nvmet_rdma_device_removal() - Handle RDMA device removal 1277 + * @queue: nvmet rdma queue (cm id qp_context) 1278 + * @cm_id: rdma_cm id (cm_id context is the nvmet port) 1279 + * 1280 + * DEVICE_REMOVAL event notifies us that the RDMA device is about 1281 + * to unplug so we should take care of destroying our RDMA resources. 1282 + * This event will be generated for each allocated cm_id. 
1283 + * 1284 + * Note that this event can be generated on a normal queue cm_id 1285 + * and/or a device bound listener cm_id (where in this case 1286 + * queue will be null). 1287 + * 1288 + * We claim ownership on destroying the cm_id. For queues we move 1289 + * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for port 1290 + * we nullify the priv to prevent double cm_id destruction and destroying 1291 + * the cm_id implicitly by returning a non-zero rc to the callout. 1292 + */ 1293 + static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, 1294 + struct nvmet_rdma_queue *queue) 1295 + { 1296 + unsigned long flags; 1297 + 1298 + if (!queue) { 1299 + struct nvmet_port *port = cm_id->context; 1300 + 1301 + /* 1302 + * This is a listener cm_id. Make sure that 1303 + * future remove_port won't invoke a double 1304 + * cm_id destroy. Use atomic xchg to make sure 1305 + * we don't compete with remove_port. 1306 + */ 1307 + if (xchg(&port->priv, NULL) != cm_id) 1308 + return 0; 1309 + } else { 1310 + /* 1311 + * This is a queue cm_id. Make sure that 1312 + * release queue will not destroy the cm_id 1313 + * and schedule all ctrl queues removal (only 1314 + * if the queue is not disconnecting already). 1315 + */ 1316 + spin_lock_irqsave(&queue->state_lock, flags); 1317 + if (queue->state != NVMET_RDMA_Q_DISCONNECTING) 1318 + queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL; 1319 + spin_unlock_irqrestore(&queue->state_lock, flags); 1320 + nvmet_rdma_queue_disconnect(queue); 1321 + flush_scheduled_work(); 1322 + } 1323 + 1324 + /* 1325 + * We need to return 1 so that the core will destroy 1326 + * its own ID. What a great API design.. 
1327 + */ 1328 + return 1; 1329 + } 1330 + 1275 1331 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, 1276 1332 struct rdma_cm_event *event) 1277 1333 { ··· 1350 1294 break; 1351 1295 case RDMA_CM_EVENT_ADDR_CHANGE: 1352 1296 case RDMA_CM_EVENT_DISCONNECTED: 1353 - case RDMA_CM_EVENT_DEVICE_REMOVAL: 1354 1297 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1355 - /* 1356 - * We can get the device removal callback even for a 1357 - * CM ID that we aren't actually using. In that case 1358 - * the context pointer is NULL, so we shouldn't try 1359 - * to disconnect a non-existing queue. But we also 1360 - * need to return 1 so that the core will destroy 1361 - * it's own ID. What a great API design.. 1362 - */ 1363 - if (queue) 1364 - nvmet_rdma_queue_disconnect(queue); 1365 - else 1366 - ret = 1; 1298 + nvmet_rdma_queue_disconnect(queue); 1299 + break; 1300 + case RDMA_CM_EVENT_DEVICE_REMOVAL: 1301 + ret = nvmet_rdma_device_removal(cm_id, queue); 1367 1302 break; 1368 1303 case RDMA_CM_EVENT_REJECTED: 1369 1304 case RDMA_CM_EVENT_UNREACHABLE: ··· 1443 1396 1444 1397 static void nvmet_rdma_remove_port(struct nvmet_port *port) 1445 1398 { 1446 - struct rdma_cm_id *cm_id = port->priv; 1399 + struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); 1447 1400 1448 - rdma_destroy_id(cm_id); 1401 + if (cm_id) 1402 + rdma_destroy_id(cm_id); 1449 1403 } 1450 1404 1451 1405 static struct nvmet_fabrics_ops nvmet_rdma_ops = {
+2
drivers/pci/msi.c
··· 1411 1411 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 1412 1412 pci_msi_domain_update_chip_ops(info); 1413 1413 1414 + info->flags |= MSI_FLAG_ACTIVATE_EARLY; 1415 + 1414 1416 domain = msi_create_irq_domain(fwnode, info, parent); 1415 1417 if (!domain) 1416 1418 return NULL;
+14 -13
drivers/perf/arm_pmu.c
··· 688 688 return 0; 689 689 } 690 690 691 - static DEFINE_MUTEX(arm_pmu_mutex); 691 + static DEFINE_SPINLOCK(arm_pmu_lock); 692 692 static LIST_HEAD(arm_pmu_list); 693 693 694 694 /* ··· 701 701 { 702 702 struct arm_pmu *pmu; 703 703 704 - mutex_lock(&arm_pmu_mutex); 704 + spin_lock(&arm_pmu_lock); 705 705 list_for_each_entry(pmu, &arm_pmu_list, entry) { 706 706 707 707 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) ··· 709 709 if (pmu->reset) 710 710 pmu->reset(pmu); 711 711 } 712 - mutex_unlock(&arm_pmu_mutex); 712 + spin_unlock(&arm_pmu_lock); 713 713 return 0; 714 714 } 715 715 ··· 821 821 if (!cpu_hw_events) 822 822 return -ENOMEM; 823 823 824 - mutex_lock(&arm_pmu_mutex); 824 + spin_lock(&arm_pmu_lock); 825 825 list_add_tail(&cpu_pmu->entry, &arm_pmu_list); 826 - mutex_unlock(&arm_pmu_mutex); 826 + spin_unlock(&arm_pmu_lock); 827 827 828 828 err = cpu_pm_pmu_register(cpu_pmu); 829 829 if (err) ··· 859 859 return 0; 860 860 861 861 out_unregister: 862 - mutex_lock(&arm_pmu_mutex); 862 + spin_lock(&arm_pmu_lock); 863 863 list_del(&cpu_pmu->entry); 864 - mutex_unlock(&arm_pmu_mutex); 864 + spin_unlock(&arm_pmu_lock); 865 865 free_percpu(cpu_hw_events); 866 866 return err; 867 867 } ··· 869 869 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) 870 870 { 871 871 cpu_pm_pmu_unregister(cpu_pmu); 872 - mutex_lock(&arm_pmu_mutex); 872 + spin_lock(&arm_pmu_lock); 873 873 list_del(&cpu_pmu->entry); 874 - mutex_unlock(&arm_pmu_mutex); 874 + spin_unlock(&arm_pmu_lock); 875 875 free_percpu(cpu_pmu->hw_events); 876 876 } 877 877 ··· 967 967 968 968 /* If we didn't manage to parse anything, try the interrupt affinity */ 969 969 if (cpumask_weight(&pmu->supported_cpus) == 0) { 970 - if (!using_spi) { 971 - /* If using PPIs, check the affinity of the partition */ 972 - int ret, irq; 970 + int irq = platform_get_irq(pdev, 0); 973 971 974 - irq = platform_get_irq(pdev, 0); 972 + if (irq_is_percpu(irq)) { 973 + /* If using PPIs, check the affinity of the partition */ 974 
+ int ret; 975 + 975 976 ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); 976 977 if (ret) { 977 978 kfree(irqs);
+92
drivers/platform/chrome/cros_ec_proto.c
··· 19 19 #include <linux/device.h> 20 20 #include <linux/module.h> 21 21 #include <linux/slab.h> 22 + #include <asm/unaligned.h> 22 23 23 24 #define EC_COMMAND_RETRIES 50 24 25 ··· 235 234 return ret; 236 235 } 237 236 237 + static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, 238 + u16 cmd, u32 *mask) 239 + { 240 + struct ec_params_get_cmd_versions *pver; 241 + struct ec_response_get_cmd_versions *rver; 242 + struct cros_ec_command *msg; 243 + int ret; 244 + 245 + msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)), 246 + GFP_KERNEL); 247 + if (!msg) 248 + return -ENOMEM; 249 + 250 + msg->version = 0; 251 + msg->command = EC_CMD_GET_CMD_VERSIONS; 252 + msg->insize = sizeof(*rver); 253 + msg->outsize = sizeof(*pver); 254 + 255 + pver = (struct ec_params_get_cmd_versions *)msg->data; 256 + pver->cmd = cmd; 257 + 258 + ret = cros_ec_cmd_xfer(ec_dev, msg); 259 + if (ret > 0) { 260 + rver = (struct ec_response_get_cmd_versions *)msg->data; 261 + *mask = rver->version_mask; 262 + } 263 + 264 + kfree(msg); 265 + 266 + return ret; 267 + } 268 + 238 269 int cros_ec_query_all(struct cros_ec_device *ec_dev) 239 270 { 240 271 struct device *dev = ec_dev->dev; 241 272 struct cros_ec_command *proto_msg; 242 273 struct ec_response_get_protocol_info *proto_info; 274 + u32 ver_mask = 0; 243 275 int ret; 244 276 245 277 proto_msg = kzalloc(sizeof(*proto_msg) + sizeof(*proto_info), ··· 362 328 goto exit; 363 329 } 364 330 331 + /* Probe if MKBP event is supported */ 332 + ret = cros_ec_get_host_command_version_mask(ec_dev, 333 + EC_CMD_GET_NEXT_EVENT, 334 + &ver_mask); 335 + if (ret < 0 || ver_mask == 0) 336 + ec_dev->mkbp_event_supported = 0; 337 + else 338 + ec_dev->mkbp_event_supported = 1; 339 + 365 340 exit: 366 341 kfree(proto_msg); 367 342 return ret; ··· 440 397 return ret; 441 398 } 442 399 EXPORT_SYMBOL(cros_ec_cmd_xfer_status); 400 + 401 + static int get_next_event(struct cros_ec_device *ec_dev) 402 + { 403 + u8 buffer[sizeof(struct 
cros_ec_command) + sizeof(ec_dev->event_data)]; 404 + struct cros_ec_command *msg = (struct cros_ec_command *)&buffer; 405 + int ret; 406 + 407 + msg->version = 0; 408 + msg->command = EC_CMD_GET_NEXT_EVENT; 409 + msg->insize = sizeof(ec_dev->event_data); 410 + msg->outsize = 0; 411 + 412 + ret = cros_ec_cmd_xfer(ec_dev, msg); 413 + if (ret > 0) { 414 + ec_dev->event_size = ret - 1; 415 + memcpy(&ec_dev->event_data, msg->data, 416 + sizeof(ec_dev->event_data)); 417 + } 418 + 419 + return ret; 420 + } 421 + 422 + static int get_keyboard_state_event(struct cros_ec_device *ec_dev) 423 + { 424 + u8 buffer[sizeof(struct cros_ec_command) + 425 + sizeof(ec_dev->event_data.data)]; 426 + struct cros_ec_command *msg = (struct cros_ec_command *)&buffer; 427 + 428 + msg->version = 0; 429 + msg->command = EC_CMD_MKBP_STATE; 430 + msg->insize = sizeof(ec_dev->event_data.data); 431 + msg->outsize = 0; 432 + 433 + ec_dev->event_size = cros_ec_cmd_xfer(ec_dev, msg); 434 + ec_dev->event_data.event_type = EC_MKBP_EVENT_KEY_MATRIX; 435 + memcpy(&ec_dev->event_data.data, msg->data, 436 + sizeof(ec_dev->event_data.data)); 437 + 438 + return ec_dev->event_size; 439 + } 440 + 441 + int cros_ec_get_next_event(struct cros_ec_device *ec_dev) 442 + { 443 + if (ec_dev->mkbp_event_supported) 444 + return get_next_event(ec_dev); 445 + else 446 + return get_keyboard_state_event(ec_dev); 447 + } 448 + EXPORT_SYMBOL(cros_ec_get_next_event);
+2 -2
drivers/platform/x86/dell-wmi.c
··· 110 110 /* BIOS error detected */ 111 111 { KE_IGNORE, 0xe00d, { KEY_RESERVED } }, 112 112 113 - /* Unknown, defined in ACPI DSDT */ 114 - /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */ 113 + /* Battery was removed or inserted */ 114 + { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, 115 115 116 116 /* Wifi Catcher */ 117 117 { KE_KEY, 0xe011, { KEY_PROG2 } },
+13 -11
drivers/rapidio/rio_cm.c
··· 1080 1080 static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, 1081 1081 long timeout) 1082 1082 { 1083 - struct rio_channel *ch = NULL; 1084 - struct rio_channel *new_ch = NULL; 1083 + struct rio_channel *ch; 1084 + struct rio_channel *new_ch; 1085 1085 struct conn_req *req; 1086 1086 struct cm_peer *peer; 1087 1087 int found = 0; ··· 1155 1155 1156 1156 spin_unlock_bh(&ch->lock); 1157 1157 riocm_put_channel(ch); 1158 + ch = NULL; 1158 1159 kfree(req); 1159 1160 1160 1161 down_read(&rdev_sem); ··· 1173 1172 if (!found) { 1174 1173 /* If peer device object not found, simply ignore the request */ 1175 1174 err = -ENODEV; 1176 - goto err_nodev; 1175 + goto err_put_new_ch; 1177 1176 } 1178 1177 1179 1178 new_ch->rdev = peer->rdev; ··· 1185 1184 1186 1185 *new_ch_id = new_ch->id; 1187 1186 return new_ch; 1187 + 1188 + err_put_new_ch: 1189 + spin_lock_bh(&idr_lock); 1190 + idr_remove(&ch_idr, new_ch->id); 1191 + spin_unlock_bh(&idr_lock); 1192 + riocm_put_channel(new_ch); 1193 + 1188 1194 err_put: 1189 - riocm_put_channel(ch); 1190 - err_nodev: 1191 - if (new_ch) { 1192 - spin_lock_bh(&idr_lock); 1193 - idr_remove(&ch_idr, new_ch->id); 1194 - spin_unlock_bh(&idr_lock); 1195 - riocm_put_channel(new_ch); 1196 - } 1195 + if (ch) 1196 + riocm_put_channel(ch); 1197 1197 *new_ch_id = 0; 1198 1198 return ERR_PTR(err); 1199 1199 }
+3 -3
drivers/regulator/Kconfig
··· 323 323 324 324 config REGULATOR_LP873X 325 325 tristate "TI LP873X Power regulators" 326 - depends on MFD_LP873X && OF 326 + depends on MFD_TI_LP873X && OF 327 327 help 328 328 This driver supports LP873X voltage regulator chips. LP873X 329 329 provides two step-down converters and two general-purpose LDO ··· 635 635 outputs which can be controlled by i2c communication. 636 636 637 637 config REGULATOR_RK808 638 - tristate "Rockchip RK808 Power regulators" 638 + tristate "Rockchip RK808/RK818 Power regulators" 639 639 depends on MFD_RK808 640 640 help 641 641 Select this option to enable the power regulator of ROCKCHIP 642 - PMIC RK808. 642 + PMIC RK808 and RK818. 643 643 This driver supports the control of different power rails of device 644 644 through regulator interface. The device supports multiple DCDC/LDO 645 645 outputs which can be controlled by i2c communication.
+112 -8
drivers/regulator/axp20x-regulator.c
··· 244 244 .ops = &axp20x_ops_sw, 245 245 }; 246 246 247 + static const struct regulator_linear_range axp806_dcdca_ranges[] = { 248 + REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000), 249 + REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000), 250 + }; 251 + 252 + static const struct regulator_linear_range axp806_dcdcd_ranges[] = { 253 + REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2d, 20000), 254 + REGULATOR_LINEAR_RANGE(1600000, 0x2e, 0x3f, 100000), 255 + }; 256 + 257 + static const struct regulator_linear_range axp806_cldo2_ranges[] = { 258 + REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000), 259 + REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000), 260 + }; 261 + 262 + static const struct regulator_desc axp806_regulators[] = { 263 + AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina", axp806_dcdca_ranges, 264 + 72, AXP806_DCDCA_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1, 265 + BIT(0)), 266 + AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50, 267 + AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(1)), 268 + AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc", axp806_dcdca_ranges, 269 + 72, AXP806_DCDCC_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1, 270 + BIT(2)), 271 + AXP_DESC_RANGES(AXP806, DCDCD, "dcdcd", "vind", axp806_dcdcd_ranges, 272 + 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, 273 + BIT(3)), 274 + AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, 275 + AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), 276 + AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, 277 + AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), 278 + AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, 279 + AXP806_ALDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(6)), 280 + AXP_DESC(AXP806, ALDO3, "aldo3", "aldoin", 700, 3300, 100, 281 + AXP806_ALDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(7)), 282 + AXP_DESC(AXP806, BLDO1, "bldo1", "bldoin", 700, 1900, 100, 283 + AXP806_BLDO1_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(0)), 284 + AXP_DESC(AXP806, BLDO2, "bldo2", 
"bldoin", 700, 1900, 100, 285 + AXP806_BLDO2_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(1)), 286 + AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100, 287 + AXP806_BLDO3_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(2)), 288 + AXP_DESC(AXP806, BLDO4, "bldo4", "bldoin", 700, 1900, 100, 289 + AXP806_BLDO4_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(3)), 290 + AXP_DESC(AXP806, CLDO1, "cldo1", "cldoin", 700, 3300, 100, 291 + AXP806_CLDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(4)), 292 + AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin", axp806_cldo2_ranges, 293 + 32, AXP806_CLDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, 294 + BIT(5)), 295 + AXP_DESC(AXP806, CLDO3, "cldo3", "cldoin", 700, 3300, 100, 296 + AXP806_CLDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(6)), 297 + AXP_DESC_SW(AXP806, SW, "sw", "swin", AXP806_PWR_OUT_CTRL2, BIT(7)), 298 + }; 299 + 247 300 static const struct regulator_linear_range axp809_dcdc4_ranges[] = { 248 301 REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2f, 20000), 249 302 REGULATOR_LINEAR_RANGE(1800000, 0x30, 0x38, 100000), 250 - }; 251 - 252 - static const struct regulator_linear_range axp809_dldo1_ranges[] = { 253 - REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000), 254 - REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000), 255 303 }; 256 304 257 305 static const struct regulator_desc axp809_regulators[] = { ··· 326 278 AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)), 327 279 AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100, 328 280 AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)), 329 - AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp809_dldo1_ranges, 281 + AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp806_cldo2_ranges, 330 282 32, AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, 331 283 BIT(3)), 332 284 AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100, ··· 350 302 static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) 351 303 { 352 304 struct axp20x_dev *axp20x = 
dev_get_drvdata(pdev->dev.parent); 305 + unsigned int reg = AXP20X_DCDC_FREQ; 353 306 u32 min, max, def, step; 354 307 355 308 switch (axp20x->variant) { ··· 361 312 def = 1500; 362 313 step = 75; 363 314 break; 315 + case AXP806_ID: 316 + /* 317 + * AXP806 DCDC work frequency setting has the same range and 318 + * step as AXP22X, but at a different register. 319 + * Fall through to the check below. 320 + * (See include/linux/mfd/axp20x.h) 321 + */ 322 + reg = AXP806_DCDC_FREQ_CTRL; 364 323 case AXP221_ID: 365 324 case AXP223_ID: 366 325 case AXP809_ID: ··· 400 343 401 344 dcdcfreq = (dcdcfreq - min) / step; 402 345 403 - return regmap_update_bits(axp20x->regmap, AXP20X_DCDC_FREQ, 346 + return regmap_update_bits(axp20x->regmap, reg, 404 347 AXP20X_FREQ_DCDC_MASK, dcdcfreq); 405 348 } 406 349 ··· 434 377 static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode) 435 378 { 436 379 struct axp20x_dev *axp20x = rdev_get_drvdata(rdev); 380 + unsigned int reg = AXP20X_DCDC_MODE; 437 381 unsigned int mask; 438 382 439 383 switch (axp20x->variant) { ··· 450 392 workmode <<= ffs(mask) - 1; 451 393 break; 452 394 395 + case AXP806_ID: 396 + reg = AXP806_DCDC_MODE_CTRL2; 397 + /* 398 + * AXP806 DCDC regulator IDs have the same range as AXP22X. 399 + * Fall through to the check below. 400 + * (See include/linux/mfd/axp20x.h) 401 + */ 453 402 case AXP221_ID: 454 403 case AXP223_ID: 455 404 case AXP809_ID: ··· 473 408 return -EINVAL; 474 409 } 475 410 476 - return regmap_update_bits(rdev->regmap, AXP20X_DCDC_MODE, mask, workmode); 411 + return regmap_update_bits(rdev->regmap, reg, mask, workmode); 412 + } 413 + 414 + /* 415 + * This function checks whether a regulator is part of a poly-phase 416 + * output setup based on the registers settings. Returns true if it is. 
417 + */ 418 + static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id) 419 + { 420 + u32 reg = 0; 421 + 422 + /* Only AXP806 has poly-phase outputs */ 423 + if (axp20x->variant != AXP806_ID) 424 + return false; 425 + 426 + regmap_read(axp20x->regmap, AXP806_DCDC_MODE_CTRL2, &reg); 427 + 428 + switch (id) { 429 + case AXP806_DCDCB: 430 + return (((reg & GENMASK(7, 6)) == BIT(6)) || 431 + ((reg & GENMASK(7, 6)) == BIT(7))); 432 + case AXP806_DCDCC: 433 + return ((reg & GENMASK(7, 6)) == BIT(7)); 434 + case AXP806_DCDCE: 435 + return !!(reg & BIT(5)); 436 + } 437 + 438 + return false; 477 439 } 478 440 479 441 static int axp20x_regulator_probe(struct platform_device *pdev) ··· 532 440 drivevbus = of_property_read_bool(pdev->dev.parent->of_node, 533 441 "x-powers,drive-vbus-en"); 534 442 break; 443 + case AXP806_ID: 444 + regulators = axp806_regulators; 445 + nregulators = AXP806_REG_ID_MAX; 446 + break; 535 447 case AXP809_ID: 536 448 regulators = axp809_regulators; 537 449 nregulators = AXP809_REG_ID_MAX; ··· 552 456 for (i = 0; i < nregulators; i++) { 553 457 const struct regulator_desc *desc = &regulators[i]; 554 458 struct regulator_desc *new_desc; 459 + 460 + /* 461 + * If this regulator is a slave in a poly-phase setup, 462 + * skip it, as its controls are bound to the master 463 + * regulator and won't work. 464 + */ 465 + if (axp20x_is_polyphase_slave(axp20x, i)) 466 + continue; 555 467 556 468 /* 557 469 * Regulators DC1SW and DC5LDO are connected internally,
+66
drivers/regulator/qcom_rpm-regulator.c
··· 448 448 }; 449 449 450 450 /* 451 + * PM8018 regulators 452 + */ 453 + static const struct qcom_rpm_reg pm8018_pldo = { 454 + .desc.linear_ranges = pldo_ranges, 455 + .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges), 456 + .desc.n_voltages = 161, 457 + .desc.ops = &uV_ops, 458 + .parts = &rpm8960_ldo_parts, 459 + .supports_force_mode_auto = false, 460 + .supports_force_mode_bypass = false, 461 + }; 462 + 463 + static const struct qcom_rpm_reg pm8018_nldo = { 464 + .desc.linear_ranges = nldo_ranges, 465 + .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges), 466 + .desc.n_voltages = 64, 467 + .desc.ops = &uV_ops, 468 + .parts = &rpm8960_ldo_parts, 469 + .supports_force_mode_auto = false, 470 + .supports_force_mode_bypass = false, 471 + }; 472 + 473 + static const struct qcom_rpm_reg pm8018_smps = { 474 + .desc.linear_ranges = smps_ranges, 475 + .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges), 476 + .desc.n_voltages = 154, 477 + .desc.ops = &uV_ops, 478 + .parts = &rpm8960_smps_parts, 479 + .supports_force_mode_auto = false, 480 + .supports_force_mode_bypass = false, 481 + }; 482 + 483 + static const struct qcom_rpm_reg pm8018_switch = { 484 + .desc.ops = &switch_ops, 485 + .parts = &rpm8960_switch_parts, 486 + }; 487 + 488 + /* 451 489 * PM8058 regulators 452 490 */ 453 491 static const struct qcom_rpm_reg pm8058_pldo = { ··· 793 755 const char *supply; 794 756 }; 795 757 758 + static const struct rpm_regulator_data rpm_pm8018_regulators[] = { 759 + { "s1", QCOM_RPM_PM8018_SMPS1, &pm8018_smps, "vdd_s1" }, 760 + { "s2", QCOM_RPM_PM8018_SMPS2, &pm8018_smps, "vdd_s2" }, 761 + { "s3", QCOM_RPM_PM8018_SMPS3, &pm8018_smps, "vdd_s3" }, 762 + { "s4", QCOM_RPM_PM8018_SMPS4, &pm8018_smps, "vdd_s4" }, 763 + { "s5", QCOM_RPM_PM8018_SMPS5, &pm8018_smps, "vdd_s5" }, 764 + 765 + { "l2", QCOM_RPM_PM8018_LDO2, &pm8018_pldo, "vdd_l2" }, 766 + { "l3", QCOM_RPM_PM8018_LDO3, &pm8018_pldo, "vdd_l3" }, 767 + { "l4", QCOM_RPM_PM8018_LDO4, &pm8018_pldo, "vdd_l4" }, 768 + { "l5", 
QCOM_RPM_PM8018_LDO5, &pm8018_pldo, "vdd_l5" }, 769 + { "l6", QCOM_RPM_PM8018_LDO6, &pm8018_pldo, "vdd_l7" }, 770 + { "l7", QCOM_RPM_PM8018_LDO7, &pm8018_pldo, "vdd_l7" }, 771 + { "l8", QCOM_RPM_PM8018_LDO8, &pm8018_nldo, "vdd_l8" }, 772 + { "l9", QCOM_RPM_PM8018_LDO9, &pm8921_nldo1200, 773 + "vdd_l9_l10_l11_l12" }, 774 + { "l10", QCOM_RPM_PM8018_LDO10, &pm8018_nldo, "vdd_l9_l10_l11_l12" }, 775 + { "l11", QCOM_RPM_PM8018_LDO11, &pm8018_nldo, "vdd_l9_l10_l11_l12" }, 776 + { "l12", QCOM_RPM_PM8018_LDO12, &pm8018_nldo, "vdd_l9_l10_l11_l12" }, 777 + { "l14", QCOM_RPM_PM8018_LDO14, &pm8018_pldo, "vdd_l14" }, 778 + 779 + { "lvs1", QCOM_RPM_PM8018_LVS1, &pm8018_switch, "lvs1_in" }, 780 + 781 + { } 782 + }; 783 + 796 784 static const struct rpm_regulator_data rpm_pm8058_regulators[] = { 797 785 { "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" }, 798 786 { "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" }, ··· 934 870 }; 935 871 936 872 static const struct of_device_id rpm_of_match[] = { 873 + { .compatible = "qcom,rpm-pm8018-regulators", 874 + .data = &rpm_pm8018_regulators }, 937 875 { .compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators }, 938 876 { .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators }, 939 877 { .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators },
+136 -7
drivers/regulator/rk808-regulator.c
··· 1 1 /* 2 - * Regulator driver for Rockchip RK808 2 + * Regulator driver for Rockchip RK808/RK818 3 3 * 4 4 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd 5 5 * 6 6 * Author: Chris Zhong <zyw@rock-chips.com> 7 7 * Author: Zhang Qing <zhangqing@rock-chips.com> 8 + * 9 + * Copyright (C) 2016 PHYTEC Messtechnik GmbH 10 + * 11 + * Author: Wadim Egorov <w.egorov@phytec.de> 8 12 * 9 13 * This program is free software; you can redistribute it and/or modify it 10 14 * under the terms and conditions of the GNU General Public License, ··· 35 31 #define RK808_BUCK_VSEL_MASK 0x3f 36 32 #define RK808_BUCK4_VSEL_MASK 0xf 37 33 #define RK808_LDO_VSEL_MASK 0x1f 34 + 35 + #define RK818_BUCK_VSEL_MASK 0x3f 36 + #define RK818_BUCK4_VSEL_MASK 0x1f 37 + #define RK818_LDO_VSEL_MASK 0x1f 38 + #define RK818_LDO3_ON_VSEL_MASK 0xf 39 + #define RK818_BOOST_ON_VSEL_MASK 0xe0 38 40 39 41 /* Ramp rate definitions for buck1 / buck2 only */ 40 42 #define RK808_RAMP_RATE_OFFSET 3 ··· 464 454 RK808_DCDC_EN_REG, BIT(6)), 465 455 }; 466 456 457 + static const struct regulator_desc rk818_reg[] = { 458 + { 459 + .name = "DCDC_REG1", 460 + .supply_name = "vcc1", 461 + .of_match = of_match_ptr("DCDC_REG1"), 462 + .regulators_node = of_match_ptr("regulators"), 463 + .id = RK818_ID_DCDC1, 464 + .ops = &rk808_reg_ops, 465 + .type = REGULATOR_VOLTAGE, 466 + .min_uV = 712500, 467 + .uV_step = 12500, 468 + .n_voltages = 64, 469 + .vsel_reg = RK818_BUCK1_ON_VSEL_REG, 470 + .vsel_mask = RK818_BUCK_VSEL_MASK, 471 + .enable_reg = RK818_DCDC_EN_REG, 472 + .enable_mask = BIT(0), 473 + .owner = THIS_MODULE, 474 + }, { 475 + .name = "DCDC_REG2", 476 + .supply_name = "vcc2", 477 + .of_match = of_match_ptr("DCDC_REG2"), 478 + .regulators_node = of_match_ptr("regulators"), 479 + .id = RK818_ID_DCDC2, 480 + .ops = &rk808_reg_ops, 481 + .type = REGULATOR_VOLTAGE, 482 + .min_uV = 712500, 483 + .uV_step = 12500, 484 + .n_voltages = 64, 485 + .vsel_reg = RK818_BUCK2_ON_VSEL_REG, 486 + .vsel_mask = 
RK818_BUCK_VSEL_MASK, 487 + .enable_reg = RK818_DCDC_EN_REG, 488 + .enable_mask = BIT(1), 489 + .owner = THIS_MODULE, 490 + }, { 491 + .name = "DCDC_REG3", 492 + .supply_name = "vcc3", 493 + .of_match = of_match_ptr("DCDC_REG3"), 494 + .regulators_node = of_match_ptr("regulators"), 495 + .id = RK818_ID_DCDC3, 496 + .ops = &rk808_switch_ops, 497 + .type = REGULATOR_VOLTAGE, 498 + .n_voltages = 1, 499 + .enable_reg = RK818_DCDC_EN_REG, 500 + .enable_mask = BIT(2), 501 + .owner = THIS_MODULE, 502 + }, 503 + RK8XX_DESC(RK818_ID_DCDC4, "DCDC_REG4", "vcc4", 1800, 3600, 100, 504 + RK818_BUCK4_ON_VSEL_REG, RK818_BUCK4_VSEL_MASK, 505 + RK818_DCDC_EN_REG, BIT(3), 0), 506 + RK8XX_DESC(RK818_ID_BOOST, "DCDC_BOOST", "boost", 4700, 5400, 100, 507 + RK818_BOOST_LDO9_ON_VSEL_REG, RK818_BOOST_ON_VSEL_MASK, 508 + RK818_DCDC_EN_REG, BIT(4), 0), 509 + RK8XX_DESC(RK818_ID_LDO1, "LDO_REG1", "vcc6", 1800, 3400, 100, 510 + RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 511 + BIT(0), 400), 512 + RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100, 513 + RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 514 + BIT(1), 400), 515 + { 516 + .name = "LDO_REG3", 517 + .supply_name = "vcc7", 518 + .of_match = of_match_ptr("LDO_REG3"), 519 + .regulators_node = of_match_ptr("regulators"), 520 + .id = RK818_ID_LDO3, 521 + .ops = &rk808_reg_ops_ranges, 522 + .type = REGULATOR_VOLTAGE, 523 + .n_voltages = 16, 524 + .linear_ranges = rk808_ldo3_voltage_ranges, 525 + .n_linear_ranges = ARRAY_SIZE(rk808_ldo3_voltage_ranges), 526 + .vsel_reg = RK818_LDO3_ON_VSEL_REG, 527 + .vsel_mask = RK818_LDO3_ON_VSEL_MASK, 528 + .enable_reg = RK818_LDO_EN_REG, 529 + .enable_mask = BIT(2), 530 + .enable_time = 400, 531 + .owner = THIS_MODULE, 532 + }, 533 + RK8XX_DESC(RK818_ID_LDO4, "LDO_REG4", "vcc8", 1800, 3400, 100, 534 + RK818_LDO4_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 535 + BIT(3), 400), 536 + RK8XX_DESC(RK818_ID_LDO5, "LDO_REG5", "vcc7", 1800, 3400, 100, 537 + 
RK818_LDO5_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 538 + BIT(4), 400), 539 + RK8XX_DESC(RK818_ID_LDO6, "LDO_REG6", "vcc8", 800, 2500, 100, 540 + RK818_LDO6_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 541 + BIT(5), 400), 542 + RK8XX_DESC(RK818_ID_LDO7, "LDO_REG7", "vcc7", 800, 2500, 100, 543 + RK818_LDO7_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 544 + BIT(6), 400), 545 + RK8XX_DESC(RK818_ID_LDO8, "LDO_REG8", "vcc8", 1800, 3400, 100, 546 + RK818_LDO8_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, 547 + BIT(7), 400), 548 + RK8XX_DESC(RK818_ID_LDO9, "LDO_REG9", "vcc9", 1800, 3400, 100, 549 + RK818_BOOST_LDO9_ON_VSEL_REG, RK818_LDO_VSEL_MASK, 550 + RK818_DCDC_EN_REG, BIT(5), 400), 551 + RK8XX_DESC_SWITCH(RK818_ID_SWITCH, "SWITCH_REG", "vcc9", 552 + RK818_DCDC_EN_REG, BIT(6)), 553 + RK8XX_DESC_SWITCH(RK818_ID_HDMI_SWITCH, "HDMI_SWITCH", "h_5v", 554 + RK818_H5V_EN_REG, BIT(0)), 555 + RK8XX_DESC_SWITCH(RK818_ID_OTG_SWITCH, "OTG_SWITCH", "usb", 556 + RK818_DCDC_EN_REG, BIT(7)), 557 + }; 558 + 467 559 static int rk808_regulator_dt_parse_pdata(struct device *dev, 468 560 struct device *client_dev, 469 561 struct regmap *map, ··· 611 499 struct regulator_config config = {}; 612 500 struct regulator_dev *rk808_rdev; 613 501 struct rk808_regulator_data *pdata; 614 - int ret, i; 502 + const struct regulator_desc *regulators; 503 + int ret, i, nregulators; 615 504 616 505 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 617 506 if (!pdata) ··· 625 512 626 513 platform_set_drvdata(pdev, pdata); 627 514 515 + switch (rk808->variant) { 516 + case RK808_ID: 517 + regulators = rk808_reg; 518 + nregulators = RK808_NUM_REGULATORS; 519 + break; 520 + case RK818_ID: 521 + regulators = rk818_reg; 522 + nregulators = RK818_NUM_REGULATORS; 523 + break; 524 + default: 525 + dev_err(&client->dev, "unsupported RK8XX ID %lu\n", 526 + rk808->variant); 527 + return -EINVAL; 528 + } 529 + 628 530 config.dev = &client->dev; 629 531 config.driver_data = 
pdata; 630 532 config.regmap = rk808->regmap; 631 533 632 534 /* Instantiate the regulators */ 633 - for (i = 0; i < RK808_NUM_REGULATORS; i++) { 535 + for (i = 0; i < nregulators; i++) { 634 536 rk808_rdev = devm_regulator_register(&pdev->dev, 635 - &rk808_reg[i], &config); 537 + &regulators[i], &config); 636 538 if (IS_ERR(rk808_rdev)) { 637 539 dev_err(&client->dev, 638 540 "failed to register %d regulator\n", i); ··· 668 540 669 541 module_platform_driver(rk808_regulator_driver); 670 542 671 - MODULE_DESCRIPTION("regulator driver for the rk808 series PMICs"); 672 - MODULE_AUTHOR("Chris Zhong<zyw@rock-chips.com>"); 673 - MODULE_AUTHOR("Zhang Qing<zhangqing@rock-chips.com>"); 543 + MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs"); 544 + MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>"); 545 + MODULE_AUTHOR("Zhang Qing <zhangqing@rock-chips.com>"); 546 + MODULE_AUTHOR("Wadim Egorov <w.egorov@phytec.de>"); 674 547 MODULE_LICENSE("GPL"); 675 548 MODULE_ALIAS("platform:rk808-regulator");
+12 -2
drivers/rtc/Kconfig
··· 187 187 This driver can also be built as a module. If so, the module 188 188 will be called rtc-abx80x. 189 189 190 + config RTC_DRV_AC100 191 + tristate "X-Powers AC100" 192 + depends on MFD_AC100 193 + help 194 + If you say yes here you get support for the real-time clock found 195 + in X-Powers AC100 family peripheral ICs. 196 + 197 + This driver can also be built as a module. If so, the module 198 + will be called rtc-ac100. 199 + 190 200 config RTC_DRV_AS3722 191 201 tristate "ams AS3722 RTC driver" 192 202 depends on MFD_AS3722 ··· 338 328 will be called rtc-max77686. 339 329 340 330 config RTC_DRV_RK808 341 - tristate "Rockchip RK808 RTC" 331 + tristate "Rockchip RK808/RK818 RTC" 342 332 depends on MFD_RK808 343 333 help 344 334 If you say yes here you will get support for the 345 - RTC of RK808 PMIC. 335 + RTC of RK808 and RK818 PMIC. 346 336 347 337 This driver can also be built as a module. If so, the module 348 338 will be called rk808-rtc.
+1
drivers/rtc/Makefile
··· 27 27 obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o 28 28 obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o 29 29 obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o 30 + obj-$(CONFIG_RTC_DRV_AC100) += rtc-ac100.o 30 31 obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o 31 32 obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o 32 33 obj-$(CONFIG_RTC_DRV_ASM9260) += rtc-asm9260.o
+627
drivers/rtc/rtc-ac100.c
··· 1 + /* 2 + * RTC Driver for X-Powers AC100 3 + * 4 + * Copyright (c) 2016 Chen-Yu Tsai 5 + * 6 + * Chen-Yu Tsai <wens@csie.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, but WITHOUT 13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 + * more details. 16 + */ 17 + 18 + #include <linux/bcd.h> 19 + #include <linux/clk-provider.h> 20 + #include <linux/device.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/kernel.h> 23 + #include <linux/mfd/ac100.h> 24 + #include <linux/module.h> 25 + #include <linux/mutex.h> 26 + #include <linux/of.h> 27 + #include <linux/platform_device.h> 28 + #include <linux/regmap.h> 29 + #include <linux/rtc.h> 30 + #include <linux/types.h> 31 + 32 + /* Control register */ 33 + #define AC100_RTC_CTRL_24HOUR BIT(0) 34 + 35 + /* Clock output register bits */ 36 + #define AC100_CLKOUT_PRE_DIV_SHIFT 5 37 + #define AC100_CLKOUT_PRE_DIV_WIDTH 3 38 + #define AC100_CLKOUT_MUX_SHIFT 4 39 + #define AC100_CLKOUT_MUX_WIDTH 1 40 + #define AC100_CLKOUT_DIV_SHIFT 1 41 + #define AC100_CLKOUT_DIV_WIDTH 3 42 + #define AC100_CLKOUT_EN BIT(0) 43 + 44 + /* RTC */ 45 + #define AC100_RTC_SEC_MASK GENMASK(6, 0) 46 + #define AC100_RTC_MIN_MASK GENMASK(6, 0) 47 + #define AC100_RTC_HOU_MASK GENMASK(5, 0) 48 + #define AC100_RTC_WEE_MASK GENMASK(2, 0) 49 + #define AC100_RTC_DAY_MASK GENMASK(5, 0) 50 + #define AC100_RTC_MON_MASK GENMASK(4, 0) 51 + #define AC100_RTC_YEA_MASK GENMASK(7, 0) 52 + #define AC100_RTC_YEA_LEAP BIT(15) 53 + #define AC100_RTC_UPD_TRIGGER BIT(15) 54 + 55 + /* Alarm (wall clock) */ 56 + #define AC100_ALM_INT_ENABLE BIT(0) 57 + 58 + #define AC100_ALM_SEC_MASK GENMASK(6, 0) 59 + #define 
AC100_ALM_MIN_MASK GENMASK(6, 0) 60 + #define AC100_ALM_HOU_MASK GENMASK(5, 0) 61 + #define AC100_ALM_WEE_MASK GENMASK(2, 0) 62 + #define AC100_ALM_DAY_MASK GENMASK(5, 0) 63 + #define AC100_ALM_MON_MASK GENMASK(4, 0) 64 + #define AC100_ALM_YEA_MASK GENMASK(7, 0) 65 + #define AC100_ALM_ENABLE_FLAG BIT(15) 66 + #define AC100_ALM_UPD_TRIGGER BIT(15) 67 + 68 + /* 69 + * The year parameter passed to the driver is usually an offset relative to 70 + * the year 1900. This macro is used to convert this offset to another one 71 + * relative to the minimum year allowed by the hardware. 72 + * 73 + * The year range is 1970 - 2069. This range is selected to match Allwinner's 74 + * driver. 75 + */ 76 + #define AC100_YEAR_MIN 1970 77 + #define AC100_YEAR_MAX 2069 78 + #define AC100_YEAR_OFF (AC100_YEAR_MIN - 1900) 79 + 80 + struct ac100_clkout { 81 + struct clk_hw hw; 82 + struct regmap *regmap; 83 + u8 offset; 84 + }; 85 + 86 + #define to_ac100_clkout(_hw) container_of(_hw, struct ac100_clkout, hw) 87 + 88 + #define AC100_RTC_32K_NAME "ac100-rtc-32k" 89 + #define AC100_RTC_32K_RATE 32768 90 + #define AC100_CLKOUT_NUM 3 91 + 92 + static const char * const ac100_clkout_names[AC100_CLKOUT_NUM] = { 93 + "ac100-cko1-rtc", 94 + "ac100-cko2-rtc", 95 + "ac100-cko3-rtc", 96 + }; 97 + 98 + struct ac100_rtc_dev { 99 + struct rtc_device *rtc; 100 + struct device *dev; 101 + struct regmap *regmap; 102 + int irq; 103 + unsigned long alarm; 104 + 105 + struct clk_hw *rtc_32k_clk; 106 + struct ac100_clkout clks[AC100_CLKOUT_NUM]; 107 + struct clk_hw_onecell_data *clk_data; 108 + }; 109 + 110 + /** 111 + * Clock controls for 3 clock output pins 112 + */ 113 + 114 + static const struct clk_div_table ac100_clkout_prediv[] = { 115 + { .val = 0, .div = 1 }, 116 + { .val = 1, .div = 2 }, 117 + { .val = 2, .div = 4 }, 118 + { .val = 3, .div = 8 }, 119 + { .val = 4, .div = 16 }, 120 + { .val = 5, .div = 32 }, 121 + { .val = 6, .div = 64 }, 122 + { .val = 7, .div = 122 }, 123 + { }, 124 + }; 125 + 126 
+ /* Abuse the fact that one parent is 32768 Hz, and the other is 4 MHz */ 127 + static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw, 128 + unsigned long prate) 129 + { 130 + struct ac100_clkout *clk = to_ac100_clkout(hw); 131 + unsigned int reg, div; 132 + 133 + regmap_read(clk->regmap, clk->offset, &reg); 134 + 135 + /* Handle pre-divider first */ 136 + if (prate != AC100_RTC_32K_RATE) { 137 + div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) & 138 + ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1); 139 + prate = divider_recalc_rate(hw, prate, div, 140 + ac100_clkout_prediv, 0); 141 + } 142 + 143 + div = (reg >> AC100_CLKOUT_DIV_SHIFT) & 144 + (BIT(AC100_CLKOUT_DIV_WIDTH) - 1); 145 + return divider_recalc_rate(hw, prate, div, NULL, 146 + CLK_DIVIDER_POWER_OF_TWO); 147 + } 148 + 149 + static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate, 150 + unsigned long prate) 151 + { 152 + unsigned long best_rate = 0, tmp_rate, tmp_prate; 153 + int i; 154 + 155 + if (prate == AC100_RTC_32K_RATE) 156 + return divider_round_rate(hw, rate, &prate, NULL, 157 + AC100_CLKOUT_DIV_WIDTH, 158 + CLK_DIVIDER_POWER_OF_TWO); 159 + 160 + for (i = 0; ac100_clkout_prediv[i].div; i++) { 161 + tmp_prate = DIV_ROUND_UP(prate, ac100_clkout_prediv[i].val); 162 + tmp_rate = divider_round_rate(hw, rate, &tmp_prate, NULL, 163 + AC100_CLKOUT_DIV_WIDTH, 164 + CLK_DIVIDER_POWER_OF_TWO); 165 + 166 + if (tmp_rate > rate) 167 + continue; 168 + if (rate - tmp_rate < best_rate - tmp_rate) 169 + best_rate = tmp_rate; 170 + } 171 + 172 + return best_rate; 173 + } 174 + 175 + static int ac100_clkout_determine_rate(struct clk_hw *hw, 176 + struct clk_rate_request *req) 177 + { 178 + struct clk_hw *best_parent; 179 + unsigned long best = 0; 180 + int i, num_parents = clk_hw_get_num_parents(hw); 181 + 182 + for (i = 0; i < num_parents; i++) { 183 + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); 184 + unsigned long tmp, prate = clk_hw_get_rate(parent); 185 + 186 + tmp = 
ac100_clkout_round_rate(hw, req->rate, prate); 187 + 188 + if (tmp > req->rate) 189 + continue; 190 + if (req->rate - tmp < req->rate - best) { 191 + best = tmp; 192 + best_parent = parent; 193 + } 194 + } 195 + 196 + if (!best) 197 + return -EINVAL; 198 + 199 + req->best_parent_hw = best_parent; 200 + req->best_parent_rate = best; 201 + req->rate = best; 202 + 203 + return 0; 204 + } 205 + 206 + static int ac100_clkout_set_rate(struct clk_hw *hw, unsigned long rate, 207 + unsigned long prate) 208 + { 209 + struct ac100_clkout *clk = to_ac100_clkout(hw); 210 + int div = 0, pre_div = 0; 211 + 212 + do { 213 + div = divider_get_val(rate * ac100_clkout_prediv[pre_div].div, 214 + prate, NULL, AC100_CLKOUT_DIV_WIDTH, 215 + CLK_DIVIDER_POWER_OF_TWO); 216 + if (div >= 0) 217 + break; 218 + } while (prate != AC100_RTC_32K_RATE && 219 + ac100_clkout_prediv[++pre_div].div); 220 + 221 + if (div < 0) 222 + return div; 223 + 224 + pre_div = ac100_clkout_prediv[pre_div].val; 225 + 226 + regmap_update_bits(clk->regmap, clk->offset, 227 + ((1 << AC100_CLKOUT_DIV_WIDTH) - 1) << AC100_CLKOUT_DIV_SHIFT | 228 + ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1) << AC100_CLKOUT_PRE_DIV_SHIFT, 229 + (div - 1) << AC100_CLKOUT_DIV_SHIFT | 230 + (pre_div - 1) << AC100_CLKOUT_PRE_DIV_SHIFT); 231 + 232 + return 0; 233 + } 234 + 235 + static int ac100_clkout_prepare(struct clk_hw *hw) 236 + { 237 + struct ac100_clkout *clk = to_ac100_clkout(hw); 238 + 239 + return regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN, 240 + AC100_CLKOUT_EN); 241 + } 242 + 243 + static void ac100_clkout_unprepare(struct clk_hw *hw) 244 + { 245 + struct ac100_clkout *clk = to_ac100_clkout(hw); 246 + 247 + regmap_update_bits(clk->regmap, clk->offset, AC100_CLKOUT_EN, 0); 248 + } 249 + 250 + static int ac100_clkout_is_prepared(struct clk_hw *hw) 251 + { 252 + struct ac100_clkout *clk = to_ac100_clkout(hw); 253 + unsigned int reg; 254 + 255 + regmap_read(clk->regmap, clk->offset, &reg); 256 + 257 + return reg & 
AC100_CLKOUT_EN; 258 + } 259 + 260 + static u8 ac100_clkout_get_parent(struct clk_hw *hw) 261 + { 262 + struct ac100_clkout *clk = to_ac100_clkout(hw); 263 + unsigned int reg; 264 + 265 + regmap_read(clk->regmap, clk->offset, &reg); 266 + 267 + return (reg >> AC100_CLKOUT_MUX_SHIFT) & 0x1; 268 + } 269 + 270 + static int ac100_clkout_set_parent(struct clk_hw *hw, u8 index) 271 + { 272 + struct ac100_clkout *clk = to_ac100_clkout(hw); 273 + 274 + return regmap_update_bits(clk->regmap, clk->offset, 275 + BIT(AC100_CLKOUT_MUX_SHIFT), 276 + index ? BIT(AC100_CLKOUT_MUX_SHIFT) : 0); 277 + } 278 + 279 + static const struct clk_ops ac100_clkout_ops = { 280 + .prepare = ac100_clkout_prepare, 281 + .unprepare = ac100_clkout_unprepare, 282 + .is_prepared = ac100_clkout_is_prepared, 283 + .recalc_rate = ac100_clkout_recalc_rate, 284 + .determine_rate = ac100_clkout_determine_rate, 285 + .get_parent = ac100_clkout_get_parent, 286 + .set_parent = ac100_clkout_set_parent, 287 + .set_rate = ac100_clkout_set_rate, 288 + }; 289 + 290 + static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip) 291 + { 292 + struct device_node *np = chip->dev->of_node; 293 + const char *parents[2] = {AC100_RTC_32K_NAME}; 294 + int i, ret; 295 + 296 + chip->clk_data = devm_kzalloc(chip->dev, sizeof(*chip->clk_data) + 297 + sizeof(*chip->clk_data->hws) * 298 + AC100_CLKOUT_NUM, 299 + GFP_KERNEL); 300 + if (!chip->clk_data) 301 + return -ENOMEM; 302 + 303 + chip->rtc_32k_clk = clk_hw_register_fixed_rate(chip->dev, 304 + AC100_RTC_32K_NAME, 305 + NULL, 0, 306 + AC100_RTC_32K_RATE); 307 + if (IS_ERR(chip->rtc_32k_clk)) { 308 + ret = PTR_ERR(chip->rtc_32k_clk); 309 + dev_err(chip->dev, "Failed to register RTC-32k clock: %d\n", 310 + ret); 311 + return ret; 312 + } 313 + 314 + parents[1] = of_clk_get_parent_name(np, 0); 315 + if (!parents[1]) { 316 + dev_err(chip->dev, "Failed to get ADDA 4M clock\n"); 317 + return -EINVAL; 318 + } 319 + 320 + for (i = 0; i < AC100_CLKOUT_NUM; i++) { 321 + struct 
ac100_clkout *clk = &chip->clks[i]; 322 + struct clk_init_data init = { 323 + .name = ac100_clkout_names[i], 324 + .ops = &ac100_clkout_ops, 325 + .parent_names = parents, 326 + .num_parents = ARRAY_SIZE(parents), 327 + .flags = 0, 328 + }; 329 + 330 + clk->regmap = chip->regmap; 331 + clk->offset = AC100_CLKOUT_CTRL1 + i; 332 + clk->hw.init = &init; 333 + 334 + ret = devm_clk_hw_register(chip->dev, &clk->hw); 335 + if (ret) { 336 + dev_err(chip->dev, "Failed to register clk '%s': %d\n", 337 + init.name, ret); 338 + goto err_unregister_rtc_32k; 339 + } 340 + 341 + chip->clk_data->hws[i] = &clk->hw; 342 + } 343 + 344 + chip->clk_data->num = i; 345 + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, chip->clk_data); 346 + if (ret) 347 + goto err_unregister_rtc_32k; 348 + 349 + return 0; 350 + 351 + err_unregister_rtc_32k: 352 + clk_unregister_fixed_rate(chip->rtc_32k_clk->clk); 353 + 354 + return ret; 355 + } 356 + 357 + static void ac100_rtc_unregister_clks(struct ac100_rtc_dev *chip) 358 + { 359 + of_clk_del_provider(chip->dev->of_node); 360 + clk_unregister_fixed_rate(chip->rtc_32k_clk->clk); 361 + } 362 + 363 + /** 364 + * RTC related bits 365 + */ 366 + static int ac100_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm) 367 + { 368 + struct ac100_rtc_dev *chip = dev_get_drvdata(dev); 369 + struct regmap *regmap = chip->regmap; 370 + u16 reg[7]; 371 + int ret; 372 + 373 + ret = regmap_bulk_read(regmap, AC100_RTC_SEC, reg, 7); 374 + if (ret) 375 + return ret; 376 + 377 + rtc_tm->tm_sec = bcd2bin(reg[0] & AC100_RTC_SEC_MASK); 378 + rtc_tm->tm_min = bcd2bin(reg[1] & AC100_RTC_MIN_MASK); 379 + rtc_tm->tm_hour = bcd2bin(reg[2] & AC100_RTC_HOU_MASK); 380 + rtc_tm->tm_wday = bcd2bin(reg[3] & AC100_RTC_WEE_MASK); 381 + rtc_tm->tm_mday = bcd2bin(reg[4] & AC100_RTC_DAY_MASK); 382 + rtc_tm->tm_mon = bcd2bin(reg[5] & AC100_RTC_MON_MASK) - 1; 383 + rtc_tm->tm_year = bcd2bin(reg[6] & AC100_RTC_YEA_MASK) + 384 + AC100_YEAR_OFF; 385 + 386 + return 
rtc_valid_tm(rtc_tm); 387 + } 388 + 389 + static int ac100_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm) 390 + { 391 + struct ac100_rtc_dev *chip = dev_get_drvdata(dev); 392 + struct regmap *regmap = chip->regmap; 393 + int year; 394 + u16 reg[8]; 395 + 396 + /* our RTC has a limited year range... */ 397 + year = rtc_tm->tm_year - AC100_YEAR_OFF; 398 + if (year < 0 || year > (AC100_YEAR_MAX - 1900)) { 399 + dev_err(dev, "rtc only supports year in range %d - %d\n", 400 + AC100_YEAR_MIN, AC100_YEAR_MAX); 401 + return -EINVAL; 402 + } 403 + 404 + /* convert to BCD */ 405 + reg[0] = bin2bcd(rtc_tm->tm_sec) & AC100_RTC_SEC_MASK; 406 + reg[1] = bin2bcd(rtc_tm->tm_min) & AC100_RTC_MIN_MASK; 407 + reg[2] = bin2bcd(rtc_tm->tm_hour) & AC100_RTC_HOU_MASK; 408 + reg[3] = bin2bcd(rtc_tm->tm_wday) & AC100_RTC_WEE_MASK; 409 + reg[4] = bin2bcd(rtc_tm->tm_mday) & AC100_RTC_DAY_MASK; 410 + reg[5] = bin2bcd(rtc_tm->tm_mon + 1) & AC100_RTC_MON_MASK; 411 + reg[6] = bin2bcd(year) & AC100_RTC_YEA_MASK; 412 + /* trigger write */ 413 + reg[7] = AC100_RTC_UPD_TRIGGER; 414 + 415 + /* Is it a leap year? */ 416 + if (is_leap_year(year + AC100_YEAR_OFF + 1900)) 417 + reg[6] |= AC100_RTC_YEA_LEAP; 418 + 419 + return regmap_bulk_write(regmap, AC100_RTC_SEC, reg, 8); 420 + } 421 + 422 + static int ac100_rtc_alarm_irq_enable(struct device *dev, unsigned int en) 423 + { 424 + struct ac100_rtc_dev *chip = dev_get_drvdata(dev); 425 + struct regmap *regmap = chip->regmap; 426 + unsigned int val; 427 + 428 + val = en ? 
AC100_ALM_INT_ENABLE : 0; 429 + 430 + return regmap_write(regmap, AC100_ALM_INT_ENA, val); 431 + } 432 + 433 + static int ac100_rtc_get_alarm(struct device *dev, struct rtc_wkalrm *alrm) 434 + { 435 + struct ac100_rtc_dev *chip = dev_get_drvdata(dev); 436 + struct regmap *regmap = chip->regmap; 437 + struct rtc_time *alrm_tm = &alrm->time; 438 + u16 reg[7]; 439 + unsigned int val; 440 + int ret; 441 + 442 + ret = regmap_read(regmap, AC100_ALM_INT_ENA, &val); 443 + if (ret) 444 + return ret; 445 + 446 + alrm->enabled = !!(val & AC100_ALM_INT_ENABLE); 447 + 448 + ret = regmap_bulk_read(regmap, AC100_ALM_SEC, reg, 7); 449 + if (ret) 450 + return ret; 451 + 452 + alrm_tm->tm_sec = bcd2bin(reg[0] & AC100_ALM_SEC_MASK); 453 + alrm_tm->tm_min = bcd2bin(reg[1] & AC100_ALM_MIN_MASK); 454 + alrm_tm->tm_hour = bcd2bin(reg[2] & AC100_ALM_HOU_MASK); 455 + alrm_tm->tm_wday = bcd2bin(reg[3] & AC100_ALM_WEE_MASK); 456 + alrm_tm->tm_mday = bcd2bin(reg[4] & AC100_ALM_DAY_MASK); 457 + alrm_tm->tm_mon = bcd2bin(reg[5] & AC100_ALM_MON_MASK) - 1; 458 + alrm_tm->tm_year = bcd2bin(reg[6] & AC100_ALM_YEA_MASK) + 459 + AC100_YEAR_OFF; 460 + 461 + return 0; 462 + } 463 + 464 + static int ac100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) 465 + { 466 + struct ac100_rtc_dev *chip = dev_get_drvdata(dev); 467 + struct regmap *regmap = chip->regmap; 468 + struct rtc_time *alrm_tm = &alrm->time; 469 + u16 reg[8]; 470 + int year; 471 + int ret; 472 + 473 + /* our alarm has a limited year range... 
*/ 474 + year = alrm_tm->tm_year - AC100_YEAR_OFF; 475 + if (year < 0 || year > (AC100_YEAR_MAX - 1900)) { 476 + dev_err(dev, "alarm only supports year in range %d - %d\n", 477 + AC100_YEAR_MIN, AC100_YEAR_MAX); 478 + return -EINVAL; 479 + } 480 + 481 + /* convert to BCD */ 482 + reg[0] = (bin2bcd(alrm_tm->tm_sec) & AC100_ALM_SEC_MASK) | 483 + AC100_ALM_ENABLE_FLAG; 484 + reg[1] = (bin2bcd(alrm_tm->tm_min) & AC100_ALM_MIN_MASK) | 485 + AC100_ALM_ENABLE_FLAG; 486 + reg[2] = (bin2bcd(alrm_tm->tm_hour) & AC100_ALM_HOU_MASK) | 487 + AC100_ALM_ENABLE_FLAG; 488 + /* Do not enable weekday alarm */ 489 + reg[3] = bin2bcd(alrm_tm->tm_wday) & AC100_ALM_WEE_MASK; 490 + reg[4] = (bin2bcd(alrm_tm->tm_mday) & AC100_ALM_DAY_MASK) | 491 + AC100_ALM_ENABLE_FLAG; 492 + reg[5] = (bin2bcd(alrm_tm->tm_mon + 1) & AC100_ALM_MON_MASK) | 493 + AC100_ALM_ENABLE_FLAG; 494 + reg[6] = (bin2bcd(year) & AC100_ALM_YEA_MASK) | 495 + AC100_ALM_ENABLE_FLAG; 496 + /* trigger write */ 497 + reg[7] = AC100_ALM_UPD_TRIGGER; 498 + 499 + ret = regmap_bulk_write(regmap, AC100_ALM_SEC, reg, 8); 500 + if (ret) 501 + return ret; 502 + 503 + return ac100_rtc_alarm_irq_enable(dev, alrm->enabled); 504 + } 505 + 506 + static irqreturn_t ac100_rtc_irq(int irq, void *data) 507 + { 508 + struct ac100_rtc_dev *chip = data; 509 + struct regmap *regmap = chip->regmap; 510 + unsigned int val = 0; 511 + int ret; 512 + 513 + mutex_lock(&chip->rtc->ops_lock); 514 + 515 + /* read status */ 516 + ret = regmap_read(regmap, AC100_ALM_INT_STA, &val); 517 + if (ret) 518 + goto out; 519 + 520 + if (val & AC100_ALM_INT_ENABLE) { 521 + /* signal rtc framework */ 522 + rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF); 523 + 524 + /* clear status */ 525 + ret = regmap_write(regmap, AC100_ALM_INT_STA, val); 526 + if (ret) 527 + goto out; 528 + 529 + /* disable interrupt */ 530 + ret = ac100_rtc_alarm_irq_enable(chip->dev, 0); 531 + if (ret) 532 + goto out; 533 + } 534 + 535 + out: 536 + mutex_unlock(&chip->rtc->ops_lock); 537 + return 
IRQ_HANDLED; 538 + } 539 + 540 + static const struct rtc_class_ops ac100_rtc_ops = { 541 + .read_time = ac100_rtc_get_time, 542 + .set_time = ac100_rtc_set_time, 543 + .read_alarm = ac100_rtc_get_alarm, 544 + .set_alarm = ac100_rtc_set_alarm, 545 + .alarm_irq_enable = ac100_rtc_alarm_irq_enable, 546 + }; 547 + 548 + static int ac100_rtc_probe(struct platform_device *pdev) 549 + { 550 + struct ac100_dev *ac100 = dev_get_drvdata(pdev->dev.parent); 551 + struct ac100_rtc_dev *chip; 552 + int ret; 553 + 554 + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); 555 + platform_set_drvdata(pdev, chip); 556 + chip->dev = &pdev->dev; 557 + chip->regmap = ac100->regmap; 558 + 559 + chip->irq = platform_get_irq(pdev, 0); 560 + if (chip->irq < 0) { 561 + dev_err(&pdev->dev, "No IRQ resource\n"); 562 + return chip->irq; 563 + } 564 + 565 + ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL, 566 + ac100_rtc_irq, 567 + IRQF_SHARED | IRQF_ONESHOT, 568 + dev_name(&pdev->dev), chip); 569 + if (ret) { 570 + dev_err(&pdev->dev, "Could not request IRQ\n"); 571 + return ret; 572 + } 573 + 574 + /* always use 24 hour mode */ 575 + regmap_write_bits(chip->regmap, AC100_RTC_CTRL, AC100_RTC_CTRL_24HOUR, 576 + AC100_RTC_CTRL_24HOUR); 577 + 578 + /* disable counter alarm interrupt */ 579 + regmap_write(chip->regmap, AC100_ALM_INT_ENA, 0); 580 + 581 + /* clear counter alarm pending interrupts */ 582 + regmap_write(chip->regmap, AC100_ALM_INT_STA, AC100_ALM_INT_ENABLE); 583 + 584 + chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-ac100", 585 + &ac100_rtc_ops, THIS_MODULE); 586 + if (IS_ERR(chip->rtc)) { 587 + dev_err(&pdev->dev, "unable to register device\n"); 588 + return PTR_ERR(chip->rtc); 589 + } 590 + 591 + ret = ac100_rtc_register_clks(chip); 592 + if (ret) 593 + return ret; 594 + 595 + dev_info(&pdev->dev, "RTC enabled\n"); 596 + 597 + return 0; 598 + } 599 + 600 + static int ac100_rtc_remove(struct platform_device *pdev) 601 + { 602 + struct ac100_rtc_dev *chip = 
platform_get_drvdata(pdev); 603 + 604 + ac100_rtc_unregister_clks(chip); 605 + 606 + return 0; 607 + } 608 + 609 + static const struct of_device_id ac100_rtc_match[] = { 610 + { .compatible = "x-powers,ac100-rtc" }, 611 + { }, 612 + }; 613 + MODULE_DEVICE_TABLE(of, ac100_rtc_match); 614 + 615 + static struct platform_driver ac100_rtc_driver = { 616 + .probe = ac100_rtc_probe, 617 + .remove = ac100_rtc_remove, 618 + .driver = { 619 + .name = "ac100-rtc", 620 + .of_match_table = of_match_ptr(ac100_rtc_match), 621 + }, 622 + }; 623 + module_platform_driver(ac100_rtc_driver); 624 + 625 + MODULE_DESCRIPTION("X-Powers AC100 RTC driver"); 626 + MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); 627 + MODULE_LICENSE("GPL v2");
+1
drivers/rtc/rtc-pm8xxx.c
··· 428 428 */ 429 429 static const struct of_device_id pm8xxx_id_table[] = { 430 430 { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs }, 431 + { .compatible = "qcom,pm8018-rtc", .data = &pm8921_regs }, 431 432 { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs }, 432 433 { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs }, 433 434 { },
+5 -1
drivers/s390/virtio/Makefile
··· 6 6 # it under the terms of the GNU General Public License (version 2 only) 7 7 # as published by the Free Software Foundation. 8 8 9 - obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o 9 + s390-virtio-objs := virtio_ccw.o 10 + ifdef CONFIG_S390_GUEST_OLD_TRANSPORT 11 + s390-virtio-objs += kvm_virtio.o 12 + endif 13 + obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
+3 -1
drivers/s390/virtio/kvm_virtio.c
··· 458 458 if (test_devices_support(total_memory_size) < 0) 459 459 return -ENODEV; 460 460 461 + pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n"); 462 + 461 463 rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); 462 464 if (rc) 463 465 return rc; ··· 484 482 } 485 483 486 484 /* code for early console output with virtio_console */ 487 - static __init int early_put_chars(u32 vtermno, const char *buf, int count) 485 + static int early_put_chars(u32 vtermno, const char *buf, int count) 488 486 { 489 487 char scratch[17]; 490 488 unsigned int len = count;
+6 -5
drivers/scsi/ipr.c
··· 10410 10410 __ipr_remove(pdev); 10411 10411 return rc; 10412 10412 } 10413 + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10414 + ioa_cfg->scan_enabled = 1; 10415 + schedule_work(&ioa_cfg->work_q); 10416 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10413 10417 10414 - scsi_scan_host(ioa_cfg->host); 10415 10418 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 10416 10419 10417 10420 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { ··· 10424 10421 } 10425 10422 } 10426 10423 10427 - spin_lock_irqsave(ioa_cfg->host->host_lock, flags); 10428 - ioa_cfg->scan_enabled = 1; 10429 - schedule_work(&ioa_cfg->work_q); 10430 - spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 10424 + scsi_scan_host(ioa_cfg->host); 10425 + 10431 10426 return 0; 10432 10427 } 10433 10428
+1
drivers/thermal/clock_cooling.c
··· 426 426 if (!ccdev) 427 427 return ERR_PTR(-ENOMEM); 428 428 429 + mutex_init(&ccdev->lock); 429 430 ccdev->dev = dev; 430 431 ccdev->clk = devm_clk_get(dev, clock_name); 431 432 if (IS_ERR(ccdev->clk))
+2
drivers/thermal/fair_share.c
··· 116 116 instance->target = get_target_state(tz, cdev, percentage, 117 117 cur_trip_level); 118 118 119 + mutex_lock(&instance->cdev->lock); 119 120 instance->cdev->updated = false; 121 + mutex_unlock(&instance->cdev->lock); 120 122 thermal_cdev_update(cdev); 121 123 } 122 124 return 0;
+2
drivers/thermal/gov_bang_bang.c
··· 71 71 dev_dbg(&instance->cdev->device, "target=%d\n", 72 72 (int)instance->target); 73 73 74 + mutex_lock(&instance->cdev->lock); 74 75 instance->cdev->updated = false; /* cdev needs update */ 76 + mutex_unlock(&instance->cdev->lock); 75 77 } 76 78 77 79 mutex_unlock(&tz->lock);
+59 -1
drivers/thermal/intel_pch_thermal.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/pci.h> 23 23 #include <linux/thermal.h> 24 + #include <linux/pm.h> 24 25 25 26 /* Intel PCH thermal Device IDs */ 26 27 #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ ··· 66 65 unsigned long crt_temp; 67 66 int hot_trip_id; 68 67 unsigned long hot_temp; 68 + bool bios_enabled; 69 69 }; 70 70 71 71 static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) ··· 77 75 *nr_trips = 0; 78 76 79 77 /* Check if BIOS has already enabled thermal sensor */ 80 - if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) 78 + if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) { 79 + ptd->bios_enabled = true; 81 80 goto read_trips; 81 + } 82 82 83 83 tsel = readb(ptd->hw_base + WPT_TSEL); 84 84 /* ··· 134 130 return 0; 135 131 } 136 132 133 + static int pch_wpt_suspend(struct pch_thermal_device *ptd) 134 + { 135 + u8 tsel; 136 + 137 + if (ptd->bios_enabled) 138 + return 0; 139 + 140 + tsel = readb(ptd->hw_base + WPT_TSEL); 141 + 142 + writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL); 143 + 144 + return 0; 145 + } 146 + 147 + static int pch_wpt_resume(struct pch_thermal_device *ptd) 148 + { 149 + u8 tsel; 150 + 151 + if (ptd->bios_enabled) 152 + return 0; 153 + 154 + tsel = readb(ptd->hw_base + WPT_TSEL); 155 + 156 + writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); 157 + 158 + return 0; 159 + } 160 + 137 161 struct pch_dev_ops { 138 162 int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips); 139 163 int (*get_temp)(struct pch_thermal_device *ptd, int *temp); 164 + int (*suspend)(struct pch_thermal_device *ptd); 165 + int (*resume)(struct pch_thermal_device *ptd); 140 166 }; 141 167 142 168 ··· 174 140 static const struct pch_dev_ops pch_dev_ops_wpt = { 175 141 .hw_init = pch_wpt_init, 176 142 .get_temp = pch_wpt_get_temp, 143 + .suspend = pch_wpt_suspend, 144 + .resume = pch_wpt_resume, 177 145 }; 178 146 179 147 static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp) ··· 305 269 
pci_disable_device(pdev); 306 270 } 307 271 272 + static int intel_pch_thermal_suspend(struct device *device) 273 + { 274 + struct pci_dev *pdev = to_pci_dev(device); 275 + struct pch_thermal_device *ptd = pci_get_drvdata(pdev); 276 + 277 + return ptd->ops->suspend(ptd); 278 + } 279 + 280 + static int intel_pch_thermal_resume(struct device *device) 281 + { 282 + struct pci_dev *pdev = to_pci_dev(device); 283 + struct pch_thermal_device *ptd = pci_get_drvdata(pdev); 284 + 285 + return ptd->ops->resume(ptd); 286 + } 287 + 308 288 static struct pci_device_id intel_pch_thermal_id[] = { 309 289 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, 310 290 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, ··· 328 276 }; 329 277 MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); 330 278 279 + static const struct dev_pm_ops intel_pch_pm_ops = { 280 + .suspend = intel_pch_thermal_suspend, 281 + .resume = intel_pch_thermal_resume, 282 + }; 283 + 331 284 static struct pci_driver intel_pch_thermal_driver = { 332 285 .name = "intel_pch_thermal", 333 286 .id_table = intel_pch_thermal_id, 334 287 .probe = intel_pch_thermal_probe, 335 288 .remove = intel_pch_thermal_remove, 289 + .driver.pm = &intel_pch_pm_ops, 336 290 }; 337 291 338 292 module_pci_driver(intel_pch_thermal_driver);
+7 -4
drivers/thermal/intel_powerclamp.c
··· 388 388 int sleeptime; 389 389 unsigned long target_jiffies; 390 390 unsigned int guard; 391 - unsigned int compensation = 0; 391 + unsigned int compensated_ratio; 392 392 int interval; /* jiffies to sleep for each attempt */ 393 393 unsigned int duration_jiffies = msecs_to_jiffies(duration); 394 394 unsigned int window_size_now; ··· 409 409 * c-states, thus we need to compensate the injected idle ratio 410 410 * to achieve the actual target reported by the HW. 411 411 */ 412 - compensation = get_compensation(target_ratio); 413 - interval = duration_jiffies*100/(target_ratio+compensation); 412 + compensated_ratio = target_ratio + 413 + get_compensation(target_ratio); 414 + if (compensated_ratio <= 0) 415 + compensated_ratio = 1; 416 + interval = duration_jiffies * 100 / compensated_ratio; 414 417 415 418 /* align idle time */ 416 419 target_jiffies = roundup(jiffies, interval); ··· 650 647 goto exit_set; 651 648 } else if (set_target_ratio > 0 && new_target_ratio == 0) { 652 649 pr_info("Stop forced idle injection\n"); 653 - set_target_ratio = 0; 654 650 end_power_clamp(); 651 + set_target_ratio = 0; 655 652 } else /* adjust currently running */ { 656 653 set_target_ratio = new_target_ratio; 657 654 /* make new set_target_ratio visible to other cpus */
+2
drivers/thermal/power_allocator.c
··· 529 529 continue; 530 530 531 531 instance->target = 0; 532 + mutex_lock(&instance->cdev->lock); 532 533 instance->cdev->updated = false; 534 + mutex_unlock(&instance->cdev->lock); 533 535 thermal_cdev_update(instance->cdev); 534 536 } 535 537 }
+2
drivers/thermal/step_wise.c
··· 175 175 update_passive_instance(tz, trip_type, -1); 176 176 177 177 instance->initialized = true; 178 + mutex_lock(&instance->cdev->lock); 178 179 instance->cdev->updated = false; /* cdev needs update */ 180 + mutex_unlock(&instance->cdev->lock); 179 181 } 180 182 181 183 mutex_unlock(&tz->lock);
+9 -5
drivers/thermal/thermal_core.c
··· 1093 1093 return ret; 1094 1094 1095 1095 instance->target = state; 1096 + mutex_lock(&cdev->lock); 1096 1097 cdev->updated = false; 1098 + mutex_unlock(&cdev->lock); 1097 1099 thermal_cdev_update(cdev); 1098 1100 1099 1101 return 0; ··· 1625 1623 struct thermal_instance *instance; 1626 1624 unsigned long target = 0; 1627 1625 1628 - /* cooling device is updated*/ 1629 - if (cdev->updated) 1630 - return; 1631 - 1632 1626 mutex_lock(&cdev->lock); 1627 + /* cooling device is updated*/ 1628 + if (cdev->updated) { 1629 + mutex_unlock(&cdev->lock); 1630 + return; 1631 + } 1632 + 1633 1633 /* Make sure cdev enters the deepest cooling state */ 1634 1634 list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { 1635 1635 dev_dbg(&cdev->device, "zone%d->target=%lu\n", ··· 1641 1637 if (instance->target > target) 1642 1638 target = instance->target; 1643 1639 } 1644 - mutex_unlock(&cdev->lock); 1645 1640 cdev->ops->set_cur_state(cdev, target); 1646 1641 cdev->updated = true; 1642 + mutex_unlock(&cdev->lock); 1647 1643 trace_cdev_update(cdev, target); 1648 1644 dev_dbg(&cdev->device, "set to state %lu\n", target); 1649 1645 }
+2
drivers/thermal/thermal_hwmon.c
··· 232 232 233 233 return result; 234 234 } 235 + EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs); 235 236 236 237 void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) 237 238 { ··· 271 270 hwmon_device_unregister(hwmon->device); 272 271 kfree(hwmon); 273 272 } 273 + EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
+49 -36
drivers/vfio/pci/vfio_pci_intrs.c
··· 564 564 } 565 565 566 566 static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, 567 - uint32_t flags, void *data) 567 + unsigned int count, uint32_t flags, 568 + void *data) 568 569 { 569 - int32_t fd = *(int32_t *)data; 570 - 571 - if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) 572 - return -EINVAL; 573 - 574 570 /* DATA_NONE/DATA_BOOL enables loopback testing */ 575 571 if (flags & VFIO_IRQ_SET_DATA_NONE) { 576 - if (*ctx) 577 - eventfd_signal(*ctx, 1); 578 - return 0; 572 + if (*ctx) { 573 + if (count) { 574 + eventfd_signal(*ctx, 1); 575 + } else { 576 + eventfd_ctx_put(*ctx); 577 + *ctx = NULL; 578 + } 579 + return 0; 580 + } 579 581 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 580 - uint8_t trigger = *(uint8_t *)data; 582 + uint8_t trigger; 583 + 584 + if (!count) 585 + return -EINVAL; 586 + 587 + trigger = *(uint8_t *)data; 581 588 if (trigger && *ctx) 582 589 eventfd_signal(*ctx, 1); 590 + 591 + return 0; 592 + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { 593 + int32_t fd; 594 + 595 + if (!count) 596 + return -EINVAL; 597 + 598 + fd = *(int32_t *)data; 599 + if (fd == -1) { 600 + if (*ctx) 601 + eventfd_ctx_put(*ctx); 602 + *ctx = NULL; 603 + } else if (fd >= 0) { 604 + struct eventfd_ctx *efdctx; 605 + 606 + efdctx = eventfd_ctx_fdget(fd); 607 + if (IS_ERR(efdctx)) 608 + return PTR_ERR(efdctx); 609 + 610 + if (*ctx) 611 + eventfd_ctx_put(*ctx); 612 + 613 + *ctx = efdctx; 614 + } 583 615 return 0; 584 616 } 585 617 586 - /* Handle SET_DATA_EVENTFD */ 587 - if (fd == -1) { 588 - if (*ctx) 589 - eventfd_ctx_put(*ctx); 590 - *ctx = NULL; 591 - return 0; 592 - } else if (fd >= 0) { 593 - struct eventfd_ctx *efdctx; 594 - efdctx = eventfd_ctx_fdget(fd); 595 - if (IS_ERR(efdctx)) 596 - return PTR_ERR(efdctx); 597 - if (*ctx) 598 - eventfd_ctx_put(*ctx); 599 - *ctx = efdctx; 600 - return 0; 601 - } else 602 - return -EINVAL; 618 + return -EINVAL; 603 619 } 604 620 605 621 static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, 606 622 
unsigned index, unsigned start, 607 623 unsigned count, uint32_t flags, void *data) 608 624 { 609 - if (index != VFIO_PCI_ERR_IRQ_INDEX) 625 + if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) 610 626 return -EINVAL; 611 627 612 - /* 613 - * We should sanitize start & count, but that wasn't caught 614 - * originally, so this IRQ index must forever ignore them :-( 615 - */ 616 - 617 - return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); 628 + return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, 629 + count, flags, data); 618 630 } 619 631 620 632 static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, 621 633 unsigned index, unsigned start, 622 634 unsigned count, uint32_t flags, void *data) 623 635 { 624 - if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) 636 + if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) 625 637 return -EINVAL; 626 638 627 - return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); 639 + return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, 640 + count, flags, data); 628 641 } 629 642 630 643 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
+5 -1
drivers/vhost/vsock.c
··· 307 307 308 308 vhost_disable_notify(&vsock->dev, vq); 309 309 for (;;) { 310 + u32 len; 311 + 310 312 if (!vhost_vsock_more_replies(vsock)) { 311 313 /* Stop tx until the device processes already 312 314 * pending replies. Leave tx virtqueue ··· 336 334 continue; 337 335 } 338 336 337 + len = pkt->len; 338 + 339 339 /* Only accept correctly addressed packets */ 340 340 if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) 341 341 virtio_transport_recv_pkt(pkt); 342 342 else 343 343 virtio_transport_free_pkt(pkt); 344 344 345 - vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); 345 + vhost_add_used(vq, head, sizeof(pkt->hdr) + len); 346 346 added = true; 347 347 } 348 348
+3
drivers/virtio/virtio_ring.c
··· 327 327 * host should service the ring ASAP. */ 328 328 if (out_sgs) 329 329 vq->notify(&vq->vq); 330 + if (indirect) 331 + kfree(desc); 330 332 END_USE(vq); 331 333 return -ENOSPC; 332 334 } ··· 428 426 if (indirect) 429 427 kfree(desc); 430 428 429 + END_USE(vq); 431 430 return -EIO; 432 431 } 433 432
-27
fs/btrfs/delayed-ref.c
··· 862 862 return 0; 863 863 } 864 864 865 - int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, 866 - struct btrfs_trans_handle *trans, 867 - u64 ref_root, u64 bytenr, u64 num_bytes) 868 - { 869 - struct btrfs_delayed_ref_root *delayed_refs; 870 - struct btrfs_delayed_ref_head *ref_head; 871 - int ret = 0; 872 - 873 - if (!fs_info->quota_enabled || !is_fstree(ref_root)) 874 - return 0; 875 - 876 - delayed_refs = &trans->transaction->delayed_refs; 877 - 878 - spin_lock(&delayed_refs->lock); 879 - ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0); 880 - if (!ref_head) { 881 - ret = -ENOENT; 882 - goto out; 883 - } 884 - WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root); 885 - ref_head->qgroup_ref_root = ref_root; 886 - ref_head->qgroup_reserved = num_bytes; 887 - out: 888 - spin_unlock(&delayed_refs->lock); 889 - return ret; 890 - } 891 - 892 865 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, 893 866 struct btrfs_trans_handle *trans, 894 867 u64 bytenr, u64 num_bytes,
-3
fs/btrfs/delayed-ref.h
··· 250 250 u64 parent, u64 ref_root, 251 251 u64 owner, u64 offset, u64 reserved, int action, 252 252 struct btrfs_delayed_extent_op *extent_op); 253 - int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info, 254 - struct btrfs_trans_handle *trans, 255 - u64 ref_root, u64 bytenr, u64 num_bytes); 256 253 int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, 257 254 struct btrfs_trans_handle *trans, 258 255 u64 bytenr, u64 num_bytes,
+8
fs/btrfs/file.c
··· 2033 2033 */ 2034 2034 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2035 2035 &BTRFS_I(inode)->runtime_flags); 2036 + /* 2037 + * An ordered extent might have started before and completed 2038 + * already with io errors, in which case the inode was not 2039 + * updated and we end up here. So check the inode's mapping 2040 + * flags for any errors that might have happened while doing 2041 + * writeback of file data. 2042 + */ 2043 + ret = btrfs_inode_check_errors(inode); 2036 2044 inode_unlock(inode); 2037 2045 goto out; 2038 2046 }
+36 -10
fs/btrfs/inode.c
··· 3435 3435 found_key.offset = 0; 3436 3436 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 3437 3437 ret = PTR_ERR_OR_ZERO(inode); 3438 - if (ret && ret != -ESTALE) 3438 + if (ret && ret != -ENOENT) 3439 3439 goto out; 3440 3440 3441 - if (ret == -ESTALE && root == root->fs_info->tree_root) { 3441 + if (ret == -ENOENT && root == root->fs_info->tree_root) { 3442 3442 struct btrfs_root *dead_root; 3443 3443 struct btrfs_fs_info *fs_info = root->fs_info; 3444 3444 int is_dead_root = 0; ··· 3474 3474 * Inode is already gone but the orphan item is still there, 3475 3475 * kill the orphan item. 3476 3476 */ 3477 - if (ret == -ESTALE) { 3477 + if (ret == -ENOENT) { 3478 3478 trans = btrfs_start_transaction(root, 1); 3479 3479 if (IS_ERR(trans)) { 3480 3480 ret = PTR_ERR(trans); ··· 3633 3633 /* 3634 3634 * read an inode from the btree into the in-memory inode 3635 3635 */ 3636 - static void btrfs_read_locked_inode(struct inode *inode) 3636 + static int btrfs_read_locked_inode(struct inode *inode) 3637 3637 { 3638 3638 struct btrfs_path *path; 3639 3639 struct extent_buffer *leaf; ··· 3652 3652 filled = true; 3653 3653 3654 3654 path = btrfs_alloc_path(); 3655 - if (!path) 3655 + if (!path) { 3656 + ret = -ENOMEM; 3656 3657 goto make_bad; 3658 + } 3657 3659 3658 3660 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 3659 3661 3660 3662 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 3661 - if (ret) 3663 + if (ret) { 3664 + if (ret > 0) 3665 + ret = -ENOENT; 3662 3666 goto make_bad; 3667 + } 3663 3668 3664 3669 leaf = path->nodes[0]; 3665 3670 ··· 3817 3812 } 3818 3813 3819 3814 btrfs_update_iflags(inode); 3820 - return; 3815 + return 0; 3821 3816 3822 3817 make_bad: 3823 3818 btrfs_free_path(path); 3824 3819 make_bad_inode(inode); 3820 + return ret; 3825 3821 } 3826 3822 3827 3823 /* ··· 4210 4204 int err = 0; 4211 4205 struct btrfs_root *root = BTRFS_I(dir)->root; 4212 4206 struct btrfs_trans_handle *trans; 4207 + u64 
last_unlink_trans; 4213 4208 4214 4209 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) 4215 4210 return -ENOTEMPTY; ··· 4233 4226 if (err) 4234 4227 goto out; 4235 4228 4229 + last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; 4230 + 4236 4231 /* now the directory is empty */ 4237 4232 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry), 4238 4233 dentry->d_name.name, dentry->d_name.len); 4239 - if (!err) 4234 + if (!err) { 4240 4235 btrfs_i_size_write(inode, 0); 4236 + /* 4237 + * Propagate the last_unlink_trans value of the deleted dir to 4238 + * its parent directory. This is to prevent an unrecoverable 4239 + * log tree in the case we do something like this: 4240 + * 1) create dir foo 4241 + * 2) create snapshot under dir foo 4242 + * 3) delete the snapshot 4243 + * 4) rmdir foo 4244 + * 5) mkdir foo 4245 + * 6) fsync foo or some file inside foo 4246 + */ 4247 + if (last_unlink_trans >= trans->transid) 4248 + BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; 4249 + } 4241 4250 out: 4242 4251 btrfs_end_transaction(trans, root); 4243 4252 btrfs_btree_balance_dirty(root); ··· 5629 5606 return ERR_PTR(-ENOMEM); 5630 5607 5631 5608 if (inode->i_state & I_NEW) { 5632 - btrfs_read_locked_inode(inode); 5609 + int ret; 5610 + 5611 + ret = btrfs_read_locked_inode(inode); 5633 5612 if (!is_bad_inode(inode)) { 5634 5613 inode_tree_add(inode); 5635 5614 unlock_new_inode(inode); ··· 5640 5615 } else { 5641 5616 unlock_new_inode(inode); 5642 5617 iput(inode); 5643 - inode = ERR_PTR(-ESTALE); 5618 + ASSERT(ret < 0); 5619 + inode = ERR_PTR(ret < 0 ? ret : -ESTALE); 5644 5620 } 5645 5621 } 5646 5622
+162 -11
fs/btrfs/send.c
··· 231 231 u64 parent_ino; 232 232 u64 ino; 233 233 u64 gen; 234 - bool is_orphan; 235 234 struct list_head update_refs; 236 235 }; 237 236 ··· 272 273 int name_len; 273 274 char name[]; 274 275 }; 276 + 277 + static void inconsistent_snapshot_error(struct send_ctx *sctx, 278 + enum btrfs_compare_tree_result result, 279 + const char *what) 280 + { 281 + const char *result_string; 282 + 283 + switch (result) { 284 + case BTRFS_COMPARE_TREE_NEW: 285 + result_string = "new"; 286 + break; 287 + case BTRFS_COMPARE_TREE_DELETED: 288 + result_string = "deleted"; 289 + break; 290 + case BTRFS_COMPARE_TREE_CHANGED: 291 + result_string = "updated"; 292 + break; 293 + case BTRFS_COMPARE_TREE_SAME: 294 + ASSERT(0); 295 + result_string = "unchanged"; 296 + break; 297 + default: 298 + ASSERT(0); 299 + result_string = "unexpected"; 300 + } 301 + 302 + btrfs_err(sctx->send_root->fs_info, 303 + "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu", 304 + result_string, what, sctx->cmp_key->objectid, 305 + sctx->send_root->root_key.objectid, 306 + (sctx->parent_root ? 307 + sctx->parent_root->root_key.objectid : 0)); 308 + } 275 309 276 310 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); 277 311 ··· 1893 1861 * was already unlinked/moved, so we can safely assume that we will not 1894 1862 * overwrite anything at this point in time. 
1895 1863 */ 1896 - if (other_inode > sctx->send_progress) { 1864 + if (other_inode > sctx->send_progress || 1865 + is_waiting_for_move(sctx, other_inode)) { 1897 1866 ret = get_inode_info(sctx->parent_root, other_inode, NULL, 1898 1867 who_gen, NULL, NULL, NULL, NULL); 1899 1868 if (ret < 0) ··· 2535 2502 key.type = BTRFS_INODE_ITEM_KEY; 2536 2503 key.offset = 0; 2537 2504 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2505 + if (ret > 0) 2506 + ret = -ENOENT; 2538 2507 if (ret < 0) 2539 2508 goto out; 2540 2509 ··· 2982 2947 } 2983 2948 2984 2949 if (loc.objectid > send_progress) { 2950 + struct orphan_dir_info *odi; 2951 + 2952 + odi = get_orphan_dir_info(sctx, dir); 2953 + free_orphan_dir_info(sctx, odi); 2985 2954 ret = 0; 2986 2955 goto out; 2987 2956 } ··· 3086 3047 pm->parent_ino = parent_ino; 3087 3048 pm->ino = ino; 3088 3049 pm->gen = ino_gen; 3089 - pm->is_orphan = is_orphan; 3090 3050 INIT_LIST_HEAD(&pm->list); 3091 3051 INIT_LIST_HEAD(&pm->update_refs); 3092 3052 RB_CLEAR_NODE(&pm->node); ··· 3151 3113 return NULL; 3152 3114 } 3153 3115 3116 + static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3117 + u64 ino, u64 gen, u64 *ancestor_ino) 3118 + { 3119 + int ret = 0; 3120 + u64 parent_inode = 0; 3121 + u64 parent_gen = 0; 3122 + u64 start_ino = ino; 3123 + 3124 + *ancestor_ino = 0; 3125 + while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3126 + fs_path_reset(name); 3127 + 3128 + if (is_waiting_for_rm(sctx, ino)) 3129 + break; 3130 + if (is_waiting_for_move(sctx, ino)) { 3131 + if (*ancestor_ino == 0) 3132 + *ancestor_ino = ino; 3133 + ret = get_first_ref(sctx->parent_root, ino, 3134 + &parent_inode, &parent_gen, name); 3135 + } else { 3136 + ret = __get_cur_name_and_parent(sctx, ino, gen, 3137 + &parent_inode, 3138 + &parent_gen, name); 3139 + if (ret > 0) { 3140 + ret = 0; 3141 + break; 3142 + } 3143 + } 3144 + if (ret < 0) 3145 + break; 3146 + if (parent_inode == start_ino) { 3147 + ret = 1; 3148 + if (*ancestor_ino == 0) 
3149 + *ancestor_ino = ino; 3150 + break; 3151 + } 3152 + ino = parent_inode; 3153 + gen = parent_gen; 3154 + } 3155 + return ret; 3156 + } 3157 + 3154 3158 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3155 3159 { 3156 3160 struct fs_path *from_path = NULL; ··· 3203 3123 u64 parent_ino, parent_gen; 3204 3124 struct waiting_dir_move *dm = NULL; 3205 3125 u64 rmdir_ino = 0; 3126 + u64 ancestor; 3127 + bool is_orphan; 3206 3128 int ret; 3207 3129 3208 3130 name = fs_path_alloc(); ··· 3217 3135 dm = get_waiting_dir_move(sctx, pm->ino); 3218 3136 ASSERT(dm); 3219 3137 rmdir_ino = dm->rmdir_ino; 3138 + is_orphan = dm->orphanized; 3220 3139 free_waiting_dir_move(sctx, dm); 3221 3140 3222 - if (pm->is_orphan) { 3141 + if (is_orphan) { 3223 3142 ret = gen_unique_name(sctx, pm->ino, 3224 3143 pm->gen, from_path); 3225 3144 } else { ··· 3238 3155 goto out; 3239 3156 3240 3157 sctx->send_progress = sctx->cur_ino + 1; 3158 + ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3159 + if (ret < 0) 3160 + goto out; 3161 + if (ret) { 3162 + LIST_HEAD(deleted_refs); 3163 + ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3164 + ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3165 + &pm->update_refs, &deleted_refs, 3166 + is_orphan); 3167 + if (ret < 0) 3168 + goto out; 3169 + if (rmdir_ino) { 3170 + dm = get_waiting_dir_move(sctx, pm->ino); 3171 + ASSERT(dm); 3172 + dm->rmdir_ino = rmdir_ino; 3173 + } 3174 + goto out; 3175 + } 3241 3176 fs_path_reset(name); 3242 3177 to_path = name; 3243 3178 name = NULL; ··· 3275 3174 /* already deleted */ 3276 3175 goto finish; 3277 3176 } 3278 - ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1); 3177 + ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino); 3279 3178 if (ret < 0) 3280 3179 goto out; 3281 3180 if (!ret) ··· 3305 3204 * and old parent(s). 
3306 3205 */ 3307 3206 list_for_each_entry(cur, &pm->update_refs, list) { 3308 - if (cur->dir == rmdir_ino) 3207 + /* 3208 + * The parent inode might have been deleted in the send snapshot 3209 + */ 3210 + ret = get_inode_info(sctx->send_root, cur->dir, NULL, 3211 + NULL, NULL, NULL, NULL, NULL); 3212 + if (ret == -ENOENT) { 3213 + ret = 0; 3309 3214 continue; 3215 + } 3216 + if (ret < 0) 3217 + goto out; 3218 + 3310 3219 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3311 3220 if (ret < 0) 3312 3221 goto out; ··· 3436 3325 u64 left_gen; 3437 3326 u64 right_gen; 3438 3327 int ret = 0; 3328 + struct waiting_dir_move *wdm; 3439 3329 3440 3330 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3441 3331 return 0; ··· 3495 3383 goto out; 3496 3384 } 3497 3385 3498 - if (is_waiting_for_move(sctx, di_key.objectid)) { 3386 + wdm = get_waiting_dir_move(sctx, di_key.objectid); 3387 + if (wdm && !wdm->orphanized) { 3499 3388 ret = add_pending_dir_move(sctx, 3500 3389 sctx->cur_ino, 3501 3390 sctx->cur_inode_gen, ··· 3583 3470 ret = is_ancestor(sctx->parent_root, 3584 3471 sctx->cur_ino, sctx->cur_inode_gen, 3585 3472 ino, path_before); 3586 - break; 3473 + if (ret) 3474 + break; 3587 3475 } 3588 3476 3589 3477 fs_path_reset(path_before); ··· 3757 3643 goto out; 3758 3644 if (ret) { 3759 3645 struct name_cache_entry *nce; 3646 + struct waiting_dir_move *wdm; 3760 3647 3761 3648 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3762 3649 cur->full_path); 3763 3650 if (ret < 0) 3764 3651 goto out; 3652 + 3653 + /* 3654 + * If ow_inode has its rename operation delayed 3655 + * make sure that its orphanized name is used in 3656 + * the source path when performing its rename 3657 + * operation. 3658 + */ 3659 + if (is_waiting_for_move(sctx, ow_inode)) { 3660 + wdm = get_waiting_dir_move(sctx, 3661 + ow_inode); 3662 + ASSERT(wdm); 3663 + wdm->orphanized = true; 3664 + } 3665 + 3765 3666 /* 3766 3667 * Make sure we clear our orphanized inode's 3767 3668 * name from the name cache. 
This is because the ··· 3792 3663 name_cache_delete(sctx, nce); 3793 3664 kfree(nce); 3794 3665 } 3666 + 3667 + /* 3668 + * ow_inode might currently be an ancestor of 3669 + * cur_ino, therefore compute valid_path (the 3670 + * current path of cur_ino) again because it 3671 + * might contain the pre-orphanization name of 3672 + * ow_inode, which is no longer valid. 3673 + */ 3674 + fs_path_reset(valid_path); 3675 + ret = get_cur_path(sctx, sctx->cur_ino, 3676 + sctx->cur_inode_gen, valid_path); 3677 + if (ret < 0) 3678 + goto out; 3795 3679 } else { 3796 3680 ret = send_unlink(sctx, cur->full_path); 3797 3681 if (ret < 0) ··· 5744 5602 { 5745 5603 int ret = 0; 5746 5604 5747 - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5605 + if (sctx->cur_ino != sctx->cmp_key->objectid) { 5606 + inconsistent_snapshot_error(sctx, result, "reference"); 5607 + return -EIO; 5608 + } 5748 5609 5749 5610 if (!sctx->cur_inode_new_gen && 5750 5611 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { ··· 5772 5627 { 5773 5628 int ret = 0; 5774 5629 5775 - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5630 + if (sctx->cur_ino != sctx->cmp_key->objectid) { 5631 + inconsistent_snapshot_error(sctx, result, "xattr"); 5632 + return -EIO; 5633 + } 5776 5634 5777 5635 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 5778 5636 if (result == BTRFS_COMPARE_TREE_NEW) ··· 5799 5651 { 5800 5652 int ret = 0; 5801 5653 5802 - BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); 5654 + if (sctx->cur_ino != sctx->cmp_key->objectid) { 5655 + inconsistent_snapshot_error(sctx, result, "extent"); 5656 + return -EIO; 5657 + } 5803 5658 5804 5659 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 5805 5660 if (result != BTRFS_COMPARE_TREE_DELETED)
+77 -8
fs/btrfs/tree-log.c
··· 4469 4469 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4470 4470 const int slot, 4471 4471 const struct btrfs_key *key, 4472 - struct inode *inode) 4472 + struct inode *inode, 4473 + u64 *other_ino) 4473 4474 { 4474 4475 int ret; 4475 4476 struct btrfs_path *search_path; ··· 4529 4528 search_path, parent, 4530 4529 name, this_name_len, 0); 4531 4530 if (di && !IS_ERR(di)) { 4532 - ret = 1; 4531 + struct btrfs_key di_key; 4532 + 4533 + btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4534 + di, &di_key); 4535 + if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4536 + ret = 1; 4537 + *other_ino = di_key.objectid; 4538 + } else { 4539 + ret = -EAGAIN; 4540 + } 4533 4541 goto out; 4534 4542 } else if (IS_ERR(di)) { 4535 4543 ret = PTR_ERR(di); ··· 4732 4722 if ((min_key.type == BTRFS_INODE_REF_KEY || 4733 4723 min_key.type == BTRFS_INODE_EXTREF_KEY) && 4734 4724 BTRFS_I(inode)->generation == trans->transid) { 4725 + u64 other_ino = 0; 4726 + 4735 4727 ret = btrfs_check_ref_name_override(path->nodes[0], 4736 4728 path->slots[0], 4737 - &min_key, inode); 4729 + &min_key, inode, 4730 + &other_ino); 4738 4731 if (ret < 0) { 4739 4732 err = ret; 4740 4733 goto out_unlock; 4741 4734 } else if (ret > 0) { 4742 - err = 1; 4743 - btrfs_set_log_full_commit(root->fs_info, trans); 4744 - goto out_unlock; 4735 + struct btrfs_key inode_key; 4736 + struct inode *other_inode; 4737 + 4738 + if (ins_nr > 0) { 4739 + ins_nr++; 4740 + } else { 4741 + ins_nr = 1; 4742 + ins_start_slot = path->slots[0]; 4743 + } 4744 + ret = copy_items(trans, inode, dst_path, path, 4745 + &last_extent, ins_start_slot, 4746 + ins_nr, inode_only, 4747 + logged_isize); 4748 + if (ret < 0) { 4749 + err = ret; 4750 + goto out_unlock; 4751 + } 4752 + ins_nr = 0; 4753 + btrfs_release_path(path); 4754 + inode_key.objectid = other_ino; 4755 + inode_key.type = BTRFS_INODE_ITEM_KEY; 4756 + inode_key.offset = 0; 4757 + other_inode = btrfs_iget(root->fs_info->sb, 4758 + &inode_key, root, 4759 + NULL); 
4760 + /* 4761 + * If the other inode that had a conflicting dir 4762 + * entry was deleted in the current transaction, 4763 + * we don't need to do more work nor fallback to 4764 + * a transaction commit. 4765 + */ 4766 + if (IS_ERR(other_inode) && 4767 + PTR_ERR(other_inode) == -ENOENT) { 4768 + goto next_key; 4769 + } else if (IS_ERR(other_inode)) { 4770 + err = PTR_ERR(other_inode); 4771 + goto out_unlock; 4772 + } 4773 + /* 4774 + * We are safe logging the other inode without 4775 + * acquiring its i_mutex as long as we log with 4776 + * the LOG_INODE_EXISTS mode. We're safe against 4777 + * concurrent renames of the other inode as well 4778 + * because during a rename we pin the log and 4779 + * update the log with the new name before we 4780 + * unpin it. 4781 + */ 4782 + err = btrfs_log_inode(trans, root, other_inode, 4783 + LOG_INODE_EXISTS, 4784 + 0, LLONG_MAX, ctx); 4785 + iput(other_inode); 4786 + if (err) 4787 + goto out_unlock; 4788 + else 4789 + goto next_key; 4745 4790 } 4746 4791 } 4747 4792 ··· 4864 4799 ins_nr = 0; 4865 4800 } 4866 4801 btrfs_release_path(path); 4867 - 4802 + next_key: 4868 4803 if (min_key.offset < (u64)-1) { 4869 4804 min_key.offset++; 4870 4805 } else if (min_key.type < max_key.type) { ··· 5058 4993 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5059 4994 break; 5060 4995 5061 - if (IS_ROOT(parent)) 4996 + if (IS_ROOT(parent)) { 4997 + inode = d_inode(parent); 4998 + if (btrfs_must_commit_transaction(trans, inode)) 4999 + ret = 1; 5062 5000 break; 5001 + } 5063 5002 5064 5003 parent = dget_parent(parent); 5065 5004 dput(old_parent);
+4 -1
fs/ceph/caps.c
··· 1347 1347 { 1348 1348 struct inode *inode = &ci->vfs_inode; 1349 1349 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 1350 - struct ceph_mds_session *session = *psession; 1350 + struct ceph_mds_session *session = NULL; 1351 1351 int mds; 1352 + 1352 1353 dout("ceph_flush_snaps %p\n", inode); 1354 + if (psession) 1355 + session = *psession; 1353 1356 retry: 1354 1357 spin_lock(&ci->i_ceph_lock); 1355 1358 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
+1
fs/ceph/mds_client.c
··· 2759 2759 } else { 2760 2760 path = NULL; 2761 2761 pathlen = 0; 2762 + pathbase = 0; 2762 2763 } 2763 2764 2764 2765 spin_lock(&ci->i_ceph_lock);
+6
fs/fs-writeback.c
··· 1949 1949 { 1950 1950 struct backing_dev_info *bdi; 1951 1951 1952 + /* 1953 + * If we are expecting writeback progress we must submit plugged IO. 1954 + */ 1955 + if (blk_needs_flush_plug(current)) 1956 + blk_schedule_flush_plug(current); 1957 + 1952 1958 if (!nr_pages) 1953 1959 nr_pages = get_nr_dirty_pages(); 1954 1960
+2
fs/nfs/nfs42proc.c
··· 338 338 case 0: 339 339 break; 340 340 case -NFS4ERR_EXPIRED: 341 + case -NFS4ERR_ADMIN_REVOKED: 342 + case -NFS4ERR_DELEG_REVOKED: 341 343 case -NFS4ERR_STALE_STATEID: 342 344 case -NFS4ERR_OLD_STATEID: 343 345 case -NFS4ERR_BAD_STATEID:
+4
fs/nfs/nfs4_fs.h
··· 396 396 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); 397 397 extern void nfs4_kill_renewd(struct nfs_client *); 398 398 extern void nfs4_renew_state(struct work_struct *); 399 + extern void nfs4_set_lease_period(struct nfs_client *clp, 400 + unsigned long lease, 401 + unsigned long lastrenewed); 402 + 399 403 400 404 /* nfs4state.c */ 401 405 struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
+3 -6
fs/nfs/nfs4proc.c
··· 4237 4237 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4238 4238 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4239 4239 if (err == 0) { 4240 - struct nfs_client *clp = server->nfs_client; 4241 - 4242 - spin_lock(&clp->cl_lock); 4243 - clp->cl_lease_time = fsinfo->lease_time * HZ; 4244 - clp->cl_last_renewal = now; 4245 - spin_unlock(&clp->cl_lock); 4240 + nfs4_set_lease_period(server->nfs_client, 4241 + fsinfo->lease_time * HZ, 4242 + now); 4246 4243 break; 4247 4244 } 4248 4245 err = nfs4_handle_exception(server, err, &exception);
+20
fs/nfs/nfs4renewd.c
··· 136 136 cancel_delayed_work_sync(&clp->cl_renewd); 137 137 } 138 138 139 + /** 140 + * nfs4_set_lease_period - Sets the lease period on a nfs_client 141 + * 142 + * @clp: pointer to nfs_client 143 + * @lease: new value for lease period 144 + * @lastrenewed: time at which lease was last renewed 145 + */ 146 + void nfs4_set_lease_period(struct nfs_client *clp, 147 + unsigned long lease, 148 + unsigned long lastrenewed) 149 + { 150 + spin_lock(&clp->cl_lock); 151 + clp->cl_lease_time = lease; 152 + clp->cl_last_renewal = lastrenewed; 153 + spin_unlock(&clp->cl_lock); 154 + 155 + /* Cap maximum reconnect timeout at 1/2 lease period */ 156 + rpc_cap_max_reconnect_timeout(clp->cl_rpcclient, lease >> 1); 157 + } 158 + 139 159 /* 140 160 * Local variables: 141 161 * c-basic-offset: 8
+3 -6
fs/nfs/nfs4state.c
··· 277 277 { 278 278 int status; 279 279 struct nfs_fsinfo fsinfo; 280 + unsigned long now; 280 281 281 282 if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { 282 283 nfs4_schedule_state_renewal(clp); 283 284 return 0; 284 285 } 285 286 287 + now = jiffies; 286 288 status = nfs4_proc_get_lease_time(clp, &fsinfo); 287 289 if (status == 0) { 288 - /* Update lease time and schedule renewal */ 289 - spin_lock(&clp->cl_lock); 290 - clp->cl_lease_time = fsinfo.lease_time * HZ; 291 - clp->cl_last_renewal = jiffies; 292 - spin_unlock(&clp->cl_lock); 293 - 290 + nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now); 294 291 nfs4_schedule_state_renewal(clp); 295 292 } 296 293
+48 -17
fs/nfsd/nfs4state.c
··· 4903 4903 return nfs_ok; 4904 4904 } 4905 4905 4906 + static __be32 4907 + nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 4908 + { 4909 + struct nfs4_ol_stateid *stp = openlockstateid(s); 4910 + __be32 ret; 4911 + 4912 + mutex_lock(&stp->st_mutex); 4913 + 4914 + ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 4915 + if (ret) 4916 + goto out; 4917 + 4918 + ret = nfserr_locks_held; 4919 + if (check_for_locks(stp->st_stid.sc_file, 4920 + lockowner(stp->st_stateowner))) 4921 + goto out; 4922 + 4923 + release_lock_stateid(stp); 4924 + ret = nfs_ok; 4925 + 4926 + out: 4927 + mutex_unlock(&stp->st_mutex); 4928 + nfs4_put_stid(s); 4929 + return ret; 4930 + } 4931 + 4906 4932 __be32 4907 4933 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4908 4934 struct nfsd4_free_stateid *free_stateid) ··· 4936 4910 stateid_t *stateid = &free_stateid->fr_stateid; 4937 4911 struct nfs4_stid *s; 4938 4912 struct nfs4_delegation *dp; 4939 - struct nfs4_ol_stateid *stp; 4940 4913 struct nfs4_client *cl = cstate->session->se_client; 4941 4914 __be32 ret = nfserr_bad_stateid; 4942 4915 ··· 4954 4929 ret = nfserr_locks_held; 4955 4930 break; 4956 4931 case NFS4_LOCK_STID: 4957 - ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 4958 - if (ret) 4959 - break; 4960 - stp = openlockstateid(s); 4961 - ret = nfserr_locks_held; 4962 - if (check_for_locks(stp->st_stid.sc_file, 4963 - lockowner(stp->st_stateowner))) 4964 - break; 4965 - WARN_ON(!unhash_lock_stateid(stp)); 4932 + atomic_inc(&s->sc_count); 4966 4933 spin_unlock(&cl->cl_lock); 4967 - nfs4_put_stid(s); 4968 - ret = nfs_ok; 4934 + ret = nfsd4_free_lock_stateid(stateid, s); 4969 4935 goto out; 4970 4936 case NFS4_REVOKED_DELEG_STID: 4971 4937 dp = delegstateid(s); ··· 5523 5507 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 5524 5508 struct nfs4_ol_stateid *ost, 5525 5509 struct nfsd4_lock *lock, 5526 - struct nfs4_ol_stateid **lst, bool *new) 5510 
+ struct nfs4_ol_stateid **plst, bool *new) 5527 5511 { 5528 5512 __be32 status; 5529 5513 struct nfs4_file *fi = ost->st_stid.sc_file; ··· 5531 5515 struct nfs4_client *cl = oo->oo_owner.so_client; 5532 5516 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 5533 5517 struct nfs4_lockowner *lo; 5518 + struct nfs4_ol_stateid *lst; 5534 5519 unsigned int strhashval; 5520 + bool hashed; 5535 5521 5536 5522 lo = find_lockowner_str(cl, &lock->lk_new_owner); 5537 5523 if (!lo) { ··· 5549 5531 goto out; 5550 5532 } 5551 5533 5552 - *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 5553 - if (*lst == NULL) { 5534 + retry: 5535 + lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 5536 + if (lst == NULL) { 5554 5537 status = nfserr_jukebox; 5555 5538 goto out; 5556 5539 } 5540 + 5541 + mutex_lock(&lst->st_mutex); 5542 + 5543 + /* See if it's still hashed to avoid race with FREE_STATEID */ 5544 + spin_lock(&cl->cl_lock); 5545 + hashed = !list_empty(&lst->st_perfile); 5546 + spin_unlock(&cl->cl_lock); 5547 + 5548 + if (!hashed) { 5549 + mutex_unlock(&lst->st_mutex); 5550 + nfs4_put_stid(&lst->st_stid); 5551 + goto retry; 5552 + } 5557 5553 status = nfs_ok; 5554 + *plst = lst; 5558 5555 out: 5559 5556 nfs4_put_stateowner(&lo->lo_owner); 5560 5557 return status; ··· 5636 5603 goto out; 5637 5604 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5638 5605 &lock_stp, &new); 5639 - if (status == nfs_ok) 5640 - mutex_lock(&lock_stp->st_mutex); 5641 5606 } else { 5642 5607 status = nfs4_preprocess_seqid_op(cstate, 5643 5608 lock->lk_old_lock_seqid,
+6 -3
fs/nfsd/vfs.c
··· 1252 1252 if (IS_ERR(dchild)) 1253 1253 return nfserrno(host_err); 1254 1254 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); 1255 - if (err) { 1256 - dput(dchild); 1255 + /* 1256 + * We unconditionally drop our ref to dchild as fh_compose will have 1257 + * already grabbed its own ref for it. 1258 + */ 1259 + dput(dchild); 1260 + if (err) 1257 1261 return err; 1258 - } 1259 1262 return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type, 1260 1263 rdev, resfhp); 1261 1264 }
+1 -3
fs/pipe.c
··· 144 144 struct page *page = buf->page; 145 145 146 146 if (page_count(page) == 1) { 147 - if (memcg_kmem_enabled()) { 147 + if (memcg_kmem_enabled()) 148 148 memcg_kmem_uncharge(page, 0); 149 - __ClearPageKmemcg(page); 150 - } 151 149 __SetPageLocked(page); 152 150 return 0; 153 151 }
+1 -1
fs/proc/meminfo.c
··· 46 46 cached = 0; 47 47 48 48 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 49 - pages[lru] = global_page_state(NR_LRU_BASE + lru); 49 + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 50 50 51 51 available = si_mem_available(); 52 52
+25 -2
include/asm-generic/qrwlock.h
··· 25 25 #include <asm-generic/qrwlock_types.h> 26 26 27 27 /* 28 - * Writer states & reader shift and bias 28 + * Writer states & reader shift and bias. 29 + * 30 + * | +0 | +1 | +2 | +3 | 31 + * ----+----+----+----+----+ 32 + * LE | 78 | 56 | 34 | 12 | 0x12345678 33 + * ----+----+----+----+----+ 34 + * | wr | rd | 35 + * +----+----+----+----+ 36 + * 37 + * ----+----+----+----+----+ 38 + * BE | 12 | 34 | 56 | 78 | 0x12345678 39 + * ----+----+----+----+----+ 40 + * | rd | wr | 41 + * +----+----+----+----+ 29 42 */ 30 43 #define _QW_WAITING 1 /* A writer is waiting */ 31 44 #define _QW_LOCKED 0xff /* A writer holds the lock */ ··· 147 134 } 148 135 149 136 /** 137 + * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock 138 + * @lock : Pointer to queue rwlock structure 139 + * Return: the write byte address of a queue rwlock 140 + */ 141 + static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) 142 + { 143 + return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); 144 + } 145 + 146 + /** 150 147 * queued_write_unlock - release write lock of a queue rwlock 151 148 * @lock : Pointer to queue rwlock structure 152 149 */ 153 150 static inline void queued_write_unlock(struct qrwlock *lock) 154 151 { 155 - smp_store_release((u8 *)&lock->cnts, 0); 152 + smp_store_release(__qrwlock_write_byte(lock), 0); 156 153 } 157 154 158 155 /*
+2 -1
include/drm/ttm/ttm_bo_driver.h
··· 962 962 * 963 963 * @bo: A pointer to a struct ttm_buffer_object. 964 964 * @evict: 1: This is an eviction. Don't try to pipeline. 965 + * @interruptible: Sleep interruptible if waiting. 965 966 * @no_wait_gpu: Return immediately if the GPU is busy. 966 967 * @new_mem: struct ttm_mem_reg indicating where to move. 967 968 * ··· 977 976 */ 978 977 979 978 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 980 - bool evict, bool no_wait_gpu, 979 + bool evict, bool interruptible, bool no_wait_gpu, 981 980 struct ttm_mem_reg *new_mem); 982 981 983 982 /**
+22
include/dt-bindings/mfd/qcom-rpm.h
··· 147 147 #define QCOM_RPM_SMB208_S1b 137 148 148 #define QCOM_RPM_SMB208_S2a 138 149 149 #define QCOM_RPM_SMB208_S2b 139 150 + #define QCOM_RPM_PM8018_SMPS1 140 151 + #define QCOM_RPM_PM8018_SMPS2 141 152 + #define QCOM_RPM_PM8018_SMPS3 142 153 + #define QCOM_RPM_PM8018_SMPS4 143 154 + #define QCOM_RPM_PM8018_SMPS5 144 155 + #define QCOM_RPM_PM8018_LDO1 145 156 + #define QCOM_RPM_PM8018_LDO2 146 157 + #define QCOM_RPM_PM8018_LDO3 147 158 + #define QCOM_RPM_PM8018_LDO4 148 159 + #define QCOM_RPM_PM8018_LDO5 149 160 + #define QCOM_RPM_PM8018_LDO6 150 161 + #define QCOM_RPM_PM8018_LDO7 151 162 + #define QCOM_RPM_PM8018_LDO8 152 163 + #define QCOM_RPM_PM8018_LDO9 153 164 + #define QCOM_RPM_PM8018_LDO10 154 165 + #define QCOM_RPM_PM8018_LDO11 155 166 + #define QCOM_RPM_PM8018_LDO12 156 167 + #define QCOM_RPM_PM8018_LDO13 157 168 + #define QCOM_RPM_PM8018_LDO14 158 169 + #define QCOM_RPM_PM8018_LVS1 159 170 + #define QCOM_RPM_PM8018_NCP 160 171 + #define QCOM_RPM_VOLTAGE_CORNER 161 150 172 151 173 /* 152 174 * Constants used to select force mode for regulators.
+2 -1
include/linux/bvec.h
··· 74 74 "Attempted to advance past end of bvec iter\n"); 75 75 76 76 while (bytes) { 77 - unsigned len = min(bytes, bvec_iter_len(bv, *iter)); 77 + unsigned iter_len = bvec_iter_len(bv, *iter); 78 + unsigned len = min(bytes, iter_len); 78 79 79 80 bytes -= len; 80 81 iter->bi_size -= len;
+12
include/linux/kvm_host.h
··· 1113 1113 /* create, destroy, and name are mandatory */ 1114 1114 struct kvm_device_ops { 1115 1115 const char *name; 1116 + 1117 + /* 1118 + * create is called holding kvm->lock and any operations not suitable 1119 + * to do while holding the lock should be deferred to init (see 1120 + * below). 1121 + */ 1116 1122 int (*create)(struct kvm_device *dev, u32 type); 1123 + 1124 + /* 1125 + * init is called after create if create is successful and is called 1126 + * outside of holding kvm->lock. 1127 + */ 1128 + void (*init)(struct kvm_device *dev); 1117 1129 1118 1130 /* 1119 1131 * Destroy is responsible for freeing dev.
+178
include/linux/mfd/ac100.h
··· 1 + /* 2 + * Functions and registers to access AC100 codec / RTC combo IC. 3 + * 4 + * Copyright (C) 2016 Chen-Yu Tsai 5 + * 6 + * Chen-Yu Tsai <wens@csie.org> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #ifndef __LINUX_MFD_AC100_H 14 + #define __LINUX_MFD_AC100_H 15 + 16 + #include <linux/regmap.h> 17 + 18 + struct ac100_dev { 19 + struct device *dev; 20 + struct regmap *regmap; 21 + }; 22 + 23 + /* Audio codec related registers */ 24 + #define AC100_CHIP_AUDIO_RST 0x00 25 + #define AC100_PLL_CTRL1 0x01 26 + #define AC100_PLL_CTRL2 0x02 27 + #define AC100_SYSCLK_CTRL 0x03 28 + #define AC100_MOD_CLK_ENA 0x04 29 + #define AC100_MOD_RST_CTRL 0x05 30 + #define AC100_I2S_SR_CTRL 0x06 31 + 32 + /* I2S1 interface */ 33 + #define AC100_I2S1_CLK_CTRL 0x10 34 + #define AC100_I2S1_SND_OUT_CTRL 0x11 35 + #define AC100_I2S1_SND_IN_CTRL 0x12 36 + #define AC100_I2S1_MXR_SRC 0x13 37 + #define AC100_I2S1_VOL_CTRL1 0x14 38 + #define AC100_I2S1_VOL_CTRL2 0x15 39 + #define AC100_I2S1_VOL_CTRL3 0x16 40 + #define AC100_I2S1_VOL_CTRL4 0x17 41 + #define AC100_I2S1_MXR_GAIN 0x18 42 + 43 + /* I2S2 interface */ 44 + #define AC100_I2S2_CLK_CTRL 0x20 45 + #define AC100_I2S2_SND_OUT_CTRL 0x21 46 + #define AC100_I2S2_SND_IN_CTRL 0x22 47 + #define AC100_I2S2_MXR_SRC 0x23 48 + #define AC100_I2S2_VOL_CTRL1 0x24 49 + #define AC100_I2S2_VOL_CTRL2 0x25 50 + #define AC100_I2S2_VOL_CTRL3 0x26 51 + #define AC100_I2S2_VOL_CTRL4 0x27 52 + #define AC100_I2S2_MXR_GAIN 0x28 53 + 54 + /* I2S3 interface */ 55 + #define AC100_I2S3_CLK_CTRL 0x30 56 + #define AC100_I2S3_SND_OUT_CTRL 0x31 57 + #define AC100_I2S3_SND_IN_CTRL 0x32 58 + #define AC100_I2S3_SIG_PATH_CTRL 0x33 59 + 60 + /* ADC digital controls */ 61 + #define AC100_ADC_DIG_CTRL 0x40 62 + #define AC100_ADC_VOL_CTRL 0x41 63 + 64 + /* HMIC plug sensing / key detection */ 65 + 
#define AC100_HMIC_CTRL1 0x44 66 + #define AC100_HMIC_CTRL2 0x45 67 + #define AC100_HMIC_STATUS 0x46 68 + 69 + /* DAC digital controls */ 70 + #define AC100_DAC_DIG_CTRL 0x48 71 + #define AC100_DAC_VOL_CTRL 0x49 72 + #define AC100_DAC_MXR_SRC 0x4c 73 + #define AC100_DAC_MXR_GAIN 0x4d 74 + 75 + /* Analog controls */ 76 + #define AC100_ADC_APC_CTRL 0x50 77 + #define AC100_ADC_SRC 0x51 78 + #define AC100_ADC_SRC_BST_CTRL 0x52 79 + #define AC100_OUT_MXR_DAC_A_CTRL 0x53 80 + #define AC100_OUT_MXR_SRC 0x54 81 + #define AC100_OUT_MXR_SRC_BST 0x55 82 + #define AC100_HPOUT_CTRL 0x56 83 + #define AC100_ERPOUT_CTRL 0x57 84 + #define AC100_SPKOUT_CTRL 0x58 85 + #define AC100_LINEOUT_CTRL 0x59 86 + 87 + /* ADC digital audio processing (high pass filter & auto gain control */ 88 + #define AC100_ADC_DAP_L_STA 0x80 89 + #define AC100_ADC_DAP_R_STA 0x81 90 + #define AC100_ADC_DAP_L_CTRL 0x82 91 + #define AC100_ADC_DAP_R_CTRL 0x83 92 + #define AC100_ADC_DAP_L_T_L 0x84 /* Left Target Level */ 93 + #define AC100_ADC_DAP_R_T_L 0x85 /* Right Target Level */ 94 + #define AC100_ADC_DAP_L_H_A_C 0x86 /* Left High Avg. Coef */ 95 + #define AC100_ADC_DAP_L_L_A_C 0x87 /* Left Low Avg. Coef */ 96 + #define AC100_ADC_DAP_R_H_A_C 0x88 /* Right High Avg. Coef */ 97 + #define AC100_ADC_DAP_R_L_A_C 0x89 /* Right Low Avg. Coef */ 98 + #define AC100_ADC_DAP_L_D_T 0x8a /* Left Decay Time */ 99 + #define AC100_ADC_DAP_L_A_T 0x8b /* Left Attack Time */ 100 + #define AC100_ADC_DAP_R_D_T 0x8c /* Right Decay Time */ 101 + #define AC100_ADC_DAP_R_A_T 0x8d /* Right Attack Time */ 102 + #define AC100_ADC_DAP_N_TH 0x8e /* Noise Threshold */ 103 + #define AC100_ADC_DAP_L_H_N_A_C 0x8f /* Left High Noise Avg. Coef */ 104 + #define AC100_ADC_DAP_L_L_N_A_C 0x90 /* Left Low Noise Avg. Coef */ 105 + #define AC100_ADC_DAP_R_H_N_A_C 0x91 /* Right High Noise Avg. Coef */ 106 + #define AC100_ADC_DAP_R_L_N_A_C 0x92 /* Right Low Noise Avg. 
Coef */ 107 + #define AC100_ADC_DAP_H_HPF_C 0x93 /* High High-Pass-Filter Coef */ 108 + #define AC100_ADC_DAP_L_HPF_C 0x94 /* Low High-Pass-Filter Coef */ 109 + #define AC100_ADC_DAP_OPT 0x95 /* AGC Optimum */ 110 + 111 + /* DAC digital audio processing (high pass filter & dynamic range control) */ 112 + #define AC100_DAC_DAP_CTRL 0xa0 113 + #define AC100_DAC_DAP_H_HPF_C 0xa1 /* High High-Pass-Filter Coef */ 114 + #define AC100_DAC_DAP_L_HPF_C 0xa2 /* Low High-Pass-Filter Coef */ 115 + #define AC100_DAC_DAP_L_H_E_A_C 0xa3 /* Left High Energy Avg Coef */ 116 + #define AC100_DAC_DAP_L_L_E_A_C 0xa4 /* Left Low Energy Avg Coef */ 117 + #define AC100_DAC_DAP_R_H_E_A_C 0xa5 /* Right High Energy Avg Coef */ 118 + #define AC100_DAC_DAP_R_L_E_A_C 0xa6 /* Right Low Energy Avg Coef */ 119 + #define AC100_DAC_DAP_H_G_D_T_C 0xa7 /* High Gain Delay Time Coef */ 120 + #define AC100_DAC_DAP_L_G_D_T_C 0xa8 /* Low Gain Delay Time Coef */ 121 + #define AC100_DAC_DAP_H_G_A_T_C 0xa9 /* High Gain Attack Time Coef */ 122 + #define AC100_DAC_DAP_L_G_A_T_C 0xaa /* Low Gain Attack Time Coef */ 123 + #define AC100_DAC_DAP_H_E_TH 0xab /* High Energy Threshold */ 124 + #define AC100_DAC_DAP_L_E_TH 0xac /* Low Energy Threshold */ 125 + #define AC100_DAC_DAP_H_G_K 0xad /* High Gain K parameter */ 126 + #define AC100_DAC_DAP_L_G_K 0xae /* Low Gain K parameter */ 127 + #define AC100_DAC_DAP_H_G_OFF 0xaf /* High Gain offset */ 128 + #define AC100_DAC_DAP_L_G_OFF 0xb0 /* Low Gain offset */ 129 + #define AC100_DAC_DAP_OPT 0xb1 /* DRC optimum */ 130 + 131 + /* Digital audio processing enable */ 132 + #define AC100_ADC_DAP_ENA 0xb4 133 + #define AC100_DAC_DAP_ENA 0xb5 134 + 135 + /* SRC control */ 136 + #define AC100_SRC1_CTRL1 0xb8 137 + #define AC100_SRC1_CTRL2 0xb9 138 + #define AC100_SRC1_CTRL3 0xba 139 + #define AC100_SRC1_CTRL4 0xbb 140 + #define AC100_SRC2_CTRL1 0xbc 141 + #define AC100_SRC2_CTRL2 0xbd 142 + #define AC100_SRC2_CTRL3 0xbe 143 + #define AC100_SRC2_CTRL4 0xbf 144 + 145 + /* RTC clk 
control */ 146 + #define AC100_CLK32K_ANALOG_CTRL 0xc0 147 + #define AC100_CLKOUT_CTRL1 0xc1 148 + #define AC100_CLKOUT_CTRL2 0xc2 149 + #define AC100_CLKOUT_CTRL3 0xc3 150 + 151 + /* RTC module */ 152 + #define AC100_RTC_RST 0xc6 153 + #define AC100_RTC_CTRL 0xc7 154 + #define AC100_RTC_SEC 0xc8 /* second */ 155 + #define AC100_RTC_MIN 0xc9 /* minute */ 156 + #define AC100_RTC_HOU 0xca /* hour */ 157 + #define AC100_RTC_WEE 0xcb /* weekday */ 158 + #define AC100_RTC_DAY 0xcc /* day */ 159 + #define AC100_RTC_MON 0xcd /* month */ 160 + #define AC100_RTC_YEA 0xce /* year */ 161 + #define AC100_RTC_UPD 0xcf /* update trigger */ 162 + 163 + /* RTC alarm */ 164 + #define AC100_ALM_INT_ENA 0xd0 165 + #define AC100_ALM_INT_STA 0xd1 166 + #define AC100_ALM_SEC 0xd8 167 + #define AC100_ALM_MIN 0xd9 168 + #define AC100_ALM_HOU 0xda 169 + #define AC100_ALM_WEE 0xdb 170 + #define AC100_ALM_DAY 0xdc 171 + #define AC100_ALM_MON 0xdd 172 + #define AC100_ALM_YEA 0xde 173 + #define AC100_ALM_UPD 0xdf 174 + 175 + /* RTC general purpose register 0 ~ 15 */ 176 + #define AC100_RTC_GP(x) (0xe0 + (x)) 177 + 178 + #endif /* __LINUX_MFD_AC100_H */
+9
include/linux/mfd/arizona/core.h
··· 13 13 #ifndef _WM_ARIZONA_CORE_H 14 14 #define _WM_ARIZONA_CORE_H 15 15 16 + #include <linux/clk.h> 16 17 #include <linux/interrupt.h> 17 18 #include <linux/notifier.h> 18 19 #include <linux/regmap.h> ··· 21 20 #include <linux/mfd/arizona/pdata.h> 22 21 23 22 #define ARIZONA_MAX_CORE_SUPPLIES 2 23 + 24 + enum { 25 + ARIZONA_MCLK1, 26 + ARIZONA_MCLK2, 27 + ARIZONA_NUM_MCLK 28 + }; 24 29 25 30 enum arizona_type { 26 31 WM5102 = 1, ··· 145 138 146 139 struct mutex clk_lock; 147 140 int clk32k_ref; 141 + 142 + struct clk *mclk[ARIZONA_NUM_MCLK]; 148 143 149 144 bool ctrlif_error; 150 145
+60
include/linux/mfd/axp20x.h
··· 20 20 AXP221_ID, 21 21 AXP223_ID, 22 22 AXP288_ID, 23 + AXP806_ID, 23 24 AXP809_ID, 24 25 NR_AXP20X_VARIANTS, 25 26 }; ··· 91 90 #define AXP22X_ALDO2_V_OUT 0x29 92 91 #define AXP22X_ALDO3_V_OUT 0x2a 93 92 #define AXP22X_CHRG_CTRL3 0x35 93 + 94 + #define AXP806_STARTUP_SRC 0x00 95 + #define AXP806_CHIP_ID 0x03 96 + #define AXP806_PWR_OUT_CTRL1 0x10 97 + #define AXP806_PWR_OUT_CTRL2 0x11 98 + #define AXP806_DCDCA_V_CTRL 0x12 99 + #define AXP806_DCDCB_V_CTRL 0x13 100 + #define AXP806_DCDCC_V_CTRL 0x14 101 + #define AXP806_DCDCD_V_CTRL 0x15 102 + #define AXP806_DCDCE_V_CTRL 0x16 103 + #define AXP806_ALDO1_V_CTRL 0x17 104 + #define AXP806_ALDO2_V_CTRL 0x18 105 + #define AXP806_ALDO3_V_CTRL 0x19 106 + #define AXP806_DCDC_MODE_CTRL1 0x1a 107 + #define AXP806_DCDC_MODE_CTRL2 0x1b 108 + #define AXP806_DCDC_FREQ_CTRL 0x1c 109 + #define AXP806_BLDO1_V_CTRL 0x20 110 + #define AXP806_BLDO2_V_CTRL 0x21 111 + #define AXP806_BLDO3_V_CTRL 0x22 112 + #define AXP806_BLDO4_V_CTRL 0x23 113 + #define AXP806_CLDO1_V_CTRL 0x24 114 + #define AXP806_CLDO2_V_CTRL 0x25 115 + #define AXP806_CLDO3_V_CTRL 0x26 116 + #define AXP806_VREF_TEMP_WARN_L 0xf3 94 117 95 118 /* Interrupt */ 96 119 #define AXP152_IRQ1_EN 0x40 ··· 291 266 }; 292 267 293 268 enum { 269 + AXP806_DCDCA = 0, 270 + AXP806_DCDCB, 271 + AXP806_DCDCC, 272 + AXP806_DCDCD, 273 + AXP806_DCDCE, 274 + AXP806_ALDO1, 275 + AXP806_ALDO2, 276 + AXP806_ALDO3, 277 + AXP806_BLDO1, 278 + AXP806_BLDO2, 279 + AXP806_BLDO3, 280 + AXP806_BLDO4, 281 + AXP806_CLDO1, 282 + AXP806_CLDO2, 283 + AXP806_CLDO3, 284 + AXP806_SW, 285 + AXP806_REG_ID_MAX, 286 + }; 287 + 288 + enum { 294 289 AXP809_DCDC1 = 0, 295 290 AXP809_DCDC2, 296 291 AXP809_DCDC3, ··· 457 412 AXP288_IRQ_TIMER, 458 413 AXP288_IRQ_MV_CHNG, 459 414 AXP288_IRQ_BC_USB_CHNG, 415 + }; 416 + 417 + enum axp806_irqs { 418 + AXP806_IRQ_DIE_TEMP_HIGH_LV1, 419 + AXP806_IRQ_DIE_TEMP_HIGH_LV2, 420 + AXP806_IRQ_DCDCA_V_LOW, 421 + AXP806_IRQ_DCDCB_V_LOW, 422 + AXP806_IRQ_DCDCC_V_LOW, 423 + 
AXP806_IRQ_DCDCD_V_LOW, 424 + AXP806_IRQ_DCDCE_V_LOW, 425 + AXP806_IRQ_PWROK_LONG, 426 + AXP806_IRQ_PWROK_SHORT, 427 + AXP806_IRQ_WAKEUP, 428 + AXP806_IRQ_PWROK_FALL, 429 + AXP806_IRQ_PWROK_RISE, 460 430 }; 461 431 462 432 enum axp809_irqs {
+18
include/linux/mfd/cros_ec.h
··· 109 109 * should check msg.result for the EC's result code. 110 110 * @pkt_xfer: send packet to EC and get response 111 111 * @lock: one transaction at a time 112 + * @mkbp_event_supported: true if this EC supports the MKBP event protocol. 113 + * @event_notifier: interrupt event notifier for transport devices. 114 + * @event_data: raw payload transferred with the MKBP event. 115 + * @event_size: size in bytes of the event data. 112 116 */ 113 117 struct cros_ec_device { 114 118 ··· 141 137 int (*pkt_xfer)(struct cros_ec_device *ec, 142 138 struct cros_ec_command *msg); 143 139 struct mutex lock; 140 + bool mkbp_event_supported; 141 + struct blocking_notifier_head event_notifier; 142 + 143 + struct ec_response_get_next_event event_data; 144 + int event_size; 144 145 }; 145 146 146 147 /* struct cros_ec_platform - ChromeOS EC platform information ··· 277 268 * @return 0 if ok, -ve on error 278 269 */ 279 270 int cros_ec_query_all(struct cros_ec_device *ec_dev); 271 + 272 + /** 273 + * cros_ec_get_next_event - Fetch next event from the ChromeOS EC 274 + * 275 + * @ec_dev: Device to fetch event from 276 + * 277 + * Returns: 0 on success, Linux error number on failure 278 + */ 279 + int cros_ec_get_next_event(struct cros_ec_device *ec_dev); 280 280 281 281 /* sysfs stuff */ 282 282 extern struct attribute_group cros_ec_attr_group;
+34
include/linux/mfd/cros_ec_commands.h
··· 1793 1793 }; 1794 1794 } __packed; 1795 1795 1796 + /* 1797 + * Command for retrieving the next pending MKBP event from the EC device 1798 + * 1799 + * The device replies with UNAVAILABLE if there aren't any pending events. 1800 + */ 1801 + #define EC_CMD_GET_NEXT_EVENT 0x67 1802 + 1803 + enum ec_mkbp_event { 1804 + /* Keyboard matrix changed. The event data is the new matrix state. */ 1805 + EC_MKBP_EVENT_KEY_MATRIX = 0, 1806 + 1807 + /* New host event. The event data is 4 bytes of host event flags. */ 1808 + EC_MKBP_EVENT_HOST_EVENT = 1, 1809 + 1810 + /* New Sensor FIFO data. The event data is fifo_info structure. */ 1811 + EC_MKBP_EVENT_SENSOR_FIFO = 2, 1812 + 1813 + /* Number of MKBP events */ 1814 + EC_MKBP_EVENT_COUNT, 1815 + }; 1816 + 1817 + union ec_response_get_next_data { 1818 + uint8_t key_matrix[13]; 1819 + 1820 + /* Unaligned */ 1821 + uint32_t host_event; 1822 + } __packed; 1823 + 1824 + struct ec_response_get_next_event { 1825 + uint8_t event_type; 1826 + /* Followed by event data if any */ 1827 + union ec_response_get_next_data data; 1828 + } __packed; 1829 + 1796 1830 /*****************************************************************************/ 1797 1831 /* Temperature sensor commands */ 1798 1832
+269
include/linux/mfd/lp873x.h
··· 1 + /* 2 + * Functions to access LP873X power management chip. 3 + * 4 + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License as 8 + * published by the Free Software Foundation version 2. 9 + * 10 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 + * kind, whether express or implied; without even the implied warranty 12 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + 16 + #ifndef __LINUX_MFD_LP873X_H 17 + #define __LINUX_MFD_LP873X_H 18 + 19 + #include <linux/i2c.h> 20 + #include <linux/regulator/driver.h> 21 + #include <linux/regulator/machine.h> 22 + 23 + /* LP873x chip id list */ 24 + #define LP873X 0x00 25 + 26 + /* All register addresses */ 27 + #define LP873X_REG_DEV_REV 0X00 28 + #define LP873X_REG_OTP_REV 0X01 29 + #define LP873X_REG_BUCK0_CTRL_1 0X02 30 + #define LP873X_REG_BUCK0_CTRL_2 0X03 31 + #define LP873X_REG_BUCK1_CTRL_1 0X04 32 + #define LP873X_REG_BUCK1_CTRL_2 0X05 33 + #define LP873X_REG_BUCK0_VOUT 0X06 34 + #define LP873X_REG_BUCK1_VOUT 0X07 35 + #define LP873X_REG_LDO0_CTRL 0X08 36 + #define LP873X_REG_LDO1_CTRL 0X09 37 + #define LP873X_REG_LDO0_VOUT 0X0A 38 + #define LP873X_REG_LDO1_VOUT 0X0B 39 + #define LP873X_REG_BUCK0_DELAY 0X0C 40 + #define LP873X_REG_BUCK1_DELAY 0X0D 41 + #define LP873X_REG_LDO0_DELAY 0X0E 42 + #define LP873X_REG_LDO1_DELAY 0X0F 43 + #define LP873X_REG_GPO_DELAY 0X10 44 + #define LP873X_REG_GPO2_DELAY 0X11 45 + #define LP873X_REG_GPO_CTRL 0X12 46 + #define LP873X_REG_CONFIG 0X13 47 + #define LP873X_REG_PLL_CTRL 0X14 48 + #define LP873X_REG_PGOOD_CTRL1 0X15 49 + #define LP873X_REG_PGOOD_CTRL2 0X16 50 + #define LP873X_REG_PG_FAULT 0X17 51 + #define LP873X_REG_RESET 0X18 52 + #define LP873X_REG_INT_TOP_1 0X19 53 + #define LP873X_REG_INT_TOP_2 0X1A 54 + 
#define LP873X_REG_INT_BUCK 0X1B 55 + #define LP873X_REG_INT_LDO 0X1C 56 + #define LP873X_REG_TOP_STAT 0X1D 57 + #define LP873X_REG_BUCK_STAT 0X1E 58 + #define LP873X_REG_LDO_STAT 0x1F 59 + #define LP873X_REG_TOP_MASK_1 0x20 60 + #define LP873X_REG_TOP_MASK_2 0x21 61 + #define LP873X_REG_BUCK_MASK 0x22 62 + #define LP873X_REG_LDO_MASK 0x23 63 + #define LP873X_REG_SEL_I_LOAD 0x24 64 + #define LP873X_REG_I_LOAD_2 0x25 65 + #define LP873X_REG_I_LOAD_1 0x26 66 + 67 + #define LP873X_REG_MAX LP873X_REG_I_LOAD_1 68 + 69 + /* Register field definitions */ 70 + #define LP873X_DEV_REV_DEV_ID 0xC0 71 + #define LP873X_DEV_REV_ALL_LAYER 0x30 72 + #define LP873X_DEV_REV_METAL_LAYER 0x0F 73 + 74 + #define LP873X_OTP_REV_OTP_ID 0xFF 75 + 76 + #define LP873X_BUCK0_CTRL_1_BUCK0_FPWM BIT(3) 77 + #define LP873X_BUCK0_CTRL_1_BUCK0_RDIS_EN BIT(2) 78 + #define LP873X_BUCK0_CTRL_1_BUCK0_EN_PIN_CTRL BIT(1) 79 + #define LP873X_BUCK0_CTRL_1_BUCK0_EN BIT(0) 80 + 81 + #define LP873X_BUCK0_CTRL_2_BUCK0_ILIM 0x38 82 + #define LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE 0x07 83 + 84 + #define LP873X_BUCK1_CTRL_1_BUCK1_FPWM BIT(3) 85 + #define LP873X_BUCK1_CTRL_1_BUCK1_RDIS_EN BIT(2) 86 + #define LP873X_BUCK1_CTRL_1_BUCK1_EN_PIN_CTRL BIT(1) 87 + #define LP873X_BUCK1_CTRL_1_BUCK1_EN BIT(0) 88 + 89 + #define LP873X_BUCK1_CTRL_2_BUCK1_ILIM 0x38 90 + #define LP873X_BUCK1_CTRL_2_BUCK1_SLEW_RATE 0x07 91 + 92 + #define LP873X_BUCK0_VOUT_BUCK0_VSET 0xFF 93 + 94 + #define LP873X_BUCK1_VOUT_BUCK1_VSET 0xFF 95 + 96 + #define LP873X_LDO0_CTRL_LDO0_RDIS_EN BIT(2) 97 + #define LP873X_LDO0_CTRL_LDO0_EN_PIN_CTRL BIT(1) 98 + #define LP873X_LDO0_CTRL_LDO0_EN BIT(0) 99 + 100 + #define LP873X_LDO1_CTRL_LDO1_RDIS_EN BIT(2) 101 + #define LP873X_LDO1_CTRL_LDO1_EN_PIN_CTRL BIT(1) 102 + #define LP873X_LDO1_CTRL_LDO1_EN BIT(0) 103 + 104 + #define LP873X_LDO0_VOUT_LDO0_VSET 0x1F 105 + 106 + #define LP873X_LDO1_VOUT_LDO1_VSET 0x1F 107 + 108 + #define LP873X_BUCK0_DELAY_BUCK0_SD_DELAY 0xF0 109 + #define 
LP873X_BUCK0_DELAY_BUCK0_SU_DELAY 0x0F 110 + 111 + #define LP873X_BUCK1_DELAY_BUCK1_SD_DELAY 0xF0 112 + #define LP873X_BUCK1_DELAY_BUCK1_SU_DELAY 0x0F 113 + 114 + #define LP873X_LDO0_DELAY_LDO0_SD_DELAY 0xF0 115 + #define LP873X_LDO0_DELAY_LDO0_SU_DELAY 0x0F 116 + 117 + #define LP873X_LDO1_DELAY_LDO1_SD_DELAY 0xF0 118 + #define LP873X_LDO1_DELAY_LDO1_SU_DELAY 0x0F 119 + 120 + #define LP873X_GPO_DELAY_GPO_SD_DELAY 0xF0 121 + #define LP873X_GPO_DELAY_GPO_SU_DELAY 0x0F 122 + 123 + #define LP873X_GPO2_DELAY_GPO2_SD_DELAY 0xF0 124 + #define LP873X_GPO2_DELAY_GPO2_SU_DELAY 0x0F 125 + 126 + #define LP873X_GPO_CTRL_GPO2_OD BIT(6) 127 + #define LP873X_GPO_CTRL_GPO2_EN_PIN_CTRL BIT(5) 128 + #define LP873X_GPO_CTRL_GPO2_EN BIT(4) 129 + #define LP873X_GPO_CTRL_GPO_OD BIT(2) 130 + #define LP873X_GPO_CTRL_GPO_EN_PIN_CTRL BIT(1) 131 + #define LP873X_GPO_CTRL_GPO_EN BIT(0) 132 + 133 + #define LP873X_CONFIG_SU_DELAY_SEL BIT(6) 134 + #define LP873X_CONFIG_SD_DELAY_SEL BIT(5) 135 + #define LP873X_CONFIG_CLKIN_PIN_SEL BIT(4) 136 + #define LP873X_CONFIG_CLKIN_PD BIT(3) 137 + #define LP873X_CONFIG_EN_PD BIT(2) 138 + #define LP873X_CONFIG_TDIE_WARN_LEVEL BIT(1) 139 + #define LP873X_EN_SPREAD_SPEC BIT(0) 140 + 141 + #define LP873X_PLL_CTRL_EN_PLL BIT(6) 142 + #define LP873X_EXT_CLK_FREQ 0x1F 143 + 144 + #define LP873X_PGOOD_CTRL1_PGOOD_POL BIT(7) 145 + #define LP873X_PGOOD_CTRL1_PGOOD_OD BIT(6) 146 + #define LP873X_PGOOD_CTRL1_PGOOD_WINDOW_LDO BIT(5) 147 + #define LP873X_PGOOD_CTRL1_PGOOD_WINDOWN_BUCK BIT(4) 148 + #define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO1 BIT(3) 149 + #define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO0 BIT(2) 150 + #define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK1 BIT(1) 151 + #define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK0 BIT(0) 152 + 153 + #define LP873X_PGOOD_CTRL2_EN_PGOOD_TWARN BIT(2) 154 + #define LP873X_PGOOD_CTRL2_EN_PG_FAULT_GATE BIT(1) 155 + #define LP873X_PGOOD_CTRL2_PGOOD_MODE BIT(0) 156 + 157 + #define LP873X_PG_FAULT_PG_FAULT_LDO1 BIT(3) 158 + #define 
LP873X_PG_FAULT_PG_FAULT_LDO0 BIT(2) 159 + #define LP873X_PG_FAULT_PG_FAULT_BUCK1 BIT(1) 160 + #define LP873X_PG_FAULT_PG_FAULT_BUCK0 BIT(0) 161 + 162 + #define LP873X_RESET_SW_RESET BIT(0) 163 + 164 + #define LP873X_INT_TOP_1_PGOOD_INT BIT(7) 165 + #define LP873X_INT_TOP_1_LDO_INT BIT(6) 166 + #define LP873X_INT_TOP_1_BUCK_INT BIT(5) 167 + #define LP873X_INT_TOP_1_SYNC_CLK_INT BIT(4) 168 + #define LP873X_INT_TOP_1_TDIE_SD_INT BIT(3) 169 + #define LP873X_INT_TOP_1_TDIE_WARN_INT BIT(2) 170 + #define LP873X_INT_TOP_1_OVP_INT BIT(1) 171 + #define LP873X_INT_TOP_1_I_MEAS_INT BIT(0) 172 + 173 + #define LP873X_INT_TOP_2_RESET_REG_INT BIT(0) 174 + 175 + #define LP873X_INT_BUCK_BUCK1_PG_INT BIT(6) 176 + #define LP873X_INT_BUCK_BUCK1_SC_INT BIT(5) 177 + #define LP873X_INT_BUCK_BUCK1_ILIM_INT BIT(4) 178 + #define LP873X_INT_BUCK_BUCK0_PG_INT BIT(2) 179 + #define LP873X_INT_BUCK_BUCK0_SC_INT BIT(1) 180 + #define LP873X_INT_BUCK_BUCK0_ILIM_INT BIT(0) 181 + 182 + #define LP873X_INT_LDO_LDO1_PG_INT BIT(6) 183 + #define LP873X_INT_LDO_LDO1_SC_INT BIT(5) 184 + #define LP873X_INT_LDO_LDO1_ILIM_INT BIT(4) 185 + #define LP873X_INT_LDO_LDO0_PG_INT BIT(2) 186 + #define LP873X_INT_LDO_LDO0_SC_INT BIT(1) 187 + #define LP873X_INT_LDO_LDO0_ILIM_INT BIT(0) 188 + 189 + #define LP873X_TOP_STAT_PGOOD_STAT BIT(7) 190 + #define LP873X_TOP_STAT_SYNC_CLK_STAT BIT(4) 191 + #define LP873X_TOP_STAT_TDIE_SD_STAT BIT(3) 192 + #define LP873X_TOP_STAT_TDIE_WARN_STAT BIT(2) 193 + #define LP873X_TOP_STAT_OVP_STAT BIT(1) 194 + 195 + #define LP873X_BUCK_STAT_BUCK1_STAT BIT(7) 196 + #define LP873X_BUCK_STAT_BUCK1_PG_STAT BIT(6) 197 + #define LP873X_BUCK_STAT_BUCK1_ILIM_STAT BIT(4) 198 + #define LP873X_BUCK_STAT_BUCK0_STAT BIT(3) 199 + #define LP873X_BUCK_STAT_BUCK0_PG_STAT BIT(2) 200 + #define LP873X_BUCK_STAT_BUCK0_ILIM_STAT BIT(0) 201 + 202 + #define LP873X_LDO_STAT_LDO1_STAT BIT(7) 203 + #define LP873X_LDO_STAT_LDO1_PG_STAT BIT(6) 204 + #define LP873X_LDO_STAT_LDO1_ILIM_STAT BIT(4) 205 + #define 
LP873X_LDO_STAT_LDO0_STAT BIT(3) 206 + #define LP873X_LDO_STAT_LDO0_PG_STAT BIT(2) 207 + #define LP873X_LDO_STAT_LDO0_ILIM_STAT BIT(0) 208 + 209 + #define LP873X_TOP_MASK_1_PGOOD_INT_MASK BIT(7) 210 + #define LP873X_TOP_MASK_1_SYNC_CLK_MASK BIT(4) 211 + #define LP873X_TOP_MASK_1_TDIE_WARN_MASK BIT(2) 212 + #define LP873X_TOP_MASK_1_I_MEAS_MASK BIT(0) 213 + 214 + #define LP873X_TOP_MASK_2_RESET_REG_MASK BIT(0) 215 + 216 + #define LP873X_BUCK_MASK_BUCK1_PGF_MASK BIT(7) 217 + #define LP873X_BUCK_MASK_BUCK1_PGR_MASK BIT(6) 218 + #define LP873X_BUCK_MASK_BUCK1_ILIM_MASK BIT(4) 219 + #define LP873X_BUCK_MASK_BUCK0_PGF_MASK BIT(3) 220 + #define LP873X_BUCK_MASK_BUCK0_PGR_MASK BIT(2) 221 + #define LP873X_BUCK_MASK_BUCK0_ILIM_MASK BIT(0) 222 + 223 + #define LP873X_LDO_MASK_LDO1_PGF_MASK BIT(7) 224 + #define LP873X_LDO_MASK_LDO1_PGR_MASK BIT(6) 225 + #define LP873X_LDO_MASK_LDO1_ILIM_MASK BIT(4) 226 + #define LP873X_LDO_MASK_LDO0_PGF_MASK BIT(3) 227 + #define LP873X_LDO_MASK_LDO0_PGR_MASK BIT(2) 228 + #define LP873X_LDO_MASK_LDO0_ILIM_MASK BIT(0) 229 + 230 + #define LP873X_SEL_I_LOAD_CURRENT_BUCK_SELECT BIT(0) 231 + 232 + #define LP873X_I_LOAD_2_BUCK_LOAD_CURRENT BIT(0) 233 + 234 + #define LP873X_I_LOAD_1_BUCK_LOAD_CURRENT 0xFF 235 + 236 + #define LP873X_MAX_REG_ID LP873X_LDO_1 237 + 238 + /* Number of step-down converters available */ 239 + #define LP873X_NUM_BUCK 2 240 + /* Number of LDO voltage regulators available */ 241 + #define LP873X_NUM_LDO 2 242 + /* Number of total regulators available */ 243 + #define LP873X_NUM_REGULATOR (LP873X_NUM_BUCK + LP873X_NUM_LDO) 244 + 245 + enum lp873x_regulator_id { 246 + /* BUCK's */ 247 + LP873X_BUCK_0, 248 + LP873X_BUCK_1, 249 + /* LDOs */ 250 + LP873X_LDO_0, 251 + LP873X_LDO_1, 252 + }; 253 + 254 + /** 255 + * struct lp873x - state holder for the lp873x driver 256 + * @dev: struct device pointer for MFD device 257 + * @rev: revision of the lp873x 258 + * @lock: lock guarding the data structure 259 + * @regmap: register map of the 
lp873x PMIC 260 + * 261 + * Device data may be used to access the LP873X chip 262 + */ 263 + struct lp873x { 264 + struct device *dev; 265 + u8 rev; 266 + struct mutex lock; /* lock guarding the data structure */ 267 + struct regmap *regmap; 268 + }; 269 + #endif /* __LINUX_MFD_LP873X_H */
+146 -10
include/linux/mfd/rk808.h
··· 1 1 /* 2 - * rk808.h for Rockchip RK808 2 + * Register definitions for Rockchip's RK808/RK818 PMIC 3 3 * 4 4 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd 5 5 * 6 6 * Author: Chris Zhong <zyw@rock-chips.com> 7 7 * Author: Zhang Qing <zhangqing@rock-chips.com> 8 + * 9 + * Copyright (C) 2016 PHYTEC Messtechnik GmbH 10 + * 11 + * Author: Wadim Egorov <w.egorov@phytec.de> 8 12 * 9 13 * This program is free software; you can redistribute it and/or modify it 10 14 * under the terms and conditions of the GNU General Public License, ··· 20 16 * more details. 21 17 */ 22 18 23 - #ifndef __LINUX_REGULATOR_rk808_H 24 - #define __LINUX_REGULATOR_rk808_H 19 + #ifndef __LINUX_REGULATOR_RK808_H 20 + #define __LINUX_REGULATOR_RK808_H 25 21 26 22 #include <linux/regulator/machine.h> 27 23 #include <linux/regmap.h> ··· 32 28 33 29 #define RK808_DCDC1 0 /* (0+RK808_START) */ 34 30 #define RK808_LDO1 4 /* (4+RK808_START) */ 35 - #define RK808_NUM_REGULATORS 14 31 + #define RK808_NUM_REGULATORS 14 36 32 37 33 enum rk808_reg { 38 34 RK808_ID_DCDC1, ··· 69 65 #define RK808_RTC_INT_REG 0x12 70 66 #define RK808_RTC_COMP_LSB_REG 0x13 71 67 #define RK808_RTC_COMP_MSB_REG 0x14 68 + #define RK808_ID_MSB 0x17 69 + #define RK808_ID_LSB 0x18 72 70 #define RK808_CLK32OUT_REG 0x20 73 71 #define RK808_VB_MON_REG 0x21 74 72 #define RK808_THERMAL_REG 0x22 ··· 121 115 #define RK808_INT_STS_MSK_REG2 0x4f 122 116 #define RK808_IO_POL_REG 0x50 123 117 124 - /* IRQ Definitions */ 118 + /* RK818 */ 119 + #define RK818_DCDC1 0 120 + #define RK818_LDO1 4 121 + #define RK818_NUM_REGULATORS 17 122 + 123 + enum rk818_reg { 124 + RK818_ID_DCDC1, 125 + RK818_ID_DCDC2, 126 + RK818_ID_DCDC3, 127 + RK818_ID_DCDC4, 128 + RK818_ID_BOOST, 129 + RK818_ID_LDO1, 130 + RK818_ID_LDO2, 131 + RK818_ID_LDO3, 132 + RK818_ID_LDO4, 133 + RK818_ID_LDO5, 134 + RK818_ID_LDO6, 135 + RK818_ID_LDO7, 136 + RK818_ID_LDO8, 137 + RK818_ID_LDO9, 138 + RK818_ID_SWITCH, 139 + RK818_ID_HDMI_SWITCH, 140 + RK818_ID_OTG_SWITCH, 
141 + }; 142 + 143 + #define RK818_DCDC_EN_REG 0x23 144 + #define RK818_LDO_EN_REG 0x24 145 + #define RK818_SLEEP_SET_OFF_REG1 0x25 146 + #define RK818_SLEEP_SET_OFF_REG2 0x26 147 + #define RK818_DCDC_UV_STS_REG 0x27 148 + #define RK818_DCDC_UV_ACT_REG 0x28 149 + #define RK818_LDO_UV_STS_REG 0x29 150 + #define RK818_LDO_UV_ACT_REG 0x2a 151 + #define RK818_DCDC_PG_REG 0x2b 152 + #define RK818_LDO_PG_REG 0x2c 153 + #define RK818_VOUT_MON_TDB_REG 0x2d 154 + #define RK818_BUCK1_CONFIG_REG 0x2e 155 + #define RK818_BUCK1_ON_VSEL_REG 0x2f 156 + #define RK818_BUCK1_SLP_VSEL_REG 0x30 157 + #define RK818_BUCK2_CONFIG_REG 0x32 158 + #define RK818_BUCK2_ON_VSEL_REG 0x33 159 + #define RK818_BUCK2_SLP_VSEL_REG 0x34 160 + #define RK818_BUCK3_CONFIG_REG 0x36 161 + #define RK818_BUCK4_CONFIG_REG 0x37 162 + #define RK818_BUCK4_ON_VSEL_REG 0x38 163 + #define RK818_BUCK4_SLP_VSEL_REG 0x39 164 + #define RK818_BOOST_CONFIG_REG 0x3a 165 + #define RK818_LDO1_ON_VSEL_REG 0x3b 166 + #define RK818_LDO1_SLP_VSEL_REG 0x3c 167 + #define RK818_LDO2_ON_VSEL_REG 0x3d 168 + #define RK818_LDO2_SLP_VSEL_REG 0x3e 169 + #define RK818_LDO3_ON_VSEL_REG 0x3f 170 + #define RK818_LDO3_SLP_VSEL_REG 0x40 171 + #define RK818_LDO4_ON_VSEL_REG 0x41 172 + #define RK818_LDO4_SLP_VSEL_REG 0x42 173 + #define RK818_LDO5_ON_VSEL_REG 0x43 174 + #define RK818_LDO5_SLP_VSEL_REG 0x44 175 + #define RK818_LDO6_ON_VSEL_REG 0x45 176 + #define RK818_LDO6_SLP_VSEL_REG 0x46 177 + #define RK818_LDO7_ON_VSEL_REG 0x47 178 + #define RK818_LDO7_SLP_VSEL_REG 0x48 179 + #define RK818_LDO8_ON_VSEL_REG 0x49 180 + #define RK818_LDO8_SLP_VSEL_REG 0x4a 181 + #define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 182 + #define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 183 + #define RK818_DEVCTRL_REG 0x4b 184 + #define RK818_INT_STS_REG1 0X4c 185 + #define RK818_INT_STS_MSK_REG1 0x4d 186 + #define RK818_INT_STS_REG2 0x4e 187 + #define RK818_INT_STS_MSK_REG2 0x4f 188 + #define RK818_IO_POL_REG 0x50 189 + #define RK818_H5V_EN_REG 0x52 190 + #define 
RK818_SLEEP_SET_OFF_REG3 0x53 191 + #define RK818_BOOST_LDO9_ON_VSEL_REG 0x54 192 + #define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55 193 + #define RK818_BOOST_CTRL_REG 0x56 194 + #define RK818_DCDC_ILMAX 0x90 195 + #define RK818_USB_CTRL_REG 0xa1 196 + 197 + #define RK818_H5V_EN BIT(0) 198 + #define RK818_REF_RDY_CTRL BIT(1) 199 + #define RK818_USB_ILIM_SEL_MASK 0xf 200 + #define RK818_USB_ILMIN_2000MA 0x7 201 + #define RK818_USB_CHG_SD_VSEL_MASK 0x70 202 + 203 + /* RK808 IRQ Definitions */ 125 204 #define RK808_IRQ_VOUT_LO 0 126 205 #define RK808_IRQ_VB_LO 1 127 206 #define RK808_IRQ_PWRON 2 ··· 227 136 #define RK808_IRQ_RTC_PERIOD_MSK BIT(6) 228 137 #define RK808_IRQ_PLUG_IN_INT_MSK BIT(0) 229 138 #define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1) 139 + 140 + /* RK818 IRQ Definitions */ 141 + #define RK818_IRQ_VOUT_LO 0 142 + #define RK818_IRQ_VB_LO 1 143 + #define RK818_IRQ_PWRON 2 144 + #define RK818_IRQ_PWRON_LP 3 145 + #define RK818_IRQ_HOTDIE 4 146 + #define RK818_IRQ_RTC_ALARM 5 147 + #define RK818_IRQ_RTC_PERIOD 6 148 + #define RK818_IRQ_USB_OV 7 149 + #define RK818_IRQ_PLUG_IN 8 150 + #define RK818_IRQ_PLUG_OUT 9 151 + #define RK818_IRQ_CHG_OK 10 152 + #define RK818_IRQ_CHG_TE 11 153 + #define RK818_IRQ_CHG_TS1 12 154 + #define RK818_IRQ_TS2 13 155 + #define RK818_IRQ_CHG_CVTLIM 14 156 + #define RK818_IRQ_DISCHG_ILIM 7 157 + 158 + #define RK818_IRQ_VOUT_LO_MSK BIT(0) 159 + #define RK818_IRQ_VB_LO_MSK BIT(1) 160 + #define RK818_IRQ_PWRON_MSK BIT(2) 161 + #define RK818_IRQ_PWRON_LP_MSK BIT(3) 162 + #define RK818_IRQ_HOTDIE_MSK BIT(4) 163 + #define RK818_IRQ_RTC_ALARM_MSK BIT(5) 164 + #define RK818_IRQ_RTC_PERIOD_MSK BIT(6) 165 + #define RK818_IRQ_USB_OV_MSK BIT(7) 166 + #define RK818_IRQ_PLUG_IN_MSK BIT(0) 167 + #define RK818_IRQ_PLUG_OUT_MSK BIT(1) 168 + #define RK818_IRQ_CHG_OK_MSK BIT(2) 169 + #define RK818_IRQ_CHG_TE_MSK BIT(3) 170 + #define RK818_IRQ_CHG_TS1_MSK BIT(4) 171 + #define RK818_IRQ_TS2_MSK BIT(5) 172 + #define RK818_IRQ_CHG_CVTLIM_MSK BIT(6) 173 + #define 
RK818_IRQ_DISCHG_ILIM_MSK BIT(7) 174 + 175 + #define RK818_NUM_IRQ 16 230 176 231 177 #define RK808_VBAT_LOW_2V8 0x00 232 178 #define RK808_VBAT_LOW_2V9 0x01 ··· 319 191 BOOST_ILMIN_250MA, 320 192 }; 321 193 322 - struct rk808 { 323 - struct i2c_client *i2c; 324 - struct regmap_irq_chip_data *irq_data; 325 - struct regmap *regmap; 194 + enum { 195 + RK808_ID = 0x0000, 196 + RK818_ID = 0x8181, 326 197 }; 327 - #endif /* __LINUX_REGULATOR_rk808_H */ 198 + 199 + struct rk808 { 200 + struct i2c_client *i2c; 201 + struct regmap_irq_chip_data *irq_data; 202 + struct regmap *regmap; 203 + long variant; 204 + const struct regmap_config *regmap_cfg; 205 + const struct regmap_irq_chip *regmap_irq_chip; 206 + }; 207 + #endif /* __LINUX_REGULATOR_RK808_H */
+2
include/linux/mmzone.h
··· 68 68 69 69 #ifdef CONFIG_CMA 70 70 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) 71 + # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) 71 72 #else 72 73 # define is_migrate_cma(migratetype) false 74 + # define is_migrate_cma_page(_page) false 73 75 #endif 74 76 75 77 #define for_each_migratetype_order(order, type) \
+2
include/linux/msi.h
··· 270 270 MSI_FLAG_MULTI_PCI_MSI = (1 << 2), 271 271 /* Support PCI MSIX interrupts */ 272 272 MSI_FLAG_PCI_MSIX = (1 << 3), 273 + /* Needs early activate, required for PCI */ 274 + MSI_FLAG_ACTIVATE_EARLY = (1 << 4), 273 275 }; 274 276 275 277 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
+4
include/linux/perf_event.h
··· 743 743 u64 parent_gen; 744 744 u64 generation; 745 745 int pin_count; 746 + #ifdef CONFIG_CGROUP_PERF 746 747 int nr_cgroups; /* cgroup evts */ 748 + #endif 747 749 void *task_ctx_data; /* pmu specific data */ 748 750 struct rcu_head rcu_head; 749 751 }; ··· 771 769 unsigned int hrtimer_active; 772 770 773 771 struct pmu *unique_pmu; 772 + #ifdef CONFIG_CGROUP_PERF 774 773 struct perf_cgroup *cgrp; 774 + #endif 775 775 }; 776 776 777 777 struct perf_output_handle {
+15 -33
include/linux/printk.h
··· 266 266 * and other debug macros are compiled out unless either DEBUG is defined 267 267 * or CONFIG_DYNAMIC_DEBUG is set. 268 268 */ 269 - 270 - #ifdef CONFIG_PRINTK 271 - 272 - asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...); 273 - asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...); 274 - asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...); 275 - asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...); 276 - asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...); 277 - asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...); 278 - asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...); 279 - 280 - #define pr_emerg(fmt, ...) __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__) 281 - #define pr_alert(fmt, ...) __pr_alert(pr_fmt(fmt), ##__VA_ARGS__) 282 - #define pr_crit(fmt, ...) __pr_crit(pr_fmt(fmt), ##__VA_ARGS__) 283 - #define pr_err(fmt, ...) __pr_err(pr_fmt(fmt), ##__VA_ARGS__) 284 - #define pr_warn(fmt, ...) __pr_warn(pr_fmt(fmt), ##__VA_ARGS__) 285 - #define pr_notice(fmt, ...) __pr_notice(pr_fmt(fmt), ##__VA_ARGS__) 286 - #define pr_info(fmt, ...) __pr_info(pr_fmt(fmt), ##__VA_ARGS__) 287 - 288 - #else 289 - 290 - #define pr_emerg(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) 291 - #define pr_alert(fmt, ...) printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) 292 - #define pr_crit(fmt, ...) printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) 293 - #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) 294 - #define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) 295 - #define pr_notice(fmt, ...) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) 296 - #define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) 297 - 298 - #endif 299 - 300 - #define pr_warning pr_warn 301 - 269 + #define pr_emerg(fmt, ...) \ 270 + printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) 271 + #define pr_alert(fmt, ...) 
\ 272 + printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) 273 + #define pr_crit(fmt, ...) \ 274 + printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) 275 + #define pr_err(fmt, ...) \ 276 + printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) 277 + #define pr_warning(fmt, ...) \ 278 + printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) 279 + #define pr_warn pr_warning 280 + #define pr_notice(fmt, ...) \ 281 + printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) 282 + #define pr_info(fmt, ...) \ 283 + printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) 302 284 /* 303 285 * Like KERN_CONT, pr_cont() should only be used when continuing 304 286 * a line with no newline ('\n') enclosed. Otherwise it defaults
+12
include/linux/slab.h
··· 155 155 void kzfree(const void *); 156 156 size_t ksize(const void *); 157 157 158 + #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR 159 + const char *__check_heap_object(const void *ptr, unsigned long n, 160 + struct page *page); 161 + #else 162 + static inline const char *__check_heap_object(const void *ptr, 163 + unsigned long n, 164 + struct page *page) 165 + { 166 + return NULL; 167 + } 168 + #endif 169 + 158 170 /* 159 171 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 160 172 * alignment larger than the alignment of a 64-bit integer.
+2
include/linux/sunrpc/clnt.h
··· 195 195 struct rpc_xprt *, 196 196 void *), 197 197 void *data); 198 + void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, 199 + unsigned long timeo); 198 200 199 201 const char *rpc_proc_name(const struct rpc_task *task); 200 202 #endif /* __KERNEL__ */
+2 -1
include/linux/sunrpc/xprt.h
··· 218 218 struct work_struct task_cleanup; 219 219 struct timer_list timer; 220 220 unsigned long last_used, 221 - idle_timeout; 221 + idle_timeout, 222 + max_reconnect_timeout; 222 223 223 224 /* 224 225 * Send stuff
+24
include/linux/thread_info.h
··· 105 105 106 106 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) 107 107 108 + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES 109 + static inline int arch_within_stack_frames(const void * const stack, 110 + const void * const stackend, 111 + const void *obj, unsigned long len) 112 + { 113 + return 0; 114 + } 115 + #endif 116 + 117 + #ifdef CONFIG_HARDENED_USERCOPY 118 + extern void __check_object_size(const void *ptr, unsigned long n, 119 + bool to_user); 120 + 121 + static inline void check_object_size(const void *ptr, unsigned long n, 122 + bool to_user) 123 + { 124 + __check_object_size(ptr, n, to_user); 125 + } 126 + #else 127 + static inline void check_object_size(const void *ptr, unsigned long n, 128 + bool to_user) 129 + { } 130 + #endif /* CONFIG_HARDENED_USERCOPY */ 131 + 108 132 #endif /* __KERNEL__ */ 109 133 110 134 #endif /* _LINUX_THREAD_INFO_H */
+2 -2
include/linux/uaccess.h
··· 114 114 #ifndef user_access_begin 115 115 #define user_access_begin() do { } while (0) 116 116 #define user_access_end() do { } while (0) 117 - #define unsafe_get_user(x, ptr) __get_user(x, ptr) 118 - #define unsafe_put_user(x, ptr) __put_user(x, ptr) 117 + #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) 118 + #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) 119 119 #endif 120 120 121 121 #endif /* __LINUX_UACCESS_H__ */
+11 -3
include/trace/events/timer.h
··· 330 330 #ifdef CONFIG_NO_HZ_COMMON 331 331 332 332 #define TICK_DEP_NAMES \ 333 - tick_dep_name(NONE) \ 333 + tick_dep_mask_name(NONE) \ 334 334 tick_dep_name(POSIX_TIMER) \ 335 335 tick_dep_name(PERF_EVENTS) \ 336 336 tick_dep_name(SCHED) \ 337 337 tick_dep_name_end(CLOCK_UNSTABLE) 338 338 339 339 #undef tick_dep_name 340 + #undef tick_dep_mask_name 340 341 #undef tick_dep_name_end 341 342 342 - #define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 343 - #define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 343 + /* The MASK will convert to their bits and they need to be processed too */ 344 + #define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ 345 + TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 346 + #define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \ 347 + TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 348 + /* NONE only has a mask defined for it */ 349 + #define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep); 344 350 345 351 TICK_DEP_NAMES 346 352 347 353 #undef tick_dep_name 354 + #undef tick_dep_mask_name 348 355 #undef tick_dep_name_end 349 356 350 357 #define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, 358 + #define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep }, 351 359 #define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep } 352 360 353 361 #define show_tick_dep_name(val) \
+1 -1
include/uapi/linux/virtio_vsock.h
··· 32 32 */ 33 33 34 34 #ifndef _UAPI_LINUX_VIRTIO_VSOCK_H 35 - #define _UAPI_LINUX_VIRTIO_VOSCK_H 35 + #define _UAPI_LINUX_VIRTIO_VSOCK_H 36 36 37 37 #include <linux/types.h> 38 38 #include <linux/virtio_ids.h>
+2 -2
include/uapi/misc/cxl.h
··· 136 136 * 137 137 * Of course the contents will be ABI, but that's up the AFU driver. 138 138 */ 139 - size_t data_size; 140 - u8 data[]; 139 + __u32 data_size; 140 + __u8 data[]; 141 141 }; 142 142 143 143 struct cxl_event {
+2
init/Kconfig
··· 1761 1761 1762 1762 config SLAB 1763 1763 bool "SLAB" 1764 + select HAVE_HARDENED_USERCOPY_ALLOCATOR 1764 1765 help 1765 1766 The regular slab allocator that is established and known to work 1766 1767 well in all environments. It organizes cache hot objects in ··· 1769 1768 1770 1769 config SLUB 1771 1770 bool "SLUB (Unqueued Allocator)" 1771 + select HAVE_HARDENED_USERCOPY_ALLOCATOR 1772 1772 help 1773 1773 SLUB is a slab allocator that minimizes cache line usage 1774 1774 instead of managing queues of cached objects (SLAB approach).
+54 -23
kernel/events/core.c
··· 843 843 } 844 844 } 845 845 } 846 + 847 + /* 848 + * Update cpuctx->cgrp so that it is set when first cgroup event is added and 849 + * cleared when last cgroup event is removed. 850 + */ 851 + static inline void 852 + list_update_cgroup_event(struct perf_event *event, 853 + struct perf_event_context *ctx, bool add) 854 + { 855 + struct perf_cpu_context *cpuctx; 856 + 857 + if (!is_cgroup_event(event)) 858 + return; 859 + 860 + if (add && ctx->nr_cgroups++) 861 + return; 862 + else if (!add && --ctx->nr_cgroups) 863 + return; 864 + /* 865 + * Because cgroup events are always per-cpu events, 866 + * this will always be called from the right CPU. 867 + */ 868 + cpuctx = __get_cpu_context(ctx); 869 + cpuctx->cgrp = add ? event->cgrp : NULL; 870 + } 871 + 846 872 #else /* !CONFIG_CGROUP_PERF */ 847 873 848 874 static inline bool ··· 946 920 struct perf_event_context *ctx) 947 921 { 948 922 } 923 + 924 + static inline void 925 + list_update_cgroup_event(struct perf_event *event, 926 + struct perf_event_context *ctx, bool add) 927 + { 928 + } 929 + 949 930 #endif 950 931 951 932 /* ··· 1425 1392 static void 1426 1393 list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1427 1394 { 1395 + 1428 1396 lockdep_assert_held(&ctx->lock); 1429 1397 1430 1398 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); ··· 1446 1412 list_add_tail(&event->group_entry, list); 1447 1413 } 1448 1414 1449 - if (is_cgroup_event(event)) 1450 - ctx->nr_cgroups++; 1415 + list_update_cgroup_event(event, ctx, true); 1451 1416 1452 1417 list_add_rcu(&event->event_entry, &ctx->event_list); 1453 1418 ctx->nr_events++; ··· 1614 1581 static void 1615 1582 list_del_event(struct perf_event *event, struct perf_event_context *ctx) 1616 1583 { 1617 - struct perf_cpu_context *cpuctx; 1618 - 1619 1584 WARN_ON_ONCE(event->ctx != ctx); 1620 1585 lockdep_assert_held(&ctx->lock); 1621 1586 ··· 1625 1594 1626 1595 event->attach_state &= ~PERF_ATTACH_CONTEXT; 1627 1596 1628 - if 
(is_cgroup_event(event)) { 1629 - ctx->nr_cgroups--; 1630 - /* 1631 - * Because cgroup events are always per-cpu events, this will 1632 - * always be called from the right CPU. 1633 - */ 1634 - cpuctx = __get_cpu_context(ctx); 1635 - /* 1636 - * If there are no more cgroup events then clear cgrp to avoid 1637 - * stale pointer in update_cgrp_time_from_cpuctx(). 1638 - */ 1639 - if (!ctx->nr_cgroups) 1640 - cpuctx->cgrp = NULL; 1641 - } 1597 + list_update_cgroup_event(event, ctx, false); 1642 1598 1643 1599 ctx->nr_events--; 1644 1600 if (event->attr.inherit_stat) ··· 1734 1716 static inline int 1735 1717 event_filter_match(struct perf_event *event) 1736 1718 { 1737 - return (event->cpu == -1 || event->cpu == smp_processor_id()) 1738 - && perf_cgroup_match(event) && pmu_filter_match(event); 1719 + return (event->cpu == -1 || event->cpu == smp_processor_id()) && 1720 + perf_cgroup_match(event) && pmu_filter_match(event); 1739 1721 } 1740 1722 1741 1723 static void ··· 1755 1737 * maintained, otherwise bogus information is return 1756 1738 * via read() for time_enabled, time_running: 1757 1739 */ 1758 - if (event->state == PERF_EVENT_STATE_INACTIVE 1759 - && !event_filter_match(event)) { 1740 + if (event->state == PERF_EVENT_STATE_INACTIVE && 1741 + !event_filter_match(event)) { 1760 1742 delta = tstamp - event->tstamp_stopped; 1761 1743 event->tstamp_running += delta; 1762 1744 event->tstamp_stopped = tstamp; ··· 2254 2236 2255 2237 lockdep_assert_held(&ctx->mutex); 2256 2238 2257 - event->ctx = ctx; 2258 2239 if (event->cpu != -1) 2259 2240 event->cpu = cpu; 2241 + 2242 + /* 2243 + * Ensures that if we can observe event->ctx, both the event and ctx 2244 + * will be 'complete'. See perf_iterate_sb_cpu(). 
2245 + */ 2246 + smp_store_release(&event->ctx, ctx); 2260 2247 2261 2248 if (!task) { 2262 2249 cpu_function_call(cpu, __perf_install_in_context, event); ··· 5992 5969 struct perf_event *event; 5993 5970 5994 5971 list_for_each_entry_rcu(event, &pel->list, sb_list) { 5972 + /* 5973 + * Skip events that are not fully formed yet; ensure that 5974 + * if we observe event->ctx, both event and ctx will be 5975 + * complete enough. See perf_install_in_context(). 5976 + */ 5977 + if (!smp_load_acquire(&event->ctx)) 5978 + continue; 5979 + 5995 5980 if (event->state < PERF_EVENT_STATE_INACTIVE) 5996 5981 continue; 5997 5982 if (!event_filter_match(event))
+22 -1
kernel/futex.c
··· 179 179 * Futex flags used to encode options to functions and preserve them across 180 180 * restarts. 181 181 */ 182 - #define FLAGS_SHARED 0x01 182 + #ifdef CONFIG_MMU 183 + # define FLAGS_SHARED 0x01 184 + #else 185 + /* 186 + * NOMMU does not have per process address space. Let the compiler optimize 187 + * code away. 188 + */ 189 + # define FLAGS_SHARED 0x00 190 + #endif 183 191 #define FLAGS_CLOCKRT 0x02 184 192 #define FLAGS_HAS_TIMEOUT 0x04 185 193 ··· 413 405 if (!key->both.ptr) 414 406 return; 415 407 408 + /* 409 + * On MMU less systems futexes are always "private" as there is no per 410 + * process address space. We need the smp wmb nevertheless - yes, 411 + * arch/blackfin has MMU less SMP ... 412 + */ 413 + if (!IS_ENABLED(CONFIG_MMU)) { 414 + smp_mb(); /* explicit smp_mb(); (B) */ 415 + return; 416 + } 417 + 416 418 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { 417 419 case FUT_OFF_INODE: 418 420 ihold(key->shared.inode); /* implies smp_mb(); (B) */ ··· 453 435 WARN_ON_ONCE(1); 454 436 return; 455 437 } 438 + 439 + if (!IS_ENABLED(CONFIG_MMU)) 440 + return; 456 441 457 442 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { 458 443 case FUT_OFF_INODE:
+11
kernel/irq/msi.c
··· 359 359 else 360 360 dev_dbg(dev, "irq [%d-%d] for MSI\n", 361 361 virq, virq + desc->nvec_used - 1); 362 + /* 363 + * This flag is set by the PCI layer as we need to activate 364 + * the MSI entries before the PCI layer enables MSI in the 365 + * card. Otherwise the card latches a random msi message. 366 + */ 367 + if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { 368 + struct irq_data *irq_data; 369 + 370 + irq_data = irq_domain_get_irq_data(domain, desc->irq); 371 + irq_domain_activate_irq(irq_data); 372 + } 362 373 } 363 374 364 375 return 0;
+1 -1
kernel/locking/qspinlock_paravirt.h
··· 450 450 goto gotlock; 451 451 } 452 452 } 453 - WRITE_ONCE(pn->state, vcpu_halted); 453 + WRITE_ONCE(pn->state, vcpu_hashed); 454 454 qstat_inc(qstat_pv_wait_head, true); 455 455 qstat_inc(qstat_pv_wait_again, waitcnt); 456 456 pv_wait(&l->locked, _Q_SLOW_VAL);
-1
kernel/locking/qspinlock_stat.h
··· 153 153 */ 154 154 if ((counter == qstat_pv_latency_kick) || 155 155 (counter == qstat_pv_latency_wake)) { 156 - stat = 0; 157 156 if (kicks) 158 157 stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); 159 158 }
+2 -2
kernel/power/hibernate.c
··· 300 300 save_processor_state(); 301 301 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true); 302 302 error = swsusp_arch_suspend(); 303 + /* Restore control flow magically appears here */ 304 + restore_processor_state(); 303 305 trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); 304 306 if (error) 305 307 printk(KERN_ERR "PM: Error %d creating hibernation image\n", 306 308 error); 307 - /* Restore control flow magically appears here */ 308 - restore_processor_state(); 309 309 if (!in_suspend) 310 310 events_check_enabled = false; 311 311
+6 -10
kernel/printk/internal.h
··· 16 16 */ 17 17 #include <linux/percpu.h> 18 18 19 - typedef __printf(2, 0) int (*printk_func_t)(int level, const char *fmt, 20 - va_list args); 19 + typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); 21 20 22 - __printf(2, 0) 23 - int vprintk_default(int level, const char *fmt, va_list args); 21 + int __printf(1, 0) vprintk_default(const char *fmt, va_list args); 24 22 25 23 #ifdef CONFIG_PRINTK_NMI 26 24 ··· 31 33 * via per-CPU variable. 32 34 */ 33 35 DECLARE_PER_CPU(printk_func_t, printk_func); 34 - __printf(2, 0) 35 - static inline int vprintk_func(int level, const char *fmt, va_list args) 36 + static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) 36 37 { 37 - return this_cpu_read(printk_func)(level, fmt, args); 38 + return this_cpu_read(printk_func)(fmt, args); 38 39 } 39 40 40 41 extern atomic_t nmi_message_lost; ··· 44 47 45 48 #else /* CONFIG_PRINTK_NMI */ 46 49 47 - __printf(2, 0) 48 - static inline int vprintk_func(int level, const char *fmt, va_list args) 50 + static inline __printf(1, 0) int vprintk_func(const char *fmt, va_list args) 49 51 { 50 - return vprintk_default(level, fmt, args); 52 + return vprintk_default(fmt, args); 51 53 } 52 54 53 55 static inline int get_nmi_message_lost(void)
+2 -11
kernel/printk/nmi.c
··· 58 58 * one writer running. But the buffer might get flushed from another 59 59 * CPU, so we need to be careful. 60 60 */ 61 - static int vprintk_nmi(int level, const char *fmt, va_list args) 61 + static int vprintk_nmi(const char *fmt, va_list args) 62 62 { 63 63 struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq); 64 64 int add = 0; ··· 79 79 if (!len) 80 80 smp_rmb(); 81 81 82 - if (level != LOGLEVEL_DEFAULT) { 83 - add = snprintf(s->buffer + len, sizeof(s->buffer) - len, 84 - KERN_SOH "%c", '0' + level); 85 - add += vsnprintf(s->buffer + len + add, 86 - sizeof(s->buffer) - len - add, 87 - fmt, args); 88 - } else { 89 - add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, 90 - fmt, args); 91 - } 82 + add = vsnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args); 92 83 93 84 /* 94 85 * Do it once again if the buffer has been flushed in the meantime.
+3 -24
kernel/printk/printk.c
··· 1930 1930 } 1931 1931 EXPORT_SYMBOL(printk_emit); 1932 1932 1933 - #ifdef CONFIG_PRINTK 1934 - #define define_pr_level(func, loglevel) \ 1935 - asmlinkage __visible void func(const char *fmt, ...) \ 1936 - { \ 1937 - va_list args; \ 1938 - \ 1939 - va_start(args, fmt); \ 1940 - vprintk_default(loglevel, fmt, args); \ 1941 - va_end(args); \ 1942 - } \ 1943 - EXPORT_SYMBOL(func) 1944 - 1945 - define_pr_level(__pr_emerg, LOGLEVEL_EMERG); 1946 - define_pr_level(__pr_alert, LOGLEVEL_ALERT); 1947 - define_pr_level(__pr_crit, LOGLEVEL_CRIT); 1948 - define_pr_level(__pr_err, LOGLEVEL_ERR); 1949 - define_pr_level(__pr_warn, LOGLEVEL_WARNING); 1950 - define_pr_level(__pr_notice, LOGLEVEL_NOTICE); 1951 - define_pr_level(__pr_info, LOGLEVEL_INFO); 1952 - #endif 1953 - 1954 - int vprintk_default(int level, const char *fmt, va_list args) 1933 + int vprintk_default(const char *fmt, va_list args) 1955 1934 { 1956 1935 int r; 1957 1936 ··· 1940 1961 return r; 1941 1962 } 1942 1963 #endif 1943 - r = vprintk_emit(0, level, NULL, 0, fmt, args); 1964 + r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args); 1944 1965 1945 1966 return r; 1946 1967 } ··· 1973 1994 int r; 1974 1995 1975 1996 va_start(args, fmt); 1976 - r = vprintk_func(LOGLEVEL_DEFAULT, fmt, args); 1997 + r = vprintk_func(fmt, args); 1977 1998 va_end(args); 1978 1999 1979 2000 return r;
+19
kernel/sched/core.c
··· 74 74 #include <linux/context_tracking.h> 75 75 #include <linux/compiler.h> 76 76 #include <linux/frame.h> 77 + #include <linux/prefetch.h> 77 78 78 79 #include <asm/switch_to.h> 79 80 #include <asm/tlb.h> ··· 2973 2972 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2974 2973 2975 2974 /* 2975 + * The function fair_sched_class.update_curr accesses the struct curr 2976 + * and its field curr->exec_start; when called from task_sched_runtime(), 2977 + * we observe a high rate of cache misses in practice. 2978 + * Prefetching this data results in improved performance. 2979 + */ 2980 + static inline void prefetch_curr_exec_start(struct task_struct *p) 2981 + { 2982 + #ifdef CONFIG_FAIR_GROUP_SCHED 2983 + struct sched_entity *curr = (&p->se)->cfs_rq->curr; 2984 + #else 2985 + struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 2986 + #endif 2987 + prefetch(curr); 2988 + prefetch(&curr->exec_start); 2989 + } 2990 + 2991 + /* 2976 2992 * Return accounted runtime for the task. 2977 2993 * In case the task is currently running, return the runtime plus current's 2978 2994 * pending runtime that have not been accounted yet. ··· 3023 3005 * thread, breaking clock_gettime(). 3024 3006 */ 3025 3007 if (task_current(rq, p) && task_on_rq_queued(p)) { 3008 + prefetch_curr_exec_start(p); 3026 3009 update_rq_clock(rq); 3027 3010 p->sched_class->update_curr(rq); 3028 3011 }
+1 -1
kernel/sched/cpudeadline.c
··· 168 168 169 169 if (old_idx == IDX_INVALID) { 170 170 cp->size++; 171 - cp->elements[cp->size - 1].dl = 0; 171 + cp->elements[cp->size - 1].dl = dl; 172 172 cp->elements[cp->size - 1].cpu = cpu; 173 173 cp->elements[cpu].idx = cp->size - 1; 174 174 cpudl_change_key(cp, cp->size - 1, dl);
+9 -1
kernel/sched/cputime.c
··· 508 508 */ 509 509 void account_idle_ticks(unsigned long ticks) 510 510 { 511 + cputime_t cputime, steal; 511 512 512 513 if (sched_clock_irqtime) { 513 514 irqtime_account_idle_ticks(ticks); 514 515 return; 515 516 } 516 517 517 - account_idle_time(jiffies_to_cputime(ticks)); 518 + cputime = jiffies_to_cputime(ticks); 519 + steal = steal_account_process_time(cputime); 520 + 521 + if (steal >= cputime) 522 + return; 523 + 524 + cputime -= steal; 525 + account_idle_time(cputime); 518 526 } 519 527 520 528 /*
+4 -1
kernel/sched/deadline.c
··· 658 658 * 659 659 * XXX figure out if select_task_rq_dl() deals with offline cpus. 660 660 */ 661 - if (unlikely(!rq->online)) 661 + if (unlikely(!rq->online)) { 662 + lockdep_unpin_lock(&rq->lock, rf.cookie); 662 663 rq = dl_task_offline_migration(rq, p); 664 + rf.cookie = lockdep_pin_lock(&rq->lock); 665 + } 663 666 664 667 /* 665 668 * Queueing this task back might have overloaded rq, check if we need
+1 -1
kernel/sched/fair.c
··· 4269 4269 pcfs_rq = tg->parent->cfs_rq[cpu]; 4270 4270 4271 4271 cfs_rq->throttle_count = pcfs_rq->throttle_count; 4272 - pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4272 + cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); 4273 4273 } 4274 4274 4275 4275 /* conditionally throttle active cfs_rq's from put_prev_entity() */
+4 -1
kernel/time/timer.c
··· 1496 1496 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); 1497 1497 u64 expires = KTIME_MAX; 1498 1498 unsigned long nextevt; 1499 + bool is_max_delta; 1499 1500 1500 1501 /* 1501 1502 * Pretend that there is no timer pending if the cpu is offline. ··· 1507 1506 1508 1507 spin_lock(&base->lock); 1509 1508 nextevt = __next_timer_interrupt(base); 1509 + is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); 1510 1510 base->next_expiry = nextevt; 1511 1511 /* 1512 1512 * We have a fresh next event. Check whether we can forward the base: ··· 1521 1519 expires = basem; 1522 1520 base->is_idle = false; 1523 1521 } else { 1524 - expires = basem + (nextevt - basej) * TICK_NSEC; 1522 + if (!is_max_delta) 1523 + expires = basem + (nextevt - basej) * TICK_NSEC; 1525 1524 /* 1526 1525 * If we expect to sleep more than a tick, mark the base idle: 1527 1526 */
+4 -4
lib/strncpy_from_user.c
··· 40 40 unsigned long c, data; 41 41 42 42 /* Fall back to byte-at-a-time if we get a page fault */ 43 - if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) 44 - break; 43 + unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); 44 + 45 45 *(unsigned long *)(dst+res) = c; 46 46 if (has_zero(c, &data, &constants)) { 47 47 data = prep_zero_mask(c, data, &constants); ··· 56 56 while (max) { 57 57 char c; 58 58 59 - if (unlikely(unsafe_get_user(c,src+res))) 60 - return -EFAULT; 59 + unsafe_get_user(c,src+res, efault); 61 60 dst[res] = c; 62 61 if (!c) 63 62 return res; ··· 75 76 * Nope: we hit the address space limit, and we still had more 76 77 * characters the caller would have wanted. That's an EFAULT. 77 78 */ 79 + efault: 78 80 return -EFAULT; 79 81 } 80 82
+3 -4
lib/strnlen_user.c
··· 45 45 src -= align; 46 46 max += align; 47 47 48 - if (unlikely(unsafe_get_user(c,(unsigned long __user *)src))) 49 - return 0; 48 + unsafe_get_user(c, (unsigned long __user *)src, efault); 50 49 c |= aligned_byte_mask(align); 51 50 52 51 for (;;) { ··· 60 61 if (unlikely(max <= sizeof(unsigned long))) 61 62 break; 62 63 max -= sizeof(unsigned long); 63 - if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res)))) 64 - return 0; 64 + unsafe_get_user(c, (unsigned long __user *)(src+res), efault); 65 65 } 66 66 res -= align; 67 67 ··· 75 77 * Nope: we hit the address space limit, and we still had more 76 78 * characters the caller would have wanted. That's 0. 77 79 */ 80 + efault: 78 81 return 0; 79 82 } 80 83
+4
mm/Makefile
··· 21 21 KCOV_INSTRUMENT_mmzone.o := n 22 22 KCOV_INSTRUMENT_vmstat.o := n 23 23 24 + # Since __builtin_frame_address does work as used, disable the warning. 25 + CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address) 26 + 24 27 mmu-y := nommu.o 25 28 mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ 26 29 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ ··· 102 99 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o 103 100 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o 104 101 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o 102 + obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+1
mm/hugetlb.c
··· 1448 1448 list_del(&page->lru); 1449 1449 h->free_huge_pages--; 1450 1450 h->free_huge_pages_node[nid]--; 1451 + h->max_huge_pages--; 1451 1452 update_and_free_page(h, page); 1452 1453 } 1453 1454 spin_unlock(&hugetlb_lock);
+2 -5
mm/kasan/quarantine.c
··· 217 217 new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / 218 218 QUARANTINE_FRACTION; 219 219 percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); 220 - if (WARN_ONCE(new_quarantine_size < percpu_quarantines, 221 - "Too little memory, disabling global KASAN quarantine.\n")) 222 - new_quarantine_size = 0; 223 - else 224 - new_quarantine_size -= percpu_quarantines; 220 + new_quarantine_size = (new_quarantine_size < percpu_quarantines) ? 221 + 0 : new_quarantine_size - percpu_quarantines; 225 222 WRITE_ONCE(quarantine_size, new_quarantine_size); 226 223 227 224 last = global_quarantine.head;
+70 -16
mm/memcontrol.c
··· 2337 2337 return 0; 2338 2338 2339 2339 memcg = get_mem_cgroup_from_mm(current->mm); 2340 - if (!mem_cgroup_is_root(memcg)) 2340 + if (!mem_cgroup_is_root(memcg)) { 2341 2341 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); 2342 + if (!ret) 2343 + __SetPageKmemcg(page); 2344 + } 2342 2345 css_put(&memcg->css); 2343 2346 return ret; 2344 2347 } ··· 2368 2365 page_counter_uncharge(&memcg->memsw, nr_pages); 2369 2366 2370 2367 page->mem_cgroup = NULL; 2368 + 2369 + /* slab pages do not have PageKmemcg flag set */ 2370 + if (PageKmemcg(page)) 2371 + __ClearPageKmemcg(page); 2372 + 2371 2373 css_put_many(&memcg->css, nr_pages); 2372 2374 } 2373 2375 #endif /* !CONFIG_SLOB */ ··· 4077 4069 4078 4070 static DEFINE_IDR(mem_cgroup_idr); 4079 4071 4080 - static void mem_cgroup_id_get(struct mem_cgroup *memcg) 4072 + static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4081 4073 { 4082 - atomic_inc(&memcg->id.ref); 4074 + atomic_add(n, &memcg->id.ref); 4083 4075 } 4084 4076 4085 - static void mem_cgroup_id_put(struct mem_cgroup *memcg) 4077 + static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) 4086 4078 { 4087 - if (atomic_dec_and_test(&memcg->id.ref)) { 4079 + while (!atomic_inc_not_zero(&memcg->id.ref)) { 4080 + /* 4081 + * The root cgroup cannot be destroyed, so it's refcount must 4082 + * always be >= 1. 
4083 + */ 4084 + if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { 4085 + VM_BUG_ON(1); 4086 + break; 4087 + } 4088 + memcg = parent_mem_cgroup(memcg); 4089 + if (!memcg) 4090 + memcg = root_mem_cgroup; 4091 + } 4092 + return memcg; 4093 + } 4094 + 4095 + static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) 4096 + { 4097 + if (atomic_sub_and_test(n, &memcg->id.ref)) { 4088 4098 idr_remove(&mem_cgroup_idr, memcg->id.id); 4089 4099 memcg->id.id = 0; 4090 4100 4091 4101 /* Memcg ID pins CSS */ 4092 4102 css_put(&memcg->css); 4093 4103 } 4104 + } 4105 + 4106 + static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) 4107 + { 4108 + mem_cgroup_id_get_many(memcg, 1); 4109 + } 4110 + 4111 + static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) 4112 + { 4113 + mem_cgroup_id_put_many(memcg, 1); 4094 4114 } 4095 4115 4096 4116 /** ··· 4755 4719 if (!mem_cgroup_is_root(mc.from)) 4756 4720 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4757 4721 4722 + mem_cgroup_id_put_many(mc.from, mc.moved_swap); 4723 + 4758 4724 /* 4759 4725 * we charged both to->memory and to->memsw, so we 4760 4726 * should uncharge to->memory. 
··· 4764 4726 if (!mem_cgroup_is_root(mc.to)) 4765 4727 page_counter_uncharge(&mc.to->memory, mc.moved_swap); 4766 4728 4767 - css_put_many(&mc.from->css, mc.moved_swap); 4729 + mem_cgroup_id_get_many(mc.to, mc.moved_swap); 4730 + css_put_many(&mc.to->css, mc.moved_swap); 4768 4731 4769 - /* we've already done css_get(mc.to) */ 4770 4732 mc.moved_swap = 0; 4771 4733 } 4772 4734 memcg_oom_recover(from); ··· 5575 5537 else 5576 5538 nr_file += nr_pages; 5577 5539 pgpgout++; 5578 - } else 5540 + } else { 5579 5541 nr_kmem += 1 << compound_order(page); 5542 + __ClearPageKmemcg(page); 5543 + } 5580 5544 5581 5545 page->mem_cgroup = NULL; 5582 5546 } while (next != page_list); ··· 5830 5790 */ 5831 5791 void mem_cgroup_swapout(struct page *page, swp_entry_t entry) 5832 5792 { 5833 - struct mem_cgroup *memcg; 5793 + struct mem_cgroup *memcg, *swap_memcg; 5834 5794 unsigned short oldid; 5835 5795 5836 5796 VM_BUG_ON_PAGE(PageLRU(page), page); ··· 5845 5805 if (!memcg) 5846 5806 return; 5847 5807 5848 - mem_cgroup_id_get(memcg); 5849 - oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5808 + /* 5809 + * In case the memcg owning these pages has been offlined and doesn't 5810 + * have an ID allocated to it anymore, charge the closest online 5811 + * ancestor for the swap instead and transfer the memory+swap charge. 
5812 + */ 5813 + swap_memcg = mem_cgroup_id_get_online(memcg); 5814 + oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); 5850 5815 VM_BUG_ON_PAGE(oldid, page); 5851 - mem_cgroup_swap_statistics(memcg, true); 5816 + mem_cgroup_swap_statistics(swap_memcg, true); 5852 5817 5853 5818 page->mem_cgroup = NULL; 5854 5819 5855 5820 if (!mem_cgroup_is_root(memcg)) 5856 5821 page_counter_uncharge(&memcg->memory, 1); 5822 + 5823 + if (memcg != swap_memcg) { 5824 + if (!mem_cgroup_is_root(swap_memcg)) 5825 + page_counter_charge(&swap_memcg->memsw, 1); 5826 + page_counter_uncharge(&memcg->memsw, 1); 5827 + } 5857 5828 5858 5829 /* 5859 5830 * Interrupts should be disabled here because the caller holds the ··· 5904 5853 if (!memcg) 5905 5854 return 0; 5906 5855 5907 - if (!mem_cgroup_is_root(memcg) && 5908 - !page_counter_try_charge(&memcg->swap, 1, &counter)) 5909 - return -ENOMEM; 5856 + memcg = mem_cgroup_id_get_online(memcg); 5910 5857 5911 - mem_cgroup_id_get(memcg); 5858 + if (!mem_cgroup_is_root(memcg) && 5859 + !page_counter_try_charge(&memcg->swap, 1, &counter)) { 5860 + mem_cgroup_id_put(memcg); 5861 + return -ENOMEM; 5862 + } 5863 + 5912 5864 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); 5913 5865 VM_BUG_ON_PAGE(oldid, page); 5914 5866 mem_cgroup_swap_statistics(memcg, true);
+2
mm/memory_hotplug.c
··· 1219 1219 1220 1220 /* init node's zones as empty zones, we don't have any present pages.*/ 1221 1221 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 1222 + pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1222 1223 1223 1224 /* 1224 1225 * The node we allocated has no zone fallback lists. For avoiding ··· 1250 1249 static void rollback_node_hotadd(int nid, pg_data_t *pgdat) 1251 1250 { 1252 1251 arch_refresh_nodedata(nid, NULL); 1252 + free_percpu(pgdat->per_cpu_nodestats); 1253 1253 arch_free_nodedata(pgdat); 1254 1254 return; 1255 1255 }
+1 -1
mm/oom_kill.c
··· 764 764 { 765 765 struct mm_struct *mm = task->mm; 766 766 struct task_struct *p; 767 - bool ret; 767 + bool ret = true; 768 768 769 769 /* 770 770 * Skip tasks without mm because it might have passed its exit_mm and
+42 -26
mm/page_alloc.c
··· 1008 1008 } 1009 1009 if (PageMappingFlags(page)) 1010 1010 page->mapping = NULL; 1011 - if (memcg_kmem_enabled() && PageKmemcg(page)) { 1011 + if (memcg_kmem_enabled() && PageKmemcg(page)) 1012 1012 memcg_kmem_uncharge(page, order); 1013 - __ClearPageKmemcg(page); 1014 - } 1015 1013 if (check_free) 1016 1014 bad += free_pages_check(page); 1017 1015 if (bad) ··· 3754 3756 } 3755 3757 3756 3758 out: 3757 - if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) { 3758 - if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) { 3759 - __free_pages(page, order); 3760 - page = NULL; 3761 - } else 3762 - __SetPageKmemcg(page); 3759 + if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 3760 + unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { 3761 + __free_pages(page, order); 3762 + page = NULL; 3763 3763 } 3764 3764 3765 3765 if (kmemcheck_enabled && page) ··· 4060 4064 int lru; 4061 4065 4062 4066 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 4063 - pages[lru] = global_page_state(NR_LRU_BASE + lru); 4067 + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 4064 4068 4065 4069 for_each_zone(zone) 4066 4070 wmark_low += zone->watermark[WMARK_LOW]; ··· 4757 4761 } 4758 4762 #endif 4759 4763 4764 + static void setup_min_unmapped_ratio(void); 4765 + static void setup_min_slab_ratio(void); 4760 4766 #else /* CONFIG_NUMA */ 4761 4767 4762 4768 static void set_zonelist_order(void) ··· 5880 5882 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 5881 5883 #ifdef CONFIG_NUMA 5882 5884 zone->node = nid; 5883 - pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio) 5884 - / 100; 5885 - pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100; 5886 5885 #endif 5887 5886 zone->name = zone_names[j]; 5888 5887 zone->zone_pgdat = pgdat; ··· 6800 6805 setup_per_zone_wmarks(); 6801 6806 refresh_zone_stat_thresholds(); 6802 6807 setup_per_zone_lowmem_reserve(); 6808 + 6809 + #ifdef CONFIG_NUMA 6810 + setup_min_unmapped_ratio(); 6811 + setup_min_slab_ratio(); 6812 + #endif 6813 + 6803 6814 return 0; 6804 6815 } 6805 6816 core_initcall(init_per_zone_wmark_min) ··· 6847 6846 } 6848 6847 6849 6848 #ifdef CONFIG_NUMA 6850 - int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6851 - void __user *buffer, size_t *length, loff_t *ppos) 6849 + static void setup_min_unmapped_ratio(void) 6852 6850 { 6853 - struct pglist_data *pgdat; 6851 + pg_data_t *pgdat; 6854 6852 struct zone *zone; 6855 - int rc; 6856 - 6857 - rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6858 - if (rc) 6859 - return rc; 6860 6853 6861 6854 for_each_online_pgdat(pgdat) 6862 - pgdat->min_slab_pages = 0; 6855 + pgdat->min_unmapped_pages = 0; 6863 6856 6864 6857 for_each_zone(zone) 6865 6858 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * 6866 6859 sysctl_min_unmapped_ratio) / 100; 6867 - return 0; 6868 6860 } 6869 6861 6870 - int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6862 + 6863 + int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6871 6864 void __user *buffer, size_t *length, loff_t *ppos) 6872 6865 { 6873 - struct pglist_data *pgdat; 6874 - struct zone *zone; 6875 6866 int rc; 6876 6867 6877 6868 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6878 6869 if (rc) 6879 6870 return rc; 6871 + 6872 + setup_min_unmapped_ratio(); 6873 + 6874 + return 0; 6875 + } 6876 + 6877 + static 
void setup_min_slab_ratio(void) 6878 + { 6879 + pg_data_t *pgdat; 6880 + struct zone *zone; 6880 6881 6881 6882 for_each_online_pgdat(pgdat) 6882 6883 pgdat->min_slab_pages = 0; ··· 6886 6883 for_each_zone(zone) 6887 6884 zone->zone_pgdat->min_slab_pages += (zone->managed_pages * 6888 6885 sysctl_min_slab_ratio) / 100; 6886 + } 6887 + 6888 + int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6889 + void __user *buffer, size_t *length, loff_t *ppos) 6890 + { 6891 + int rc; 6892 + 6893 + rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6894 + if (rc) 6895 + return rc; 6896 + 6897 + setup_min_slab_ratio(); 6898 + 6889 6899 return 0; 6890 6900 } 6891 6901 #endif
+4 -3
mm/rmap.c
··· 1284 1284 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 1285 1285 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); 1286 1286 } else { 1287 - if (PageTransCompound(page)) { 1288 - VM_BUG_ON_PAGE(!PageLocked(page), page); 1287 + if (PageTransCompound(page) && page_mapping(page)) { 1288 + VM_WARN_ON_ONCE(!PageLocked(page)); 1289 + 1289 1290 SetPageDoubleMap(compound_head(page)); 1290 1291 if (PageMlocked(page)) 1291 1292 clear_page_mlock(compound_head(page)); ··· 1304 1303 { 1305 1304 int i, nr = 1; 1306 1305 1307 - VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 1306 + VM_BUG_ON_PAGE(compound && !PageHead(page), page); 1308 1307 lock_page_memcg(page); 1309 1308 1310 1309 /* Hugepages are not counted in NR_FILE_MAPPED for now. */
+3 -1
mm/shmem.c
··· 3975 3975 3976 3976 struct kobj_attribute shmem_enabled_attr = 3977 3977 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 3978 + #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 3978 3979 3980 + #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3979 3981 bool shmem_huge_enabled(struct vm_area_struct *vma) 3980 3982 { 3981 3983 struct inode *inode = file_inode(vma->vm_file); ··· 4008 4006 return false; 4009 4007 } 4010 4008 } 4011 - #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 4009 + #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 4012 4010 4013 4011 #else /* !CONFIG_SHMEM */ 4014 4012
+30
mm/slab.c
··· 4441 4441 module_init(slab_proc_init); 4442 4442 #endif 4443 4443 4444 + #ifdef CONFIG_HARDENED_USERCOPY 4445 + /* 4446 + * Rejects objects that are incorrectly sized. 4447 + * 4448 + * Returns NULL if check passes, otherwise const char * to name of cache 4449 + * to indicate an error. 4450 + */ 4451 + const char *__check_heap_object(const void *ptr, unsigned long n, 4452 + struct page *page) 4453 + { 4454 + struct kmem_cache *cachep; 4455 + unsigned int objnr; 4456 + unsigned long offset; 4457 + 4458 + /* Find and validate object. */ 4459 + cachep = page->slab_cache; 4460 + objnr = obj_to_index(cachep, page, (void *)ptr); 4461 + BUG_ON(objnr >= cachep->num); 4462 + 4463 + /* Find offset within object. */ 4464 + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); 4465 + 4466 + /* Allow address range falling entirely within object size. */ 4467 + if (offset <= cachep->object_size && n <= cachep->object_size - offset) 4468 + return NULL; 4469 + 4470 + return cachep->name; 4471 + } 4472 + #endif /* CONFIG_HARDENED_USERCOPY */ 4473 + 4444 4474 /** 4445 4475 * ksize - get the actual amount of memory allocated for a given object 4446 4476 * @objp: Pointer to the object
+45 -1
mm/slub.c
··· 3629 3629 */ 3630 3630 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3631 3631 { 3632 + LIST_HEAD(discard); 3632 3633 struct page *page, *h; 3633 3634 3634 3635 BUG_ON(irqs_disabled()); ··· 3637 3636 list_for_each_entry_safe(page, h, &n->partial, lru) { 3638 3637 if (!page->inuse) { 3639 3638 remove_partial(n, page); 3640 - discard_slab(s, page); 3639 + list_add(&page->lru, &discard); 3641 3640 } else { 3642 3641 list_slab_objects(s, page, 3643 3642 "Objects remaining in %s on __kmem_cache_shutdown()"); 3644 3643 } 3645 3644 } 3646 3645 spin_unlock_irq(&n->list_lock); 3646 + 3647 + list_for_each_entry_safe(page, h, &discard, lru) 3648 + discard_slab(s, page); 3647 3649 } 3648 3650 3649 3651 /* ··· 3767 3763 } 3768 3764 EXPORT_SYMBOL(__kmalloc_node); 3769 3765 #endif 3766 + 3767 + #ifdef CONFIG_HARDENED_USERCOPY 3768 + /* 3769 + * Rejects objects that are incorrectly sized. 3770 + * 3771 + * Returns NULL if check passes, otherwise const char * to name of cache 3772 + * to indicate an error. 3773 + */ 3774 + const char *__check_heap_object(const void *ptr, unsigned long n, 3775 + struct page *page) 3776 + { 3777 + struct kmem_cache *s; 3778 + unsigned long offset; 3779 + size_t object_size; 3780 + 3781 + /* Find object and usable object size. */ 3782 + s = page->slab_cache; 3783 + object_size = slab_ksize(s); 3784 + 3785 + /* Reject impossible pointers. */ 3786 + if (ptr < page_address(page)) 3787 + return s->name; 3788 + 3789 + /* Find offset within object. */ 3790 + offset = (ptr - page_address(page)) % s->size; 3791 + 3792 + /* Adjust for redzone and reject if within the redzone. */ 3793 + if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { 3794 + if (offset < s->red_left_pad) 3795 + return s->name; 3796 + offset -= s->red_left_pad; 3797 + } 3798 + 3799 + /* Allow address range falling entirely within object size. 
*/ 3800 + if (offset <= object_size && n <= object_size - offset) 3801 + return NULL; 3802 + 3803 + return s->name; 3804 + } 3805 + #endif /* CONFIG_HARDENED_USERCOPY */ 3770 3806 3771 3807 static size_t __ksize(const void *object) 3772 3808 {
+268
mm/usercopy.c
··· 1 + /* 2 + * This implements the various checks for CONFIG_HARDENED_USERCOPY*, 3 + * which are designed to protect kernel memory from needless exposure 4 + * and overwrite under many unintended conditions. This code is based 5 + * on PAX_USERCOPY, which is: 6 + * 7 + * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source 8 + * Security Inc. 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + */ 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 + 17 + #include <linux/mm.h> 18 + #include <linux/slab.h> 19 + #include <asm/sections.h> 20 + 21 + enum { 22 + BAD_STACK = -1, 23 + NOT_STACK = 0, 24 + GOOD_FRAME, 25 + GOOD_STACK, 26 + }; 27 + 28 + /* 29 + * Checks if a given pointer and length is contained by the current 30 + * stack frame (if possible). 31 + * 32 + * Returns: 33 + * NOT_STACK: not at all on the stack 34 + * GOOD_FRAME: fully within a valid stack frame 35 + * GOOD_STACK: fully on the stack (when can't do frame-checking) 36 + * BAD_STACK: error condition (invalid stack position or bad stack frame) 37 + */ 38 + static noinline int check_stack_object(const void *obj, unsigned long len) 39 + { 40 + const void * const stack = task_stack_page(current); 41 + const void * const stackend = stack + THREAD_SIZE; 42 + int ret; 43 + 44 + /* Object is not on the stack at all. */ 45 + if (obj + len <= stack || stackend <= obj) 46 + return NOT_STACK; 47 + 48 + /* 49 + * Reject: object partially overlaps the stack (passing the 50 + * the check above means at least one end is within the stack, 51 + * so if this check fails, the other end is outside the stack). 52 + */ 53 + if (obj < stack || stackend < obj + len) 54 + return BAD_STACK; 55 + 56 + /* Check if object is safely within a valid frame. 
*/ 57 + ret = arch_within_stack_frames(stack, stackend, obj, len); 58 + if (ret) 59 + return ret; 60 + 61 + return GOOD_STACK; 62 + } 63 + 64 + static void report_usercopy(const void *ptr, unsigned long len, 65 + bool to_user, const char *type) 66 + { 67 + pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", 68 + to_user ? "exposure" : "overwrite", 69 + to_user ? "from" : "to", ptr, type ? : "unknown", len); 70 + /* 71 + * For greater effect, it would be nice to do do_group_exit(), 72 + * but BUG() actually hooks all the lock-breaking and per-arch 73 + * Oops code, so that is used here instead. 74 + */ 75 + BUG(); 76 + } 77 + 78 + /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */ 79 + static bool overlaps(const void *ptr, unsigned long n, unsigned long low, 80 + unsigned long high) 81 + { 82 + unsigned long check_low = (uintptr_t)ptr; 83 + unsigned long check_high = check_low + n; 84 + 85 + /* Does not overlap if entirely above or entirely below. */ 86 + if (check_low >= high || check_high < low) 87 + return false; 88 + 89 + return true; 90 + } 91 + 92 + /* Is this address range in the kernel text area? */ 93 + static inline const char *check_kernel_text_object(const void *ptr, 94 + unsigned long n) 95 + { 96 + unsigned long textlow = (unsigned long)_stext; 97 + unsigned long texthigh = (unsigned long)_etext; 98 + unsigned long textlow_linear, texthigh_linear; 99 + 100 + if (overlaps(ptr, n, textlow, texthigh)) 101 + return "<kernel text>"; 102 + 103 + /* 104 + * Some architectures have virtual memory mappings with a secondary 105 + * mapping of the kernel text, i.e. there is more than one virtual 106 + * kernel address that points to the kernel image. It is usually 107 + * when there is a separate linear physical memory mapping, in that 108 + * __pa() is not just the reverse of __va(). 
This can be detected 109 + * and checked: 110 + */ 111 + textlow_linear = (unsigned long)__va(__pa(textlow)); 112 + /* No different mapping: we're done. */ 113 + if (textlow_linear == textlow) 114 + return NULL; 115 + 116 + /* Check the secondary mapping... */ 117 + texthigh_linear = (unsigned long)__va(__pa(texthigh)); 118 + if (overlaps(ptr, n, textlow_linear, texthigh_linear)) 119 + return "<linear kernel text>"; 120 + 121 + return NULL; 122 + } 123 + 124 + static inline const char *check_bogus_address(const void *ptr, unsigned long n) 125 + { 126 + /* Reject if object wraps past end of memory. */ 127 + if (ptr + n < ptr) 128 + return "<wrapped address>"; 129 + 130 + /* Reject if NULL or ZERO-allocation. */ 131 + if (ZERO_OR_NULL_PTR(ptr)) 132 + return "<null>"; 133 + 134 + return NULL; 135 + } 136 + 137 + static inline const char *check_heap_object(const void *ptr, unsigned long n, 138 + bool to_user) 139 + { 140 + struct page *page, *endpage; 141 + const void *end = ptr + n - 1; 142 + bool is_reserved, is_cma; 143 + 144 + /* 145 + * Some architectures (arm64) return true for virt_addr_valid() on 146 + * vmalloced addresses. Work around this by checking for vmalloc 147 + * first. 148 + */ 149 + if (is_vmalloc_addr(ptr)) 150 + return NULL; 151 + 152 + if (!virt_addr_valid(ptr)) 153 + return NULL; 154 + 155 + page = virt_to_head_page(ptr); 156 + 157 + /* Check slab allocator for flags and size. */ 158 + if (PageSlab(page)) 159 + return __check_heap_object(ptr, n, page); 160 + 161 + /* 162 + * Sometimes the kernel data regions are not marked Reserved (see 163 + * check below). And sometimes [_sdata,_edata) does not cover 164 + * rodata and/or bss, so check each range explicitly. 165 + */ 166 + 167 + /* Allow reads of kernel rodata region (if not marked as Reserved). 
*/ 168 + if (ptr >= (const void *)__start_rodata && 169 + end <= (const void *)__end_rodata) { 170 + if (!to_user) 171 + return "<rodata>"; 172 + return NULL; 173 + } 174 + 175 + /* Allow kernel data region (if not marked as Reserved). */ 176 + if (ptr >= (const void *)_sdata && end <= (const void *)_edata) 177 + return NULL; 178 + 179 + /* Allow kernel bss region (if not marked as Reserved). */ 180 + if (ptr >= (const void *)__bss_start && 181 + end <= (const void *)__bss_stop) 182 + return NULL; 183 + 184 + /* Is the object wholly within one base page? */ 185 + if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) == 186 + ((unsigned long)end & (unsigned long)PAGE_MASK))) 187 + return NULL; 188 + 189 + /* Allow if start and end are inside the same compound page. */ 190 + endpage = virt_to_head_page(end); 191 + if (likely(endpage == page)) 192 + return NULL; 193 + 194 + /* 195 + * Reject if range is entirely either Reserved (i.e. special or 196 + * device memory), or CMA. Otherwise, reject since the object spans 197 + * several independently allocated pages. 198 + */ 199 + is_reserved = PageReserved(page); 200 + is_cma = is_migrate_cma_page(page); 201 + if (!is_reserved && !is_cma) 202 + goto reject; 203 + 204 + for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { 205 + page = virt_to_head_page(ptr); 206 + if (is_reserved && !PageReserved(page)) 207 + goto reject; 208 + if (is_cma && !is_migrate_cma_page(page)) 209 + goto reject; 210 + } 211 + 212 + return NULL; 213 + 214 + reject: 215 + return "<spans multiple pages>"; 216 + } 217 + 218 + /* 219 + * Validates that the given object is: 220 + * - not bogus address 221 + * - known-safe heap or stack object 222 + * - not in kernel text 223 + */ 224 + void __check_object_size(const void *ptr, unsigned long n, bool to_user) 225 + { 226 + const char *err; 227 + 228 + /* Skip all tests if size is zero. */ 229 + if (!n) 230 + return; 231 + 232 + /* Check for invalid addresses. 
*/ 233 + err = check_bogus_address(ptr, n); 234 + if (err) 235 + goto report; 236 + 237 + /* Check for bad heap object. */ 238 + err = check_heap_object(ptr, n, to_user); 239 + if (err) 240 + goto report; 241 + 242 + /* Check for bad stack object. */ 243 + switch (check_stack_object(ptr, n)) { 244 + case NOT_STACK: 245 + /* Object is not touching the current process stack. */ 246 + break; 247 + case GOOD_FRAME: 248 + case GOOD_STACK: 249 + /* 250 + * Object is either in the correct frame (when it 251 + * is possible to check) or just generally on the 252 + * process stack (when frame checking not available). 253 + */ 254 + return; 255 + default: 256 + err = "<process stack>"; 257 + goto report; 258 + } 259 + 260 + /* Check for object in kernel to avoid text exposure. */ 261 + err = check_kernel_text_object(ptr, n); 262 + if (!err) 263 + return; 264 + 265 + report: 266 + report_usercopy(ptr, n, to_user, err); 267 + } 268 + EXPORT_SYMBOL(__check_object_size);
+2 -2
net/9p/trans_virtio.c
··· 507 507 /* wakeup anybody waiting for slots to pin pages */ 508 508 wake_up(&vp_wq); 509 509 } 510 - kfree(in_pages); 511 - kfree(out_pages); 510 + kvfree(in_pages); 511 + kvfree(out_pages); 512 512 return err; 513 513 } 514 514
+1 -1
net/ceph/mon_client.c
··· 574 574 put_generic_request(req); 575 575 } 576 576 577 - void cancel_generic_request(struct ceph_mon_generic_request *req) 577 + static void cancel_generic_request(struct ceph_mon_generic_request *req) 578 578 { 579 579 struct ceph_mon_client *monc = req->monc; 580 580 struct ceph_mon_generic_request *lookup_req;
+1 -1
net/ceph/osd_client.c
··· 4220 4220 4221 4221 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len), 4222 4222 GFP_NOIO); 4223 - if (!pages) { 4223 + if (IS_ERR(pages)) { 4224 4224 ceph_msg_put(m); 4225 4225 return NULL; 4226 4226 }
+1 -7
net/ceph/string_table.c
··· 84 84 } 85 85 EXPORT_SYMBOL(ceph_find_or_create_string); 86 86 87 - static void ceph_free_string(struct rcu_head *head) 88 - { 89 - struct ceph_string *cs = container_of(head, struct ceph_string, rcu); 90 - kfree(cs); 91 - } 92 - 93 87 void ceph_release_string(struct kref *ref) 94 88 { 95 89 struct ceph_string *cs = container_of(ref, struct ceph_string, kref); ··· 95 101 } 96 102 spin_unlock(&string_tree_lock); 97 103 98 - call_rcu(&cs->rcu, ceph_free_string); 104 + kfree_rcu(cs, rcu); 99 105 } 100 106 EXPORT_SYMBOL(ceph_release_string); 101 107
+5 -3
net/sunrpc/auth_gss/auth_gss.c
··· 340 340 } 341 341 342 342 static struct gss_upcall_msg * 343 - __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) 343 + __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth) 344 344 { 345 345 struct gss_upcall_msg *pos; 346 346 list_for_each_entry(pos, &pipe->in_downcall, list) { 347 347 if (!uid_eq(pos->uid, uid)) 348 + continue; 349 + if (auth && pos->auth->service != auth->service) 348 350 continue; 349 351 atomic_inc(&pos->count); 350 352 dprintk("RPC: %s found msg %p\n", __func__, pos); ··· 367 365 struct gss_upcall_msg *old; 368 366 369 367 spin_lock(&pipe->lock); 370 - old = __gss_find_upcall(pipe, gss_msg->uid); 368 + old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth); 371 369 if (old == NULL) { 372 370 atomic_inc(&gss_msg->count); 373 371 list_add(&gss_msg->list, &pipe->in_downcall); ··· 716 714 err = -ENOENT; 717 715 /* Find a matching upcall */ 718 716 spin_lock(&pipe->lock); 719 - gss_msg = __gss_find_upcall(pipe, uid); 717 + gss_msg = __gss_find_upcall(pipe, uid, NULL); 720 718 if (gss_msg == NULL) { 721 719 spin_unlock(&pipe->lock); 722 720 goto err_put_ctx;
+24
net/sunrpc/clnt.c
··· 2638 2638 { 2639 2639 struct rpc_xprt_switch *xps; 2640 2640 struct rpc_xprt *xprt; 2641 + unsigned long reconnect_timeout; 2641 2642 unsigned char resvport; 2642 2643 int ret = 0; 2643 2644 ··· 2650 2649 return -EAGAIN; 2651 2650 } 2652 2651 resvport = xprt->resvport; 2652 + reconnect_timeout = xprt->max_reconnect_timeout; 2653 2653 rcu_read_unlock(); 2654 2654 2655 2655 xprt = xprt_create_transport(xprtargs); ··· 2659 2657 goto out_put_switch; 2660 2658 } 2661 2659 xprt->resvport = resvport; 2660 + xprt->max_reconnect_timeout = reconnect_timeout; 2662 2661 2663 2662 rpc_xprt_switch_set_roundrobin(xps); 2664 2663 if (setup) { ··· 2675 2672 return ret; 2676 2673 } 2677 2674 EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); 2675 + 2676 + static int 2677 + rpc_xprt_cap_max_reconnect_timeout(struct rpc_clnt *clnt, 2678 + struct rpc_xprt *xprt, 2679 + void *data) 2680 + { 2681 + unsigned long timeout = *((unsigned long *)data); 2682 + 2683 + if (timeout < xprt->max_reconnect_timeout) 2684 + xprt->max_reconnect_timeout = timeout; 2685 + return 0; 2686 + } 2687 + 2688 + void 2689 + rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt, unsigned long timeo) 2690 + { 2691 + rpc_clnt_iterate_for_each_xprt(clnt, 2692 + rpc_xprt_cap_max_reconnect_timeout, 2693 + &timeo); 2694 + } 2695 + EXPORT_SYMBOL_GPL(rpc_cap_max_reconnect_timeout); 2678 2696 2679 2697 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 2680 2698 static void rpc_show_header(void)
+18 -8
net/sunrpc/xprt.c
··· 680 680 spin_unlock_bh(&xprt->transport_lock); 681 681 } 682 682 683 + static bool 684 + xprt_has_timer(const struct rpc_xprt *xprt) 685 + { 686 + return xprt->idle_timeout != 0; 687 + } 688 + 689 + static void 690 + xprt_schedule_autodisconnect(struct rpc_xprt *xprt) 691 + __must_hold(&xprt->transport_lock) 692 + { 693 + if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) 694 + mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); 695 + } 696 + 683 697 static void 684 698 xprt_init_autodisconnect(unsigned long data) 685 699 { ··· 702 688 spin_lock(&xprt->transport_lock); 703 689 if (!list_empty(&xprt->recv)) 704 690 goto out_abort; 691 + /* Reset xprt->last_used to avoid connect/autodisconnect cycling */ 692 + xprt->last_used = jiffies; 705 693 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) 706 694 goto out_abort; 707 695 spin_unlock(&xprt->transport_lock); ··· 741 725 goto out; 742 726 xprt->snd_task =NULL; 743 727 xprt->ops->release_xprt(xprt, NULL); 728 + xprt_schedule_autodisconnect(xprt); 744 729 out: 745 730 spin_unlock_bh(&xprt->transport_lock); 746 731 wake_up_bit(&xprt->state, XPRT_LOCKED); ··· 903 886 } else 904 887 task->tk_status = 0; 905 888 spin_unlock_bh(&xprt->transport_lock); 906 - } 907 - 908 - static inline int xprt_has_timer(struct rpc_xprt *xprt) 909 - { 910 - return xprt->idle_timeout != 0; 911 889 } 912 890 913 891 /** ··· 1292 1280 if (!list_empty(&req->rq_list)) 1293 1281 list_del(&req->rq_list); 1294 1282 xprt->last_used = jiffies; 1295 - if (list_empty(&xprt->recv) && xprt_has_timer(xprt)) 1296 - mod_timer(&xprt->timer, 1297 - xprt->last_used + xprt->idle_timeout); 1283 + xprt_schedule_autodisconnect(xprt); 1298 1284 spin_unlock_bh(&xprt->transport_lock); 1299 1285 if (req->rq_buffer) 1300 1286 xprt->ops->buf_free(req->rq_buffer);
+47 -13
net/sunrpc/xprtsock.c
··· 177 177 * increase over time if the server is down or not responding. 178 178 */ 179 179 #define XS_TCP_INIT_REEST_TO (3U * HZ) 180 - #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) 181 180 182 181 /* 183 182 * TCP idle timeout; client drops the transport socket if it is idle ··· 2172 2173 write_unlock_bh(&sk->sk_callback_lock); 2173 2174 } 2174 2175 xs_udp_do_set_buffer_size(xprt); 2176 + 2177 + xprt->stat.connect_start = jiffies; 2175 2178 } 2176 2179 2177 2180 static void xs_udp_setup_socket(struct work_struct *work) ··· 2237 2236 unsigned int keepcnt = xprt->timeout->to_retries + 1; 2238 2237 unsigned int opt_on = 1; 2239 2238 unsigned int timeo; 2239 + unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC; 2240 2240 2241 2241 /* TCP Keepalive options */ 2242 2242 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, ··· 2248 2246 (char *)&keepidle, sizeof(keepidle)); 2249 2247 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, 2250 2248 (char *)&keepcnt, sizeof(keepcnt)); 2249 + 2250 + /* Avoid temporary address, they are bad for long-lived 2251 + * connections such as NFS mounts. 2252 + * RFC4941, section 3.6 suggests that: 2253 + * Individual applications, which have specific 2254 + * knowledge about the normal duration of connections, 2255 + * MAY override this as appropriate. 2256 + */ 2257 + kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES, 2258 + (char *)&addr_pref, sizeof(addr_pref)); 2251 2259 2252 2260 /* TCP user timeout (see RFC5482) */ 2253 2261 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * ··· 2307 2295 /* SYN_SENT! */ 2308 2296 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2309 2297 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2298 + break; 2299 + case -EADDRNOTAVAIL: 2300 + /* Source port number is unavailable. Try a new one! 
*/ 2301 + transport->srcport = 0; 2310 2302 } 2311 2303 out: 2312 2304 return ret; ··· 2385 2369 xprt_wake_pending_tasks(xprt, status); 2386 2370 } 2387 2371 2372 + static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt) 2373 + { 2374 + unsigned long start, now = jiffies; 2375 + 2376 + start = xprt->stat.connect_start + xprt->reestablish_timeout; 2377 + if (time_after(start, now)) 2378 + return start - now; 2379 + return 0; 2380 + } 2381 + 2382 + static void xs_reconnect_backoff(struct rpc_xprt *xprt) 2383 + { 2384 + xprt->reestablish_timeout <<= 1; 2385 + if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) 2386 + xprt->reestablish_timeout = xprt->max_reconnect_timeout; 2387 + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2388 + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2389 + } 2390 + 2388 2391 /** 2389 2392 * xs_connect - connect a socket to a remote endpoint 2390 2393 * @xprt: pointer to transport structure ··· 2421 2386 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2422 2387 { 2423 2388 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2389 + unsigned long delay = 0; 2424 2390 2425 2391 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2426 2392 ··· 2433 2397 /* Start by resetting any existing state */ 2434 2398 xs_reset_transport(transport); 2435 2399 2436 - queue_delayed_work(xprtiod_workqueue, 2437 - &transport->connect_worker, 2438 - xprt->reestablish_timeout); 2439 - xprt->reestablish_timeout <<= 1; 2440 - if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2441 - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2442 - if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 2443 - xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 2444 - } else { 2400 + delay = xs_reconnect_delay(xprt); 2401 + xs_reconnect_backoff(xprt); 2402 + 2403 + } else 2445 2404 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2446 - queue_delayed_work(xprtiod_workqueue, 2447 - 
&transport->connect_worker, 0); 2448 - } 2405 + 2406 + queue_delayed_work(xprtiod_workqueue, 2407 + &transport->connect_worker, 2408 + delay); 2449 2409 } 2450 2410 2451 2411 /** ··· 2992 2960 2993 2961 xprt->ops = &xs_tcp_ops; 2994 2962 xprt->timeout = &xs_tcp_default_timeout; 2963 + 2964 + xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 2995 2965 2996 2966 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); 2997 2967 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
+7 -3
scripts/Kbuild.include
··· 108 108 as-instr = $(call try-run,\ 109 109 printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) 110 110 111 + # Do not attempt to build with gcc plugins during cc-option tests. 112 + # (And this uses delayed resolution so the flags will be up to date.) 113 + CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) 114 + 111 115 # cc-option 112 116 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) 113 117 114 118 cc-option = $(call try-run,\ 115 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) 119 + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) 116 120 117 121 # cc-option-yn 118 122 # Usage: flag := $(call cc-option-yn,-march=winchip-c6) 119 123 cc-option-yn = $(call try-run,\ 120 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) 124 + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n) 121 125 122 126 # cc-option-align 123 127 # Prefix align with either -falign or -malign ··· 131 127 # cc-disable-warning 132 128 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) 133 129 cc-disable-warning = $(call try-run,\ 134 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) 130 + $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) 135 131 136 132 # cc-name 137 133 # Expands to either gcc or clang
+30 -13
scripts/Makefile.gcc-plugins
··· 19 19 endif 20 20 endif 21 21 22 - GCC_PLUGINS_CFLAGS := $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) 22 + GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y)) 23 23 24 - export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN SANCOV_PLUGIN 24 + export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN 25 25 26 - ifeq ($(PLUGINCC),) 27 - ifneq ($(GCC_PLUGINS_CFLAGS),) 28 - ifeq ($(call cc-ifversion, -ge, 0405, y), y) 29 - PLUGINCC := $(shell $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)") 30 - $(warning warning: your gcc installation does not support plugins, perhaps the necessary headers are missing?) 31 - else 32 - $(warning warning: your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least) 33 - endif 34 - endif 35 - else 26 + ifneq ($(PLUGINCC),) 36 27 # SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication. 37 28 GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS)) 38 29 endif 39 30 40 31 KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) 41 32 GCC_PLUGIN := $(gcc-plugin-y) 42 - 33 + GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y) 43 34 endif 35 + 36 + # If plugins aren't supported, abort the build before hard-to-read compiler 37 + # errors start getting spewed by the main build. 38 + PHONY += gcc-plugins-check 39 + gcc-plugins-check: FORCE 40 + ifdef CONFIG_GCC_PLUGINS 41 + ifeq ($(PLUGINCC),) 42 + ifneq ($(GCC_PLUGINS_CFLAGS),) 43 + ifeq ($(call cc-ifversion, -ge, 0405, y), y) 44 + $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true 45 + @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" 
>&2 && exit 1 46 + else 47 + @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1 48 + endif 49 + endif 50 + endif 51 + endif 52 + @: 53 + 54 + # Actually do the build, if requested. 55 + PHONY += gcc-plugins 56 + gcc-plugins: scripts_basic gcc-plugins-check 57 + ifdef CONFIG_GCC_PLUGINS 58 + $(Q)$(MAKE) $(build)=scripts/gcc-plugins 59 + endif 60 + @:
+14
scripts/gcc-plugin.sh
··· 1 1 #!/bin/sh 2 2 srctree=$(dirname "$0") 3 + 4 + SHOW_ERROR= 5 + if [ "$1" = "--show-error" ] ; then 6 + SHOW_ERROR=1 7 + shift || true 8 + fi 9 + 3 10 gccplugins_dir=$($3 -print-file-name=plugin) 4 11 plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF 5 12 #include "gcc-common.h" ··· 20 13 21 14 if [ $? -ne 0 ] 22 15 then 16 + if [ -n "$SHOW_ERROR" ] ; then 17 + echo "${plugincc}" >&2 18 + fi 23 19 exit 1 24 20 fi 25 21 ··· 57 47 then 58 48 echo "$2" 59 49 exit 0 50 + fi 51 + 52 + if [ -n "$SHOW_ERROR" ] ; then 53 + echo "${plugincc}" >&2 60 54 fi 61 55 exit 1
+7 -5
scripts/gcc-plugins/Makefile
··· 12 12 export HOST_EXTRACXXFLAGS 13 13 endif 14 14 15 - export GCCPLUGINS_DIR HOSTLIBS 16 - 17 15 ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN)) 18 16 GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN)) 19 17 endif 20 18 21 - $(HOSTLIBS)-y := $(GCC_PLUGIN) 19 + export HOSTLIBS 20 + 21 + $(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p))) 22 22 always := $($(HOSTLIBS)-y) 23 23 24 - cyc_complexity_plugin-objs := cyc_complexity_plugin.o 25 - sancov_plugin-objs := sancov_plugin.o 24 + $(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o)) 25 + 26 + subdir-y := $(GCC_PLUGIN_SUBDIR) 27 + subdir- += $(GCC_PLUGIN_SUBDIR) 26 28 27 29 clean-files += *.so
+1 -1
scripts/get_maintainer.pl
··· 432 432 die "$P: file '${file}' not found\n"; 433 433 } 434 434 } 435 - if ($from_filename || vcs_file_exists($file)) { 435 + if ($from_filename || ($file ne "&STDIN" && vcs_file_exists($file))) { 436 436 $file =~ s/^\Q${cur_path}\E//; #strip any absolute path 437 437 $file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree 438 438 push(@files, $file);
+28
security/Kconfig
··· 118 118 this low address space will need the permission specific to the 119 119 systems running LSM. 120 120 121 + config HAVE_HARDENED_USERCOPY_ALLOCATOR 122 + bool 123 + help 124 + The heap allocator implements __check_heap_object() for 125 + validating memory ranges against heap object sizes in 126 + support of CONFIG_HARDENED_USERCOPY. 127 + 128 + config HAVE_ARCH_HARDENED_USERCOPY 129 + bool 130 + help 131 + The architecture supports CONFIG_HARDENED_USERCOPY by 132 + calling check_object_size() just before performing the 133 + userspace copies in the low level implementation of 134 + copy_to_user() and copy_from_user(). 135 + 136 + config HARDENED_USERCOPY 137 + bool "Harden memory copies between kernel and userspace" 138 + depends on HAVE_ARCH_HARDENED_USERCOPY 139 + select BUG 140 + help 141 + This option checks for obviously wrong memory regions when 142 + copying memory to/from the kernel (via copy_to_user() and 143 + copy_from_user() functions) by rejecting memory ranges that 144 + are larger than the specified heap object, span multiple 145 + separately allocates pages, are not on the process stack, 146 + or are part of the kernel text. This kills entire classes 147 + of heap overflow exploits and similar kernel memory exposures. 148 + 121 149 source security/selinux/Kconfig 122 150 source security/smack/Kconfig 123 151 source security/tomoyo/Kconfig
+20 -12
sound/pci/hda/hda_intel.c
··· 906 906 struct snd_card *card = dev_get_drvdata(dev); 907 907 struct azx *chip; 908 908 struct hda_intel *hda; 909 + struct hdac_bus *bus; 909 910 910 911 if (!card) 911 912 return 0; 912 913 913 914 chip = card->private_data; 914 915 hda = container_of(chip, struct hda_intel, chip); 916 + bus = azx_bus(chip); 915 917 if (chip->disabled || hda->init_failed || !chip->running) 916 918 return 0; 917 919 918 - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 919 - && hda->need_i915_power) { 920 - snd_hdac_display_power(azx_bus(chip), true); 921 - snd_hdac_i915_set_bclk(azx_bus(chip)); 920 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 921 + snd_hdac_display_power(bus, true); 922 + if (hda->need_i915_power) 923 + snd_hdac_i915_set_bclk(bus); 922 924 } 925 + 923 926 if (chip->msi) 924 927 if (pci_enable_msi(pci) < 0) 925 928 chip->msi = 0; ··· 931 928 azx_init_pci(chip); 932 929 933 930 hda_intel_init_chip(chip, true); 931 + 932 + /* power down again for link-controlled chips */ 933 + if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && 934 + !hda->need_i915_power) 935 + snd_hdac_display_power(bus, false); 934 936 935 937 snd_power_change_state(card, SNDRV_CTL_POWER_D0); 936 938 ··· 1016 1008 1017 1009 chip = card->private_data; 1018 1010 hda = container_of(chip, struct hda_intel, chip); 1011 + bus = azx_bus(chip); 1019 1012 if (chip->disabled || hda->init_failed) 1020 1013 return 0; 1021 1014 ··· 1024 1015 return 0; 1025 1016 1026 1017 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1027 - bus = azx_bus(chip); 1028 - if (hda->need_i915_power) { 1029 - snd_hdac_display_power(bus, true); 1018 + snd_hdac_display_power(bus, true); 1019 + if (hda->need_i915_power) 1030 1020 snd_hdac_i915_set_bclk(bus); 1031 - } else { 1032 - /* toggle codec wakeup bit for STATESTS read */ 1033 - snd_hdac_set_codec_wakeup(bus, true); 1034 - snd_hdac_set_codec_wakeup(bus, false); 1035 - } 1036 1021 } 1037 1022 1038 1023 /* Read STATESTS before controller reset */ ··· 1045 1042 
/* disable controller Wake Up event*/ 1046 1043 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & 1047 1044 ~STATESTS_INT_MASK); 1045 + 1046 + /* power down again for link-controlled chips */ 1047 + if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) && 1048 + !hda->need_i915_power) 1049 + snd_hdac_display_power(bus, false); 1048 1050 1049 1051 trace_azx_runtime_resume(chip); 1050 1052 return 0;
+2
sound/usb/quirks.c
··· 1128 1128 { 1129 1129 /* devices which do not support reading the sample rate. */ 1130 1130 switch (chip->usb_id) { 1131 + case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */ 1131 1132 case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ 1132 1133 case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ 1133 1134 case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */ ··· 1139 1138 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ 1140 1139 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1141 1140 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ 1141 + case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ 1142 1142 case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ 1143 1143 case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ 1144 1144 case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+3 -6
tools/arch/x86/include/asm/cpufeatures.h
··· 225 225 #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 226 226 #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 227 227 #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 228 - #define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ 229 228 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 230 229 #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 231 230 #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ ··· 300 301 #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 301 302 #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 302 303 #define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 303 - #define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */ 304 - #define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */ 305 - 306 - 307 304 #ifdef CONFIG_X86_32 308 305 /* 309 306 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional ··· 307 312 */ 308 313 #define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 309 314 #endif 310 - 315 + #define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 316 + #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 317 + #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 311 318 #endif /* _ASM_X86_CPUFEATURES_H */
+2
tools/arch/x86/include/asm/disabled-features.h
··· 56 56 #define DISABLED_MASK14 0 57 57 #define DISABLED_MASK15 0 58 58 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) 59 + #define DISABLED_MASK17 0 60 + #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 59 61 60 62 #endif /* _ASM_X86_DISABLED_FEATURES_H */
+2
tools/arch/x86/include/asm/required-features.h
··· 99 99 #define REQUIRED_MASK14 0 100 100 #define REQUIRED_MASK15 0 101 101 #define REQUIRED_MASK16 0 102 + #define REQUIRED_MASK17 0 103 + #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 102 104 103 105 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+1 -3
tools/arch/x86/include/uapi/asm/vmx.h
··· 78 78 #define EXIT_REASON_PML_FULL 62 79 79 #define EXIT_REASON_XSAVES 63 80 80 #define EXIT_REASON_XRSTORS 64 81 - #define EXIT_REASON_PCOMMIT 65 82 81 83 82 #define VMX_EXIT_REASONS \ 84 83 { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ ··· 126 127 { EXIT_REASON_INVVPID, "INVVPID" }, \ 127 128 { EXIT_REASON_INVPCID, "INVPCID" }, \ 128 129 { EXIT_REASON_XSAVES, "XSAVES" }, \ 129 - { EXIT_REASON_XRSTORS, "XRSTORS" }, \ 130 - { EXIT_REASON_PCOMMIT, "PCOMMIT" } 130 + { EXIT_REASON_XRSTORS, "XRSTORS" } 131 131 132 132 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 133 133 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
+85 -1
tools/include/uapi/linux/bpf.h
··· 84 84 BPF_MAP_TYPE_PERCPU_HASH, 85 85 BPF_MAP_TYPE_PERCPU_ARRAY, 86 86 BPF_MAP_TYPE_STACK_TRACE, 87 + BPF_MAP_TYPE_CGROUP_ARRAY, 87 88 }; 88 89 89 90 enum bpf_prog_type { ··· 94 93 BPF_PROG_TYPE_SCHED_CLS, 95 94 BPF_PROG_TYPE_SCHED_ACT, 96 95 BPF_PROG_TYPE_TRACEPOINT, 96 + BPF_PROG_TYPE_XDP, 97 97 }; 98 98 99 99 #define BPF_PSEUDO_MAP_FD 1 ··· 315 313 */ 316 314 BPF_FUNC_skb_get_tunnel_opt, 317 315 BPF_FUNC_skb_set_tunnel_opt, 316 + 317 + /** 318 + * bpf_skb_change_proto(skb, proto, flags) 319 + * Change protocol of the skb. Currently supported is 320 + * v4 -> v6, v6 -> v4 transitions. The helper will also 321 + * resize the skb. eBPF program is expected to fill the 322 + * new headers via skb_store_bytes and lX_csum_replace. 323 + * @skb: pointer to skb 324 + * @proto: new skb->protocol type 325 + * @flags: reserved 326 + * Return: 0 on success or negative error 327 + */ 328 + BPF_FUNC_skb_change_proto, 329 + 330 + /** 331 + * bpf_skb_change_type(skb, type) 332 + * Change packet type of skb. 333 + * @skb: pointer to skb 334 + * @type: new skb->pkt_type type 335 + * Return: 0 on success or negative error 336 + */ 337 + BPF_FUNC_skb_change_type, 338 + 339 + /** 340 + * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb 341 + * @skb: pointer to skb 342 + * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type 343 + * @index: index of the cgroup in the bpf_map 344 + * Return: 345 + * == 0 skb failed the cgroup2 descendant test 346 + * == 1 skb succeeded the cgroup2 descendant test 347 + * < 0 error 348 + */ 349 + BPF_FUNC_skb_in_cgroup, 350 + 351 + /** 352 + * bpf_get_hash_recalc(skb) 353 + * Retrieve and possibly recalculate skb->hash. 
354 + * @skb: pointer to skb 355 + * Return: hash 356 + */ 357 + BPF_FUNC_get_hash_recalc, 358 + 359 + /** 360 + * u64 bpf_get_current_task(void) 361 + * Returns current task_struct 362 + * Return: current 363 + */ 364 + BPF_FUNC_get_current_task, 365 + 366 + /** 367 + * bpf_probe_write_user(void *dst, void *src, int len) 368 + * safely attempt to write to a location 369 + * @dst: destination address in userspace 370 + * @src: source address on stack 371 + * @len: number of bytes to copy 372 + * Return: 0 on success or negative error 373 + */ 374 + BPF_FUNC_probe_write_user, 375 + 318 376 __BPF_FUNC_MAX_ID, 319 377 }; 320 378 ··· 409 347 #define BPF_F_ZERO_CSUM_TX (1ULL << 1) 410 348 #define BPF_F_DONT_FRAGMENT (1ULL << 2) 411 349 412 - /* BPF_FUNC_perf_event_output flags. */ 350 + /* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */ 413 351 #define BPF_F_INDEX_MASK 0xffffffffULL 414 352 #define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK 353 + /* BPF_FUNC_perf_event_output for sk_buff input context. */ 354 + #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) 415 355 416 356 /* user accessible mirror of in-kernel sk_buff. 417 357 * new fields can only be added to the end of this structure ··· 448 384 __u8 tunnel_ttl; 449 385 __u16 tunnel_ext; 450 386 __u32 tunnel_label; 387 + }; 388 + 389 + /* User return codes for XDP prog type. 390 + * A valid XDP program must return one of these defined values. All other 391 + * return codes are reserved for future use. Unknown return codes will result 392 + * in packet drop. 393 + */ 394 + enum xdp_action { 395 + XDP_ABORTED = 0, 396 + XDP_DROP, 397 + XDP_PASS, 398 + XDP_TX, 399 + }; 400 + 401 + /* user accessible metadata for XDP packet hook 402 + * new fields must be added to the end of this structure 403 + */ 404 + struct xdp_md { 405 + __u32 data; 406 + __u32 data_end; 451 407 }; 452 408 453 409 #endif /* _UAPI__LINUX_BPF_H__ */
+9 -1
tools/perf/Documentation/perf-probe.txt
··· 176 176 177 177 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.) 178 178 '$vars' and '$params' special arguments are also available for NAME, '$vars' is expanded to the local variables (including function parameters) which can access at given probe point. '$params' is expanded to only the function parameters. 179 - 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. 179 + 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), signedness casting (u/s), "string" and bitfield are supported. (see TYPES for detail) 180 180 181 181 On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid. 182 + 183 + TYPES 184 + ----- 185 + Basic types (u8/u16/u32/u64/s8/s16/s32/s64) are integer types. Prefix 's' and 'u' means those types are signed and unsigned respectively. Traced arguments are shown in decimal (signed) or hex (unsigned). You can also use 's' or 'u' to specify only signedness and leave its size auto-detected by perf probe. 186 + String type is a special type, which fetches a "null-terminated" string from kernel space. This means it will fail and store NULL if the string container has been paged out. 
You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. 187 + Bitfield is another special type, which takes 3 parameters, bit-width, bit-offset, and container-size (usually 32). The syntax is; 188 + 189 + b<bit-width>@<bit-offset>/<container-size> 182 190 183 191 LINE SYNTAX 184 192 -----------
+2 -2
tools/perf/Documentation/perf-script.txt
··· 116 116 --fields:: 117 117 Comma separated list of fields to print. Options are: 118 118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, 119 - srcline, period, iregs, brstack, brstacksym, flags. 120 - Field list can be prepended with the type, trace, sw or hw, 119 + srcline, period, iregs, brstack, brstacksym, flags, bpf-output, 120 + callindent. Field list can be prepended with the type, trace, sw or hw, 121 121 to indicate to which event type the field list applies. 122 122 e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace 123 123
+23 -4
tools/perf/arch/powerpc/util/sym-handling.c
··· 54 54 #endif 55 55 56 56 #if defined(_CALL_ELF) && _CALL_ELF == 2 57 - bool arch__prefers_symtab(void) 58 - { 59 - return true; 60 - } 61 57 62 58 #ifdef HAVE_LIBELF_SUPPORT 63 59 void arch__sym_update(struct symbol *s, GElf_Sym *sym) ··· 96 100 tev->point.offset += lep_offset; 97 101 } 98 102 } 103 + 104 + void arch__post_process_probe_trace_events(struct perf_probe_event *pev, 105 + int ntevs) 106 + { 107 + struct probe_trace_event *tev; 108 + struct map *map; 109 + struct symbol *sym = NULL; 110 + struct rb_node *tmp; 111 + int i = 0; 112 + 113 + map = get_target_map(pev->target, pev->uprobes); 114 + if (!map || map__load(map, NULL) < 0) 115 + return; 116 + 117 + for (i = 0; i < ntevs; i++) { 118 + tev = &pev->tevs[i]; 119 + map__for_each_symbol(map, sym, tmp) { 120 + if (map->unmap_ip(map, sym->start) == tev->point.address) 121 + arch__fix_tev_from_maps(pev, tev, map, sym); 122 + } 123 + } 124 + } 125 + 99 126 #endif
+1 -1
tools/perf/builtin-script.c
··· 2116 2116 "Valid types: hw,sw,trace,raw. " 2117 2117 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," 2118 2118 "addr,symoff,period,iregs,brstack,brstacksym,flags," 2119 - "callindent", parse_output_fields), 2119 + "bpf-output,callindent", parse_output_fields), 2120 2120 OPT_BOOLEAN('a', "all-cpus", &system_wide, 2121 2121 "system-wide collection from all CPUs"), 2122 2122 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+23 -8
tools/perf/builtin-stat.c
··· 331 331 return 0; 332 332 } 333 333 334 - static void read_counters(bool close_counters) 334 + static void read_counters(void) 335 335 { 336 336 struct perf_evsel *counter; 337 337 ··· 341 341 342 342 if (perf_stat_process_counter(&stat_config, counter)) 343 343 pr_warning("failed to process counter %s\n", counter->name); 344 - 345 - if (close_counters) { 346 - perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 347 - thread_map__nr(evsel_list->threads)); 348 - } 349 344 } 350 345 } 351 346 ··· 348 353 { 349 354 struct timespec ts, rs; 350 355 351 - read_counters(false); 356 + read_counters(); 352 357 353 358 clock_gettime(CLOCK_MONOTONIC, &ts); 354 359 diff_timespec(&rs, &ts, &ref_time); ··· 373 378 */ 374 379 if (!target__none(&target) || initial_delay) 375 380 perf_evlist__enable(evsel_list); 381 + } 382 + 383 + static void disable_counters(void) 384 + { 385 + /* 386 + * If we don't have tracee (attaching to task or cpu), counters may 387 + * still be running. To get accurate group ratios, we must stop groups 388 + * from counting before reading their constituent counters. 389 + */ 390 + if (!target__none(&target)) 391 + perf_evlist__disable(evsel_list); 376 392 } 377 393 378 394 static volatile int workload_exec_errno; ··· 663 657 } 664 658 } 665 659 660 + disable_counters(); 661 + 666 662 t1 = rdclock(); 667 663 668 664 update_stats(&walltime_nsecs_stats, t1 - t0); 669 665 670 - read_counters(true); 666 + /* 667 + * Closing a group leader splits the group, and as we only disable 668 + * group leaders, results in remaining events becoming enabled. To 669 + * avoid arbitrary skew, we must read all counters before closing any 670 + * group leaders. 671 + */ 672 + read_counters(); 673 + perf_evlist__close(evsel_list); 671 674 672 675 return WEXITSTATUS(status); 673 676 }
+37 -23
tools/perf/util/probe-event.c
··· 170 170 module = "kernel"; 171 171 172 172 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 173 + /* short_name is "[module]" */ 173 174 if (strncmp(pos->dso->short_name + 1, module, 174 - pos->dso->short_name_len - 2) == 0) { 175 + pos->dso->short_name_len - 2) == 0 && 176 + module[pos->dso->short_name_len - 2] == '\0') { 175 177 return pos; 176 178 } 177 179 } 178 180 return NULL; 179 181 } 180 182 181 - static struct map *get_target_map(const char *target, bool user) 183 + struct map *get_target_map(const char *target, bool user) 182 184 { 183 185 /* Init maps of given executable or kernel */ 184 186 if (user) ··· 387 385 if (uprobes) 388 386 address = sym->start; 389 387 else 390 - address = map->unmap_ip(map, sym->start); 388 + address = map->unmap_ip(map, sym->start) - map->reloc; 391 389 break; 392 390 } 393 391 if (!address) { ··· 666 664 return ret; 667 665 } 668 666 669 - /* Post processing the probe events */ 670 - static int post_process_probe_trace_events(struct probe_trace_event *tevs, 671 - int ntevs, const char *module, 672 - bool uprobe) 667 + static int 668 + post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, 669 + int ntevs) 673 670 { 674 671 struct ref_reloc_sym *reloc_sym; 675 672 char *tmp; 676 673 int i, skipped = 0; 677 - 678 - if (uprobe) 679 - return add_exec_to_probe_trace_events(tevs, ntevs, module); 680 - 681 - /* Note that currently ref_reloc_sym based probe is not for drivers */ 682 - if (module) 683 - return add_module_to_probe_trace_events(tevs, ntevs, module); 684 674 685 675 reloc_sym = kernel_get_ref_reloc_sym(); 686 676 if (!reloc_sym) { ··· 703 709 reloc_sym->unrelocated_addr; 704 710 } 705 711 return skipped; 712 + } 713 + 714 + void __weak 715 + arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unused, 716 + int ntevs __maybe_unused) 717 + { 718 + } 719 + 720 + /* Post processing the probe events */ 721 + static int post_process_probe_trace_events(struct 
perf_probe_event *pev, 722 + struct probe_trace_event *tevs, 723 + int ntevs, const char *module, 724 + bool uprobe) 725 + { 726 + int ret; 727 + 728 + if (uprobe) 729 + ret = add_exec_to_probe_trace_events(tevs, ntevs, module); 730 + else if (module) 731 + /* Currently ref_reloc_sym based probe is not for drivers */ 732 + ret = add_module_to_probe_trace_events(tevs, ntevs, module); 733 + else 734 + ret = post_process_kernel_probe_trace_events(tevs, ntevs); 735 + 736 + if (ret >= 0) 737 + arch__post_process_probe_trace_events(pev, ntevs); 738 + 739 + return ret; 706 740 } 707 741 708 742 /* Try to find perf_probe_event with debuginfo */ ··· 771 749 772 750 if (ntevs > 0) { /* Succeeded to find trace events */ 773 751 pr_debug("Found %d probe_trace_events.\n", ntevs); 774 - ret = post_process_probe_trace_events(*tevs, ntevs, 752 + ret = post_process_probe_trace_events(pev, *tevs, ntevs, 775 753 pev->target, pev->uprobes); 776 754 if (ret < 0 || ret == ntevs) { 777 755 clear_probe_trace_events(*tevs, ntevs); ··· 2958 2936 return err; 2959 2937 } 2960 2938 2961 - bool __weak arch__prefers_symtab(void) { return false; } 2962 - 2963 2939 /* Concatinate two arrays */ 2964 2940 static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b) 2965 2941 { ··· 3177 3157 ret = find_probe_trace_events_from_cache(pev, tevs); 3178 3158 if (ret > 0 || pev->sdt) /* SDT can be found only in the cache */ 3179 3159 return ret == 0 ? -ENOENT : ret; /* Found in probe cache */ 3180 - 3181 - if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) { 3182 - ret = find_probe_trace_events_from_map(pev, tevs); 3183 - if (ret > 0) 3184 - return ret; /* Found in symbol table */ 3185 - } 3186 3160 3187 3161 /* Convert perf_probe_event with debuginfo */ 3188 3162 ret = try_to_find_probe_trace_events(pev, tevs);
+5 -1
tools/perf/util/probe-event.h
··· 158 158 int show_available_vars(struct perf_probe_event *pevs, int npevs, 159 159 struct strfilter *filter); 160 160 int show_available_funcs(const char *module, struct strfilter *filter, bool user); 161 - bool arch__prefers_symtab(void); 162 161 void arch__fix_tev_from_maps(struct perf_probe_event *pev, 163 162 struct probe_trace_event *tev, struct map *map, 164 163 struct symbol *sym); ··· 171 172 172 173 int copy_to_probe_trace_arg(struct probe_trace_arg *tvar, 173 174 struct perf_probe_arg *pvar); 175 + 176 + struct map *get_target_map(const char *target, bool user); 177 + 178 + void arch__post_process_probe_trace_events(struct perf_probe_event *pev, 179 + int ntevs); 174 180 175 181 #endif /*_PROBE_EVENT_H */
+12 -3
tools/perf/util/probe-finder.c
··· 297 297 char sbuf[STRERR_BUFSIZE]; 298 298 int bsize, boffs, total; 299 299 int ret; 300 + char sign; 300 301 301 302 /* TODO: check all types */ 302 - if (cast && strcmp(cast, "string") != 0) { 303 + if (cast && strcmp(cast, "string") != 0 && 304 + strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) { 303 305 /* Non string type is OK */ 306 + /* and respect signedness cast */ 304 307 tvar->type = strdup(cast); 305 308 return (tvar->type == NULL) ? -ENOMEM : 0; 306 309 } ··· 364 361 return (tvar->type == NULL) ? -ENOMEM : 0; 365 362 } 366 363 364 + if (cast && (strcmp(cast, "u") == 0)) 365 + sign = 'u'; 366 + else if (cast && (strcmp(cast, "s") == 0)) 367 + sign = 's'; 368 + else 369 + sign = die_is_signed_type(&type) ? 's' : 'u'; 370 + 367 371 ret = dwarf_bytesize(&type); 368 372 if (ret <= 0) 369 373 /* No size ... try to use default type */ ··· 383 373 dwarf_diename(&type), MAX_BASIC_TYPE_BITS); 384 374 ret = MAX_BASIC_TYPE_BITS; 385 375 } 386 - ret = snprintf(buf, 16, "%c%d", 387 - die_is_signed_type(&type) ? 's' : 'u', ret); 376 + ret = snprintf(buf, 16, "%c%d", sign, ret); 388 377 389 378 formatted: 390 379 if (ret < 0 || ret >= 16) {
+5 -1
tools/perf/util/sort.c
··· 588 588 } else { 589 589 pevent_event_info(&seq, evsel->tp_format, &rec); 590 590 } 591 - return seq.buffer; 591 + /* 592 + * Trim the buffer, it starts at 4KB and we're not going to 593 + * add anything more to this buffer. 594 + */ 595 + return realloc(seq.buffer, seq.len + 1); 592 596 } 593 597 594 598 static int64_t
+2
tools/testing/nvdimm/test/nfit.c
··· 13 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 14 #include <linux/platform_device.h> 15 15 #include <linux/dma-mapping.h> 16 + #include <linux/workqueue.h> 16 17 #include <linux/libnvdimm.h> 17 18 #include <linux/vmalloc.h> 18 19 #include <linux/device.h> ··· 1475 1474 if (nfit_test->setup != nfit_test0_setup) 1476 1475 return 0; 1477 1476 1477 + flush_work(&acpi_desc->work); 1478 1478 nfit_test->setup_hotplug = 1; 1479 1479 nfit_test->setup(nfit_test); 1480 1480
+1 -1
tools/testing/selftests/powerpc/Makefile
··· 8 8 9 9 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") 10 10 11 - CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) 11 + CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) 12 12 13 13 export CFLAGS 14 14
+4 -13
virt/kvm/arm/vgic/vgic-init.c
··· 73 73 int i, vcpu_lock_idx = -1, ret; 74 74 struct kvm_vcpu *vcpu; 75 75 76 - mutex_lock(&kvm->lock); 77 - 78 - if (irqchip_in_kernel(kvm)) { 79 - ret = -EEXIST; 80 - goto out; 81 - } 76 + if (irqchip_in_kernel(kvm)) 77 + return -EEXIST; 82 78 83 79 /* 84 80 * This function is also called by the KVM_CREATE_IRQCHIP handler, ··· 83 87 * the proper checks already. 84 88 */ 85 89 if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && 86 - !kvm_vgic_global_state.can_emulate_gicv2) { 87 - ret = -ENODEV; 88 - goto out; 89 - } 90 + !kvm_vgic_global_state.can_emulate_gicv2) 91 + return -ENODEV; 90 92 91 93 /* 92 94 * Any time a vcpu is run, vcpu_load is called which tries to grab the ··· 132 138 vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); 133 139 mutex_unlock(&vcpu->mutex); 134 140 } 135 - 136 - out: 137 - mutex_unlock(&kvm->lock); 138 141 return ret; 139 142 } 140 143
+15 -1
virt/kvm/kvm_main.c
··· 696 696 { 697 697 struct kvm_device *dev, *tmp; 698 698 699 + /* 700 + * We do not need to take the kvm->lock here, because nobody else 701 + * has a reference to the struct kvm at this point and therefore 702 + * cannot access the devices list anyhow. 703 + */ 699 704 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { 700 705 list_del(&dev->vm_node); 701 706 dev->ops->destroy(dev); ··· 2837 2832 dev->ops = ops; 2838 2833 dev->kvm = kvm; 2839 2834 2835 + mutex_lock(&kvm->lock); 2840 2836 ret = ops->create(dev, cd->type); 2841 2837 if (ret < 0) { 2838 + mutex_unlock(&kvm->lock); 2842 2839 kfree(dev); 2843 2840 return ret; 2844 2841 } 2842 + list_add(&dev->vm_node, &kvm->devices); 2843 + mutex_unlock(&kvm->lock); 2844 + 2845 + if (ops->init) 2846 + ops->init(dev); 2845 2847 2846 2848 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2847 2849 if (ret < 0) { 2848 2850 ops->destroy(dev); 2851 + mutex_lock(&kvm->lock); 2852 + list_del(&dev->vm_node); 2853 + mutex_unlock(&kvm->lock); 2849 2854 return ret; 2850 2855 } 2851 2856 2852 - list_add(&dev->vm_node, &kvm->devices); 2853 2857 kvm_get_kvm(kvm); 2854 2858 cd->fd = ret; 2855 2859 return 0;