Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+1370 -799
+1
.mailmap
···
  James Bottomley <jejb@titanic.il.steeleye.com>
  James E Wilson <wilson@specifix.com>
  James Ketrenos <jketreno@io.(none)>
+ <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
  Jean Tourrilhes <jt@hpl.hp.com>
  Jeff Garzik <jgarzik@pretzel.yyz.us>
  Jens Axboe <axboe@suse.de>
+7
Documentation/arm/OMAP/README
···
+ This file contains documentation for running mainline
+ kernel on omaps.
+
+ KERNEL NEW DEPENDENCIES
+ v4.3+   Update is needed for custom .config files to make sure
+         CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
+         properly.
+12 -2
MAINTAINERS
···
  L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S: Maintained

- ARM/Allwinner A1X SoC support
+ ARM/Allwinner sunXi SoC support
  M: Maxime Ripard <maxime.ripard@free-electrons.com>
+ M: Chen-Yu Tsai <wens@csie.org>
  L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S: Maintained
- N: sun[x4567]i
+ N: sun[x456789]i

  ARM/Allwinner SoC Clock Support
  M: Emilio López <emilio@elopez.com.ar>
···
  L: linuxppc-dev@lists.ozlabs.org
  S: Maintained
  F: drivers/net/ethernet/freescale/ucc_geth*
+
+ FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
+ M: Claudiu Manoil <claudiu.manoil@freescale.com>
+ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/freescale/gianfar*
+ X: drivers/net/ethernet/freescale/gianfar_ptp.c
+ F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt

  FREESCALE QUICC ENGINE UCC UART DRIVER
  M: Timur Tabi <timur@tabi.org>
···
  ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
  M: Minchan Kim <minchan@kernel.org>
  M: Nitin Gupta <ngupta@vflare.org>
+ R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
  L: linux-mm@kvack.org
  S: Maintained
  F: mm/zsmalloc.c
+1 -1
Makefile
···
  VERSION = 4
  PATCHLEVEL = 3
  SUBLEVEL = 0
- EXTRAVERSION = -rc6
+ EXTRAVERSION = -rc7
  NAME = Blurry Fish Butt

  # *DOCUMENTATION*
+1
arch/arm/Kconfig
···

  config ARCH_RPC
      bool "RiscPC"
+     depends on MMU
      select ARCH_ACORN
      select ARCH_MAY_HAVE_PC_FDC
      select ARCH_SPARSEMEM_ENABLE
+2 -1
arch/arm/boot/dts/am57xx-beagle-x15.dts
···
      /* SMPS9 unused */

      ldo1_reg: ldo1 {
-         /* VDD_SD */
+         /* VDD_SD / VDDSHV8 */
          regulator-name = "ldo1";
          regulator-min-microvolt = <1800000>;
          regulator-max-microvolt = <3300000>;
          regulator-boot-on;
+         regulator-always-on;
      };

      ldo2_reg: ldo2 {
+1 -1
arch/arm/boot/dts/armada-385-db-ap.dts
···

  / {
      model = "Marvell Armada 385 Access Point Development Board";
-     compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
+     compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";

      chosen {
          stdout-path = "serial1:115200n8";
+3 -3
arch/arm/boot/dts/berlin2q.dtsi
···
      };

      usb_phy2: phy@a2f400 {
-         compatible = "marvell,berlin2-usb-phy";
+         compatible = "marvell,berlin2cd-usb-phy";
          reg = <0xa2f400 0x128>;
          #phy-cells = <0>;
          resets = <&chip_rst 0x104 14>;
···
      };

      usb_phy0: phy@b74000 {
-         compatible = "marvell,berlin2-usb-phy";
+         compatible = "marvell,berlin2cd-usb-phy";
          reg = <0xb74000 0x128>;
          #phy-cells = <0>;
          resets = <&chip_rst 0x104 12>;
···
      };

      usb_phy1: phy@b78000 {
-         compatible = "marvell,berlin2-usb-phy";
+         compatible = "marvell,berlin2cd-usb-phy";
          reg = <0xb78000 0x128>;
          #phy-cells = <0>;
          resets = <&chip_rst 0x104 13>;
+5
arch/arm/boot/dts/exynos5420-peach-pit.dts
···
      };
  };

+ &pmu_system_controller {
+     assigned-clocks = <&pmu_system_controller 0>;
+     assigned-clock-parents = <&clock CLK_FIN_PLL>;
+ };
+
  &rtc {
      status = "okay";
      clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
+5
arch/arm/boot/dts/exynos5800-peach-pi.dts
···
      };
  };

+ &pmu_system_controller {
+     assigned-clocks = <&pmu_system_controller 0>;
+     assigned-clock-parents = <&clock CLK_FIN_PLL>;
+ };
+
  &rtc {
      status = "okay";
      clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
+2 -2
arch/arm/boot/dts/imx7d.dtsi
···
          status = "disabled";
      };

-     uart2: serial@30870000 {
+     uart2: serial@30890000 {
          compatible = "fsl,imx7d-uart",
                       "fsl,imx6q-uart";
-         reg = <0x30870000 0x10000>;
+         reg = <0x30890000 0x10000>;
          interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
          clocks = <&clks IMX7D_UART2_ROOT_CLK>,
                   <&clks IMX7D_UART2_ROOT_CLK>;
+1 -1
arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
···

  / {
      model = "LogicPD Zoom DM3730 Torpedo Development Kit";
-     compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx";
+     compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3";

      gpio_keys {
          compatible = "gpio-keys";
+12 -11
arch/arm/boot/dts/meson.dtsi
···

      timer@c1109940 {
          compatible = "amlogic,meson6-timer";
-         reg = <0xc1109940 0x14>;
+         reg = <0xc1109940 0x18>;
          interrupts = <0 10 1>;
      };

···
      wdt: watchdog@c1109900 {
          compatible = "amlogic,meson6-wdt";
          reg = <0xc1109900 0x8>;
+         interrupts = <0 0 1>;
      };

      uart_AO: serial@c81004c0 {
          compatible = "amlogic,meson-uart";
-         reg = <0xc81004c0 0x14>;
+         reg = <0xc81004c0 0x18>;
          interrupts = <0 90 1>;
          clocks = <&clk81>;
          status = "disabled";
      };

-     uart_A: serial@c81084c0 {
+     uart_A: serial@c11084c0 {
          compatible = "amlogic,meson-uart";
-         reg = <0xc81084c0 0x14>;
-         interrupts = <0 90 1>;
+         reg = <0xc11084c0 0x18>;
+         interrupts = <0 26 1>;
          clocks = <&clk81>;
          status = "disabled";
      };

-     uart_B: serial@c81084dc {
+     uart_B: serial@c11084dc {
          compatible = "amlogic,meson-uart";
-         reg = <0xc81084dc 0x14>;
-         interrupts = <0 90 1>;
+         reg = <0xc11084dc 0x18>;
+         interrupts = <0 75 1>;
          clocks = <&clk81>;
          status = "disabled";
      };

-     uart_C: serial@c8108700 {
+     uart_C: serial@c1108700 {
          compatible = "amlogic,meson-uart";
-         reg = <0xc8108700 0x14>;
-         interrupts = <0 90 1>;
+         reg = <0xc1108700 0x18>;
+         interrupts = <0 93 1>;
          clocks = <&clk81>;
          status = "disabled";
      };
+1 -1
arch/arm/boot/dts/omap3-evm-37xx.dts
···

  / {
      model = "TI OMAP37XX EVM (TMDSEVM3730)";
-     compatible = "ti,omap3-evm-37xx", "ti,omap36xx";
+     compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3";

      memory {
          device_type = "memory";
+1 -1
arch/arm/boot/dts/ste-hrefv60plus.dtsi
···
      /* VMMCI level-shifter enable */
      default_hrefv60_cfg2 {
          pins = "GPIO169_D22";
-         ste,config = <&gpio_out_lo>;
+         ste,config = <&gpio_out_hi>;
      };
      /* VMMCI level-shifter voltage select */
      default_hrefv60_cfg3 {
+2
arch/arm/boot/dts/tegra114.dtsi
···
      gpio-controller;
      #interrupt-cells = <2>;
      interrupt-controller;
+     /*
      gpio-ranges = <&pinmux 0 0 246>;
+     */
  };

  apbmisc@70000800 {
+2
arch/arm/boot/dts/tegra124.dtsi
···
      gpio-controller;
      #interrupt-cells = <2>;
      interrupt-controller;
+     /*
      gpio-ranges = <&pinmux 0 0 251>;
+     */
  };

  apbdma: dma@0,60020000 {
+2
arch/arm/boot/dts/tegra20.dtsi
···
      gpio-controller;
      #interrupt-cells = <2>;
      interrupt-controller;
+     /*
      gpio-ranges = <&pinmux 0 0 224>;
+     */
  };

  apbmisc@70000800 {
+2
arch/arm/boot/dts/tegra30.dtsi
···
      gpio-controller;
      #interrupt-cells = <2>;
      interrupt-controller;
+     /*
      gpio-ranges = <&pinmux 0 0 248>;
+     */
  };

  apbmisc@70000800 {
+1 -1
arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
···
  };

  &ethsc {
-     interrupts = <0 50 4>;
+     interrupts = <0 52 4>;
  };

  &serial0 {
+1
arch/arm/kvm/Kconfig
···
      depends on MMU && OF
      select PREEMPT_NOTIFIERS
      select ANON_INODES
+     select ARM_GIC
      select HAVE_KVM_CPU_RELAX_INTERCEPT
      select HAVE_KVM_ARCH_TLB_FLUSH_ALL
      select KVM_MMIO
+1 -1
arch/arm/kvm/arm.c
···
       */
      err = kvm_timer_hyp_init();
      if (err)
-         goto out_free_mappings;
+         goto out_free_context;

  #ifndef CONFIG_HOTPLUG_CPU
      free_boot_hyp_pgd();
+3 -5
arch/arm/mach-exynos/pm_domains.c
···
      args.args_count = 0;
      child_domain = of_genpd_get_from_provider(&args);
      if (IS_ERR(child_domain))
-         goto next_pd;
+         continue;

      if (of_parse_phandle_with_args(np, "power-domains",
                                     "#power-domain-cells", 0, &args) != 0)
-         goto next_pd;
+         continue;

      parent_domain = of_genpd_get_from_provider(&args);
      if (IS_ERR(parent_domain))
-         goto next_pd;
+         continue;

      if (pm_genpd_add_subdomain(parent_domain, child_domain))
          pr_warn("%s failed to add subdomain: %s\n",
···
      else
          pr_info("%s has as child subdomain: %s.\n",
                  parent_domain->name, child_domain->name);
- next_pd:
-     of_node_put(np);
  }

  return 0;
+2
arch/arm/mach-omap2/Kconfig
···
      select OMAP_INTERCONNECT
      select OMAP_INTERCONNECT_BARRIER
      select PM_OPP if PM
+     select ZONE_DMA if ARM_LPAE

  config SOC_AM33XX
      bool "TI AM33XX"
···
      select OMAP_INTERCONNECT
      select OMAP_INTERCONNECT_BARRIER
      select PM_OPP if PM
+     select ZONE_DMA if ARM_LPAE

  config ARCH_OMAP2PLUS
      bool
+10
arch/arm/mach-omap2/board-generic.c
···
  MACHINE_END

  static const char *const omap36xx_boards_compat[] __initconst = {
+     "ti,omap3630",
      "ti,omap36xx",
      NULL,
  };
···
  };

  DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
+ #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+     .dma_zone_size  = SZ_2G,
+ #endif
      .reserve        = omap_reserve,
      .smp            = smp_ops(omap4_smp_ops),
      .map_io         = omap5_map_io,
···
  };

  DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
+ #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+     .dma_zone_size  = SZ_2G,
+ #endif
      .reserve        = omap_reserve,
      .smp            = smp_ops(omap4_smp_ops),
      .map_io         = dra7xx_map_io,
···
  };

  DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
+ #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
+     .dma_zone_size  = SZ_2G,
+ #endif
      .reserve        = omap_reserve,
      .map_io         = dra7xx_map_io,
      .init_early     = dra7xx_init_early,
+8 -1
arch/arm/mach-omap2/pdata-quirks.c
···

  void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
  {
-     omap_sdrc_init(NULL, NULL);
+     /*
+      * We still need this for omap2420 and omap3 PM to work, others are
+      * using drivers/misc/sram.c already.
+      */
+     if (of_machine_is_compatible("ti,omap2420") ||
+         of_machine_is_compatible("ti,omap3"))
+         omap_sdrc_init(NULL, NULL);
+
      pdata_quirks_check(auxdata_quirks);
      of_platform_populate(NULL, omap_dt_match_table,
                           omap_auxdata_lookup, NULL);
+5 -4
arch/arm/mach-pxa/pxa3xx.c
···
  #define PECR_IS(n)      ((1 << ((n) * 2)) << 29)

  extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
- #ifdef CONFIG_PM
-
- #define ISRAM_START     0x5c000000
- #define ISRAM_SIZE      SZ_256K

  /*
   * NAND NFC: DFI bus arbitration subset
···
  #define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0))
  #define NDCR_ND_ARB_EN (1 << 12)
  #define NDCR_ND_ARB_CNTL (1 << 19)
+
+ #ifdef CONFIG_PM
+
+ #define ISRAM_START     0x5c000000
+ #define ISRAM_SIZE      SZ_256K

  static void __iomem *sram;
  static unsigned long wakeup_src;
+1 -1
arch/arm/plat-orion/common.c
···

  d->netdev = &orion_ge00.dev;
  for (i = 0; i < d->nr_chips; i++)
-     d->chip[i].host_dev = &orion_ge00_shared.dev;
+     d->chip[i].host_dev = &orion_ge_mvmdio.dev;
  orion_switch_device.dev.platform_data = d;

  platform_device_register(&orion_switch_device);
+13 -4
arch/arm/vdso/vdsomunge.c
···
   * it does.
   */

- #include <byteswap.h>
  #include <elf.h>
  #include <errno.h>
  #include <fcntl.h>
···
  #include <sys/stat.h>
  #include <sys/types.h>
  #include <unistd.h>
+
+ #define swab16(x) \
+     ((((x) & 0x00ff) << 8) | \
+      (((x) & 0xff00) >> 8))
+
+ #define swab32(x) \
+     ((((x) & 0x000000ff) << 24) | \
+      (((x) & 0x0000ff00) << 8) | \
+      (((x) & 0x00ff0000) >> 8) | \
+      (((x) & 0xff000000) >> 24))

  #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  #define HOST_ORDER ELFDATA2LSB
···
  static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
  {
-     return swap ? bswap_32(word) : word;
+     return swap ? swab32(word) : word;
  }

  static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
  {
-     return swap ? bswap_16(half) : half;
+     return swap ? swab16(half) : half;
  }

  static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
  {
-     *dst = swap ? bswap_32(val) : val;
+     *dst = swap ? swab32(val) : val;
  }

  int main(int argc, char **argv)
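Dropping <byteswap.h> removes the tool's only glibc-specific dependency, so it now builds with musl and other C libraries too; the replacement macros are plain C. A standalone round-trip check of the same helpers (hypothetical test program, not part of the tree):

    #include <assert.h>
    #include <stdio.h>

    #define swab16(x) \
        ((((x) & 0x00ff) << 8) | \
         (((x) & 0xff00) >> 8))

    #define swab32(x) \
        ((((x) & 0x000000ff) << 24) | \
         (((x) & 0x0000ff00) <<  8) | \
         (((x) & 0x00ff0000) >>  8) | \
         (((x) & 0xff000000) >> 24))

    int main(void)
    {
        /* swapping twice must return the original value */
        assert(swab16(0x1234) == 0x3412);
        assert(swab32(0x12345678u) == 0x78563412u);
        assert(swab32(swab32(0xdeadbeefu)) == 0xdeadbeefu);
        printf("byte-swap helpers OK\n");
        return 0;
    }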
+9 -7
arch/arm64/kernel/armv8_deprecated.c
···
      __asm__ __volatile__( \
      ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
                  CONFIG_ARM64_PAN) \
-     " mov       %w2, %w1\n" \
-     "0: ldxr"B" %w1, [%3]\n" \
-     "1: stxr"B" %w0, %w2, [%3]\n" \
+     "0: ldxr"B" %w2, [%3]\n" \
+     "1: stxr"B" %w0, %w1, [%3]\n" \
      " cbz       %w0, 2f\n" \
      " mov       %w0, %w4\n" \
+     " b         3f\n" \
      "2:\n" \
+     " mov       %w1, %w2\n" \
+     "3:\n" \
      " .pushsection .fixup,\"ax\"\n" \
      " .align    2\n" \
-     "3: mov     %w0, %w5\n" \
-     " b         2b\n" \
+     "4: mov     %w0, %w5\n" \
+     " b         3b\n" \
      " .popsection" \
      " .pushsection __ex_table,\"a\"\n" \
      " .align    3\n" \
-     " .quad     0b, 3b\n" \
-     " .quad     1b, 3b\n" \
+     " .quad     0b, 4b\n" \
+     " .quad     1b, 4b\n" \
      " .popsection\n" \
      ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
                  CONFIG_ARM64_PAN) \
+12 -2
arch/arm64/kernel/efi-stub.c
···
  unsigned long kernel_size, kernel_memsize = 0;
  unsigned long nr_pages;
  void *old_image_addr = (void *)*image_addr;
+ unsigned long preferred_offset;
+
+ /*
+  * The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
+  * a 2 MB aligned base, which itself may be lower than dram_base, as
+  * long as the resulting offset equals or exceeds it.
+  */
+ preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ if (preferred_offset < dram_base)
+     preferred_offset += SZ_2M;

  /* Relocate the image, if required. */
  kernel_size = _edata - _text;
- if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ if (*image_addr != preferred_offset) {
      kernel_memsize = kernel_size + (_end - _edata);

      /*
···
       * Mustang), we can still place the kernel at the address
       * 'dram_base + TEXT_OFFSET'.
       */
-     *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+     *image_addr = *reserve_addr = preferred_offset;
      nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
                 EFI_PAGE_SIZE;
      status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
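The new placement rule is easiest to check with concrete numbers. A minimal host-side sketch of the same arithmetic (round_down(), SZ_2M and the sample TEXT_OFFSET stand in for the kernel's definitions; the addresses are made up):

    #include <stdio.h>

    #define SZ_2M       0x200000UL
    #define TEXT_OFFSET 0x80000UL /* typical arm64 default */

    /* round v down to a power-of-two boundary, like the kernel's round_down() */
    #define round_down(v, a) ((v) & ~((a) - 1))

    static unsigned long preferred_offset(unsigned long dram_base)
    {
        unsigned long pref = round_down(dram_base, SZ_2M) + TEXT_OFFSET;

        /* never place the Image below the start of DRAM */
        if (pref < dram_base)
            pref += SZ_2M;
        return pref;
    }

    int main(void)
    {
        /* DRAM starting on a 2 MB boundary: Image goes at base + TEXT_OFFSET */
        printf("%#lx -> %#lx\n", 0x80000000UL, preferred_offset(0x80000000UL));
        /* DRAM starting mid-way through a 2 MB block: bump up one block */
        printf("%#lx -> %#lx\n", 0x80100000UL, preferred_offset(0x80100000UL));
        return 0;
    }

Both results are TEXT_OFFSET past a 2 MB boundary and never below the start of DRAM, which is all the stub requires.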
+1 -5
arch/arm64/kernel/stacktrace.c
···

  frame->sp = fp + 0x10;
  frame->fp = *(unsigned long *)(fp);
- /*
-  * -4 here because we care about the PC at time of bl,
-  * not where the return will go.
-  */
- frame->pc = *(unsigned long *)(fp + 8) - 4;
+ frame->pc = *(unsigned long *)(fp + 8);

  return 0;
  }
+13 -9
arch/arm64/kernel/suspend.c
···
  if (ret == 0) {
      /*
       * We are resuming from reset with TTBR0_EL1 set to the
-      * idmap to enable the MMU; restore the active_mm mappings in
-      * TTBR0_EL1 unless the active_mm == &init_mm, in which case
-      * the thread entered cpu_suspend with TTBR0_EL1 set to
-      * reserved TTBR0 page tables and should be restored as such.
+      * idmap to enable the MMU; set the TTBR0 to the reserved
+      * page tables to prevent speculative TLB allocations, flush
+      * the local tlb and set the default tcr_el1.t0sz so that
+      * the TTBR0 address space set-up is properly restored.
+      * If the current active_mm != &init_mm we entered cpu_suspend
+      * with mappings in TTBR0 that must be restored, so we switch
+      * them back to complete the address space configuration
+      * restoration before returning.
       */
-     if (mm == &init_mm)
-         cpu_set_reserved_ttbr0();
-     else
-         cpu_switch_mm(mm->pgd, mm);
-
+     cpu_set_reserved_ttbr0();
      flush_tlb_all();
+     cpu_set_default_tcr_t0sz();
+
+     if (mm != &init_mm)
+         cpu_switch_mm(mm->pgd, mm);

      /*
       * Restore per-cpu offset before any kernel
+1 -1
arch/ia64/include/asm/unistd.h
···

- #define NR_syscalls 321 /* length of syscall table */
+ #define NR_syscalls 322 /* length of syscall table */

  /*
   * The following defines stop scripts/checksyscalls.sh from complaining about
+1
arch/ia64/include/uapi/asm/unistd.h
···
  #define __NR_execveat        1342
  #define __NR_userfaultfd     1343
  #define __NR_membarrier      1344
+ #define __NR_kcmp            1345

  #endif /* _UAPI_ASM_IA64_UNISTD_H */
+1
arch/ia64/kernel/entry.S
···
  data8 sys_execveat
  data8 sys_userfaultfd
  data8 sys_membarrier
+ data8 sys_kcmp            // 1345

  .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
-7
arch/powerpc/include/asm/cache.h
···

  #ifdef __KERNEL__

- #include <asm/reg.h>

  /* bytes per L1 cache line */
  #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
···
  };

  extern struct ppc64_caches ppc64_caches;
-
- static inline void logmpp(u64 x)
- {
-     asm volatile(PPC_LOGMPP(R1) : : "r" (x));
- }
-
  #endif /* __powerpc64__ && ! __ASSEMBLY__ */

  #if defined(__ASSEMBLY__)
-2
arch/powerpc/include/asm/kvm_host.h
···
  u32 arch_compat;
  ulong pcr;
  ulong dpdes;        /* doorbell state (POWER8) */
- void *mpp_buffer;   /* Micro Partition Prefetch buffer */
- bool mpp_buffer_is_valid;
  ulong conferring_threads;
  };
-17
arch/powerpc/include/asm/ppc-opcode.h
···
  #define PPC_INST_ISEL          0x7c00001e
  #define PPC_INST_ISEL_MASK     0xfc00003e
  #define PPC_INST_LDARX         0x7c0000a8
- #define PPC_INST_LOGMPP        0x7c0007e4
  #define PPC_INST_LSWI          0x7c0004aa
  #define PPC_INST_LSWX          0x7c00042a
  #define PPC_INST_LWARX         0x7c000028
···
  #define __PPC_EH(eh)  0
  #endif

- /* POWER8 Micro Partition Prefetch (MPP) parameters */
- /* Address mask is common for LOGMPP instruction and MPPR SPR */
- #define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
-
- /* Bits 60 and 61 of MPP SPR should be set to one of the following */
- /* Aborting the fetch is indeed setting 00 in the table size bits */
- #define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
- #define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-
- /* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
- #define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
- #define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
- #define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
-
  /* Deal with instructions that older assemblers aren't aware of */
  #define PPC_DCBAL(a, b)   stringify_in_c(.long PPC_INST_DCBAL | \
                                           __PPC_RA(a) | __PPC_RB(b))
···
  #define PPC_LDARX(t, a, b, eh)  stringify_in_c(.long PPC_INST_LDARX | \
                                           ___PPC_RT(t) | ___PPC_RA(a) | \
                                           ___PPC_RB(b) | __PPC_EH(eh))
- #define PPC_LOGMPP(b)           stringify_in_c(.long PPC_INST_LOGMPP | \
-                                          __PPC_RB(b))
  #define PPC_LWARX(t, a, b, eh)  stringify_in_c(.long PPC_INST_LWARX | \
                                           ___PPC_RT(t) | ___PPC_RA(a) | \
                                           ___PPC_RB(b) | __PPC_EH(eh))
-1
arch/powerpc/include/asm/reg.h
···
  #define   CTRL_TE        0x00c00000  /* thread enable */
  #define   CTRL_RUNLATCH  0x1
  #define SPRN_DAWR        0xB4
- #define SPRN_MPPR        0xB8  /* Micro Partition Prefetch Register */
  #define SPRN_RPR         0xBA  /* Relative Priority Register */
  #define SPRN_CIABR       0xBB
  #define   CIABR_PRIV     0x3
+1 -1
arch/powerpc/kernel/dma.c
···
      dev->coherent_dma_mask = mask;
      return 0;
  }
- EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+ EXPORT_SYMBOL(dma_set_coherent_mask);

  #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+3
arch/powerpc/kernel/rtas.c
···
  if (!capable(CAP_SYS_ADMIN))
      return -EPERM;

+ if (!rtas.entry)
+     return -EINVAL;
+
  if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
      return -EFAULT;
+1 -54
arch/powerpc/kvm/book3s_hv.c
···

  #include <asm/reg.h>
  #include <asm/cputable.h>
- #include <asm/cache.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
  #include <asm/uaccess.h>
···
  #define TB_NIL   (~(u64)0)

  static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
-
- #if defined(CONFIG_PPC_64K_PAGES)
- #define MPP_BUFFER_ORDER    0
- #elif defined(CONFIG_PPC_4K_PAGES)
- #define MPP_BUFFER_ORDER    3
- #endif

  static int dynamic_mt_modes = 6;
  module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
···
  vcore->kvm = kvm;
  INIT_LIST_HEAD(&vcore->preempt_list);

- vcore->mpp_buffer_is_valid = false;
-
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
-     vcore->mpp_buffer = (void *)__get_free_pages(
-                             GFP_KERNEL|__GFP_ZERO,
-                             MPP_BUFFER_ORDER);
-
  return vcore;
  }
···
          }
      }
      return 1;
- }
-
- static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
- {
-     phys_addr_t phy_addr, mpp_addr;
-
-     phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-     mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-     mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-     logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-     vc->mpp_buffer_is_valid = true;
- }
-
- static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
- {
-     phys_addr_t phy_addr, mpp_addr;
-
-     phy_addr = virt_to_phys(vc->mpp_buffer);
-     mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-     /* We must abort any in-progress save operations to ensure
-      * the table is valid so that prefetch engine knows when to
-      * stop prefetching. */
-     logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-     mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
  }

  /*
···
  srcu_idx = srcu_read_lock(&vc->kvm->srcu);

- if (vc->mpp_buffer_is_valid)
-     kvmppc_start_restoring_l2_cache(vc);
-
  __kvmppc_vcore_entry();
-
- if (vc->mpp_buffer)
-     kvmppc_start_saving_l2_cache(vc);

  srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
···
  {
  long int i;

- for (i = 0; i < KVM_MAX_VCORES; ++i) {
-     if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-         struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-         free_pages((unsigned long)vc->mpp_buffer,
-                    MPP_BUFFER_ORDER);
-     }
+ for (i = 0; i < KVM_MAX_VCORES; ++i)
      kfree(kvm->arch.vcores[i]);
- }
  kvm->arch.online_vcores = 0;
  }
+24 -5
arch/powerpc/platforms/powernv/smp.c
···
   * so clear LPCR:PECE1. We keep PECE2 enabled.
   */
  mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+ /*
+  * Hard-disable interrupts, and then clear irq_happened flags
+  * that we can safely ignore while off-line, since they
+  * are for things for which we do no processing when off-line
+  * (or in the case of HMI, all the processing we need to do
+  * is done in lower-level real-mode code).
+  */
+ hard_irq_disable();
+ local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
  while (!generic_check_cpu_restart(cpu)) {
+     /*
+      * Clear IPI flag, since we don't handle IPIs while
+      * offline, except for those when changing micro-threading
+      * mode, which are handled explicitly below, and those
+      * for coming online, which are handled via
+      * generic_check_cpu_restart() calls.
+      */
+     kvmppc_set_host_ipi(cpu, 0);

      ppc64_runlatch_off();
···
       * having finished executing in a KVM guest, then srr1
       * contains 0.
       */
-     if ((srr1 & wmask) == SRR1_WAKEEE) {
+     if (((srr1 & wmask) == SRR1_WAKEEE) ||
+         (local_paca->irq_happened & PACA_IRQ_EE)) {
          icp_native_flush_interrupt();
-         local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-         smp_mb();
      } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
          unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
          asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-         kvmppc_set_host_ipi(cpu, 0);
      }
+     local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+     smp_mb();

      if (cpu_core_split_required())
          continue;

-     if (!generic_check_cpu_restart(cpu))
+     if (srr1 && !generic_check_cpu_restart(cpu))
          DBG("CPU%d Unexpected exit while offline !\n", cpu);
  }
  mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
+2 -2
arch/um/Makefile
···

  USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
      $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \
-     -D_FILE_OFFSET_BITS=64 -idirafter include \
-     -D__KERNEL__ -D__UM_HOST__
+     -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+     -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__

  #This will adjust *FLAGS accordingly to the platform.
  include $(ARCH_DIR)/Makefile-os-$(OS)
+1 -1
arch/um/kernel/trap.c
···
      show_regs(container_of(regs, struct pt_regs, regs));
      panic("Segfault with no mm");
  }
- else if (!is_user && address < TASK_SIZE) {
+ else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) {
      show_regs(container_of(regs, struct pt_regs, regs));
      panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx",
            address, ip);
+3 -3
arch/um/os-Linux/helper.c
··· 96 96 "ret = %d\n", -n); 97 97 ret = n; 98 98 } 99 - CATCH_EINTR(waitpid(pid, NULL, __WCLONE)); 99 + CATCH_EINTR(waitpid(pid, NULL, __WALL)); 100 100 } 101 101 102 102 out_free2: ··· 129 129 return err; 130 130 } 131 131 if (stack_out == NULL) { 132 - CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE)); 132 + CATCH_EINTR(pid = waitpid(pid, &status, __WALL)); 133 133 if (pid < 0) { 134 134 err = -errno; 135 135 printk(UM_KERN_ERR "run_helper_thread - wait failed, " ··· 148 148 int helper_wait(int pid) 149 149 { 150 150 int ret, status; 151 - int wflags = __WCLONE; 151 + int wflags = __WALL; 152 152 153 153 CATCH_EINTR(ret = waitpid(pid, &status, wflags)); 154 154 if (ret < 0) {
+6 -2
arch/x86/boot/compressed/eboot.c
···
  bool conout_found = false;
  void *dummy = NULL;
  u32 h = handles[i];
+ u64 current_fb_base;

  status = efi_call_early(handle_protocol, h,
                          proto, (void **)&gop32);
···
  if (status == EFI_SUCCESS)
      conout_found = true;

- status = __gop_query32(gop32, &info, &size, &fb_base);
+ status = __gop_query32(gop32, &info, &size, &current_fb_base);
  if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
      /*
       * Systems that use the UEFI Console Splitter may
···
      pixel_format = info->pixel_format;
      pixel_info = info->pixel_information;
      pixels_per_scan_line = info->pixels_per_scan_line;
+     fb_base = current_fb_base;

      /*
       * Once we've found a GOP supporting ConOut,
···
  bool conout_found = false;
  void *dummy = NULL;
  u64 h = handles[i];
+ u64 current_fb_base;

  status = efi_call_early(handle_protocol, h,
                          proto, (void **)&gop64);
···
  if (status == EFI_SUCCESS)
      conout_found = true;

- status = __gop_query64(gop64, &info, &size, &fb_base);
+ status = __gop_query64(gop64, &info, &size, &current_fb_base);
  if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
      /*
       * Systems that use the UEFI Console Splitter may
···
      pixel_format = info->pixel_format;
      pixel_info = info->pixel_information;
      pixels_per_scan_line = info->pixels_per_scan_line;
+     fb_base = current_fb_base;

      /*
       * Once we've found a GOP supporting ConOut,
+2 -3
arch/x86/include/asm/string_64.h
···
     function. */

  #define __HAVE_ARCH_MEMCPY 1
+ extern void *memcpy(void *to, const void *from, size_t len);
  extern void *__memcpy(void *to, const void *from, size_t len);

  #ifndef CONFIG_KMEMCHECK
- #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
- extern void *memcpy(void *to, const void *from, size_t len);
- #else
+ #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
  #define memcpy(dst, src, len)          \
  ({                                     \
      size_t __len = (len);              \
+4
arch/x86/kernel/apic/io_apic.c
···
  struct irq_data *irq_data;
  struct mp_chip_data *data;
  struct irq_alloc_info *info = arg;
+ unsigned long flags;

  if (!info || nr_irqs > 1)
      return -EINVAL;
···
  cfg = irqd_cfg(irq_data);
  add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+
+ local_irq_save(flags);
  if (info->ioapic_entry)
      mp_setup_entry(cfg, data, info->ioapic_entry);
  mp_register_handler(virq, data->trigger);
  if (virq < nr_legacy_irqs())
      legacy_pic->mask(virq);
+ local_irq_restore(flags);

  apic_printk(APIC_VERBOSE, KERN_DEBUG
      "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
+3 -3
arch/x86/kernel/process.c
···
  if (sp < bottom || sp > top)
      return 0;

- fp = READ_ONCE(*(unsigned long *)sp);
+ fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
  do {
      if (fp < bottom || fp > top)
          return 0;
-     ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+     ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
      if (!in_sched_functions(ip))
          return ip;
-     fp = READ_ONCE(*(unsigned long *)fp);
+     fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
  } while (count++ < 16 && p->state != TASK_RUNNING);
  return 0;
  }
+8
arch/x86/kernel/setup.c
···
  clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                  swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                  KERNEL_PGD_PTRS);
+
+ /*
+  * sync back low identity map too.  It is used for example
+  * in the 32-bit EFI stub.
+  */
+ clone_pgd_range(initial_page_table,
+                 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                 KERNEL_PGD_PTRS);
  #endif

  tboot_probe();
+11 -4
arch/x86/kernel/smpboot.c
···
   */
  #define UDELAY_10MS_DEFAULT 10000

- static unsigned int init_udelay = UDELAY_10MS_DEFAULT;
+ static unsigned int init_udelay = INT_MAX;

  static int __init cpu_init_udelay(char *str)
  {
···
  static void __init smp_quirk_init_udelay(void)
  {
      /* if cmdline changed it from default, leave it alone */
-     if (init_udelay != UDELAY_10MS_DEFAULT)
+     if (init_udelay != INT_MAX)
          return;

      /* if modern processor, use no delay */
      if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-         ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+         ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
          init_udelay = 0;
+         return;
+     }
+
+     /* else, use legacy delay */
+     init_udelay = UDELAY_10MS_DEFAULT;
  }

  /*
···
  /*
   * Give the other CPU some time to accept the IPI.
   */
- if (init_udelay)
+ if (init_udelay == 0)
+     udelay(10);
+ else
      udelay(300);

  pr_debug("Startup point 1\n");
···
  /*
   * Give the other CPU some time to accept the IPI.
   */
- if (init_udelay)
+ if (init_udelay == 0)
+     udelay(10);
+ else
      udelay(200);

  if (maxlvt > 3)   /* Due to the Pentium erratum 3AP. */
+4 -1
arch/x86/um/ldt.c
···
  #include <skas.h>
  #include <sysdep/tls.h>

- extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
+ static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
+ {
+     return syscall(__NR_modify_ldt, func, ptr, bytecount);
+ }

  static long write_ldt_entry(struct mm_id *mm_idp, int func,
                              struct user_desc *desc, void **addr, int done)
+1 -1
block/blk-core.c
···
  q->queue_lock = &q->__queue_lock;
  spin_unlock_irq(lock);

- bdi_destroy(&q->backing_dev_info);
+ bdi_unregister(&q->backing_dev_info);

  /* @q is and will stay empty, shutdown and put */
  blk_put_queue(q);
+22 -9
block/blk-lib.c
···
  bio_put(bio);
  }

- /*
-  * Ensure that max discard sectors doesn't overflow bi_size and hopefully
-  * it is of the proper granularity as long as the granularity is a power
-  * of two.
-  */
- #define MAX_BIO_SECTORS ((1U << 31) >> 9)
-
  /**
   * blkdev_issue_discard - queue a discard
   * @bdev: blockdev to issue discard for
···
  DECLARE_COMPLETION_ONSTACK(wait);
  struct request_queue *q = bdev_get_queue(bdev);
  int type = REQ_WRITE | REQ_DISCARD;
+ unsigned int granularity;
+ int alignment;
  struct bio_batch bb;
  struct bio *bio;
  int ret = 0;
···
  if (!blk_queue_discard(q))
      return -EOPNOTSUPP;
+
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

  if (flags & BLKDEV_DISCARD_SECURE) {
      if (!blk_queue_secdiscard(q))
···
  blk_start_plug(&plug);
  while (nr_sects) {
      unsigned int req_sects;
-     sector_t end_sect;
+     sector_t end_sect, tmp;

      bio = bio_alloc(gfp_mask, 1);
      if (!bio) {
···
          break;
      }

-     req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
+     /* Make sure bi_size doesn't overflow */
+     req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+     /*
+      * If splitting a request, and the next starting sector would be
+      * misaligned, stop the discard at the previous aligned sector.
+      */
      end_sect = sector + req_sects;
+     tmp = end_sect;
+     if (req_sects < nr_sects &&
+         sector_div(tmp, granularity) != alignment) {
+         end_sect = end_sect - alignment;
+         sector_div(end_sect, granularity);
+         end_sect = end_sect * granularity + alignment;
+         req_sects = end_sect - sector;
+     }

      bio->bi_iter.bi_sector = sector;
      bio->bi_end_io = bio_batch_end_io;
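The alignment clamp is pure integer arithmetic: stop each split at the last sector that leaves the next request starting on a granularity boundary (shifted by the device's discard alignment). The same computation in a standalone sketch, with sector_div() reduced to ordinary 64-bit division (names and sample values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a discard split so the next request starts granularity-aligned
     * (plus the device's discard alignment), mirroring blkdev_issue_discard(). */
    static uint64_t split_end(uint64_t sector, uint64_t nr_sects,
                              uint64_t max_sects, uint32_t granularity,
                              uint32_t alignment)
    {
        uint64_t req_sects = nr_sects < max_sects ? nr_sects : max_sects;
        uint64_t end_sect = sector + req_sects;

        if (req_sects < nr_sects && (end_sect % granularity) != alignment) {
            /* step back to the previous aligned boundary */
            end_sect = ((end_sect - alignment) / granularity) * granularity
                       + alignment;
        }
        return end_sect;
    }

    int main(void)
    {
        /* 8-sector granularity, alignment 0, splitting a long range at 13:
         * the first piece stops at sector 8 so the next one starts aligned. */
        printf("end = %llu\n",
               (unsigned long long)split_end(0, 100, 13, 8, 0));
        return 0;
    }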
+1
block/blk-mq-tag.c
···
  {
      bt_free(&tags->bitmap_tags);
      bt_free(&tags->breserved_tags);
+     free_cpumask_var(tags->cpumask);
      kfree(tags);
  }
+1 -3
block/blk-mq.c
···
  int i;

  for (i = 0; i < set->nr_hw_queues; i++) {
-     if (set->tags[i]) {
+     if (set->tags[i])
          blk_mq_free_rq_map(set, set->tags[i], i);
-         free_cpumask_var(set->tags[i]->cpumask);
-     }
  }

  kfree(set->tags);
+1
block/blk-sysfs.c
···
  struct request_queue *q =
      container_of(kobj, struct request_queue, kobj);

+ bdi_exit(&q->backing_dev_info);
  blkcg_exit_queue(q);

  if (q->elevator) {
+1 -1
crypto/ablkcipher.c
···
  err:
      if (err != -EAGAIN)
          break;
-     if (signal_pending(current)) {
+     if (fatal_signal_pending(current)) {
          err = -EINTR;
          break;
      }
+1 -1
crypto/algapi.c
···
      crypto_alg_tested(larval->alg.cra_driver_name, 0);
  }

- err = wait_for_completion_interruptible(&larval->completion);
+ err = wait_for_completion_killable(&larval->completion);
  WARN_ON(err);

  out:
+3 -3
crypto/api.c
···
  struct crypto_larval *larval = (void *)alg;
  long timeout;

- timeout = wait_for_completion_interruptible_timeout(
+ timeout = wait_for_completion_killable_timeout(
      &larval->completion, 60 * HZ);

  alg = larval->adult;
···
  err:
      if (err != -EAGAIN)
          break;
-     if (signal_pending(current)) {
+     if (fatal_signal_pending(current)) {
          err = -EINTR;
          break;
      }
···
  err:
      if (err != -EAGAIN)
          break;
-     if (signal_pending(current)) {
+     if (fatal_signal_pending(current)) {
          err = -EINTR;
          break;
      }
+1 -1
crypto/crypto_user.c
···
  err = PTR_ERR(alg);
  if (err != -EAGAIN)
      break;
- if (signal_pending(current)) {
+ if (fatal_signal_pending(current)) {
      err = -EINTR;
      break;
  }
+1 -1
drivers/base/dma-contiguous.c
···
   * global one. Requires architecture specific dev_get_cma_area() helper
   * function.
   */
- struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                         unsigned int align)
  {
      if (align > CONFIG_CMA_ALIGNMENT)
+30 -6
drivers/block/nbd.c
···
  bool disconnect; /* a disconnect has been requested by user */

  struct timer_list timeout_timer;
+ spinlock_t tasks_lock;
  struct task_struct *task_recv;
  struct task_struct *task_send;

···
  static void nbd_xmit_timeout(unsigned long arg)
  {
      struct nbd_device *nbd = (struct nbd_device *)arg;
-     struct task_struct *task;
+     unsigned long flags;

      if (list_empty(&nbd->queue_head))
          return;

      nbd->disconnect = true;

-     task = READ_ONCE(nbd->task_recv);
-     if (task)
-         force_sig(SIGKILL, task);
+     spin_lock_irqsave(&nbd->tasks_lock, flags);

-     task = READ_ONCE(nbd->task_send);
-     if (task)
+     if (nbd->task_recv)
+         force_sig(SIGKILL, nbd->task_recv);
+
+     if (nbd->task_send)
          force_sig(SIGKILL, nbd->task_send);
+
+     spin_unlock_irqrestore(&nbd->tasks_lock, flags);

      dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
  }
···
  {
      struct request *req;
      int ret;
+     unsigned long flags;

      BUG_ON(nbd->magic != NBD_MAGIC);

      sk_set_memalloc(nbd->sock->sk);

+     spin_lock_irqsave(&nbd->tasks_lock, flags);
      nbd->task_recv = current;
+     spin_unlock_irqrestore(&nbd->tasks_lock, flags);

      ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
      if (ret) {
          dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+
+         spin_lock_irqsave(&nbd->tasks_lock, flags);
          nbd->task_recv = NULL;
+         spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
          return ret;
      }
···
      device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

+     spin_lock_irqsave(&nbd->tasks_lock, flags);
      nbd->task_recv = NULL;
+     spin_unlock_irqrestore(&nbd->tasks_lock, flags);

      if (signal_pending(current)) {
          siginfo_t info;
···
  {
      struct nbd_device *nbd = data;
      struct request *req;
+     unsigned long flags;

+     spin_lock_irqsave(&nbd->tasks_lock, flags);
      nbd->task_send = current;
+     spin_unlock_irqrestore(&nbd->tasks_lock, flags);

      set_user_nice(current, MIN_NICE);
      while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
···
          nbd_handle_req(nbd, req);
      }

+     spin_lock_irqsave(&nbd->tasks_lock, flags);
      nbd->task_send = NULL;
+     spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+
+     /* Clear maybe pending signals */
+     if (signal_pending(current)) {
+         siginfo_t info;
+         dequeue_signal_lock(current, &current->blocked, &info);
+     }

      return 0;
  }
···
      nbd_dev[i].magic = NBD_MAGIC;
      INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
      spin_lock_init(&nbd_dev[i].queue_lock);
+     spin_lock_init(&nbd_dev[i].tasks_lock);
      INIT_LIST_HEAD(&nbd_dev[i].queue_head);
      mutex_init(&nbd_dev[i].tx_lock);
      init_timer(&nbd_dev[i].timeout_timer);
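The old READ_ONCE() snapshots could race with the threads tearing themselves down; now the pointers are only published, cleared and signalled under tasks_lock. The shape of the pattern, transplanted to pthreads as a hypothetical standalone sketch (in the kernel the spinlock keeps force_sig() from racing with task exit; here it keeps the alive flag and the signal delivery coherent; build with -pthread):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t tasks_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t worker;
    static int worker_alive; /* guarded by tasks_lock */

    static void *worker_fn(void *arg)
    {
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&tasks_lock);
        worker_alive = 0; /* unpublish before exiting */
        pthread_mutex_unlock(&tasks_lock);
        return NULL;
    }

    static void timeout_handler(void)
    {
        pthread_mutex_lock(&tasks_lock);
        if (worker_alive) /* target cannot unpublish while we hold the lock */
            pthread_kill(worker, SIGUSR1);
        pthread_mutex_unlock(&tasks_lock);
    }

    int main(void)
    {
        signal(SIGUSR1, SIG_IGN);
        pthread_mutex_lock(&tasks_lock);
        pthread_create(&worker, NULL, worker_fn, NULL);
        worker_alive = 1; /* publish under the lock */
        pthread_mutex_unlock(&tasks_lock);

        timeout_handler();
        pthread_join(worker, NULL);
        puts("done");
        return 0;
    }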
+15 -9
drivers/block/nvme-core.c
···
  struct nvme_iod *iod = ctx;
  struct request *req = iod_get_private(iod);
  struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
-
  u16 status = le16_to_cpup(&cqe->status) >> 1;
+ bool requeue = false;
+ int error = 0;

  if (unlikely(status)) {
      if (!(status & NVME_SC_DNR || blk_noretry_request(req))
          && (jiffies - req->start_time) < req->timeout) {
          unsigned long flags;

+         requeue = true;
          blk_mq_requeue_request(req);
          spin_lock_irqsave(req->q->queue_lock, flags);
          if (!blk_queue_stopped(req->q))
              blk_mq_kick_requeue_list(req->q);
          spin_unlock_irqrestore(req->q->queue_lock, flags);
-         return;
+         goto release_iod;
      }

      if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
          if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-             status = -EINTR;
+             error = -EINTR;
+         else
+             error = status;
      } else {
-         status = nvme_error_status(status);
+         error = nvme_error_status(status);
      }
  }
···
  if (cmd_rq->aborted)
      dev_warn(nvmeq->dev->dev,
               "completing aborted command with status:%04x\n",
-              status);
+              error);

+ release_iod:
  if (iod->nents) {
      dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents,
                   rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
···
  }
  nvme_free_iod(nvmeq->dev, iod);

- blk_mq_complete_request(req, status);
+ if (likely(!requeue))
+     blk_mq_complete_request(req, error);
  }

  /* length is in bytes. gfp flags indicates whether we may sleep. */
···
  length = (io.nblocks + 1) << ns->lba_shift;
  meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(unsigned long)io.metadata;
+ metadata = (void __user *)(uintptr_t)io.metadata;
  write = io.opcode & 1;

  if (ns->ext) {
···
  c.rw.metadata = cpu_to_le64(meta_dma);

  status = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
-         (void __user *)io.addr, length, NULL, 0);
+         (void __user *)(uintptr_t)io.addr, length, NULL, 0);
  unmap:
  if (meta) {
      if (status == NVME_SC_SUCCESS && !write) {
···
      timeout = msecs_to_jiffies(cmd.timeout_ms);

  status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c,
-         NULL, (void __user *)cmd.addr, cmd.data_len,
+         NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
          &cmd.result, timeout);
  if (status >= 0) {
      if (put_user(cmd.result, &ucmd->result))
+48 -36
drivers/block/rbd.c
···
  #define RBD_MINORS_PER_MAJOR          256
  #define RBD_SINGLE_MAJOR_PART_SHIFT   4

+ #define RBD_MAX_PARENT_CHAIN_LEN      16
+
  #define RBD_SNAP_DEV_NAME_PREFIX      "snap_"
  #define RBD_MAX_SNAP_NAME_LEN \
      (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
···
                 size_t count);
  static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
                 size_t count);
- static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
  static void rbd_spec_put(struct rbd_spec *spec);

  static int rbd_dev_id_to_minor(int dev_id)
···
  blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
  q->limits.discard_zeroes_data = 1;

+ if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
+     q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
  disk->queue = q;

  q->queuedata = rbd_dev;
···
      return ret;
  }

- static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ /*
+  * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
+  * rbd_dev_image_probe() recursion depth, which means it's also the
+  * length of the already discovered part of the parent chain.
+  */
+ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
  {
      struct rbd_device *parent = NULL;
-     struct rbd_spec *parent_spec;
-     struct rbd_client *rbdc;
      int ret;

      if (!rbd_dev->parent_spec)
          return 0;
-     /*
-      * We need to pass a reference to the client and the parent
-      * spec when creating the parent rbd_dev. Images related by
-      * parent/child relationships always share both.
-      */
-     parent_spec = rbd_spec_get(rbd_dev->parent_spec);
-     rbdc = __rbd_get_client(rbd_dev->rbd_client);

-     ret = -ENOMEM;
-     parent = rbd_dev_create(rbdc, parent_spec, NULL);
-     if (!parent)
+     if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
+         pr_info("parent chain is too long (%d)\n", depth);
+         ret = -EINVAL;
          goto out_err;
+     }

-     ret = rbd_dev_image_probe(parent, false);
-     if (ret < 0)
-         goto out_err;
-     rbd_dev->parent = parent;
-     atomic_set(&rbd_dev->parent_ref, 1);
-
-     return 0;
- out_err:
-     if (parent) {
-         rbd_dev_unparent(rbd_dev);
-         rbd_dev_destroy(parent);
-     } else {
-         rbd_put_client(rbdc);
-         rbd_spec_put(parent_spec);
-     }
+     parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
+                             NULL);
+     if (!parent) {
+         ret = -ENOMEM;
+         goto out_err;
+     }

+     /*
+      * Images related by parent/child relationships always share
+      * rbd_client and spec/parent_spec, so bump their refcounts.
+      */
+     __rbd_get_client(rbd_dev->rbd_client);
+     rbd_spec_get(rbd_dev->parent_spec);
+
+     ret = rbd_dev_image_probe(parent, depth);
+     if (ret < 0)
+         goto out_err;
+
+     rbd_dev->parent = parent;
+     atomic_set(&rbd_dev->parent_ref, 1);
+     return 0;
+
+ out_err:
+     rbd_dev_unparent(rbd_dev);
+     if (parent)
+         rbd_dev_destroy(parent);
      return ret;
  }
···
   * parent), initiate a watch on its header object before using that
   * object to get detailed information about the rbd image.
   */
- static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
  {
      int ret;
···
      if (ret)
          goto err_out_format;

-     if (mapping) {
+     if (!depth) {
          ret = rbd_dev_header_watch_sync(rbd_dev);
          if (ret) {
              if (ret == -ENOENT)
···
       * Otherwise this is a parent image, identified by pool, image
       * and snap ids - need to fill in names for those ids.
       */
-     if (mapping)
+     if (!depth)
          ret = rbd_spec_fill_snap_id(rbd_dev);
      else
          ret = rbd_spec_fill_names(rbd_dev);
···
       * Need to warn users if this image is the one being
       * mapped and has a parent.
       */
-     if (mapping && rbd_dev->parent_spec)
+     if (!depth && rbd_dev->parent_spec)
          rbd_warn(rbd_dev,
                   "WARNING: kernel layering is EXPERIMENTAL!");
  }

- ret = rbd_dev_probe_parent(rbd_dev);
+ ret = rbd_dev_probe_parent(rbd_dev, depth);
  if (ret)
      goto err_out_probe;
···
  err_out_probe:
      rbd_dev_unprobe(rbd_dev);
  err_out_watch:
-     if (mapping)
+     if (!depth)
          rbd_dev_header_unwatch_sync(rbd_dev);
  out_header_name:
      kfree(rbd_dev->header_name);
···
  spec = NULL;      /* rbd_dev now owns this */
  rbd_opts = NULL;  /* rbd_dev now owns this */

- rc = rbd_dev_image_probe(rbd_dev, true);
+ rc = rbd_dev_image_probe(rbd_dev, 0);
  if (rc < 0)
      goto err_out_rbd_dev;
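Bounding the recursion is the whole fix: each probe level passes its depth down, and a chain longer than RBD_MAX_PARENT_CHAIN_LEN (possibly a cycle in corrupted metadata) is rejected instead of overflowing the kernel stack. The guard in miniature (hypothetical stand-ins for the rbd functions):

    #include <stdio.h>

    #define MAX_CHAIN_LEN 16

    /* Toy parent table: image i's parent is i + 1, endlessly - the kind of
     * unbounded chain the depth cap defends against. */
    static int lookup_parent(int id) { return id + 1; }

    static int probe_image(int id, int depth)
    {
        if (++depth > MAX_CHAIN_LEN) {
            fprintf(stderr, "parent chain is too long (%d)\n", depth);
            return -1;
        }
        /* ... probe image `id` itself here ... */
        return probe_image(lookup_parent(id), depth);
    }

    int main(void)
    {
        int ret = probe_image(0, 0);

        printf("probe returned %d\n", ret); /* -1: chain capped, no overflow */
        return 0;
    }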
+2 -1
drivers/block/xen-blkfront.c
···
      break;
      /* Missed the backend's Closing state -- fallthrough */
  case XenbusStateClosing:
-     blkfront_closing(info);
+     if (info)
+         blkfront_closing(info);
      break;
  }
+3 -2
drivers/bus/arm-ccn.c
···
  if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
      break;
  target = cpumask_any_but(cpu_online_mask, cpu);
- if (target < 0)
+ if (target >= nr_cpu_ids)
      break;
  perf_pmu_migrate_context(&dt->pmu, cpu, target);
  cpumask_set_cpu(target, &dt->cpu);
- WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+ if (ccn->irq)
+     WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
  default:
      break;
  }
+2 -1
drivers/clk/clkdev.c
···
  if (IS_ERR(r))
      return PTR_ERR(r);

- l = clkdev_create(r, alias, "%s", alias_dev_name);
+ l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL,
+                   alias_dev_name);
  clk_put(r);

  return l ? 0 : -ENODEV;
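The point of the conditional format is that the format string ends up in vsnprintf(), and "%s" with a NULL argument is undefined behaviour; passing a NULL format instead means "no device name". The guard pattern in a hypothetical standalone form (the helper name is made up):

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical stand-in for clkdev's name formatting: a NULL fmt means
     * "no device name", so vsnprintf() is never handed a NULL "%s" argument. */
    static void format_dev_name(char *buf, size_t len, const char *fmt, ...)
    {
        va_list ap;

        if (!fmt) {
            buf[0] = '\0'; /* anonymous lookup entry */
            return;
        }
        va_start(ap, fmt);
        vsnprintf(buf, len, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        char buf[32];
        const char *alias_dev_name = NULL; /* may legitimately be NULL */

        format_dev_name(buf, sizeof(buf),
                        alias_dev_name ? "%s" : NULL, alias_dev_name);
        printf("name: \"%s\"\n", buf);
        return 0;
    }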
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
···
  u8 fan_max_rpm;
  /* dpm */
  bool dpm_enabled;
+ bool sysfs_initialized;
  struct amdgpu_dpm dpm;
  const struct firmware *fw;  /* SMC firmware */
  uint32_t fw_version;
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
···
      goto cleanup;
  }

- fence_get(work->excl);
- for (i = 0; i < work->shared_count; ++i)
-     fence_get(work->shared[i]);
-
  amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
  amdgpu_bo_unreserve(new_rbo);
+11 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
···
  struct amdgpu_device *adev = dev_get_drvdata(dev);
  umode_t effective_mode = attr->mode;

- /* Skip limit attributes if DPM is not enabled */
+ /* Skip attributes if DPM is not enabled */
  if (!adev->pm.dpm_enabled &&
      (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
-      attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
+      attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
+      attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+      attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+      attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
      return 0;

  /* Skip fan attributes if fan is not present */
···
  {
      int ret;

+     if (adev->pm.sysfs_initialized)
+         return 0;
+
      if (adev->pm.funcs->get_temperature == NULL)
          return 0;
      adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
···
      DRM_ERROR("Failed to register debugfs file for dpm!\n");
      return ret;
  }
+
+ adev->pm.sysfs_initialized = true;

  return 0;
  }
+3
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
···
  struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  int ret;

+ if (!amdgpu_dpm)
+     return 0;
+
  /* init the sysfs and debugfs files late */
  ret = amdgpu_pm_sysfs_init(adev);
  if (ret)
+4 -3
drivers/gpu/drm/drm_dp_mst_topology.c
···

  list_for_each_entry(port, &mstb->ports, next) {
      if (port->port_num == port_num) {
-         if (!port->mstb) {
+         mstb = port->mstb;
+         if (!mstb) {
              DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
-             return NULL;
+             goto out;
          }

-         mstb = port->mstb;
          break;
      }
  }
  }
  kref_get(&mstb->kref);
+ out:
  mutex_unlock(&mgr->lock);
  return mstb;
  }
+1 -1
drivers/gpu/drm/i915/i915_gem_shrinker.c
···
  }

  /**
-  * i915_gem_shrink - Shrink buffer object caches completely
+  * i915_gem_shrink_all - Shrink buffer object caches completely
   * @dev_priv: i915 device
   *
   * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
+4 -1
drivers/gpu/drm/i915/i915_gem_userptr.c
···
   * Also note, that the object created here is not currently a "first class"
   * object, in that several ioctls are banned. These are the CPU access
   * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
-  * direct access via your pointer rather than use those ioctls.
+  * direct access via your pointer rather than use those ioctls. Another
+  * restriction is that we do not allow userptr surfaces to be pinned to the
+  * hardware and so we reject any attempt to create a framebuffer out of a
+  * userptr.
   *
   * If you think this is a good interface to use to pass GPU memory between
   * drivers, please use dma-buf instead. In fact, wherever possible use
+67 -53
drivers/gpu/drm/i915/intel_display.c
···
      I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
  }

+ /*
+  * Apparently we need to have VGA mode enabled prior to changing
+  * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+  * dividers, even though the register value does change.
+  */
+ I915_WRITE(reg, 0);
+
+ I915_WRITE(reg, dpll);
+
  /* Wait for the clocks to stabilize. */
  POSTING_READ(reg);
  udelay(150);
···
  struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
  struct drm_i915_gem_object *obj = intel_fb->obj;

+ if (obj->userptr.mm) {
+     DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+     return -EINVAL;
+ }
+
  return drm_gem_handle_create(file, &obj->base, handle);
  }
···
  /* restore vblank interrupts to correct state */
  drm_crtc_vblank_reset(&crtc->base);
  if (crtc->active) {
+     struct intel_plane *plane;
+
      drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
      update_scanline_offset(crtc);
      drm_crtc_vblank_on(&crtc->base);
+
+     /* Disable everything but the primary plane */
+     for_each_intel_plane_on_crtc(dev, crtc, plane) {
+         if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+             continue;
+
+         plane->disable_plane(&plane->base, &crtc->base);
+     }
  }

  /* We need to sanitize the plane -> pipe mapping first because this will
···
      i915_redisable_vga_power_on(dev);
  }

- static bool primary_get_hw_state(struct intel_crtc *crtc)
+ static bool primary_get_hw_state(struct intel_plane *plane)
  {
-     struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

-     return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+     return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
  }

- static void readout_plane_state(struct intel_crtc *crtc,
-                                 struct intel_crtc_state *crtc_state)
+ /* FIXME read out full plane state for all planes */
+ static void readout_plane_state(struct intel_crtc *crtc)
  {
-     struct intel_plane *p;
-     struct intel_plane_state *plane_state;
-     bool active = crtc_state->base.active;
+     struct drm_plane *primary = crtc->base.primary;
+     struct intel_plane_state *plane_state =
+         to_intel_plane_state(primary->state);

-     for_each_intel_plane(crtc->base.dev, p) {
-         if (crtc->pipe != p->pipe)
-             continue;
+     plane_state->visible =
+         primary_get_hw_state(to_intel_plane(primary));

-         plane_state = to_intel_plane_state(p->base.state);
-
-         if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
-             plane_state->visible = primary_get_hw_state(crtc);
-             if (plane_state->visible)
-                 crtc->base.state->plane_mask |=
-                     1 << drm_plane_index(&p->base);
-         } else {
-             if (active)
-                 p->disable_plane(&p->base, &crtc->base);
-
-             plane_state->visible = false;
-         }
-     }
+     if (plane_state->visible)
+         crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
  }

  static void intel_modeset_readout_hw_state(struct drm_device *dev)
···
  crtc->base.state->active = crtc->active;
  crtc->base.enabled = crtc->active;

- memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
- if (crtc->base.state->active) {
-     intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
-     intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
-     WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
-
-     /*
-      * The initial mode needs to be set in order to keep
-      * the atomic core happy. It wants a valid mode if the
-      * crtc's enabled, so we do the above call.
-      *
-      * At this point some state updated by the connectors
-      * in their ->detect() callback has not run yet, so
-      * no recalculation can be done yet.
-      *
-      * Even if we could do a recalculation and modeset
-      * right now it would cause a double modeset if
-      * fbdev or userspace chooses a different initial mode.
-      *
-      * If that happens, someone indicated they wanted a
-      * mode change, which means it's safe to do a full
-      * recalculation.
-      */
-     crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
- }
-
- crtc->base.hwmode = crtc->config->base.adjusted_mode;
- readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
+ readout_plane_state(crtc);

  DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
      crtc->base.base.id,
···
      connector->base.base.id,
      connector->base.name,
      connector->base.encoder ? "enabled" : "disabled");
+ }
+
+ for_each_intel_crtc(dev, crtc) {
+     crtc->base.hwmode = crtc->config->base.adjusted_mode;
+
+     memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+     if (crtc->base.state->active) {
+         intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+         intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+         /*
+          * The initial mode needs to be set in order to keep
+          * the atomic core happy. It wants a valid mode if the
+          * crtc's enabled, so we do the above call.
+          *
+          * At this point some state updated by the connectors
+          * in their ->detect() callback has not run yet, so
+          * no recalculation can be done yet.
+          *
+          * Even if we could do a recalculation and modeset
+          * right now it would cause a double modeset if
+          * fbdev or userspace chooses a different initial mode.
+          *
+          * If that happens, someone indicated they wanted a
+          * mode change, which means it's safe to do a full
+          * recalculation.
+          */
+         crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+     }
+ }
  }
+1
drivers/gpu/drm/i915/intel_lrc.c
··· 1659 1659 if (flush_domains) { 1660 1660 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 1661 1661 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 1662 + flags |= PIPE_CONTROL_FLUSH_ENABLE; 1662 1663 } 1663 1664 1664 1665 if (invalidate_domains) {
+2
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 347 347 if (flush_domains) { 348 348 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 349 349 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 350 + flags |= PIPE_CONTROL_FLUSH_ENABLE; 350 351 } 351 352 if (invalidate_domains) { 352 353 flags |= PIPE_CONTROL_TLB_INVALIDATE; ··· 419 418 if (flush_domains) { 420 419 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 421 420 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 421 + flags |= PIPE_CONTROL_FLUSH_ENABLE; 422 422 } 423 423 if (invalidate_domains) { 424 424 flags |= PIPE_CONTROL_TLB_INVALIDATE;
+3 -2
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 227 227 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 228 228 struct nvkm_vma *vma; 229 229 230 - if (nvbo->bo.mem.mem_type == TTM_PL_TT) 230 + if (is_power_of_2(nvbo->valid_domains)) 231 + rep->domain = nvbo->valid_domains; 232 + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) 231 233 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 232 234 else 233 235 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 234 - 235 236 rep->offset = nvbo->bo.offset; 236 237 if (cli->vm) { 237 238 vma = nouveau_bo_vma_find(nvbo, cli->vm);
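The is_power_of_2() test is the idiomatic check for "exactly one bit set", which is what makes a single-domain mask safe to report verbatim here. A minimal stand-alone sketch of the same decision, with the NOUVEAU_GEM_DOMAIN_* values assumed from the nouveau UAPI header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)	/* assumed UAPI values */
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;	/* exactly one bit set */
}

int main(void)
{
	uint32_t valid_domains = NOUVEAU_GEM_DOMAIN_VRAM;

	/* A buffer restricted to a single domain reports that domain
	 * directly; otherwise fall back to current placement. */
	if (is_power_of_2(valid_domains))
		printf("report 0x%x verbatim\n", valid_domains);
	else
		printf("derive domain from current placement\n");
	return 0;
}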
+1
drivers/gpu/drm/radeon/radeon.h
··· 1658 1658 u8 fan_max_rpm; 1659 1659 /* dpm */ 1660 1660 bool dpm_enabled; 1661 + bool sysfs_initialized; 1661 1662 struct radeon_dpm dpm; 1662 1663 }; 1663 1664
+27 -16
drivers/gpu/drm/radeon/radeon_pm.c
··· 717 717 struct radeon_device *rdev = dev_get_drvdata(dev); 718 718 umode_t effective_mode = attr->mode; 719 719 720 - /* Skip limit attributes if DPM is not enabled */ 720 + /* Skip attributes if DPM is not enabled */ 721 721 if (rdev->pm.pm_method != PM_METHOD_DPM && 722 722 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 723 - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 723 + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 724 + attr == &sensor_dev_attr_pwm1.dev_attr.attr || 725 + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 726 + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 727 + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 724 728 return 0; 725 729 726 730 /* Skip fan attributes if fan is not present */ ··· 1528 1524 1529 1525 if (rdev->pm.pm_method == PM_METHOD_DPM) { 1530 1526 if (rdev->pm.dpm_enabled) { 1531 - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1532 - if (ret) 1533 - DRM_ERROR("failed to create device file for dpm state\n"); 1534 - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1535 - if (ret) 1536 - DRM_ERROR("failed to create device file for dpm state\n"); 1537 - /* XXX: these are noops for dpm but are here for backwards compat */ 1538 - ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1539 - if (ret) 1540 - DRM_ERROR("failed to create device file for power profile\n"); 1541 - ret = device_create_file(rdev->dev, &dev_attr_power_method); 1542 - if (ret) 1543 - DRM_ERROR("failed to create device file for power method\n"); 1527 + if (!rdev->pm.sysfs_initialized) { 1528 + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1529 + if (ret) 1530 + DRM_ERROR("failed to create device file for dpm state\n"); 1531 + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1532 + if (ret) 1533 + DRM_ERROR("failed to create device file for dpm state\n"); 1534 + /* XXX: these are noops for dpm but are here for backwards compat */ 1535 + ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1536 + if (ret) 1537 + DRM_ERROR("failed to create device file for power profile\n"); 1538 + ret = device_create_file(rdev->dev, &dev_attr_power_method); 1539 + if (ret) 1540 + DRM_ERROR("failed to create device file for power method\n"); 1541 + if (!ret) 1542 + rdev->pm.sysfs_initialized = true; 1543 + } 1544 1544 1545 1545 mutex_lock(&rdev->pm.mutex); 1546 1546 ret = radeon_dpm_late_enable(rdev); ··· 1560 1552 } 1561 1553 } 1562 1554 } else { 1563 - if (rdev->pm.num_power_states > 1) { 1555 + if ((rdev->pm.num_power_states > 1) && 1556 + (!rdev->pm.sysfs_initialized)) { 1564 1557 /* where's the best place to put these? */ 1565 1558 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1566 1559 if (ret) ··· 1569 1560 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1570 1561 if (ret) 1571 1562 DRM_ERROR("failed to create device file for power method\n"); 1563 + if (!ret) 1564 + rdev->pm.sysfs_initialized = true; 1572 1565 } 1573 1566 } 1574 1567 return ret;
+20 -14
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 415 415 * 416 416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has 417 417 * command buffers left that are not submitted to hardware, Make sure 418 - * IRQ handling is turned on. Otherwise, make sure it's turned off. This 419 - * function may return -EAGAIN to indicate it should be rerun due to 420 - * possibly missed IRQs if IRQs has just been turned on. 418 + * IRQ handling is turned on. Otherwise, make sure it's turned off. 421 419 */ 422 - static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) 420 + static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) 423 421 { 424 - int notempty = 0; 422 + int notempty; 425 423 struct vmw_cmdbuf_context *ctx; 426 424 int i; 427 425 426 + retry: 427 + notempty = 0; 428 428 for_each_cmdbuf_ctx(man, i, ctx) 429 429 vmw_cmdbuf_ctx_process(man, ctx, &notempty); 430 430 ··· 440 440 man->irq_on = true; 441 441 442 442 /* Rerun in case we just missed an irq. */ 443 - return -EAGAIN; 443 + goto retry; 444 444 } 445 - 446 - return 0; 447 445 } 448 446 449 447 /** ··· 466 468 header->cb_context = cb_context; 467 469 list_add_tail(&header->list, &man->ctx[cb_context].submitted); 468 470 469 - if (vmw_cmdbuf_man_process(man) == -EAGAIN) 470 - vmw_cmdbuf_man_process(man); 471 + vmw_cmdbuf_man_process(man); 471 472 } 472 473 473 474 /** ··· 485 488 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; 486 489 487 490 spin_lock(&man->lock); 488 - if (vmw_cmdbuf_man_process(man) == -EAGAIN) 489 - (void) vmw_cmdbuf_man_process(man); 491 + vmw_cmdbuf_man_process(man); 490 492 spin_unlock(&man->lock); 491 493 } 492 494 ··· 503 507 struct vmw_cmdbuf_man *man = 504 508 container_of(work, struct vmw_cmdbuf_man, work); 505 509 struct vmw_cmdbuf_header *entry, *next; 510 + uint32_t dummy; 506 511 bool restart = false; 507 512 508 513 spin_lock_bh(&man->lock); ··· 520 523 if (restart && vmw_cmdbuf_startstop(man, true)) 521 524 DRM_ERROR("Failed restarting command buffer context 0.\n"); 522 525 526 + /* Send a new fence in case one was removed */ 527 + vmw_fifo_send_fence(man->dev_priv, &dummy); 523 528 } 524 529 525 530 /** ··· 681 682 DRM_MM_SEARCH_DEFAULT, 682 683 DRM_MM_CREATE_DEFAULT); 683 684 if (ret) { 684 - (void) vmw_cmdbuf_man_process(man); 685 + vmw_cmdbuf_man_process(man); 685 686 ret = drm_mm_insert_node_generic(&man->mm, info->node, 686 687 info->page_size, 0, 0, 687 688 DRM_MM_SEARCH_DEFAULT, ··· 1167 1168 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); 1168 1169 1169 1170 man->has_pool = true; 1170 - man->default_size = default_size; 1171 + 1172 + /* 1173 + * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to 1174 + * prevent deadlocks from happening when vmw_cmdbuf_space_pool() 1175 + * needs to wait for space and we block on further command 1176 + * submissions to be able to free up space. 1177 + */ 1178 + man->default_size = VMW_CMDBUF_INLINE_SIZE; 1171 1179 DRM_INFO("Using command buffers with %s pool.\n", 1172 1180 (man->using_mob) ? "MOB" : "DMA"); 1173 1181
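The refactor above folds the old "return -EAGAIN and have every caller rerun me" contract into an internal retry, so callers can no longer forget the second pass. A compact, compilable sketch of the resulting control flow, with the context processing and IRQ plumbing stubbed out (names here are illustrative, not the driver's):

#include <stdbool.h>

struct mgr { bool irq_on; };

/* stand-in for vmw_cmdbuf_ctx_process() across all contexts */
static int contexts_left(struct mgr *m) { (void)m; return 0; }

static void man_process(struct mgr *m)
{
	int notempty;

retry:
	notempty = contexts_left(m);

	if (m->irq_on && !notempty) {
		m->irq_on = false;	/* nothing pending: irq off */
	} else if (!m->irq_on && notempty) {
		m->irq_on = true;	/* work pending: irq on */
		goto retry;		/* rerun in case an irq was just missed */
	}
}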
-2
drivers/i2c/busses/i2c-mv64xxx.c
··· 669 669 struct i2c_msg *msgs = drv_data->msgs; 670 670 int num = drv_data->num_msgs; 671 671 672 - return false; 673 - 674 672 if (!drv_data->offload_enabled) 675 673 return false; 676 674
+5 -5
drivers/i2c/busses/i2c-pnx.c
··· 600 600 { 601 601 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); 602 602 603 - clk_disable(alg_data->clk); 603 + clk_disable_unprepare(alg_data->clk); 604 604 605 605 return 0; 606 606 } ··· 609 609 { 610 610 struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); 611 611 612 - return clk_enable(alg_data->clk); 612 + return clk_prepare_enable(alg_data->clk); 613 613 } 614 614 615 615 static SIMPLE_DEV_PM_OPS(i2c_pnx_pm, ··· 672 672 if (IS_ERR(alg_data->ioaddr)) 673 673 return PTR_ERR(alg_data->ioaddr); 674 674 675 - ret = clk_enable(alg_data->clk); 675 + ret = clk_prepare_enable(alg_data->clk); 676 676 if (ret) 677 677 return ret; 678 678 ··· 726 726 return 0; 727 727 728 728 out_clock: 729 - clk_disable(alg_data->clk); 729 + clk_disable_unprepare(alg_data->clk); 730 730 return ret; 731 731 } 732 732 ··· 735 735 struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); 736 736 737 737 i2c_del_adapter(&alg_data->adapter); 738 - clk_disable(alg_data->clk); 738 + clk_disable_unprepare(alg_data->clk); 739 739 740 740 return 0; 741 741 }
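With the common clock framework, clk_enable() is only legal on a clock that has already been prepared, so the bare enable/disable calls had to become the combined helpers. For reference, clk_prepare_enable() is equivalent to the open-coded sequence below (this mirrors the static inline in include/linux/clk.h); the prepare half may sleep, which is why it exists as a separate step. The lpc32xx_ts hunk further down makes the same conversion.

#include <linux/clk.h>

static int pnx_clk_on(struct clk *clk)
{
	int ret = clk_prepare(clk);	/* may sleep */

	if (ret)
		return ret;
	ret = clk_enable(clk);		/* atomic-context-safe half */
	if (ret)
		clk_unprepare(clk);
	return ret;
}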
-6
drivers/iio/accel/st_accel_core.c
··· 149 149 #define ST_ACCEL_4_BDU_MASK 0x40 150 150 #define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 151 151 #define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04 152 - #define ST_ACCEL_4_IG1_EN_ADDR 0x21 153 - #define ST_ACCEL_4_IG1_EN_MASK 0x08 154 152 #define ST_ACCEL_4_MULTIREAD_BIT true 155 153 156 154 /* CUSTOM VALUES FOR SENSOR 5 */ ··· 487 489 .drdy_irq = { 488 490 .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, 489 491 .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, 490 - .ig1 = { 491 - .en_addr = ST_ACCEL_4_IG1_EN_ADDR, 492 - .en_mask = ST_ACCEL_4_IG1_EN_MASK, 493 - }, 494 492 }, 495 493 .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, 496 494 .bootime = 2, /* guess */
+34
drivers/iio/adc/twl4030-madc.c
··· 45 45 #include <linux/types.h> 46 46 #include <linux/gfp.h> 47 47 #include <linux/err.h> 48 + #include <linux/regulator/consumer.h> 48 49 49 50 #include <linux/iio/iio.h> 51 + 52 + #define TWL4030_USB_SEL_MADC_MCPC (1<<3) 53 + #define TWL4030_USB_CARKIT_ANA_CTRL 0xBB 50 54 51 55 /** 52 56 * struct twl4030_madc_data - a container for madc info 53 57 * @dev: Pointer to device structure for madc 54 58 * @lock: Mutex protecting this data structure 59 + * @regulator: Pointer to bias regulator for madc 55 60 * @requests: Array of request struct corresponding to SW1, SW2 and RT 56 61 * @use_second_irq: IRQ selection (main or co-processor) 57 62 * @imr: Interrupt mask register of MADC ··· 65 60 struct twl4030_madc_data { 66 61 struct device *dev; 67 62 struct mutex lock; /* mutex protecting this data structure */ 63 + struct regulator *usb3v1; 68 64 struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; 69 65 bool use_second_irq; 70 66 u8 imr; ··· 847 841 } 848 842 twl4030_madc = madc; 849 843 844 + /* Configure MADC[3:6] */ 845 + ret = twl_i2c_read_u8(TWL_MODULE_USB, &regval, 846 + TWL4030_USB_CARKIT_ANA_CTRL); 847 + if (ret) { 848 + dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL 0x%X\n", 849 + TWL4030_USB_CARKIT_ANA_CTRL); 850 + goto err_i2c; 851 + } 852 + regval |= TWL4030_USB_SEL_MADC_MCPC; 853 + ret = twl_i2c_write_u8(TWL_MODULE_USB, regval, 854 + TWL4030_USB_CARKIT_ANA_CTRL); 855 + if (ret) { 856 + dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n", 857 + TWL4030_USB_CARKIT_ANA_CTRL); 858 + goto err_i2c; 859 + } 860 + 861 + /* Enable 3v1 bias regulator for MADC[3:6] */ 862 + madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); 863 + if (IS_ERR(madc->usb3v1)) 864 + return -ENODEV; 865 + 866 + ret = regulator_enable(madc->usb3v1); 867 + if (ret) 868 + dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); 869 + 850 870 ret = iio_device_register(iio_dev); 851 871 if (ret) { 852 872 dev_err(&pdev->dev, "could not register iio device\n"); ··· 897 865 898 866 twl4030_madc_set_current_generator(madc, 0, 0); 899 867 twl4030_madc_set_power(madc, 0); 868 + 869 + regulator_disable(madc->usb3v1); 900 870 901 871 return 0; 902 872 }
+1 -1
drivers/infiniband/core/cache.c
··· 508 508 memset(&gid_attr, 0, sizeof(gid_attr)); 509 509 gid_attr.ndev = ndev; 510 510 511 + mutex_lock(&table->lock); 511 512 ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT); 512 513 513 514 /* Coudn't find default GID location */ 514 515 WARN_ON(ix < 0); 515 516 516 - mutex_lock(&table->lock); 517 517 if (!__ib_cache_gid_get(ib_dev, port, ix, 518 518 &current_gid, &current_gid_attr) && 519 519 mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
+9 -1
drivers/infiniband/core/cm.c
··· 835 835 case IB_CM_SIDR_REQ_RCVD: 836 836 spin_unlock_irq(&cm_id_priv->lock); 837 837 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); 838 + spin_lock_irq(&cm.lock); 839 + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) 840 + rb_erase(&cm_id_priv->sidr_id_node, 841 + &cm.remote_sidr_table); 842 + spin_unlock_irq(&cm.lock); 838 843 break; 839 844 case IB_CM_REQ_SENT: 840 845 case IB_CM_MRA_REQ_RCVD: ··· 3177 3172 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3178 3173 3179 3174 spin_lock_irqsave(&cm.lock, flags); 3180 - rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3175 + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { 3176 + rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); 3177 + RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); 3178 + } 3181 3179 spin_unlock_irqrestore(&cm.lock, flags); 3182 3180 return 0; 3183 3181
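RB_EMPTY_NODE() only tells the truth if every path that erases the node also re-marks it empty, which is what the added RB_CLEAR_NODE() provides; together they prevent a second rb_erase() on a node that already left the tree. A hedged kernel-context sketch of the resulting idiom (assuming the node starts out cleared at init time):

#include <linux/rbtree.h>

/* Erase a node at most once; safe only if every erase site pairs
 * rb_erase() with RB_CLEAR_NODE(), as the hunk above now does. */
static void erase_once(struct rb_node *node, struct rb_root *root)
{
	if (!RB_EMPTY_NODE(node)) {
		rb_erase(node, root);
		RB_CLEAR_NODE(node);	/* makes a repeat call a no-op */
	}
}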
+3 -3
drivers/infiniband/core/cma.c
··· 1067 1067 sizeof(req->local_gid)); 1068 1068 req->has_gid = true; 1069 1069 req->service_id = req_param->primary_path->service_id; 1070 - req->pkey = req_param->bth_pkey; 1070 + req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1071 1071 break; 1072 1072 case IB_CM_SIDR_REQ_RECEIVED: 1073 1073 req->device = sidr_param->listen_id->device; 1074 1074 req->port = sidr_param->port; 1075 1075 req->has_gid = false; 1076 1076 req->service_id = sidr_param->service_id; 1077 - req->pkey = sidr_param->bth_pkey; 1077 + req->pkey = sidr_param->pkey; 1078 1078 break; 1079 1079 default: 1080 1080 return -EINVAL; ··· 1324 1324 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id), 1325 1325 cma_port_from_service_id(req.service_id)); 1326 1326 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); 1327 - if (IS_ERR(id_priv)) { 1327 + if (IS_ERR(id_priv) && *net_dev) { 1328 1328 dev_put(*net_dev); 1329 1329 *net_dev = NULL; 1330 1330 }
+27 -8
drivers/infiniband/core/roce_gid_mgmt.c
··· 250 250 u8 port, struct net_device *ndev) 251 251 { 252 252 struct in_device *in_dev; 253 + struct sin_list { 254 + struct list_head list; 255 + struct sockaddr_in ip; 256 + }; 257 + struct sin_list *sin_iter; 258 + struct sin_list *sin_temp; 253 259 260 + LIST_HEAD(sin_list); 254 261 if (ndev->reg_state >= NETREG_UNREGISTERING) 255 262 return; 256 263 257 - in_dev = in_dev_get(ndev); 258 - if (!in_dev) 264 + rcu_read_lock(); 265 + in_dev = __in_dev_get_rcu(ndev); 266 + if (!in_dev) { 267 + rcu_read_unlock(); 259 268 return; 269 + } 260 270 261 271 for_ifa(in_dev) { 262 - struct sockaddr_in ip; 272 + struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 263 273 264 - ip.sin_family = AF_INET; 265 - ip.sin_addr.s_addr = ifa->ifa_address; 266 - update_gid_ip(GID_ADD, ib_dev, port, ndev, 267 - (struct sockaddr *)&ip); 274 + if (!entry) { 275 + pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n"); 276 + continue; 277 + } 278 + entry->ip.sin_family = AF_INET; 279 + entry->ip.sin_addr.s_addr = ifa->ifa_address; 280 + list_add_tail(&entry->list, &sin_list); 268 281 } 269 282 endfor_ifa(in_dev); 283 + rcu_read_unlock(); 270 284 271 - in_dev_put(in_dev); 285 + list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) { 286 + update_gid_ip(GID_ADD, ib_dev, port, ndev, 287 + (struct sockaddr *)&sin_iter->ip); 288 + list_del(&sin_iter->list); 289 + kfree(sin_iter); 290 + } 272 291 } 273 292 274 293 static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
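The walk over the device's addresses now runs under rcu_read_lock() instead of pinning the device with in_dev_get(), so nothing inside it may sleep; the addresses are therefore copied out with GFP_ATOMIC and the potentially sleeping GID updates happen after the unlock. A generic sketch of that snapshot-then-process shape, with hypothetical types and callback (not the driver's own API):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct item { struct list_head node; u32 addr; };	/* shared, RCU-managed */
struct snap { struct list_head node; u32 addr; };	/* private copy */

static void update_all(struct list_head *shared, void (*update)(u32))
{
	struct item *it;
	struct snap *s, *tmp;
	LIST_HEAD(snapshot);

	rcu_read_lock();			/* no sleeping from here... */
	list_for_each_entry_rcu(it, shared, node) {
		struct snap *e = kzalloc(sizeof(*e), GFP_ATOMIC);

		if (e) {
			e->addr = it->addr;
			list_add_tail(&e->node, &snapshot);
		}
	}
	rcu_read_unlock();			/* ...to here */

	list_for_each_entry_safe(s, tmp, &snapshot, node) {
		update(s->addr);		/* may sleep now */
		list_del(&s->node);
		kfree(s);
	}
}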
+6 -1
drivers/infiniband/core/ucma.c
··· 1624 1624 if (!file) 1625 1625 return -ENOMEM; 1626 1626 1627 + file->close_wq = create_singlethread_workqueue("ucma_close_id"); 1628 + if (!file->close_wq) { 1629 + kfree(file); 1630 + return -ENOMEM; 1631 + } 1632 + 1627 1633 INIT_LIST_HEAD(&file->event_list); 1628 1634 INIT_LIST_HEAD(&file->ctx_list); 1629 1635 init_waitqueue_head(&file->poll_wait); 1630 1636 mutex_init(&file->mut); 1631 - file->close_wq = create_singlethread_workqueue("ucma_close_id"); 1632 1637 1633 1638 filp->private_data = file; 1634 1639 file->filp = filp;
+42 -6
drivers/input/mouse/alps.c
··· 100 100 #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 101 101 #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 102 102 6-byte ALPS packet */ 103 - #define ALPS_DELL 0x100 /* device is a Dell laptop */ 103 + #define ALPS_STICK_BITS 0x100 /* separate stick button bits */ 104 104 #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 105 105 106 106 static const struct alps_model_info alps_model_data[] = { ··· 157 157 158 158 static const struct alps_protocol_info alps_v8_protocol_data = { 159 159 ALPS_PROTO_V8, 0x18, 0x18, 0 160 + }; 161 + 162 + /* 163 + * Some v2 models report the stick buttons in separate bits 164 + */ 165 + static const struct dmi_system_id alps_dmi_has_separate_stick_buttons[] = { 166 + #if defined(CONFIG_DMI) && defined(CONFIG_X86) 167 + { 168 + /* Extrapolated from other entries */ 169 + .matches = { 170 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 171 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D420"), 172 + }, 173 + }, 174 + { 175 + /* Reported-by: Hans de Bruin <jmdebruin@xmsnet.nl> */ 176 + .matches = { 177 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 178 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D430"), 179 + }, 180 + }, 181 + { 182 + /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 183 + .matches = { 184 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 185 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D620"), 186 + }, 187 + }, 188 + { 189 + /* Extrapolated from other entries */ 190 + .matches = { 191 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 192 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D630"), 193 + }, 194 + }, 195 + #endif 196 + { } 160 197 }; 161 198 162 199 static void alps_set_abs_params_st(struct alps_data *priv, ··· 290 253 return; 291 254 } 292 255 293 - /* Dell non interleaved V2 dualpoint has separate stick button bits */ 294 - if (priv->proto_version == ALPS_PROTO_V2 && 295 - priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) { 256 + /* Some models have separate stick button bits */ 257 + if (priv->flags & ALPS_STICK_BITS) { 296 258 left |= packet[0] & 1; 297 259 right |= packet[0] & 2; 298 260 middle |= packet[0] & 4; ··· 2588 2552 priv->byte0 = protocol->byte0; 2589 2553 priv->mask0 = protocol->mask0; 2590 2554 priv->flags = protocol->flags; 2591 - if (dmi_name_in_vendors("Dell")) 2592 - priv->flags |= ALPS_DELL; 2593 2555 2594 2556 priv->x_max = 2000; 2595 2557 priv->y_max = 1400; ··· 2602 2568 priv->set_abs_params = alps_set_abs_params_st; 2603 2569 priv->x_max = 1023; 2604 2570 priv->y_max = 767; 2571 + if (dmi_check_system(alps_dmi_has_separate_stick_buttons)) 2572 + priv->flags |= ALPS_STICK_BITS; 2605 2573 break; 2606 2574 2607 2575 case ALPS_PROTO_V3:
+1
drivers/input/touchscreen/Kconfig
··· 1006 1006 config TOUCHSCREEN_SUR40 1007 1007 tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen" 1008 1008 depends on USB && MEDIA_USB_SUPPORT && HAS_DMA 1009 + depends on VIDEO_V4L2 1009 1010 select INPUT_POLLDEV 1010 1011 select VIDEOBUF2_DMA_SG 1011 1012 help
+2 -2
drivers/input/touchscreen/lpc32xx_ts.c
··· 139 139 tsc_readl(tsc, LPC32XX_TSC_CON) & 140 140 ~LPC32XX_TSC_ADCCON_AUTO_EN); 141 141 142 - clk_disable(tsc->clk); 142 + clk_disable_unprepare(tsc->clk); 143 143 } 144 144 145 145 static void lpc32xx_setup_tsc(struct lpc32xx_tsc *tsc) 146 146 { 147 147 u32 tmp; 148 148 149 - clk_enable(tsc->clk); 149 + clk_prepare_enable(tsc->clk); 150 150 151 151 tmp = tsc_readl(tsc, LPC32XX_TSC_CON) & ~LPC32XX_TSC_ADCCON_POWER_UP; 152 152
+2 -2
drivers/iommu/amd_iommu.c
··· 1974 1974 static void clear_dte_entry(u16 devid) 1975 1975 { 1976 1976 /* remove entry from the device table seen by the hardware */ 1977 - amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; 1978 - amd_iommu_dev_table[devid].data[1] = 0; 1977 + amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; 1978 + amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; 1979 1979 1980 1980 amd_iommu_apply_erratum_63(devid); 1981 1981 }
+1
drivers/iommu/amd_iommu_types.h
··· 295 295 #define IOMMU_PTE_IR (1ULL << 61) 296 296 #define IOMMU_PTE_IW (1ULL << 62) 297 297 298 + #define DTE_FLAG_MASK (0x3ffULL << 32) 298 299 #define DTE_FLAG_IOTLB (0x01UL << 32) 299 300 #define DTE_FLAG_GV (0x01ULL << 55) 300 301 #define DTE_GLX_SHIFT (56)
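Previously the amd_iommu.c hunk above zeroed data[1] of the device table entry wholesale; the new mask keeps bits 32-41 (the persistent flag field, where for instance DTE_FLAG_IOTLB lives at bit 32) while clearing everything else. The arithmetic, checked stand-alone:

#include <stdint.h>
#include <stdio.h>

#define DTE_FLAG_MASK (0x3ffULL << 32)	/* bits 32..41 survive */

int main(void)
{
	uint64_t data1 = ~0ULL;		/* worst case: every bit set */

	data1 &= DTE_FLAG_MASK;
	printf("0x%016llx\n", (unsigned long long)data1);
	/* prints 0x000003ff00000000: flags kept, everything else cleared */
	return 0;
}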
+7
drivers/iommu/amd_iommu_v2.c
··· 516 516 goto out; 517 517 } 518 518 519 + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { 520 + /* handle_mm_fault would BUG_ON() */ 521 + up_read(&mm->mmap_sem); 522 + handle_fault_error(fault); 523 + goto out; 524 + } 525 + 519 526 ret = handle_mm_fault(mm, vma, address, write); 520 527 if (ret & VM_FAULT_ERROR) { 521 528 /* failed to service fault */
+8 -4
drivers/iommu/intel-iommu.c
··· 2115 2115 return -ENOMEM; 2116 2116 /* It is large page*/ 2117 2117 if (largepage_lvl > 1) { 2118 + unsigned long nr_superpages, end_pfn; 2119 + 2118 2120 pteval |= DMA_PTE_LARGE_PAGE; 2119 2121 lvl_pages = lvl_to_nr_pages(largepage_lvl); 2122 + 2123 + nr_superpages = sg_res / lvl_pages; 2124 + end_pfn = iov_pfn + nr_superpages * lvl_pages - 1; 2125 + 2120 2126 /* 2121 2127 * Ensure that old small page tables are 2122 - * removed to make room for superpage, 2123 - * if they exist. 2128 + * removed to make room for superpage(s). 2124 2129 */ 2125 - dma_pte_free_pagetable(domain, iov_pfn, 2126 - iov_pfn + lvl_pages - 1); 2130 + dma_pte_free_pagetable(domain, iov_pfn, end_pfn); 2127 2131 } else { 2128 2132 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; 2129 2133 }
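The old code freed page tables for only one superpage even when the scatterlist chunk spans several; the new arithmetic clears the whole range. A runnable check of the math, with illustrative values for a 2 MiB superpage level:

#include <stdio.h>

int main(void)
{
	unsigned long iov_pfn = 0x1000;	/* start of the mapping */
	unsigned long sg_res = 1536;	/* small pages in this sg chunk */
	unsigned long lvl_pages = 512;	/* small pages per superpage */

	unsigned long nr_superpages = sg_res / lvl_pages;	/* 3 */
	unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

	/* old code cleared only iov_pfn..iov_pfn+511; now all three */
	printf("free pfn 0x%lx..0x%lx\n", iov_pfn, end_pfn);
	return 0;
}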
+1 -1
drivers/md/dm-cache-metadata.c
··· 634 634 635 635 disk_super = dm_block_data(sblock); 636 636 637 + disk_super->flags = cpu_to_le32(cmd->flags); 637 638 if (mutator) 638 639 update_flags(disk_super, mutator); 639 640 640 - disk_super->flags = cpu_to_le32(cmd->flags); 641 641 disk_super->mapping_root = cpu_to_le64(cmd->root); 642 642 disk_super->hint_root = cpu_to_le64(cmd->hint_root); 643 643 disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+11 -6
drivers/md/persistent-data/dm-btree-remove.c
··· 301 301 { 302 302 int s; 303 303 uint32_t max_entries = le32_to_cpu(left->header.max_entries); 304 - unsigned target = (nr_left + nr_center + nr_right) / 3; 305 - BUG_ON(target > max_entries); 304 + unsigned total = nr_left + nr_center + nr_right; 305 + unsigned target_right = total / 3; 306 + unsigned remainder = (target_right * 3) != total; 307 + unsigned target_left = target_right + remainder; 308 + 309 + BUG_ON(target_left > max_entries); 310 + BUG_ON(target_right > max_entries); 306 311 307 312 if (nr_left < nr_right) { 308 - s = nr_left - target; 313 + s = nr_left - target_left; 309 314 310 315 if (s < 0 && nr_center < -s) { 311 316 /* not enough in central node */ ··· 321 316 } else 322 317 shift(left, center, s); 323 318 324 - shift(center, right, target - nr_right); 319 + shift(center, right, target_right - nr_right); 325 320 326 321 } else { 327 - s = target - nr_right; 322 + s = target_right - nr_right; 328 323 if (s > 0 && nr_center < s) { 329 324 /* not enough in central node */ 330 325 shift(center, right, nr_center); ··· 334 329 } else 335 330 shift(center, right, s); 336 331 337 - shift(left, center, nr_left - target); 332 + shift(left, center, nr_left - target_left); 338 333 } 339 334 340 335 *key_ptr(parent, c->index) = center->keys[0];
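When the total is not a multiple of three, the old single target let rounding push one node past max_entries; splitting into target_left and target_right pins where the remainder goes. A stand-alone demo of the new arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned nr_left = 128, nr_center = 0, nr_right = 72;
	unsigned total = nr_left + nr_center + nr_right;	/* 200 */
	unsigned target_right = total / 3;			/* 66 */
	unsigned remainder = (target_right * 3) != total;	/* 1 */
	unsigned target_left = target_right + remainder;	/* 67 */

	/* center keeps whatever the two explicit targets leave over */
	printf("left=%u center=%u right=%u (sum=%u)\n",
	       target_left, total - target_left - target_right,
	       target_right, total);
	return 0;
}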
+1 -1
drivers/md/persistent-data/dm-btree.c
··· 523 523 524 524 r = new_block(s->info, &right); 525 525 if (r < 0) { 526 - /* FIXME: put left */ 526 + unlock_block(s->info, left); 527 527 return r; 528 528 } 529 529
+9 -4
drivers/md/raid1.c
··· 2195 2195 bio_trim(wbio, sector - r1_bio->sector, sectors); 2196 2196 wbio->bi_iter.bi_sector += rdev->data_offset; 2197 2197 wbio->bi_bdev = rdev->bdev; 2198 - if (submit_bio_wait(WRITE, wbio) == 0) 2198 + if (submit_bio_wait(WRITE, wbio) < 0) 2199 2199 /* failure! */ 2200 2200 ok = rdev_set_badblocks(rdev, sector, 2201 2201 sectors, 0) ··· 2258 2258 rdev_dec_pending(conf->mirrors[m].rdev, 2259 2259 conf->mddev); 2260 2260 } 2261 - if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2262 - close_write(r1_bio); 2263 2261 if (fail) { 2264 2262 spin_lock_irq(&conf->device_lock); 2265 2263 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); 2266 2264 spin_unlock_irq(&conf->device_lock); 2267 2265 md_wakeup_thread(conf->mddev->thread); 2268 - } else 2266 + } else { 2267 + if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2268 + close_write(r1_bio); 2269 2269 raid_end_bio_io(r1_bio); 2270 + } 2270 2271 } 2271 2272 2272 2273 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) ··· 2386 2385 r1_bio = list_first_entry(&tmp, struct r1bio, 2387 2386 retry_list); 2388 2387 list_del(&r1_bio->retry_list); 2388 + if (mddev->degraded) 2389 + set_bit(R1BIO_Degraded, &r1_bio->state); 2390 + if (test_bit(R1BIO_WriteError, &r1_bio->state)) 2391 + close_write(r1_bio); 2389 2392 raid_end_bio_io(r1_bio); 2390 2393 } 2391 2394 }
+32 -7
drivers/md/raid10.c
··· 39 39 * far_copies (stored in second byte of layout) 40 40 * far_offset (stored in bit 16 of layout ) 41 41 * use_far_sets (stored in bit 17 of layout ) 42 + * use_far_sets_bugfixed (stored in bit 18 of layout ) 42 43 * 43 44 * The data to be stored is divided into chunks using chunksize. Each device 44 45 * is divided into far_copies sections. In each section, chunks are laid out ··· 1498 1497 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); 1499 1498 else 1500 1499 seq_printf(seq, " %d far-copies", conf->geo.far_copies); 1500 + if (conf->geo.far_set_size != conf->geo.raid_disks) 1501 + seq_printf(seq, " %d devices per set", conf->geo.far_set_size); 1501 1502 } 1502 1503 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, 1503 1504 conf->geo.raid_disks - mddev->degraded); ··· 2470 2467 choose_data_offset(r10_bio, rdev) + 2471 2468 (sector - r10_bio->sector)); 2472 2469 wbio->bi_bdev = rdev->bdev; 2473 - if (submit_bio_wait(WRITE, wbio) == 0) 2470 + if (submit_bio_wait(WRITE, wbio) < 0) 2474 2471 /* Failure! */ 2475 2472 ok = rdev_set_badblocks(rdev, sector, 2476 2473 sectors, 0) ··· 2657 2654 rdev_dec_pending(rdev, conf->mddev); 2658 2655 } 2659 2656 } 2660 - if (test_bit(R10BIO_WriteError, 2661 - &r10_bio->state)) 2662 - close_write(r10_bio); 2663 2657 if (fail) { 2664 2658 spin_lock_irq(&conf->device_lock); 2665 2659 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2666 2660 spin_unlock_irq(&conf->device_lock); 2667 2661 md_wakeup_thread(conf->mddev->thread); 2668 - } else 2662 + } else { 2663 + if (test_bit(R10BIO_WriteError, 2664 + &r10_bio->state)) 2665 + close_write(r10_bio); 2669 2666 raid_end_bio_io(r10_bio); 2667 + } 2670 2668 } 2671 2669 } 2672 2670 ··· 2695 2691 r10_bio = list_first_entry(&tmp, struct r10bio, 2696 2692 retry_list); 2697 2693 list_del(&r10_bio->retry_list); 2694 + if (mddev->degraded) 2695 + set_bit(R10BIO_Degraded, &r10_bio->state); 2696 + 2697 + if (test_bit(R10BIO_WriteError, 2698 + &r10_bio->state)) 2699 + close_write(r10_bio); 2698 2700 raid_end_bio_io(r10_bio); 2699 2701 } 2700 2702 } ··· 3397 3387 disks = mddev->raid_disks + mddev->delta_disks; 3398 3388 break; 3399 3389 } 3400 - if (layout >> 18) 3390 + if (layout >> 19) 3401 3391 return -1; 3402 3392 if (chunk < (PAGE_SIZE >> 9) || 3403 3393 !is_power_of_2(chunk)) ··· 3409 3399 geo->near_copies = nc; 3410 3400 geo->far_copies = fc; 3411 3401 geo->far_offset = fo; 3412 - geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks; 3402 + switch (layout >> 17) { 3403 + case 0: /* original layout. simple but not always optimal */ 3404 + geo->far_set_size = disks; 3405 + break; 3406 + case 1: /* "improved" layout which was buggy. Hopefully no-one is 3407 + * actually using this, but leave code here just in case.*/ 3408 + geo->far_set_size = disks/fc; 3409 + WARN(geo->far_set_size < fc, 3410 + "This RAID10 layout does not provide data safety - please backup and create new array\n"); 3411 + break; 3412 + case 2: /* "improved" layout fixed to match documentation */ 3413 + geo->far_set_size = fc * nc; 3414 + break; 3415 + default: /* Not a valid layout */ 3416 + return -1; 3417 + } 3413 3418 geo->chunk_mask = chunk - 1; 3414 3419 geo->chunk_shift = ffz(~chunk); 3415 3420 return nc*fc;
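Layout bit 17 selected the "improved" far layout whose set size was computed wrongly, and the new bit 18 selects the corrected geometry; the three variants give different far-set sizes for the same array. A small stand-alone computation of the switch above (disk counts illustrative):

#include <stdio.h>

static int far_set_size(int variant, int disks, int fc, int nc)
{
	switch (variant) {		/* layout bits 17-18 */
	case 0: return disks;		/* original layout */
	case 1: return disks / fc;	/* buggy "improved" layout */
	case 2: return fc * nc;		/* fixed to match documentation */
	default: return -1;		/* not a valid layout */
	}
}

int main(void)
{
	/* e.g. 6 devices, far_copies=2, near_copies=1 */
	printf("v0=%d v1=%d v2=%d\n",
	       far_set_size(0, 6, 2, 1),
	       far_set_size(1, 6, 2, 1),
	       far_set_size(2, 6, 2, 1));
	return 0;
}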
+2 -2
drivers/media/dvb-frontends/horus3a.h
··· 46 46 const struct horus3a_config *config, 47 47 struct i2c_adapter *i2c); 48 48 #else 49 - static inline struct dvb_frontend *horus3a_attach( 50 - const struct cxd2820r_config *config, 49 + static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, 50 + const struct horus3a_config *config, 51 51 struct i2c_adapter *i2c) 52 52 { 53 53 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+1 -1
drivers/media/dvb-frontends/lnbh25.h
··· 43 43 struct lnbh25_config *cfg, 44 44 struct i2c_adapter *i2c); 45 45 #else 46 - static inline dvb_frontend *lnbh25_attach( 46 + static inline struct dvb_frontend *lnbh25_attach( 47 47 struct dvb_frontend *fe, 48 48 struct lnbh25_config *cfg, 49 49 struct i2c_adapter *i2c)
+47 -26
drivers/media/dvb-frontends/m88ds3103.c
··· 18 18
19 19 static struct dvb_frontend_ops m88ds3103_ops;
20 20
21 + /* write single register with mask */
22 + static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
23 + u8 reg, u8 mask, u8 val)
24 + {
25 + int ret;
26 + u8 tmp;
27 +
28 + /* no need for read if whole reg is written */
29 + if (mask != 0xff) {
30 + ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1);
31 + if (ret)
32 + return ret;
33 +
34 + val &= mask;
35 + tmp &= ~mask;
36 + val |= tmp;
37 + }
38 +
39 + return regmap_bulk_write(dev->regmap, reg, &val, 1);
40 + }
41 +
21 42 /* write reg val table using reg addr auto increment */
22 43 static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev,
23 44 const struct m88ds3103_reg_val *tab, int tab_len)
··· 415 394 u8tmp2 = 0x00; /* 0b00 */
416 395 break;
417 396 }
418 - ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6); 397 + ret = m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6);
419 398 if (ret)
420 399 goto err;
421 - ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6); 400 + ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6);
422 401 if (ret)
423 402 goto err;
424 403 }
··· 476 455 if (ret)
477 456 goto err;
478 457 }
479 - ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08); 458 + ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08);
480 459 if (ret)
481 460 goto err;
482 461 ret = regmap_write(dev->regmap, 0xf1, 0x01);
483 462 if (ret)
484 463 goto err;
485 - ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80); 464 + ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80);
486 465 if (ret)
487 466 goto err;
488 467 }
··· 519 498 switch (dev->cfg->ts_mode) {
520 499 case M88DS3103_TS_SERIAL:
521 500 case M88DS3103_TS_SERIAL_D7:
522 - ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1); 501 + ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1);
523 502 if (ret)
524 503 goto err;
525 504 u8tmp1 = 0;
··· 588 567 if (ret)
589 568 goto err;
590 569
591 - ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1); 570 + ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1);
592 571 if (ret)
593 572 goto err;
594 573
595 - ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4); 574 + ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4);
596 575 if (ret)
597 576 goto err;
··· 646 625 dev->warm = false;
647 626
648 627 /* wake up device from sleep */
649 - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01); 628 + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01);
650 629 if (ret)
651 630 goto err;
652 - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00); 631 + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00);
653 632 if (ret)
654 633 goto err;
655 - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00); 634 + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00);
656 635 if (ret)
657 636 goto err;
··· 770 749 utmp = 0x29;
771 750 else
772 751 utmp = 0x27;
773 - ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00); 752 + ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00);
774 753 if (ret)
775 754 goto err;
776 755
777 756 /* sleep */
778 - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 757 + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
779 758 if (ret)
780 759 goto err;
781 - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 760 + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
782 761 if (ret)
783 762 goto err;
784 - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 763 + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
785 764 if (ret)
786 765 goto err;
··· 1013 992 }
1014 993
1015 994 utmp = tone << 7 | dev->cfg->envelope_mode << 5;
1016 - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 995 + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1017 996 if (ret)
1018 997 goto err;
1019 998
1020 999 utmp = 1 << 2;
1021 - ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp); 1000 + ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp);
1022 1001 if (ret)
1023 1002 goto err;
··· 1068 1047 voltage_dis ^= dev->cfg->lnb_en_pol;
1069 1048
1070 1049 utmp = voltage_dis << 1 | voltage_sel << 0;
1071 - ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp); 1050 + ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp);
1072 1051 if (ret)
1073 1052 goto err;
··· 1101 1080 }
1102 1081
1103 1082 utmp = dev->cfg->envelope_mode << 5;
1104 - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1083 + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1105 1084 if (ret)
1106 1085 goto err;
··· 1136 1115 } else {
1137 1116 dev_dbg(&client->dev, "diseqc tx timeout\n");
1138 1117
1139 - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1118 + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1140 1119 if (ret)
1141 1120 goto err;
1142 1121 }
1143 1122
1144 - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1123 + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1145 1124 if (ret)
1146 1125 goto err;
··· 1173 1152 }
1174 1153
1175 1154 utmp = dev->cfg->envelope_mode << 5;
1176 - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); 1155 + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp);
1177 1156 if (ret)
1178 1157 goto err;
··· 1215 1194 } else {
1216 1195 dev_dbg(&client->dev, "diseqc tx timeout\n");
1217 1196
1218 - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); 1197 + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40);
1219 1198 if (ret)
1220 1199 goto err;
1221 1200 }
1222 1201
1223 - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); 1202 + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80);
1224 1203 if (ret)
1225 1204 goto err;
··· 1456 1435 goto err_kfree;
1457 1436
1458 1437 /* sleep */
1459 - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); 1438 + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00);
1460 1439 if (ret)
1461 1440 goto err_kfree;
1462 - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); 1441 + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01);
1463 1442 if (ret)
1464 1443 goto err_kfree;
1465 - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); 1444 + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10);
1466 1445 if (ret)
1467 1446 goto err_kfree;
1468 1447
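The masking inside the new m88ds3103_update_bits() is the standard read-modify-write: the new bits are confined to the field, everything outside the field survives from the old value. A stand-alone check of the math:

#include <stdint.h>
#include <stdio.h>

/* same masking as m88ds3103_update_bits() above */
static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
	val &= mask;	/* new bits, confined to the field */
	old &= ~mask;	/* bits outside the field survive */
	return old | val;
}

int main(void)
{
	/* set bit 4 of a register that currently reads 0x2f */
	printf("0x%02x\n", update_bits(0x2f, 0x10, 0x10));	/* -> 0x3f */
	return 0;
}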
+4
drivers/media/dvb-frontends/si2168.c
··· 502 502 /* firmware is in the new format */ 503 503 for (remaining = fw->size; remaining > 0; remaining -= 17) { 504 504 len = fw->data[fw->size - remaining]; 505 + if (len > SI2168_ARGLEN) { 506 + ret = -EINVAL; 507 + break; 508 + } 505 509 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 506 510 cmd.wlen = len; 507 511 cmd.rlen = 1;
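Each record of the new-format firmware is 17 bytes, a length byte followed by command bytes, and the length was previously trusted before the memcpy into cmd.args. A sketch of the validated parse (the capacity value is an assumption here; the si2157 hunk further down adds the same check to its twin loop):

#include <stdio.h>
#include <string.h>

#define ARGLEN 30	/* assumed cmd.args capacity; see the driver header */

/* Reject bad lengths before copying so a corrupt or malicious image
 * cannot overrun the command buffer or read past the record. */
static int parse_record(const unsigned char rec[17], unsigned char *args)
{
	unsigned len = rec[0];

	if (len > ARGLEN || len > 16)
		return -1;
	memcpy(args, rec + 1, len);
	return (int)len;
}

int main(void)
{
	unsigned char rec[17] = { 5, 1, 2, 3, 4, 5 };
	unsigned char args[ARGLEN];

	printf("%d\n", parse_record(rec, args));	/* prints 5 */
	return 0;
}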
+4 -8
drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
··· 80 80 u16 reg; 81 81 unsigned long flags; 82 82 83 - if (!spi) { 84 - dev_dbg(&spi->master->dev, 85 - "%s(): SPI not initialized\n", __func__); 83 + if (!spi) 86 84 return IRQ_NONE; 87 - } 85 + 88 86 spin_lock_irqsave(&spi->lock, flags); 89 87 reg = readw(&spi->regs->control_stat); 90 88 if (!(reg & NETUP_SPI_CTRL_IRQ)) { ··· 232 234 unsigned long flags; 233 235 struct netup_spi *spi = ndev->spi; 234 236 235 - if (!spi) { 236 - dev_dbg(&spi->master->dev, 237 - "%s(): SPI not initialized\n", __func__); 237 + if (!spi) 238 238 return; 239 - } 239 + 240 240 spin_lock_irqsave(&spi->lock, flags); 241 241 reg = readw(&spi->regs->control_stat); 242 242 writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
+3 -4
drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
··· 1097 1097 Elf32_Ehdr *ehdr; 1098 1098 Elf32_Phdr *phdr; 1099 1099 u8 __iomem *dst; 1100 - int err, i; 1100 + int err = 0, i; 1101 1101 1102 1102 if (!fw || !context) 1103 1103 return -EINVAL; ··· 1106 1106 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff); 1107 1107 1108 1108 /* go through the available ELF segments */ 1109 - for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) { 1109 + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 1110 1110 1111 1111 /* Only consider LOAD segments */ 1112 1112 if (phdr->p_type != PT_LOAD) ··· 1192 1192 1193 1193 static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) 1194 1194 { 1195 - int ret; 1196 1195 int err; 1197 1196 1198 1197 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); ··· 1206 1207 if (err) { 1207 1208 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); 1208 1209 complete_all(&fei->fw_ack); 1209 - return ret; 1210 + return err; 1210 1211 } 1211 1212 1212 1213 return 0;
+1 -1
drivers/media/rc/ir-hix5hd2.c
··· 257 257 goto clkerr; 258 258 259 259 if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt, 260 - IRQF_NO_SUSPEND, pdev->name, priv) < 0) { 260 + 0, pdev->name, priv) < 0) { 261 261 dev_err(dev, "IRQ %d register failed\n", priv->irq); 262 262 ret = -EINVAL; 263 263 goto regerr;
+4
drivers/media/tuners/si2157.c
··· 166 166 167 167 for (remaining = fw->size; remaining > 0; remaining -= 17) { 168 168 len = fw->data[fw->size - remaining]; 169 + if (len > SI2157_ARGLEN) { 170 + dev_err(&client->dev, "Bad firmware length\n"); 171 + goto err_release_firmware; 172 + } 169 173 memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); 170 174 cmd.wlen = len; 171 175 cmd.rlen = 1;
+13 -2
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
··· 34 34 unsigned int pipe; 35 35 u8 requesttype; 36 36 37 + mutex_lock(&d->usb_mutex); 38 + 39 + if (req->size > sizeof(dev->buf)) { 40 + dev_err(&d->intf->dev, "too large message %u\n", req->size); 41 + ret = -EINVAL; 42 + goto err_mutex_unlock; 43 + } 44 + 37 45 if (req->index & CMD_WR_FLAG) { 38 46 /* write */ 39 47 memcpy(dev->buf, req->data, req->size); ··· 58 50 dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value, 59 51 req->index, dev->buf, req->size); 60 52 if (ret < 0) 61 - goto err; 53 + goto err_mutex_unlock; 62 54 63 55 /* read request, copy returned data to return buf */ 64 56 if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) 65 57 memcpy(req->data, dev->buf, req->size); 66 58 59 + mutex_unlock(&d->usb_mutex); 60 + 67 61 return 0; 68 - err: 62 + err_mutex_unlock: 63 + mutex_unlock(&d->usb_mutex); 69 64 dev_dbg(&d->intf->dev, "failed=%d\n", ret); 70 65 return ret; 71 66 }
+1 -1
drivers/media/usb/dvb-usb-v2/rtl28xxu.h
··· 71 71 72 72 73 73 struct rtl28xxu_dev { 74 - u8 buf[28]; 74 + u8 buf[128]; 75 75 u8 chip_id; 76 76 u8 tuner; 77 77 char *tuner_name;
+1 -1
drivers/media/v4l2-core/Kconfig
··· 47 47 # Used by LED subsystem flash drivers 48 48 config V4L2_FLASH_LED_CLASS 49 49 tristate "V4L2 flash API for LED flash class devices" 50 - depends on VIDEO_V4L2_SUBDEV_API 50 + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 51 51 depends on LEDS_CLASS_FLASH 52 52 ---help--- 53 53 Say Y here to enable V4L2 flash API support for LED flash
+9 -3
drivers/memory/Kconfig
··· 58 58 memory drives like NOR, NAND, OneNAND, SRAM. 59 59 60 60 config OMAP_GPMC_DEBUG 61 - bool 61 + bool "Enable GPMC debug output and skip reset of GPMC during init" 62 62 depends on OMAP_GPMC 63 63 help 64 64 Enables verbose debugging mostly to decode the bootloader provided 65 - timings. Enable this during development to configure devices 66 - connected to the GPMC bus. 65 + timings. To preserve the bootloader provided timings, the reset 66 + of GPMC is skipped during init. Enable this during development to 67 + configure devices connected to the GPMC bus. 68 + 69 + NOTE: In addition to matching the register setup with the bootloader 70 + you also need to match the GPMC FCLK frequency used by the 71 + bootloader or else the GPMC timings won't be identical with the 72 + bootloader timings. 67 73 68 74 config MVEBU_DEVBUS 69 75 bool "Marvell EBU Device Bus Controller"
+1 -1
drivers/memory/omap-gpmc.c
··· 696 696 int div; 697 697 u32 l; 698 698 699 - gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings"); 700 699 div = gpmc_calc_divider(t->sync_clk); 701 700 if (div < 0) 702 701 return div; ··· 1987 1988 if (ret < 0) 1988 1989 goto err; 1989 1990 1991 + gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); 1990 1992 ret = gpmc_cs_program_settings(cs, &gpmc_s); 1991 1993 if (ret < 0) 1992 1994 goto err;
+3 -6
drivers/mmc/card/mmc_test.c
··· 2263 2263 /* 2264 2264 * eMMC hardware reset. 2265 2265 */ 2266 - static int mmc_test_hw_reset(struct mmc_test_card *test) 2266 + static int mmc_test_reset(struct mmc_test_card *test) 2267 2267 { 2268 2268 struct mmc_card *card = test->card; 2269 2269 struct mmc_host *host = card->host; 2270 2270 int err; 2271 - 2272 - if (!mmc_card_mmc(card) || !mmc_can_reset(card)) 2273 - return RESULT_UNSUP_CARD; 2274 2271 2275 2272 err = mmc_hw_reset(host); 2276 2273 if (!err) ··· 2602 2605 }, 2603 2606 2604 2607 { 2605 - .name = "eMMC hardware reset", 2606 - .run = mmc_test_hw_reset, 2608 + .name = "Reset test", 2609 + .run = mmc_test_reset, 2607 2610 }, 2608 2611 }; 2609 2612
-7
drivers/mmc/core/mmc.c
··· 1924 1924 static int mmc_reset(struct mmc_host *host) 1925 1925 { 1926 1926 struct mmc_card *card = host->card; 1927 - u32 status; 1928 1927 1929 1928 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) 1930 1929 return -EOPNOTSUPP; ··· 1935 1936 mmc_set_clock(host, host->f_init); 1936 1937 1937 1938 host->ops->hw_reset(host); 1938 - 1939 - /* If the reset has happened, then a status command will fail */ 1940 - if (!mmc_send_status(card, &status)) { 1941 - mmc_host_clk_release(host); 1942 - return -ENOSYS; 1943 - } 1944 1939 1945 1940 /* Set initial state and call mmc_set_ios */ 1946 1941 mmc_set_initial_state(host);
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 1595 1595 packet->rdesc_count, 1); 1596 1596 1597 1597 /* Make sure ownership is written to the descriptor */ 1598 - wmb(); 1598 + smp_wmb(); 1599 1599 1600 1600 ring->cur = cur_index + 1; 1601 1601 if (!packet->skb->xmit_more ||
+4
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1820 1820 return 0; 1821 1821 1822 1822 cur = ring->cur; 1823 + 1824 + /* Be sure we get ring->cur before accessing descriptor data */ 1825 + smp_rmb(); 1826 + 1823 1827 txq = netdev_get_tx_queue(netdev, channel->queue_index); 1824 1828 1825 1829 while ((processed < XGBE_TX_DESC_MAX_PROC) &&
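This smp_rmb() pairs with the smp_wmb() in the xgbe-dev.c hunk above: the transmit path publishes descriptor contents before advancing ring->cur, and the completion path must read ring->cur before touching descriptor data. This is CPU-to-CPU ordering, not DMA ordering, which is why the smp_ variants suffice. A loose C11 analogue of the pairing, with fences standing in for the kernel barriers:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t desc_data;
static atomic_uint ring_cur;

void producer(uint32_t d, unsigned next)	/* cf. xgbe-dev.c xmit */
{
	desc_data = d;				/* write descriptor first */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&ring_cur, next, memory_order_relaxed);
}

uint32_t consumer(void)				/* cf. xgbe-drv.c tx poll */
{
	unsigned cur = atomic_load_explicit(&ring_cur, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	return cur ? desc_data : 0;		/* descriptor read after index */
}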
+29 -13
drivers/net/ethernet/cavium/thunder/nic_main.c
··· 22 22 23 23 struct nicpf { 24 24 struct pci_dev *pdev; 25 - u8 rev_id; 26 25 u8 node; 27 26 unsigned int flags; 28 27 u8 num_vf_en; /* No of VF enabled */ ··· 43 44 u8 duplex[MAX_LMAC]; 44 45 u32 speed[MAX_LMAC]; 45 46 u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; 47 + u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; 46 48 u16 rss_ind_tbl_size; 47 49 bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; 48 50 ··· 53 53 struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; 54 54 bool irq_allocated[NIC_PF_MSIX_VECTORS]; 55 55 }; 56 + 57 + static inline bool pass1_silicon(struct nicpf *nic) 58 + { 59 + return nic->pdev->revision < 8; 60 + } 56 61 57 62 /* Supported devices */ 58 63 static const struct pci_device_id nic_id_table[] = { ··· 122 117 * when PF writes to MBOX(1), in next revisions when 123 118 * PF writes to MBOX(0) 124 119 */ 125 - if (nic->rev_id == 0) { 120 + if (pass1_silicon(nic)) { 126 121 /* see the comment for nic_reg_write()/nic_reg_read() 127 122 * functions above 128 123 */ ··· 310 305 { 311 306 int i; 312 307 313 - /* Reset NIC, in case the driver is repeatedly inserted and removed */ 314 - nic_reg_write(nic, NIC_PF_SOFT_RESET, 1); 315 - 316 308 /* Enable NIC HW block */ 317 309 nic_reg_write(nic, NIC_PF_CFG, 0x3); 318 310 ··· 397 395 padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ 398 396 399 397 /* Leave RSS_SIZE as '0' to disable RSS */ 400 - nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), 401 - (vnic << 24) | (padd << 16) | (rssi_base + rssi)); 398 + if (pass1_silicon(nic)) { 399 + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), 400 + (vnic << 24) | (padd << 16) | 401 + (rssi_base + rssi)); 402 + } else { 403 + /* Set MPI_ALG to '0' to disable MCAM parsing */ 404 + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), 405 + (padd << 16)); 406 + /* MPI index is same as CPI if MPI_ALG is not enabled */ 407 + nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3), 408 + (vnic << 24) | (rssi_base + rssi)); 409 + } 402 410 403 411 if ((rssi + 1) >= cfg->rq_cnt) 404 412 continue; ··· 421 409 rssi = ((cpi - cpi_base) & 0x38) >> 3; 422 410 } 423 411 nic->cpi_base[cfg->vf_id] = cpi_base; 412 + nic->rssi_base[cfg->vf_id] = rssi_base; 424 413 } 425 414 426 415 /* Responsds to VF with its RSS indirection table size */ ··· 447 434 { 448 435 u8 qset, idx = 0; 449 436 u64 cpi_cfg, cpi_base, rssi_base, rssi; 437 + u64 idx_addr; 450 438 451 - cpi_base = nic->cpi_base[cfg->vf_id]; 452 - cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3)); 453 - rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset; 439 + rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset; 454 440 455 441 rssi = rssi_base; 456 442 qset = cfg->vf_id; ··· 466 454 idx++; 467 455 } 468 456 457 + cpi_base = nic->cpi_base[cfg->vf_id]; 458 + if (pass1_silicon(nic)) 459 + idx_addr = NIC_PF_CPI_0_2047_CFG; 460 + else 461 + idx_addr = NIC_PF_MPI_0_2047_CFG; 462 + cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3)); 469 463 cpi_cfg &= ~(0xFULL << 20); 470 464 cpi_cfg |= (cfg->hash_bits << 20); 471 - nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); 465 + nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg); 472 466 } 473 467 474 468 /* 4 level transmit side scheduler configutation ··· 1018 1000 err = -ENOMEM; 1019 1001 goto err_release_regions; 1020 1002 } 1021 - 1022 - pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); 1023 1003 1024 1004 nic->node = nic_get_node_id(pdev); 1025 1005
+4
drivers/net/ethernet/cavium/thunder/nic_reg.h
··· 85 85 #define NIC_PF_ECC3_DBE_INT_W1S (0x2708) 86 86 #define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) 87 87 #define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) 88 + #define NIC_PF_MCAM_0_191_ENA (0x100000) 89 + #define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000) 90 + #define NIC_PF_MCAM_CTRL (0x120000) 88 91 #define NIC_PF_CPI_0_2047_CFG (0x200000) 92 + #define NIC_PF_MPI_0_2047_CFG (0x210000) 89 93 #define NIC_PF_RSSI_0_4097_RQ (0x220000) 90 94 #define NIC_PF_LMAC_0_7_CFG (0x240000) 91 95 #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+1 -1
drivers/net/ethernet/cavium/thunder/nicvf_main.c
··· 29 29 static const struct pci_device_id nicvf_id_table[] = { 30 30 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 31 31 PCI_DEVICE_ID_THUNDER_NIC_VF, 32 - PCI_VENDOR_ID_CAVIUM, 0xA11E) }, 32 + PCI_VENDOR_ID_CAVIUM, 0xA134) }, 33 33 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 34 34 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, 35 35 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+3 -1
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 977 977 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); 978 978 bgx->lmac[lmac].lmacid = lmac; 979 979 lmac++; 980 - if (lmac == MAX_LMAC_PER_BGX) 980 + if (lmac == MAX_LMAC_PER_BGX) { 981 + of_node_put(np_child); 981 982 break; 983 + } 982 984 } 983 985 return 0; 984 986 }
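This is the first of several identical leak fixes in this merge (mv643xx_eth, netcp_ethss, mdio-mux-mmioreg, mdio-mux and ath6kl below follow the same pattern): the OF child iterators take a reference on each node they yield and drop it only when the loop advances, so any early exit must drop the current node's reference by hand. The shape of the rule, as a kernel-context sketch with a hypothetical stop condition:

#include <linux/of.h>

#define MAX_CHILDREN 4	/* hypothetical limit, stands in for MAX_LMAC_PER_BGX */

static void scan_children(struct device_node *parent)
{
	struct device_node *child;
	int count = 0;

	for_each_available_child_of_node(parent, child) {
		if (++count >= MAX_CHILDREN) {
			of_node_put(child);	/* balance the iterator's get */
			break;
		}
		/* normal processing; the next iteration puts 'child' itself */
	}
}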
+3 -5
drivers/net/ethernet/freescale/gianfar.c
··· 341 341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) 342 342 priv->uses_rxfcb = 1; 343 343 344 - if (priv->hwts_rx_en) 344 + if (priv->hwts_rx_en || priv->rx_filer_enable) 345 345 priv->uses_rxfcb = 1; 346 346 } 347 347 ··· 351 351 u32 rctrl = 0; 352 352 353 353 if (priv->rx_filer_enable) { 354 - rctrl |= RCTRL_FILREN; 354 + rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; 355 355 /* Program the RIR0 reg with the required distribution */ 356 356 if (priv->poll_mode == GFAR_SQ_POLLING) 357 357 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0); ··· 3595 3595 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 3596 3596 } 3597 3597 if (events & IEVENT_BSY) { 3598 - dev->stats.rx_errors++; 3598 + dev->stats.rx_over_errors++; 3599 3599 atomic64_inc(&priv->extra_stats.rx_bsy); 3600 - 3601 - gfar_receive(irq, grp_id); 3602 3600 3603 3601 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 3604 3602 gfar_read(&regs->rstat));
+2 -2
drivers/net/ethernet/freescale/gianfar_ethtool.c
··· 695 695 u32 fcr = 0x0, fpr = FPR_FILER_MASK; 696 696 697 697 if (ethflow & RXH_L2DA) { 698 - fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 698 + fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH | 699 699 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 700 700 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 701 701 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; 702 702 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 703 703 priv->cur_filer_idx = priv->cur_filer_idx - 1; 704 704 705 - fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 705 + fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH | 706 706 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 707 707 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; 708 708 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+3 -1
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2817 2817 2818 2818 for_each_available_child_of_node(np, pnp) { 2819 2819 ret = mv643xx_eth_shared_of_add_port(pdev, pnp); 2820 - if (ret) 2820 + if (ret) { 2821 + of_node_put(pnp); 2821 2822 return ret; 2823 + } 2822 2824 } 2823 2825 return 0; 2824 2826 }
+1 -1
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 2398 2398 } 2399 2399 } 2400 2400 2401 - memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); 2401 + memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe)); 2402 2402 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; 2403 2403 INIT_WORK(&priv->mfunc.master.comm_work, 2404 2404 mlx4_master_comm_channel);
+2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 964 964 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; 965 965 else if (vlan_proto == ETH_P_8021Q) 966 966 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; 967 + else 968 + tx_desc->ctrl.ins_vlan = 0; 967 969 968 970 tx_desc->ctrl.fence_size = real_size; 969 971
+1 -1
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 196 196 return; 197 197 } 198 198 199 - memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); 199 + memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); 200 200 s_eqe->slave_id = slave; 201 201 /* ensure all information is written before setting the ownersip bit */ 202 202 dma_wmb();
+11 -13
drivers/net/ethernet/nvidia/forcedeth.c
··· 4076 4076 struct fe_priv *np = netdev_priv(dev); 4077 4077 u8 __iomem *base = get_hwbase(dev); 4078 4078 u32 mask = 0; 4079 + unsigned long flags; 4080 + unsigned int irq = 0; 4079 4081 4080 4082 /* 4081 4083 * First disable irq(s) and then ··· 4087 4085 4088 4086 if (!using_multi_irqs(dev)) { 4089 4087 if (np->msi_flags & NV_MSI_X_ENABLED) 4090 - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4088 + irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; 4091 4089 else 4092 - disable_irq_lockdep(np->pci_dev->irq); 4090 + irq = np->pci_dev->irq; 4093 4091 mask = np->irqmask; 4094 4092 } else { 4095 4093 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4096 - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4094 + irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; 4097 4095 mask |= NVREG_IRQ_RX_ALL; 4098 4096 } 4099 4097 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4100 - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4098 + irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; 4101 4099 mask |= NVREG_IRQ_TX_ALL; 4102 4100 } 4103 4101 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4104 - disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4102 + irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; 4105 4103 mask |= NVREG_IRQ_OTHER; 4106 4104 } 4107 4105 } 4108 - /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ 4106 + 4107 + disable_irq_nosync_lockdep_irqsave(irq, &flags); 4108 + synchronize_irq(irq); 4109 4109 4110 4110 if (np->recover_error) { 4111 4111 np->recover_error = 0; ··· 4160 4156 nv_nic_irq_optimized(0, dev); 4161 4157 else 4162 4158 nv_nic_irq(0, dev); 4163 - if (np->msi_flags & NV_MSI_X_ENABLED) 4164 - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); 4165 - else 4166 - enable_irq_lockdep(np->pci_dev->irq); 4167 4159 } else { 4168 4160 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 4169 4161 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; 4170 4162 nv_nic_irq_rx(0, dev); 4171 - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 4172 4163 } 4173 4164 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { 4174 4165 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; 4175 4166 nv_nic_irq_tx(0, dev); 4176 - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); 4177 4167 } 4178 4168 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { 4179 4169 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; 4180 4170 nv_nic_irq_other(0, dev); 4181 - enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); 4182 4171 } 4183 4172 } 4184 4173 4174 + enable_irq_lockdep_irqrestore(irq, &flags); 4185 4175 } 4186 4176 4187 4177 #ifdef CONFIG_NET_POLL_CONTROLLER
+7 -7
drivers/net/ethernet/renesas/sh_eth.c
··· 1127 1127 struct sh_eth_txdesc *txdesc = NULL; 1128 1128 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1129 1129 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1130 - int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1130 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1131 1131 dma_addr_t dma_addr; 1132 1132 1133 1133 mdp->cur_rx = 0; ··· 1148 1148 1149 1149 /* RX descriptor */ 1150 1150 rxdesc = &mdp->rx_ring[i]; 1151 - /* The size of the buffer is a multiple of 16 bytes. */ 1152 - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1151 + /* The size of the buffer is a multiple of 32 bytes. */ 1152 + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1153 1153 dma_addr = dma_map_single(&ndev->dev, skb->data, 1154 1154 rxdesc->buffer_length, 1155 1155 DMA_FROM_DEVICE); ··· 1450 1450 struct sk_buff *skb; 1451 1451 u16 pkt_len = 0; 1452 1452 u32 desc_status; 1453 - int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; 1453 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; 1454 1454 dma_addr_t dma_addr; 1455 1455 1456 1456 boguscnt = min(boguscnt, *quota); ··· 1506 1506 if (mdp->cd->rpadir) 1507 1507 skb_reserve(skb, NET_IP_ALIGN); 1508 1508 dma_unmap_single(&ndev->dev, rxdesc->addr, 1509 - ALIGN(mdp->rx_buf_sz, 16), 1509 + ALIGN(mdp->rx_buf_sz, 32), 1510 1510 DMA_FROM_DEVICE); 1511 1511 skb_put(skb, pkt_len); 1512 1512 skb->protocol = eth_type_trans(skb, ndev); ··· 1524 1524 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1525 1525 entry = mdp->dirty_rx % mdp->num_rx_ring; 1526 1526 rxdesc = &mdp->rx_ring[entry]; 1527 - /* The size of the buffer is 16 byte boundary. */ 1528 - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1527 + /* The size of the buffer is 32 byte boundary. */ 1528 + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32); 1529 1529 1530 1530 if (mdp->rx_skbuff[entry] == NULL) { 1531 1531 skb = netdev_alloc_skb(ndev, skbuff_size);
+6 -2
drivers/net/ethernet/ti/netcp_ethss.c
··· 2637 2637 mac_phy_link = true; 2638 2638 2639 2639 slave->open = true; 2640 - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2640 + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { 2641 + of_node_put(port); 2641 2642 break; 2643 + } 2642 2644 } 2643 2645 2644 2646 /* of_phy_connect() is needed only for MAC-PHY interface */ ··· 3139 3137 continue; 3140 3138 } 3141 3139 gbe_dev->num_slaves++; 3142 - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 3140 + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { 3141 + of_node_put(interface); 3143 3142 break; 3143 + } 3144 3144 } 3145 3145 of_node_put(interfaces); 3146 3146
+2
drivers/net/phy/mdio-mux-mmioreg.c
··· 113 113 if (!iprop || len != sizeof(uint32_t)) { 114 114 dev_err(&pdev->dev, "mdio-mux child node %s is " 115 115 "missing a 'reg' property\n", np2->full_name); 116 + of_node_put(np2); 116 117 return -ENODEV; 117 118 } 118 119 if (be32_to_cpup(iprop) & ~s->mask) { 119 120 dev_err(&pdev->dev, "mdio-mux child node %s has " 120 121 "a 'reg' value with unmasked bits\n", 121 122 np2->full_name); 123 + of_node_put(np2); 122 124 return -ENODEV; 123 125 } 124 126 }
+1
drivers/net/phy/mdio-mux.c
··· 144 144 dev_err(dev, 145 145 "Error: Failed to allocate memory for child\n"); 146 146 ret_val = -ENOMEM; 147 + of_node_put(child_bus_node); 147 148 break; 148 149 } 149 150 cb->bus_number = v;
+1
drivers/net/wireless/ath/ath6kl/init.c
··· 715 715 board_filename, ret); 716 716 continue; 717 717 } 718 + of_node_put(node); 718 719 return true; 719 720 } 720 721 return false;
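The mdio-mux-mmioreg, mdio-mux and ath6kl hunks above all plug the same leak: OF iterators and getters such as for_each_child_of_node(), for_each_available_child_of_node() and of_find_node_by_name() hand back each device_node with an elevated refcount, and the iterators only drop it when they advance to the next node, so an early break or return has to call of_node_put() itself. A minimal sketch of the rule, with a hypothetical match() predicate standing in for the drivers' real exit conditions:

    struct device_node *child;

    for_each_child_of_node(parent, child) {
            if (match(child)) {
                    /* Leaving the loop early keeps the reference the
                     * iterator took on 'child'; drop it explicitly. */
                    of_node_put(child);
                    break;
            }
    }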
+1 -1
drivers/pci/pci-sysfs.c
··· 216 216 if (ret) 217 217 return ret; 218 218 219 - if (!node_online(node)) 219 + if (node >= MAX_NUMNODES || !node_online(node)) 220 220 return -EINVAL; 221 221 222 222 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+8 -2
drivers/perf/arm_pmu.c
··· 823 823 } 824 824 825 825 /* Now look up the logical CPU number */ 826 - for_each_possible_cpu(cpu) 827 - if (dn == of_cpu_device_node_get(cpu)) 826 + for_each_possible_cpu(cpu) { 827 + struct device_node *cpu_dn; 828 + 829 + cpu_dn = of_cpu_device_node_get(cpu); 830 + of_node_put(cpu_dn); 831 + 832 + if (dn == cpu_dn) 828 833 break; 834 + } 829 835 830 836 if (cpu >= nr_cpu_ids) { 831 837 pr_warn("Failed to find logical CPU for %s\n",
+1 -1
drivers/staging/iio/accel/sca3000_ring.c
··· 116 116 if (ret) 117 117 goto error_ret; 118 118 119 - for (i = 0; i < num_read; i++) 119 + for (i = 0; i < num_read / sizeof(u16); i++) 120 120 *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); 121 121 122 122 if (copy_to_user(buf, rx, num_read))
+5 -4
drivers/staging/iio/adc/mxs-lradc.c
··· 915 915 case IIO_CHAN_INFO_OFFSET: 916 916 if (chan->type == IIO_TEMP) { 917 917 /* The calculated value from the ADC is in Kelvin, we 918 - * want Celsius for hwmon so the offset is 919 - * -272.15 * scale 918 + * want Celsius for hwmon so the offset is -273.15 919 + * The offset is applied before scaling so it is 920 + * actually -273.15 * 4 / 1.012 = -1079.644268 920 921 */ 921 - *val = -1075; 922 - *val2 = 691699; 922 + *val = -1079; 923 + *val2 = 644268; 923 924 924 925 return IIO_VAL_INT_PLUS_MICRO; 925 926 }
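Checking the corrected arithmetic against the code above: the raw reading is scaled by 4 / 1.012 before the offset is applied, so the Celsius offset becomes -273.15 * 4 / 1.012 = -1079.644268, which the IIO_VAL_INT_PLUS_MICRO return encodes exactly as *val = -1079 and *val2 = 644268.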
+1 -1
drivers/thermal/samsung/exynos_tmu.c
··· 932 932 933 933 if (data->soc == SOC_ARCH_EXYNOS5260) 934 934 emul_con = EXYNOS5260_EMUL_CON; 935 - if (data->soc == SOC_ARCH_EXYNOS5433) 935 + else if (data->soc == SOC_ARCH_EXYNOS5433) 936 936 emul_con = EXYNOS5433_TMU_EMUL_CON; 937 937 else if (data->soc == SOC_ARCH_EXYNOS7) 938 938 emul_con = EXYNOS7_TMU_REG_EMUL_CON;
-4
drivers/tty/serial/8250/8250_dma.c
··· 80 80 return 0; 81 81 82 82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 83 - if (dma->tx_size < p->port.fifosize) { 84 - ret = -EINVAL; 85 - goto err; 86 - } 87 83 88 84 desc = dmaengine_prep_slave_single(dma->txchan, 89 85 dma->tx_addr + xmit->tail,
+1
drivers/usb/host/xhci-pci.c
··· 147 147 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 148 148 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 149 149 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 150 + xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 150 151 } 151 152 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 152 153 (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+25 -5
drivers/usb/host/xhci-ring.c
··· 2191 2191 } 2192 2192 /* Fast path - was this the last TRB in the TD for this URB? */ 2193 2193 } else if (event_trb == td->last_trb) { 2194 + if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX) 2195 + return finish_td(xhci, td, event_trb, event, ep, 2196 + status, false); 2197 + 2194 2198 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { 2195 2199 td->urb->actual_length = 2196 2200 td->urb->transfer_buffer_length - ··· 2246 2242 td->urb->actual_length += 2247 2243 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - 2248 2244 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 2245 + 2246 + if (trb_comp_code == COMP_SHORT_TX) { 2247 + xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n"); 2248 + td->urb_length_set = true; 2249 + return 0; 2250 + } 2249 2251 } 2250 2252 2251 2253 return finish_td(xhci, td, event_trb, event, ep, status, false); ··· 2284 2274 u32 trb_comp_code; 2285 2275 int ret = 0; 2286 2276 int td_num = 0; 2277 + bool handling_skipped_tds = false; 2287 2278 2288 2279 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 2289 2280 xdev = xhci->devs[slot_id]; ··· 2420 2409 */ 2421 2410 ep->skip = true; 2422 2411 xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); 2412 + goto cleanup; 2413 + case COMP_PING_ERR: 2414 + ep->skip = true; 2415 + xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); 2423 2416 goto cleanup; 2424 2417 default: 2425 2418 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { ··· 2561 2546 ep, &status); 2562 2547 2563 2548 cleanup: 2549 + 2550 + 2551 + handling_skipped_tds = ep->skip && 2552 + trb_comp_code != COMP_MISSED_INT && 2553 + trb_comp_code != COMP_PING_ERR; 2554 + 2564 2555 /* 2565 - * Do not update event ring dequeue pointer if ep->skip is set. 2566 - * Will roll back to continue process missed tds. 2556 + * Do not update event ring dequeue pointer if we're in a loop 2557 + * processing missed tds. 2567 2558 */ 2568 - if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { 2559 + if (!handling_skipped_tds) 2569 2560 inc_deq(xhci, xhci->event_ring); 2570 - } 2571 2561 2572 2562 if (ret) { 2573 2563 urb = td->urb; ··· 2607 2587 * Process them as short transfer until reach the td pointed by 2608 2588 * the event. 2609 2589 */ 2610 - } while (ep->skip && trb_comp_code != COMP_MISSED_INT); 2590 + } while (handling_skipped_tds); 2611 2591 2612 2592 return 0; 2613 2593 }
+7
drivers/vhost/vhost.h
··· 183 183 return vq->acked_features & (1ULL << bit); 184 184 } 185 185 186 + #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY 186 187 static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) 187 188 { 188 189 return vq->is_le; 189 190 } 191 + #else 192 + static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) 193 + { 194 + return virtio_legacy_is_little_endian() || vq->is_le; 195 + } 196 + #endif 190 197 191 198 /* Memory accessors */ 192 199 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
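One reading of the new #else branch (an interpretation, not a claim from the patch itself): virtio_legacy_is_little_endian() is a compile-time constant, so on little-endian builds without CONFIG_VHOST_CROSS_ENDIAN_LEGACY the || short-circuits at compile time, the vq->is_le load disappears, and accessors like vhost16_to_cpu() below can collapse back to plain loads.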
+1
drivers/video/console/fbcon.c
··· 1093 1093 con_copy_unimap(vc, svc); 1094 1094 1095 1095 ops = info->fbcon_par; 1096 + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); 1096 1097 p->con_rotate = initial_rotation; 1097 1098 set_blitting_type(vc, info); 1098 1099
+1 -1
fs/btrfs/file.c
··· 2584 2584 alloc_start); 2585 2585 if (ret) 2586 2586 goto out; 2587 - } else { 2587 + } else if (offset + len > inode->i_size) { 2588 2588 /* 2589 2589 * If we are fallocating from the end of the file onward we 2590 2590 * need to zero out the end of the page if i_size lands in the
+4 -1
fs/btrfs/ioctl.c
··· 4641 4641 4642 4642 if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { 4643 4643 ret = -EINVAL; 4644 - goto out_bargs; 4644 + goto out_bctl; 4645 4645 } 4646 4646 4647 4647 do_balance: ··· 4655 4655 need_unlock = false; 4656 4656 4657 4657 ret = btrfs_balance(bctl, bargs); 4658 + bctl = NULL; 4658 4659 4659 4660 if (arg) { 4660 4661 if (copy_to_user(arg, bargs, sizeof(*bargs))) 4661 4662 ret = -EFAULT; 4662 4663 } 4663 4664 4665 + out_bctl: 4666 + kfree(bctl); 4664 4667 out_bargs: 4665 4668 kfree(bargs); 4666 4669 out_unlock:
+24 -11
fs/fs-writeback.c
··· 778 778 struct wb_writeback_work *base_work, 779 779 bool skip_if_busy) 780 780 { 781 - int next_memcg_id = 0; 782 - struct bdi_writeback *wb; 783 - struct wb_iter iter; 781 + struct bdi_writeback *last_wb = NULL; 782 + struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list, 783 + struct bdi_writeback, bdi_node); 784 784 785 785 might_sleep(); 786 786 restart: 787 787 rcu_read_lock(); 788 - bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) { 788 + list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { 789 789 DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); 790 790 struct wb_writeback_work fallback_work; 791 791 struct wb_writeback_work *work; 792 792 long nr_pages; 793 + 794 + if (last_wb) { 795 + wb_put(last_wb); 796 + last_wb = NULL; 797 + } 793 798 794 799 /* SYNC_ALL writes out I_DIRTY_TIME too */ 795 800 if (!wb_has_dirty_io(wb) && ··· 824 819 825 820 wb_queue_work(wb, work); 826 821 827 - next_memcg_id = wb->memcg_css->id + 1; 822 + /* 823 + * Pin @wb so that it stays on @bdi->wb_list. This allows 824 + * continuing iteration from @wb after dropping and 825 + * regrabbing rcu read lock. 826 + */ 827 + wb_get(wb); 828 + last_wb = wb; 829 + 828 830 rcu_read_unlock(); 829 831 wb_wait_for_completion(bdi, &fallback_work_done); 830 832 goto restart; 831 833 } 832 834 rcu_read_unlock(); 835 + 836 + if (last_wb) 837 + wb_put(last_wb); 833 838 } 834 839 835 840 #else /* CONFIG_CGROUP_WRITEBACK */ ··· 1872 1857 rcu_read_lock(); 1873 1858 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1874 1859 struct bdi_writeback *wb; 1875 - struct wb_iter iter; 1876 1860 1877 1861 if (!bdi_has_dirty_io(bdi)) 1878 1862 continue; 1879 1863 1880 - bdi_for_each_wb(wb, bdi, &iter, 0) 1864 + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 1881 1865 wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), 1882 1866 false, reason); 1883 1867 } ··· 1908 1894 rcu_read_lock(); 1909 1895 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { 1910 1896 struct bdi_writeback *wb; 1911 - struct wb_iter iter; 1912 1897 1913 - bdi_for_each_wb(wb, bdi, &iter, 0) 1914 - if (!list_empty(&bdi->wb.b_dirty_time)) 1915 - wb_wakeup(&bdi->wb); 1898 + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) 1899 + if (!list_empty(&wb->b_dirty_time)) 1900 + wb_wakeup(wb); 1916 1901 } 1917 1902 rcu_read_unlock(); 1918 1903 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
+2 -1
fs/ocfs2/dlm/dlmmaster.c
··· 1658 1658 if (ret < 0) { 1659 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1660 1660 response = DLM_MASTER_RESP_ERROR; 1661 + spin_unlock(&res->spinlock); 1661 1662 dlm_lockres_put(res); 1662 1663 } else { 1663 1664 dispatched = 1; 1664 1665 __dlm_lockres_grab_inflight_worker(dlm, res); 1666 + spin_unlock(&res->spinlock); 1665 1667 } 1666 - spin_unlock(&res->spinlock); 1667 1668 } else { 1668 1669 if (res) 1669 1670 dlm_lockres_put(res);
+1 -1
fs/ocfs2/dlm/dlmrecovery.c
··· 1723 1723 } else { 1724 1724 dispatched = 1; 1725 1725 __dlm_lockres_grab_inflight_worker(dlm, res); 1726 + spin_unlock(&res->spinlock); 1726 1727 } 1727 - spin_unlock(&res->spinlock); 1728 1728 } else { 1729 1729 /* put... in case we are not the master */ 1730 1730 spin_unlock(&res->spinlock);
+3 -3
fs/overlayfs/copy_up.c
··· 81 81 if (len == 0) 82 82 return 0; 83 83 84 - old_file = ovl_path_open(old, O_RDONLY); 84 + old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY); 85 85 if (IS_ERR(old_file)) 86 86 return PTR_ERR(old_file); 87 87 88 - new_file = ovl_path_open(new, O_WRONLY); 88 + new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY); 89 89 if (IS_ERR(new_file)) { 90 90 error = PTR_ERR(new_file); 91 91 goto out_fput; ··· 267 267 268 268 out_cleanup: 269 269 ovl_cleanup(wdir, newdentry); 270 - goto out; 270 + goto out2; 271 271 } 272 272 273 273 /*
+3
fs/overlayfs/inode.c
··· 363 363 ovl_path_upper(dentry, &realpath); 364 364 } 365 365 366 + if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE) 367 + return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags); 368 + 366 369 return d_backing_inode(realpath.dentry); 367 370 } 368 371
+2
fs/overlayfs/super.c
··· 544 544 mntput(ufs->upper_mnt); 545 545 for (i = 0; i < ufs->numlower; i++) 546 546 mntput(ufs->lower_mnt[i]); 547 + kfree(ufs->lower_mnt); 547 548 548 549 kfree(ufs->config.lowerdir); 549 550 kfree(ufs->config.upperdir); ··· 1049 1048 oe->lowerstack[i].dentry = stack[i].dentry; 1050 1049 oe->lowerstack[i].mnt = ufs->lower_mnt[i]; 1051 1050 } 1051 + kfree(stack); 1052 1052 1053 1053 root_dentry->d_fsdata = oe; 1054 1054
+3
include/linux/backing-dev-defs.h
··· 116 116 struct list_head work_list; 117 117 struct delayed_work dwork; /* work item used for writeback */ 118 118 119 + struct list_head bdi_node; /* anchored at bdi->wb_list */ 120 + 119 121 #ifdef CONFIG_CGROUP_WRITEBACK 120 122 struct percpu_ref refcnt; /* used only for !root wb's */ 121 123 struct fprop_local_percpu memcg_completions; ··· 152 150 atomic_long_t tot_write_bandwidth; 153 151 154 152 struct bdi_writeback wb; /* the root writeback info for this bdi */ 153 + struct list_head wb_list; /* list of all wbs */ 155 154 #ifdef CONFIG_CGROUP_WRITEBACK 156 155 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ 157 156 struct rb_root cgwb_congested_tree; /* their congested states */
+5 -64
include/linux/backing-dev.h
··· 19 19 #include <linux/slab.h> 20 20 21 21 int __must_check bdi_init(struct backing_dev_info *bdi); 22 - void bdi_destroy(struct backing_dev_info *bdi); 22 + void bdi_exit(struct backing_dev_info *bdi); 23 23 24 24 __printf(3, 4) 25 25 int bdi_register(struct backing_dev_info *bdi, struct device *parent, 26 26 const char *fmt, ...); 27 27 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 28 + void bdi_unregister(struct backing_dev_info *bdi); 29 + 28 30 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); 31 + void bdi_destroy(struct backing_dev_info *bdi); 32 + 29 33 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, 30 34 bool range_cyclic, enum wb_reason reason); 31 35 void wb_start_background_writeback(struct bdi_writeback *wb); ··· 412 408 rcu_read_unlock(); 413 409 } 414 410 415 - struct wb_iter { 416 - int start_memcg_id; 417 - struct radix_tree_iter tree_iter; 418 - void **slot; 419 - }; 420 - 421 - static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, 422 - struct backing_dev_info *bdi) 423 - { 424 - struct radix_tree_iter *titer = &iter->tree_iter; 425 - 426 - WARN_ON_ONCE(!rcu_read_lock_held()); 427 - 428 - if (iter->start_memcg_id >= 0) { 429 - iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); 430 - iter->start_memcg_id = -1; 431 - } else { 432 - iter->slot = radix_tree_next_slot(iter->slot, titer, 0); 433 - } 434 - 435 - if (!iter->slot) 436 - iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); 437 - if (iter->slot) 438 - return *iter->slot; 439 - return NULL; 440 - } 441 - 442 - static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, 443 - struct backing_dev_info *bdi, 444 - int start_memcg_id) 445 - { 446 - iter->start_memcg_id = start_memcg_id; 447 - 448 - if (start_memcg_id) 449 - return __wb_iter_next(iter, bdi); 450 - else 451 - return &bdi->wb; 452 - } 453 - 454 - /** 455 - * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order 456 - * @wb_cur: cursor struct bdi_writeback pointer 457 - * @bdi: bdi to walk wb's of 458 - * @iter: pointer to struct wb_iter to be used as iteration buffer 459 - * @start_memcg_id: memcg ID to start iteration from 460 - * 461 - * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending 462 - * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter 463 - * to be used as temp storage during iteration. rcu_read_lock() must be 464 - * held throughout iteration. 465 - */ 466 - #define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ 467 - for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ 468 - (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) 469 - 470 411 #else /* CONFIG_CGROUP_WRITEBACK */ 471 412 472 413 static inline bool inode_cgwb_enabled(struct inode *inode) ··· 470 521 static inline void wb_blkcg_offline(struct blkcg *blkcg) 471 522 { 472 523 } 473 - 474 - struct wb_iter { 475 - int next_id; 476 - }; 477 - 478 - #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ 479 - for ((iter)->next_id = (start_blkcg_id); \ 480 - ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); ) 481 524 482 525 static inline int inode_congested(struct inode *inode, int cong_bits) 483 526 {
+2 -2
include/linux/blk-cgroup.h
··· 713 713 714 714 if (!throtl) { 715 715 blkg = blkg ?: q->root_blkg; 716 - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, 716 + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, 717 717 bio->bi_iter.bi_size); 718 - blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); 718 + blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); 719 719 } 720 720 721 721 rcu_read_unlock();
+1 -1
include/linux/cma.h
··· 26 26 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, 27 27 unsigned int order_per_bit, 28 28 struct cma **res_cma); 29 - extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); 29 + extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); 30 30 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); 31 31 #endif
+11 -2
include/linux/compiler-gcc.h
··· 237 237 #define KASAN_ABI_VERSION 3 238 238 #endif 239 239 240 - #if GCC_VERSION >= 50000 241 - #define CC_HAVE_BUILTIN_OVERFLOW 240 + #if GCC_VERSION >= 40902 241 + /* 242 + * Tell the compiler that address safety instrumentation (KASAN) 243 + * should not be applied to that function. 244 + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 245 + */ 246 + #define __no_sanitize_address __attribute__((no_sanitize_address)) 242 247 #endif 243 248 244 249 #endif /* gcc version >= 40000 specific checks */ 245 250 246 251 #if !defined(__noclone) 247 252 #define __noclone /* not needed */ 253 + #endif 254 + 255 + #if !defined(__no_sanitize_address) 256 + #define __no_sanitize_address 248 257 #endif 249 258 250 259 /*
+53 -13
include/linux/compiler.h
··· 198 198 199 199 #include <uapi/linux/types.h> 200 200 201 - static __always_inline void __read_once_size(const volatile void *p, void *res, int size) 201 + #define __READ_ONCE_SIZE \ 202 + ({ \ 203 + switch (size) { \ 204 + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ 205 + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ 206 + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ 207 + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ 208 + default: \ 209 + barrier(); \ 210 + __builtin_memcpy((void *)res, (const void *)p, size); \ 211 + barrier(); \ 212 + } \ 213 + }) 214 + 215 + static __always_inline 216 + void __read_once_size(const volatile void *p, void *res, int size) 202 217 { 203 - switch (size) { 204 - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; 205 - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; 206 - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; 207 - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; 208 - default: 209 - barrier(); 210 - __builtin_memcpy((void *)res, (const void *)p, size); 211 - barrier(); 212 - } 218 + __READ_ONCE_SIZE; 213 219 } 220 + 221 + #ifdef CONFIG_KASAN 222 + /* 223 + * This function is not 'inline' because __no_sanitize_address conflicts 224 + * with inlining. Attempting to inline it may cause a build failure. 225 + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 226 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 227 + */ 228 + static __no_sanitize_address __maybe_unused 229 + void __read_once_size_nocheck(const volatile void *p, void *res, int size) 230 + { 231 + __READ_ONCE_SIZE; 232 + } 233 + #else 234 + static __always_inline 235 + void __read_once_size_nocheck(const volatile void *p, void *res, int size) 236 + { 237 + __READ_ONCE_SIZE; 238 + } 239 + #endif 214 240 215 241 static __always_inline void __write_once_size(volatile void *p, void *res, int size) 216 242 { ··· 274 248 * required ordering. 275 249 */ 276 250 277 - #define READ_ONCE(x) \ 278 - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 251 + #define __READ_ONCE(x, check) \ 252 + ({ \ 253 + union { typeof(x) __val; char __c[1]; } __u; \ 254 + if (check) \ 255 + __read_once_size(&(x), __u.__c, sizeof(x)); \ 256 + else \ 257 + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ 258 + __u.__val; \ 259 + }) 260 + #define READ_ONCE(x) __READ_ONCE(x, 1) 261 + 262 + /* 263 + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need 264 + * to hide memory access from KASAN. 265 + */ 266 + #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) 279 267 280 268 #define WRITE_ONCE(x, val) \ 281 269 ({ \
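READ_ONCE_NOCHECK() is meant for loads that are deliberately outside what KASAN should police. A minimal usage sketch (a hypothetical stack-walker helper, not code from this patch):

    /* A stack walker may read slots KASAN considers poisoned; this does
     * the same volatile load as READ_ONCE() but routes it through
     * __read_once_size_nocheck(), so no KASAN report is generated. */
    static unsigned long read_stack_slot(unsigned long *slot)
    {
            return READ_ONCE_NOCHECK(*slot);
    }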
+2 -2
include/linux/dma-contiguous.h
··· 111 111 return ret; 112 112 } 113 113 114 - struct page *dma_alloc_from_contiguous(struct device *dev, int count, 114 + struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, 115 115 unsigned int order); 116 116 bool dma_release_from_contiguous(struct device *dev, struct page *pages, 117 117 int count); ··· 144 144 } 145 145 146 146 static inline 147 - struct page *dma_alloc_from_contiguous(struct device *dev, int count, 147 + struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, 148 148 unsigned int order) 149 149 { 150 150 return NULL;
+5 -3
include/linux/memcontrol.h
··· 676 676 677 677 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); 678 678 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); 679 - void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 680 - unsigned long *pdirty, unsigned long *pwriteback); 679 + void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 680 + unsigned long *pheadroom, unsigned long *pdirty, 681 + unsigned long *pwriteback); 681 682 682 683 #else /* CONFIG_CGROUP_WRITEBACK */ 683 684 ··· 688 687 } 689 688 690 689 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, 691 - unsigned long *pavail, 690 + unsigned long *pfilepages, 691 + unsigned long *pheadroom, 692 692 unsigned long *pdirty, 693 693 unsigned long *pwriteback) 694 694 {
-18
include/linux/overflow-arith.h
··· 1 - #pragma once 2 - 3 - #include <linux/kernel.h> 4 - 5 - #ifdef CC_HAVE_BUILTIN_OVERFLOW 6 - 7 - #define overflow_usub __builtin_usub_overflow 8 - 9 - #else 10 - 11 - static inline bool overflow_usub(unsigned int a, unsigned int b, 12 - unsigned int *res) 13 - { 14 - *res = a - b; 15 - return *res > a ? true : false; 16 - } 17 - 18 - #endif
+4 -2
include/sound/soc.h
··· 86 86 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 87 87 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 88 88 .tlv.p = (tlv_array),\ 89 - .info = snd_soc_info_volsw, \ 89 + .info = snd_soc_info_volsw_sx, \ 90 90 .get = snd_soc_get_volsw_sx,\ 91 91 .put = snd_soc_put_volsw_sx, \ 92 92 .private_value = (unsigned long)&(struct soc_mixer_control) \ ··· 156 156 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 157 157 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 158 158 .tlv.p = (tlv_array), \ 159 - .info = snd_soc_info_volsw, \ 159 + .info = snd_soc_info_volsw_sx, \ 160 160 .get = snd_soc_get_volsw_sx, \ 161 161 .put = snd_soc_put_volsw_sx, \ 162 162 .private_value = (unsigned long)&(struct soc_mixer_control) \ ··· 574 574 struct snd_ctl_elem_value *ucontrol); 575 575 int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, 576 576 struct snd_ctl_elem_info *uinfo); 577 + int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, 578 + struct snd_ctl_elem_info *uinfo); 577 579 #define snd_soc_info_bool_ext snd_ctl_boolean_mono_info 578 580 int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, 579 581 struct snd_ctl_elem_value *ucontrol);
+1 -1
include/sound/wm8904.h
··· 119 119 #define WM8904_MIC_REGS 2 120 120 #define WM8904_GPIO_REGS 4 121 121 #define WM8904_DRC_REGS 4 122 - #define WM8904_EQ_REGS 25 122 + #define WM8904_EQ_REGS 24 123 123 124 124 /** 125 125 * DRC configurations are specified with a label and a set of register
+6 -2
kernel/kmod.c
··· 327 327 call_usermodehelper_exec_sync(sub_info); 328 328 } else { 329 329 pid_t pid; 330 - 330 + /* 331 + * Use CLONE_PARENT to reparent it to kthreadd; we do not 332 + * want to pollute current->children, and we need a parent 333 + * that always ignores SIGCHLD to ensure auto-reaping. 334 + */ 331 335 pid = kernel_thread(call_usermodehelper_exec_async, sub_info, 332 - SIGCHLD); 336 + CLONE_PARENT | SIGCHLD); 333 337 if (pid < 0) { 334 338 sub_info->retval = pid; 335 339 umh_complete(sub_info);
+6 -2
kernel/module.c
··· 1063 1063 if (core_kernel_text(a)) 1064 1064 return; 1065 1065 1066 - /* module_text_address is safe here: we're supposed to have reference 1067 - * to module from symbol_get, so it can't go away. */ 1066 + /* 1067 + * Even though we hold a reference on the module, we still need to 1068 + * disable preemption in order to safely traverse the data structure. 1069 + */ 1070 + preempt_disable(); 1068 1071 modaddr = __module_text_address(a); 1069 1072 BUG_ON(!modaddr); 1070 1073 module_put(modaddr); 1074 + preempt_enable(); 1071 1075 } 1072 1076 EXPORT_SYMBOL_GPL(symbol_put_addr); 1073 1077
+8 -4
kernel/sched/core.c
··· 2366 2366 trace_sched_wakeup_new(p); 2367 2367 check_preempt_curr(rq, p, WF_FORK); 2368 2368 #ifdef CONFIG_SMP 2369 - if (p->sched_class->task_woken) 2369 + if (p->sched_class->task_woken) { 2370 + /* 2371 + * Nothing relies on rq->lock after this, so it's fine to 2372 + * drop it. 2373 + */ 2374 + lockdep_unpin_lock(&rq->lock); 2370 2375 p->sched_class->task_woken(rq, p); 2376 + lockdep_pin_lock(&rq->lock); 2377 + } 2371 2378 #endif 2372 2379 task_rq_unlock(rq, p, &flags); 2373 2380 } ··· 7244 7237 7245 7238 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7246 7239 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7247 - 7248 - /* nohz_full won't take effect without isolating the cpus. */ 7249 - tick_nohz_full_add_cpus_to(cpu_isolated_map); 7250 7240 7251 7241 sched_init_numa(); 7252 7242
+13 -4
kernel/sched/deadline.c
··· 668 668 * Queueing this task back might have overloaded rq, check if we need 669 669 * to kick someone away. 670 670 */ 671 - if (has_pushable_dl_tasks(rq)) 671 + if (has_pushable_dl_tasks(rq)) { 672 + /* 673 + * Nothing relies on rq->lock after this, so it's safe to drop 674 + * rq->lock. 675 + */ 676 + lockdep_unpin_lock(&rq->lock); 672 677 push_dl_task(rq); 678 + lockdep_pin_lock(&rq->lock); 679 + } 673 680 #endif 674 681 675 682 unlock: ··· 1073 1066 int target = find_later_rq(p); 1074 1067 1075 1068 if (target != -1 && 1076 - dl_time_before(p->dl.deadline, 1077 - cpu_rq(target)->dl.earliest_dl.curr)) 1069 + (dl_time_before(p->dl.deadline, 1070 + cpu_rq(target)->dl.earliest_dl.curr) || 1071 + (cpu_rq(target)->dl.dl_nr_running == 0))) 1078 1072 cpu = target; 1079 1073 } 1080 1074 rcu_read_unlock(); ··· 1425 1417 1426 1418 later_rq = cpu_rq(cpu); 1427 1419 1428 - if (!dl_time_before(task->dl.deadline, 1420 + if (later_rq->dl.dl_nr_running && 1421 + !dl_time_before(task->dl.deadline, 1429 1422 later_rq->dl.earliest_dl.curr)) { 1430 1423 /* 1431 1424 * Target rq has tasks of equal or earlier deadline,
+5 -4
kernel/sched/fair.c
··· 2363 2363 */ 2364 2364 tg_weight = atomic_long_read(&tg->load_avg); 2365 2365 tg_weight -= cfs_rq->tg_load_avg_contrib; 2366 - tg_weight += cfs_rq_load_avg(cfs_rq); 2366 + tg_weight += cfs_rq->load.weight; 2367 2367 2368 2368 return tg_weight; 2369 2369 } ··· 2373 2373 long tg_weight, load, shares; 2374 2374 2375 2375 tg_weight = calc_tg_weight(tg, cfs_rq); 2376 - load = cfs_rq_load_avg(cfs_rq); 2376 + load = cfs_rq->load.weight; 2377 2377 2378 2378 shares = (tg->shares * load); 2379 2379 if (tg_weight) ··· 2664 2664 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ 2665 2665 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 2666 2666 { 2667 - int decayed; 2668 2667 struct sched_avg *sa = &cfs_rq->avg; 2668 + int decayed, removed = 0; 2669 2669 2670 2670 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2671 2671 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2672 2672 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2673 2673 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2674 + removed = 1; 2674 2675 } 2675 2676 2676 2677 if (atomic_long_read(&cfs_rq->removed_util_avg)) { ··· 2689 2688 cfs_rq->load_last_update_time_copy = sa->last_update_time; 2690 2689 #endif 2691 2690 2692 - return decayed; 2691 + return decayed || removed; 2693 2692 } 2694 2693 2695 2694 /* Update task and its cfs_rq load average */
+2
kernel/sched/idle.c
··· 57 57 rcu_idle_enter(); 58 58 trace_cpu_idle_rcuidle(0, smp_processor_id()); 59 59 local_irq_enable(); 60 + stop_critical_timings(); 60 61 while (!tif_need_resched() && 61 62 (cpu_idle_force_poll || tick_check_broadcast_expired())) 62 63 cpu_relax(); 64 + start_critical_timings(); 63 65 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 64 66 rcu_idle_exit(); 65 67 return 1;
+11
kernel/trace/trace_stack.c
··· 85 85 if (!object_is_on_stack(stack)) 86 86 return; 87 87 88 + /* Can't do this from NMI context (can cause deadlocks) */ 89 + if (in_nmi()) 90 + return; 91 + 88 92 local_irq_save(flags); 89 93 arch_spin_lock(&max_stack_lock); 94 + 95 + /* 96 + * RCU may not be watching, make it see us. 97 + * The stack trace code uses rcu_sched. 98 + */ 99 + rcu_irq_enter(); 90 100 91 101 /* In case another CPU set the tracer_frame on us */ 92 102 if (unlikely(!frame_size)) ··· 179 169 } 180 170 181 171 out: 172 + rcu_irq_exit(); 182 173 arch_spin_unlock(&max_stack_lock); 183 174 local_irq_restore(flags); 184 175 }
+1
lib/Kconfig.debug
··· 197 197 config FRAME_WARN 198 198 int "Warn for stack frames larger than (needs gcc 4.4)" 199 199 range 0 8192 200 + default 0 if KASAN 200 201 default 1024 if !64BIT 201 202 default 2048 if 64BIT 202 203 help
+1 -1
lib/fault-inject.c
··· 44 44 printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" 45 45 "name %pd, interval %lu, probability %lu, " 46 46 "space %d, times %d\n", attr->dname, 47 - attr->probability, attr->interval, 47 + attr->interval, attr->probability, 48 48 atomic_read(&attr->space), 49 49 atomic_read(&attr->times)); 50 50 if (attr->verbose > 1)
+30 -6
mm/backing-dev.c
··· 480 480 release_work); 481 481 struct backing_dev_info *bdi = wb->bdi; 482 482 483 + spin_lock_irq(&cgwb_lock); 484 + list_del_rcu(&wb->bdi_node); 485 + spin_unlock_irq(&cgwb_lock); 486 + 483 487 wb_shutdown(wb); 484 488 485 489 css_put(wb->memcg_css); ··· 579 575 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); 580 576 if (!ret) { 581 577 atomic_inc(&bdi->usage_cnt); 578 + list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); 582 579 list_add(&wb->memcg_node, memcg_cgwb_list); 583 580 list_add(&wb->blkcg_node, blkcg_cgwb_list); 584 581 css_get(memcg_css); ··· 681 676 static void cgwb_bdi_destroy(struct backing_dev_info *bdi) 682 677 { 683 678 struct radix_tree_iter iter; 684 - struct bdi_writeback_congested *congested, *congested_n; 679 + struct rb_node *rbn; 685 680 void **slot; 686 681 687 682 WARN_ON(test_bit(WB_registered, &bdi->wb.state)); ··· 691 686 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) 692 687 cgwb_kill(*slot); 693 688 694 - rbtree_postorder_for_each_entry_safe(congested, congested_n, 695 - &bdi->cgwb_congested_tree, rb_node) { 696 - rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); 689 + while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { 690 + struct bdi_writeback_congested *congested = 691 + rb_entry(rbn, struct bdi_writeback_congested, rb_node); 692 + 693 + rb_erase(rbn, &bdi->cgwb_congested_tree); 697 694 congested->bdi = NULL; /* mark @congested unlinked */ 698 695 } 699 696 ··· 771 764 772 765 int bdi_init(struct backing_dev_info *bdi) 773 766 { 767 + int ret; 768 + 774 769 bdi->dev = NULL; 775 770 776 771 bdi->min_ratio = 0; 777 772 bdi->max_ratio = 100; 778 773 bdi->max_prop_frac = FPROP_FRAC_BASE; 779 774 INIT_LIST_HEAD(&bdi->bdi_list); 775 + INIT_LIST_HEAD(&bdi->wb_list); 780 776 init_waitqueue_head(&bdi->wb_waitq); 781 777 782 - return cgwb_bdi_init(bdi); 778 + ret = cgwb_bdi_init(bdi); 779 + 780 + list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); 781 + 782 + return ret; 783 783 } 784 784 EXPORT_SYMBOL(bdi_init); 785 785 ··· 837 823 synchronize_rcu_expedited(); 838 824 } 839 825 840 - void bdi_destroy(struct backing_dev_info *bdi) 826 + void bdi_unregister(struct backing_dev_info *bdi) 841 827 { 842 828 /* make sure nobody finds us on the bdi_list anymore */ 843 829 bdi_remove_from_list(bdi); ··· 849 835 device_unregister(bdi->dev); 850 836 bdi->dev = NULL; 851 837 } 838 + } 852 839 840 + void bdi_exit(struct backing_dev_info *bdi) 841 + { 842 + WARN_ON_ONCE(bdi->dev); 853 843 wb_exit(&bdi->wb); 844 + } 845 + 846 + void bdi_destroy(struct backing_dev_info *bdi) 847 + { 848 + bdi_unregister(bdi); 849 + bdi_exit(bdi); 854 850 } 855 851 EXPORT_SYMBOL(bdi_destroy); 856 852
+2 -2
mm/cma.c
··· 361 361 * This function allocates part of contiguous memory on specific 362 362 * contiguous memory area. 363 363 */ 364 - struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) 364 + struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) 365 365 { 366 366 unsigned long mask, offset, pfn, start = 0; 367 367 unsigned long bitmap_maxno, bitmap_no, bitmap_count; ··· 371 371 if (!cma || !cma->count) 372 372 return NULL; 373 373 374 - pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, 374 + pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, 375 375 count, align); 376 376 377 377 if (!count)
+5 -4
mm/filemap.c
··· 2488 2488 break; 2489 2489 } 2490 2490 2491 + if (fatal_signal_pending(current)) { 2492 + status = -EINTR; 2493 + break; 2494 + } 2495 + 2491 2496 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2492 2497 &page, &fsdata); 2493 2498 if (unlikely(status < 0)) ··· 2530 2525 written += copied; 2531 2526 2532 2527 balance_dirty_pages_ratelimited(mapping); 2533 - if (fatal_signal_pending(current)) { 2534 - status = -EINTR; 2535 - break; 2536 - } 2537 2528 } while (iov_iter_count(i)); 2538 2529 2539 2530 return written ? written : status;
+2 -1
mm/huge_memory.c
··· 2206 2206 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; 2207 2207 _pte++, address += PAGE_SIZE) { 2208 2208 pte_t pteval = *_pte; 2209 - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { 2209 + if (pte_none(pteval) || (pte_present(pteval) && 2210 + is_zero_pfn(pte_pfn(pteval)))) { 2210 2211 if (!userfaultfd_armed(vma) && 2211 2212 ++none_or_zero <= khugepaged_max_ptes_none) 2212 2213 continue;
+17 -18
mm/memcontrol.c
··· 3741 3741 /** 3742 3742 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg 3743 3743 * @wb: bdi_writeback in question 3744 - * @pavail: out parameter for number of available pages 3744 + * @pfilepages: out parameter for number of file pages 3745 + * @pheadroom: out parameter for number of allocatable pages according to memcg 3745 3746 * @pdirty: out parameter for number of dirty pages 3746 3747 * @pwriteback: out parameter for number of pages under writeback 3747 3748 * 3748 - * Determine the numbers of available, dirty, and writeback pages in @wb's 3749 - * memcg. Dirty and writeback are self-explanatory. Available is a bit 3750 - * more involved. 3749 + * Determine the numbers of file, headroom, dirty, and writeback pages in 3750 + * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom 3751 + * is a bit more involved. 3751 3752 * 3752 - * A memcg's headroom is "min(max, high) - used". The available memory is 3753 - * calculated as the lowest headroom of itself and the ancestors plus the 3754 - * number of pages already being used for file pages. Note that this 3755 - * doesn't consider the actual amount of available memory in the system. 3756 - * The caller should further cap *@pavail accordingly. 3753 + * A memcg's headroom is "min(max, high) - used". In the hierarchy, the 3754 + * headroom is calculated as the lowest headroom of itself and the 3755 + * ancestors. Note that this doesn't consider the actual amount of 3756 + * available memory in the system. The caller should further cap 3757 + * *@pheadroom accordingly. 3757 3758 */ 3758 - void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, 3759 - unsigned long *pdirty, unsigned long *pwriteback) 3759 + void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, 3760 + unsigned long *pheadroom, unsigned long *pdirty, 3761 + unsigned long *pwriteback) 3760 3762 { 3761 3763 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); 3762 3764 struct mem_cgroup *parent; 3763 - unsigned long head_room = PAGE_COUNTER_MAX; 3764 - unsigned long file_pages; 3765 3765 3766 3766 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); 3767 3767 3768 3768 /* this should eventually include NR_UNSTABLE_NFS */ 3769 3769 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); 3770 + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3771 + (1 << LRU_ACTIVE_FILE)); 3772 + *pheadroom = PAGE_COUNTER_MAX; 3770 3773 3771 - file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | 3772 - (1 << LRU_ACTIVE_FILE)); 3773 3774 while ((parent = parent_mem_cgroup(memcg))) { 3774 3775 unsigned long ceiling = min(memcg->memory.limit, memcg->high); 3775 3776 unsigned long used = page_counter_read(&memcg->memory); 3776 3777 3777 - head_room = min(head_room, ceiling - min(ceiling, used)); 3778 + *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); 3778 3779 memcg = parent; 3779 3780 } 3780 - 3781 - *pavail = file_pages + head_room; 3782 3781 } 3783 3782 3784 3783 #else /* CONFIG_CGROUP_WRITEBACK */
+32 -22
mm/page-writeback.c
··· 145 145 unsigned long pos_ratio; 146 146 }; 147 147 148 - #define DTC_INIT_COMMON(__wb) .wb = (__wb), \ 149 - .wb_completions = &(__wb)->completions 150 - 151 148 /* 152 149 * Length of period for aging writeout fractions of bdis. This is an 153 150 * arbitrarily chosen number. The longer the period, the slower fractions will ··· 154 157 155 158 #ifdef CONFIG_CGROUP_WRITEBACK 156 159 157 - #define GDTC_INIT(__wb) .dom = &global_wb_domain, \ 158 - DTC_INIT_COMMON(__wb) 160 + #define GDTC_INIT(__wb) .wb = (__wb), \ 161 + .dom = &global_wb_domain, \ 162 + .wb_completions = &(__wb)->completions 163 + 159 164 #define GDTC_INIT_NO_WB .dom = &global_wb_domain 160 - #define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ 161 - .gdtc = __gdtc, \ 162 - DTC_INIT_COMMON(__wb) 165 + 166 + #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ 167 + .dom = mem_cgroup_wb_domain(__wb), \ 168 + .wb_completions = &(__wb)->memcg_completions, \ 169 + .gdtc = __gdtc 163 170 164 171 static bool mdtc_valid(struct dirty_throttle_control *dtc) 165 172 { ··· 214 213 215 214 #else /* CONFIG_CGROUP_WRITEBACK */ 216 215 217 - #define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) 216 + #define GDTC_INIT(__wb) .wb = (__wb), \ 217 + .wb_completions = &(__wb)->completions 218 218 #define GDTC_INIT_NO_WB 219 219 #define MDTC_INIT(__wb, __gdtc) 220 220 ··· 684 682 return max(thresh, dom->dirty_limit); 685 683 } 686 684 687 - /* memory available to a memcg domain is capped by system-wide clean memory */ 688 - static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) 685 + /* 686 + * Memory which can be further allocated to a memcg domain is capped by 687 + * system-wide clean memory excluding the amount being used in the domain. 688 + */ 689 + static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, 690 + unsigned long filepages, unsigned long headroom) 689 691 { 690 692 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); 691 - unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); 693 + unsigned long clean = filepages - min(filepages, mdtc->dirty); 694 + unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); 695 + unsigned long other_clean = global_clean - min(global_clean, clean); 692 696 693 - mdtc->avail = min(mdtc->avail, clean); 697 + mdtc->avail = filepages + min(headroom, other_clean); 694 698 } 695 699 696 700 /** ··· 1570 1562 } 1571 1563 1572 1564 if (mdtc) { 1573 - unsigned long writeback; 1565 + unsigned long filepages, headroom, writeback; 1574 1566 1575 1567 /* 1576 1568 * If @wb belongs to !root memcg, repeat the same 1577 1569 * basic calculations for the memcg domain. 1578 1570 */ 1579 - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, 1580 - &writeback); 1581 - mdtc_cap_avail(mdtc); 1571 + mem_cgroup_wb_stats(wb, &filepages, &headroom, 1572 + &mdtc->dirty, &writeback); 1582 1573 mdtc->dirty += writeback; 1574 + mdtc_calc_avail(mdtc, filepages, headroom); 1583 1575 1584 1576 domain_dirty_limits(mdtc); 1585 1577 ··· 1901 1893 return true; 1902 1894 1903 1895 if (mdtc) { 1904 - unsigned long writeback; 1896 + unsigned long filepages, headroom, writeback; 1905 1897 1906 - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); 1907 - mdtc_cap_avail(mdtc); 1898 + mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, 1899 + &writeback); 1900 + mdtc_calc_avail(mdtc, filepages, headroom); 1908 1901 domain_dirty_limits(mdtc); /* ditto, ignore writeback */ 1909 1902 1910 1903 if (mdtc->dirty > mdtc->bg_thresh) ··· 1965 1956 int nr_pages = global_page_state(NR_FILE_DIRTY) + 1966 1957 global_page_state(NR_UNSTABLE_NFS); 1967 1958 struct bdi_writeback *wb; 1968 - struct wb_iter iter; 1969 1959 1970 1960 /* 1971 1961 * We want to write everything out, not just down to the dirty ··· 1973 1965 if (!bdi_has_dirty_io(&q->backing_dev_info)) 1974 1966 return; 1975 1967 1976 - bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) 1968 + rcu_read_lock(); 1969 + list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node) 1977 1970 if (wb_has_dirty_io(wb)) 1978 1971 wb_start_writeback(wb, nr_pages, true, 1979 1972 WB_REASON_LAPTOP_TIMER); 1973 + rcu_read_unlock(); 1980 1974 } 1981 1975 1982 1976 /*
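A worked example of mdtc_calc_avail() with assumed numbers: filepages = 200, mdtc->dirty = 50, gdtc->avail = 1000, gdtc->dirty = 700 and headroom = 500 give clean = 150, global_clean = 300 and other_clean = 150, so mdtc->avail = 200 + min(500, 150) = 350 pages; the memcg domain is credited with its own file pages plus whatever clean memory the rest of the system can spare, capped by its cgroup headroom.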
+1 -1
net/ipv4/fib_trie.c
··· 1569 1569 do { 1570 1570 /* record parent and next child index */ 1571 1571 pn = n; 1572 - cindex = key ? get_index(key, pn) : 0; 1572 + cindex = (key > pn->key) ? get_index(key, pn) : 0; 1573 1573 1574 1574 if (cindex >> pn->bits) 1575 1575 break;
+2 -1
net/ipv4/gre_offload.c
··· 36 36 SKB_GSO_TCP_ECN | 37 37 SKB_GSO_GRE | 38 38 SKB_GSO_GRE_CSUM | 39 - SKB_GSO_IPIP))) 39 + SKB_GSO_IPIP | 40 + SKB_GSO_SIT))) 40 41 goto out; 41 42 42 43 if (!skb->encapsulation)
+2 -4
net/ipv6/ip6_output.c
··· 28 28 29 29 #include <linux/errno.h> 30 30 #include <linux/kernel.h> 31 - #include <linux/overflow-arith.h> 32 31 #include <linux/string.h> 33 32 #include <linux/socket.h> 34 33 #include <linux/net.h> ··· 596 597 if (np->frag_size) 597 598 mtu = np->frag_size; 598 599 } 599 - 600 - if (overflow_usub(mtu, hlen + sizeof(struct frag_hdr), &mtu) || 601 - mtu <= 7) 600 + if (mtu < hlen + sizeof(struct frag_hdr) + 8) 602 601 goto fail_toobig; 602 + mtu -= hlen + sizeof(struct frag_hdr); 603 603 604 604 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, 605 605 &ipv6_hdr(skb)->saddr);
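The new bound in ip6_fragment() is equivalent to the removed overflow_usub() pair: rejecting mtu < hlen + sizeof(struct frag_hdr) + 8 covers both the unsigned underflow of the subtraction and the old mtu <= 7 test, since every fragment must carry at least eight bytes of payload. As a worked example with assumed values, hlen = 40 (a bare IPv6 header) and mtu = 1280 pass the check and leave mtu = 1232 bytes of per-fragment payload after the subtraction.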
+1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 644 644 s = s2; 645 645 } 646 646 } 647 + EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig); 647 648 648 649 static int nf_ct_net_init(struct net *net) 649 650 {
+2 -2
net/openvswitch/actions.c
··· 1110 1110 nla_data(a)); 1111 1111 1112 1112 /* Hide stolen IP fragments from user space. */ 1113 - if (err == -EINPROGRESS) 1114 - return 0; 1113 + if (err) 1114 + return err == -EINPROGRESS ? 0 : err; 1115 1115 break; 1116 1116 } 1117 1117
+20 -4
net/openvswitch/conntrack.c
··· 293 293 return helper->help(skb, protoff, ct, ctinfo); 294 294 } 295 295 296 + /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero 297 + * value if 'skb' is freed. 298 + */ 296 299 static int handle_fragments(struct net *net, struct sw_flow_key *key, 297 300 u16 zone, struct sk_buff *skb) 298 301 { ··· 311 308 return err; 312 309 313 310 ovs_cb.mru = IPCB(skb)->frag_max_size; 314 - } else if (key->eth.type == htons(ETH_P_IPV6)) { 315 311 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 312 + } else if (key->eth.type == htons(ETH_P_IPV6)) { 316 313 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 317 314 struct sk_buff *reasm; 318 315 ··· 321 318 if (!reasm) 322 319 return -EINPROGRESS; 323 320 324 - if (skb == reasm) 321 + if (skb == reasm) { 322 + kfree_skb(skb); 325 323 return -EINVAL; 324 + } 325 + 326 + /* Don't free 'skb' even though it is one of the original 327 + * fragments, as we're going to morph it into the head. 328 + */ 329 + skb_get(skb); 330 + nf_ct_frag6_consume_orig(reasm); 326 331 327 332 key->ip.proto = ipv6_hdr(reasm)->nexthdr; 328 333 skb_morph(skb, reasm); 334 + skb->next = reasm->next; 329 335 consume_skb(reasm); 330 336 ovs_cb.mru = IP6CB(skb)->frag_max_size; 331 - #else 332 - return -EPFNOSUPPORT; 333 337 #endif 334 338 } else { 339 + kfree_skb(skb); 335 340 return -EPFNOSUPPORT; 336 341 } 337 342 ··· 484 473 return false; 485 474 } 486 475 476 + /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero 477 + * value if 'skb' is freed. 478 + */ 487 479 int ovs_ct_execute(struct net *net, struct sk_buff *skb, 488 480 struct sw_flow_key *key, 489 481 const struct ovs_conntrack_info *info) ··· 522 508 &info->labels.mask); 523 509 err: 524 510 skb_push(skb, nh_ofs); 511 + if (err) 512 + kfree_skb(skb); 525 513 return err; 526 514 } 527 515
+1
net/openvswitch/conntrack.h
··· 67 67 struct sw_flow_key *key, 68 68 const struct ovs_conntrack_info *info) 69 69 { 70 + kfree_skb(skb); 70 71 return -ENOTSUPP; 71 72 } 72 73
+9 -2
net/rds/tcp_recv.c
··· 214 214 } 215 215 216 216 to_copy = min(tc->t_tinc_data_rem, left); 217 - pskb_pull(clone, offset); 218 - pskb_trim(clone, to_copy); 217 + if (!pskb_pull(clone, offset) || 218 + pskb_trim(clone, to_copy)) { 219 + pr_warn("rds_tcp_data_recv: pull/trim failed " 220 + "left %zu data_rem %zu skb_len %d\n", 221 + left, tc->t_tinc_data_rem, skb->len); 222 + kfree_skb(clone); 223 + desc->error = -ENOMEM; 224 + goto out; 225 + } 219 226 skb_queue_tail(&tinc->ti_skb_list, clone); 220 227 221 228 rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
+12
samples/bpf/bpf_helpers.h
··· 92 92 #define PT_REGS_RC(x) ((x)->gprs[2]) 93 93 #define PT_REGS_SP(x) ((x)->gprs[15]) 94 94 95 + #elif defined(__aarch64__) 96 + 97 + #define PT_REGS_PARM1(x) ((x)->regs[0]) 98 + #define PT_REGS_PARM2(x) ((x)->regs[1]) 99 + #define PT_REGS_PARM3(x) ((x)->regs[2]) 100 + #define PT_REGS_PARM4(x) ((x)->regs[3]) 101 + #define PT_REGS_PARM5(x) ((x)->regs[4]) 102 + #define PT_REGS_RET(x) ((x)->regs[30]) 103 + #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */ 104 + #define PT_REGS_RC(x) ((x)->regs[0]) 105 + #define PT_REGS_SP(x) ((x)->sp) 106 + 95 107 #endif 96 108 #endif
+4 -2
security/keys/gc.c
··· 134 134 kdebug("- %u", key->serial); 135 135 key_check(key); 136 136 137 - /* Throw away the key data */ 138 - if (key->type->destroy) 137 + /* Throw away the key data if the key is instantiated */ 138 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 139 + !test_bit(KEY_FLAG_NEGATIVE, &key->flags) && 140 + key->type->destroy) 139 141 key->type->destroy(key); 140 142 141 143 security_key_free(key);
+3
security/keys/request_key.c
··· 440 440 441 441 kenter(""); 442 442 443 + if (ctx->index_key.type == &key_type_keyring) 444 + return ERR_PTR(-EPERM); 445 + 443 446 user = key_user_lookup(current_fsuid()); 444 447 if (!user) 445 448 return ERR_PTR(-ENOMEM);
+1
sound/hda/ext/hdac_ext_bus.c
··· 19 19 20 20 #include <linux/module.h> 21 21 #include <linux/slab.h> 22 + #include <linux/io.h> 22 23 #include <sound/hdaudio_ext.h> 23 24 24 25 MODULE_DESCRIPTION("HDA extended core");
+1 -3
sound/pci/hda/hda_codec.c
··· 3367 3367 int dev, err; 3368 3368 3369 3369 err = snd_hda_codec_parse_pcms(codec); 3370 - if (err < 0) { 3371 - snd_hda_codec_reset(codec); 3370 + if (err < 0) 3372 3371 return err; 3373 - } 3374 3372 3375 3373 /* attach a new PCM streams */ 3376 3374 list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
+1
sound/pci/hda/patch_conexant.c
··· 819 819 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), 820 820 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), 821 821 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), 822 + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), 822 823 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 823 824 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 824 825 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+13 -13
sound/soc/codecs/rt298.c
··· 50 50 }; 51 51 52 52 static struct reg_default rt298_index_def[] = { 53 - { 0x01, 0xaaaa }, 54 - { 0x02, 0x8aaa }, 53 + { 0x01, 0xa5a8 }, 54 + { 0x02, 0x8e95 }, 55 55 { 0x03, 0x0002 }, 56 - { 0x04, 0xaf01 }, 57 - { 0x08, 0x000d }, 58 - { 0x09, 0xd810 }, 59 - { 0x0a, 0x0120 }, 56 + { 0x04, 0xaf67 }, 57 + { 0x08, 0x200f }, 58 + { 0x09, 0xd010 }, 59 + { 0x0a, 0x0100 }, 60 60 { 0x0b, 0x0000 }, 61 61 { 0x0d, 0x2800 }, 62 - { 0x0f, 0x0000 }, 63 - { 0x19, 0x0a17 }, 62 + { 0x0f, 0x0022 }, 63 + { 0x19, 0x0217 }, 64 64 { 0x20, 0x0020 }, 65 65 { 0x33, 0x0208 }, 66 66 { 0x46, 0x0300 }, 67 - { 0x49, 0x0004 }, 68 - { 0x4f, 0x50e9 }, 69 - { 0x50, 0x2000 }, 70 - { 0x63, 0x2902 }, 67 + { 0x49, 0x4004 }, 68 + { 0x4f, 0x50c9 }, 69 + { 0x50, 0x3000 }, 70 + { 0x63, 0x1b02 }, 71 71 { 0x67, 0x1111 }, 72 72 { 0x68, 0x1016 }, 73 73 { 0x69, 0x273f }, ··· 1214 1214 mdelay(10); 1215 1215 1216 1216 if (!rt298->pdata.gpio2_en) 1217 - regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000); 1217 + regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40); 1218 1218 else 1219 1219 regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); 1220 1220
+2
sound/soc/codecs/wm8962.c
··· 3808 3808 3809 3809 wm8962_reset(wm8962); 3810 3810 3811 + regcache_mark_dirty(wm8962->regmap); 3812 + 3811 3813 /* SYSCLK defaults to on; make sure it is off so we can safely 3812 3814 * write to registers if the device is declocked. 3813 3815 */
+28
sound/soc/soc-ops.c
··· 207 207 EXPORT_SYMBOL_GPL(snd_soc_info_volsw); 208 208 209 209 /** 210 + * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls 211 + * @kcontrol: mixer control 212 + * @uinfo: control element information 213 + * 214 + * Callback to provide information about a single mixer control, or a double 215 + * mixer control that spans 2 registers of the SX TLV type. SX TLV controls 216 + * have a range that represents both positive and negative values either side 217 + * of zero but without a sign bit. 218 + * 219 + * Returns 0 for success. 220 + */ 221 + int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, 222 + struct snd_ctl_elem_info *uinfo) 223 + { 224 + struct soc_mixer_control *mc = 225 + (struct soc_mixer_control *)kcontrol->private_value; 226 + 227 + snd_soc_info_volsw(kcontrol, uinfo); 228 + /* Max represents the number of levels in an SX control, not the 229 + * maximum value, so add the minimum value back on 230 + */ 231 + uinfo->value.integer.max += mc->min; 232 + 233 + return 0; 234 + } 235 + EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx); 236 + 237 + /** 210 238 * snd_soc_get_volsw - single mixer get callback 211 239 * @kcontrol: mixer control 212 240 * @ucontrol: control element information
+19
virt/kvm/arm/arch_timer.c
··· 137 137 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) 138 138 { 139 139 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 140 + bool phys_active; 141 + int ret; 140 142 141 143 /* 142 144 * We're about to run this vcpu again, so there is no need to ··· 153 151 */ 154 152 if (kvm_timer_should_fire(vcpu)) 155 153 kvm_timer_inject_irq(vcpu); 154 + 155 + /* 156 + * We keep track of whether the edge-triggered interrupt has been 157 + * signalled to the vgic/guest, and if so, we mask the interrupt and 158 + * the physical distributor to prevent the timer from raising a 159 + * physical interrupt whenever we run a guest, preventing forward 160 + * VCPU progress. 161 + */ 162 + if (kvm_vgic_get_phys_irq_active(timer->map)) 163 + phys_active = true; 164 + else 165 + phys_active = false; 166 + 167 + ret = irq_set_irqchip_state(timer->map->irq, 168 + IRQCHIP_STATE_ACTIVE, 169 + phys_active); 170 + WARN_ON(ret); 156 171 } 157 172 158 173 /**
+55 -40
virt/kvm/arm/vgic.c
··· 531 531 return false; 532 532 } 533 533 534 + /* 535 + * If a mapped interrupt's state has been modified by the guest such that it 536 + * is no longer active or pending, without it having gone through the sync path, 537 + * then the map->active field must be cleared so the interrupt can be taken 538 + * again. 539 + */ 540 + static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu) 541 + { 542 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 543 + struct list_head *root; 544 + struct irq_phys_map_entry *entry; 545 + struct irq_phys_map *map; 546 + 547 + rcu_read_lock(); 548 + 549 + /* Check for PPIs */ 550 + root = &vgic_cpu->irq_phys_map_list; 551 + list_for_each_entry_rcu(entry, root, entry) { 552 + map = &entry->map; 553 + 554 + if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) && 555 + !vgic_irq_is_active(vcpu, map->virt_irq)) 556 + map->active = false; 557 + } 558 + 559 + rcu_read_unlock(); 560 + } 561 + 534 562 bool vgic_handle_clear_pending_reg(struct kvm *kvm, 535 563 struct kvm_exit_mmio *mmio, 536 564 phys_addr_t offset, int vcpu_id) ··· 589 561 vcpu_id, offset); 590 562 vgic_reg_access(mmio, reg, offset, mode); 591 563 564 + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); 592 565 vgic_update_state(kvm); 593 566 return true; 594 567 } ··· 627 598 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); 628 599 629 600 if (mmio->is_write) { 601 + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); 630 602 vgic_update_state(kvm); 631 603 return true; 632 604 } ··· 1012 982 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; 1013 983 pend_shared = vcpu->arch.vgic_cpu.pending_shared; 1014 984 985 + if (!dist->enabled) { 986 + bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS); 987 + bitmap_zero(pend_shared, nr_shared); 988 + return 0; 989 + } 990 + 1015 991 pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); 1016 992 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); 1017 993 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); ··· 1044 1008 struct vgic_dist *dist = &kvm->arch.vgic; 1045 1009 struct kvm_vcpu *vcpu; 1046 1010 int c; 1047 - 1048 - if (!dist->enabled) { 1049 - set_bit(0, dist->irq_pending_on_cpu); 1050 - return; 1051 - } 1052 1011 1053 1012 kvm_for_each_vcpu(c, vcpu, kvm) { 1054 1013 if (compute_pending_for_cpu(vcpu)) ··· 1123 1092 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1124 1093 struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); 1125 1094 1095 + /* 1096 + * We must transfer the pending state back to the distributor before 1097 + * retiring the LR, otherwise we may lose edge-triggered interrupts. 1098 + */ 1099 + if (vlr.state & LR_STATE_PENDING) { 1100 + vgic_dist_irq_set_pending(vcpu, irq); 1101 + vlr.hwirq = 0; 1102 + } 1103 + 1126 1104 vlr.state = 0; 1127 1105 vgic_set_lr(vcpu, lr_nr, vlr); 1128 1106 clear_bit(lr_nr, vgic_cpu->lr_used); ··· 1172 1132 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); 1173 1133 vgic_irq_clear_active(vcpu, irq); 1174 1134 vgic_update_state(vcpu->kvm); 1175 - } else if (vgic_dist_irq_is_pending(vcpu, irq)) { 1135 + } else { 1136 + WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq)); 1176 1137 vlr.state |= LR_STATE_PENDING; 1177 1138 kvm_debug("Set pending: 0x%x\n", vlr.state); 1178 1139 } ··· 1281 1240 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1282 1241 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1283 1242 unsigned long *pa_percpu, *pa_shared; 1284 - int i, vcpu_id, lr, ret; 1243 + int i, vcpu_id; 1285 1244 int overflow = 0; 1286 1245 int nr_shared = vgic_nr_shared_irqs(dist); 1287 1246 ··· 1335 1294 * adjust that if needed while exiting. 1336 1295 */ 1337 1296 clear_bit(vcpu_id, dist->irq_pending_on_cpu); 1338 - } 1339 - 1340 - for (lr = 0; lr < vgic->nr_lr; lr++) { 1341 - struct vgic_lr vlr; 1342 - 1343 - if (!test_bit(lr, vgic_cpu->lr_used)) 1344 - continue; 1345 - 1346 - vlr = vgic_get_lr(vcpu, lr); 1347 - 1348 - /* 1349 - * If we have a mapping, and the virtual interrupt is 1350 - * presented to the guest (as pending or active), then we must 1351 - * set the state to active in the physical world. See 1352 - * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt. 1353 - */ 1354 - if (vlr.state & LR_HW) { 1355 - struct irq_phys_map *map; 1356 - map = vgic_irq_map_search(vcpu, vlr.irq); 1357 - 1358 - ret = irq_set_irqchip_state(map->irq, 1359 - IRQCHIP_STATE_ACTIVE, 1360 - true); 1361 - WARN_ON(ret); 1362 - } 1363 1297 } 1364 1298 } ··· 1437 1421 return 0; 1438 1422 1439 1423 map = vgic_irq_map_search(vcpu, vlr.irq); 1440 - BUG_ON(!map || !map->active); 1424 + BUG_ON(!map); 1441 1425 1442 1426 ret = irq_get_irqchip_state(map->irq, 1443 1427 IRQCHIP_STATE_ACTIVE, ··· 1445 1429 1446 1430 WARN_ON(ret); 1447 1431 1448 - if (map->active) { 1449 - ret = irq_set_irqchip_state(map->irq, 1450 - IRQCHIP_STATE_ACTIVE, 1451 - false); 1452 - WARN_ON(ret); 1432 + if (map->active) 1453 1433 return 0; 1454 - } 1455 1434 1456 1435 return 1; 1457 1436 } ··· 1618 1607 } else { 1619 1608 if (level_triggered) { 1620 1609 vgic_dist_irq_clear_level(vcpu, irq_num); 1621 - if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) 1610 + if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) { 1622 1611 vgic_dist_irq_clear_pending(vcpu, irq_num); 1612 + vgic_cpu_irq_clear(vcpu, irq_num); 1613 + if (!compute_pending_for_cpu(vcpu)) 1614 + clear_bit(cpuid, dist->irq_pending_on_cpu); 1615 + } 1623 1616 } 1624 1617 1625 1618 ret = false;