Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

In netdevice.h we removed the structure in net-next that is being
changed in 'net'. In macsec.c and rtnetlink.c we have overlaps
between fixes in 'net' and the u64 attribute changes in 'net-next'.

The mlx5 conflicts have to do with vxlan support dependencies.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1271 -577
+1
.mailmap
··· 69 69 Jeff Garzik <jgarzik@pretzel.yyz.us> 70 70 Jens Axboe <axboe@suse.de> 71 71 Jens Osterkamp <Jens.Osterkamp@de.ibm.com> 72 + John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> 72 73 John Stultz <johnstul@us.ibm.com> 73 74 <josh@joshtriplett.org> <josh@freedesktop.org> 74 75 <josh@joshtriplett.org> <josh@kernel.org>
+4
Documentation/devicetree/bindings/ata/ahci-platform.txt
··· 32 32 - target-supply : regulator for SATA target power 33 33 - phys : reference to the SATA PHY node 34 34 - phy-names : must be "sata-phy" 35 + - ports-implemented : Mask that indicates which ports that the HBA supports 36 + are available for software to use. Useful if PORTS_IMPL 37 + is not programmed by the BIOS, which is true with 38 + some embedded SOC's. 35 39 36 40 Required properties when using sub-nodes: 37 41 - #address-cells : number of cells to encode an address
+7 -7
Documentation/networking/checksum-offloads.txt
··· 69 69 LCO is a technique for efficiently computing the outer checksum of an 70 70 encapsulated datagram when the inner checksum is due to be offloaded. 71 71 The ones-complement sum of a correctly checksummed TCP or UDP packet is 72 - equal to the sum of the pseudo header, because everything else gets 73 - 'cancelled out' by the checksum field. This is because the sum was 72 + equal to the complement of the sum of the pseudo header, because everything 73 + else gets 'cancelled out' by the checksum field. This is because the sum was 74 74 complemented before being written to the checksum field. 75 75 More generally, this holds in any case where the 'IP-style' ones complement 76 76 checksum is used, and thus any checksum that TX Checksum Offload supports. 77 77 That is, if we have set up TX Checksum Offload with a start/offset pair, we 78 - know that _after the device has filled in that checksum_, the ones 78 + know that after the device has filled in that checksum, the ones 79 79 complement sum from csum_start to the end of the packet will be equal to 80 - _whatever value we put in the checksum field beforehand_. This allows us 81 - to compute the outer checksum without looking at the payload: we simply 82 - stop summing when we get to csum_start, then add the 16-bit word at 83 - (csum_start + csum_offset). 80 + the complement of whatever value we put in the checksum field beforehand. 81 + This allows us to compute the outer checksum without looking at the payload: 82 + we simply stop summing when we get to csum_start, then add the complement of 83 + the 16-bit word at (csum_start + csum_offset). 84 84 Then, when the true inner checksum is filled in (either by hardware or by 85 85 skb_checksum_help()), the outer checksum will become correct by virtue of 86 86 the arithmetic.
+27 -27
MAINTAINERS
··· 872 872 F: include/linux/perf/arm_pmu.h 873 873 874 874 ARM PORT 875 - M: Russell King <linux@arm.linux.org.uk> 875 + M: Russell King <linux@armlinux.org.uk> 876 876 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 877 - W: http://www.arm.linux.org.uk/ 877 + W: http://www.armlinux.org.uk/ 878 878 S: Maintained 879 879 F: arch/arm/ 880 880 ··· 886 886 T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git 887 887 888 888 ARM PRIMECELL AACI PL041 DRIVER 889 - M: Russell King <linux@arm.linux.org.uk> 889 + M: Russell King <linux@armlinux.org.uk> 890 890 S: Maintained 891 891 F: sound/arm/aaci.* 892 892 893 893 ARM PRIMECELL CLCD PL110 DRIVER 894 - M: Russell King <linux@arm.linux.org.uk> 894 + M: Russell King <linux@armlinux.org.uk> 895 895 S: Maintained 896 896 F: drivers/video/fbdev/amba-clcd.* 897 897 898 898 ARM PRIMECELL KMI PL050 DRIVER 899 - M: Russell King <linux@arm.linux.org.uk> 899 + M: Russell King <linux@armlinux.org.uk> 900 900 S: Maintained 901 901 F: drivers/input/serio/ambakmi.* 902 902 F: include/linux/amba/kmi.h 903 903 904 904 ARM PRIMECELL MMCI PL180/1 DRIVER 905 - M: Russell King <linux@arm.linux.org.uk> 905 + M: Russell King <linux@armlinux.org.uk> 906 906 S: Maintained 907 907 F: drivers/mmc/host/mmci.* 908 908 F: include/linux/amba/mmci.h 909 909 910 910 ARM PRIMECELL UART PL010 AND PL011 DRIVERS 911 - M: Russell King <linux@arm.linux.org.uk> 911 + M: Russell King <linux@armlinux.org.uk> 912 912 S: Maintained 913 913 F: drivers/tty/serial/amba-pl01*.c 914 914 F: include/linux/amba/serial.h 915 915 916 916 ARM PRIMECELL BUS SUPPORT 917 - M: Russell King <linux@arm.linux.org.uk> 917 + M: Russell King <linux@armlinux.org.uk> 918 918 S: Maintained 919 919 F: drivers/amba/ 920 920 F: include/linux/amba/bus.h ··· 1036 1036 S: Maintained 1037 1037 1038 1038 ARM/CLKDEV SUPPORT 1039 - M: Russell King <linux@arm.linux.org.uk> 1039 + M: Russell King <linux@armlinux.org.uk> 1040 1040 L: 
linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1041 1041 S: Maintained 1042 1042 F: arch/arm/include/asm/clkdev.h ··· 1093 1093 N: digicolor 1094 1094 1095 1095 ARM/EBSA110 MACHINE SUPPORT 1096 - M: Russell King <linux@arm.linux.org.uk> 1096 + M: Russell King <linux@armlinux.org.uk> 1097 1097 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1098 - W: http://www.arm.linux.org.uk/ 1098 + W: http://www.armlinux.org.uk/ 1099 1099 S: Maintained 1100 1100 F: arch/arm/mach-ebsa110/ 1101 1101 F: drivers/net/ethernet/amd/am79c961a.* ··· 1124 1124 F: arch/arm/mm/*-fa* 1125 1125 1126 1126 ARM/FOOTBRIDGE ARCHITECTURE 1127 - M: Russell King <linux@arm.linux.org.uk> 1127 + M: Russell King <linux@armlinux.org.uk> 1128 1128 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1129 - W: http://www.arm.linux.org.uk/ 1129 + W: http://www.armlinux.org.uk/ 1130 1130 S: Maintained 1131 1131 F: arch/arm/include/asm/hardware/dec21285.h 1132 1132 F: arch/arm/mach-footbridge/ ··· 1457 1457 ARM/PT DIGITAL BOARD PORT 1458 1458 M: Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de> 1459 1459 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1460 - W: http://www.arm.linux.org.uk/ 1460 + W: http://www.armlinux.org.uk/ 1461 1461 S: Maintained 1462 1462 1463 1463 ARM/QUALCOMM SUPPORT ··· 1493 1493 F: arch/arm64/boot/dts/renesas/ 1494 1494 1495 1495 ARM/RISCPC ARCHITECTURE 1496 - M: Russell King <linux@arm.linux.org.uk> 1496 + M: Russell King <linux@armlinux.org.uk> 1497 1497 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1498 - W: http://www.arm.linux.org.uk/ 1498 + W: http://www.armlinux.org.uk/ 1499 1499 S: Maintained 1500 1500 F: arch/arm/include/asm/hardware/entry-macro-iomd.S 1501 1501 F: arch/arm/include/asm/hardware/ioc.h ··· 1773 1773 F: drivers/clocksource/versatile.c 1774 1774 1775 1775 ARM/VFP SUPPORT 1776 - M: Russell King <linux@arm.linux.org.uk> 1776 + M: Russell King 
<linux@armlinux.org.uk> 1777 1777 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1778 - W: http://www.arm.linux.org.uk/ 1778 + W: http://www.armlinux.org.uk/ 1779 1779 S: Maintained 1780 1780 F: arch/arm/vfp/ 1781 1781 ··· 2924 2924 F: include/linux/cleancache.h 2925 2925 2926 2926 CLK API 2927 - M: Russell King <linux@arm.linux.org.uk> 2927 + M: Russell King <linux@armlinux.org.uk> 2928 2928 L: linux-clk@vger.kernel.org 2929 2929 S: Maintained 2930 2930 F: include/linux/clk.h ··· 3358 3358 F: drivers/net/ethernet/stmicro/stmmac/ 3359 3359 3360 3360 CYBERPRO FB DRIVER 3361 - M: Russell King <linux@arm.linux.org.uk> 3361 + M: Russell King <linux@armlinux.org.uk> 3362 3362 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 3363 - W: http://www.arm.linux.org.uk/ 3363 + W: http://www.armlinux.org.uk/ 3364 3364 S: Maintained 3365 3365 F: drivers/video/fbdev/cyber2000fb.* 3366 3366 ··· 3885 3885 3886 3886 DRM DRIVERS FOR VIVANTE GPU IP 3887 3887 M: Lucas Stach <l.stach@pengutronix.de> 3888 - R: Russell King <linux+etnaviv@arm.linux.org.uk> 3888 + R: Russell King <linux+etnaviv@armlinux.org.uk> 3889 3889 R: Christian Gmeiner <christian.gmeiner@gmail.com> 3890 3890 L: dri-devel@lists.freedesktop.org 3891 3891 S: Maintained ··· 4227 4227 F: arch/ia64/kernel/efi.c 4228 4228 F: arch/x86/boot/compressed/eboot.[ch] 4229 4229 F: arch/x86/include/asm/efi.h 4230 - F: arch/x86/platform/efi/* 4231 - F: drivers/firmware/efi/* 4230 + F: arch/x86/platform/efi/ 4231 + F: drivers/firmware/efi/ 4232 4232 F: include/linux/efi*.h 4233 4233 4234 4234 EFI VARIABLE FILESYSTEM ··· 6902 6902 S: Maintained 6903 6903 6904 6904 MARVELL ARMADA DRM SUPPORT 6905 - M: Russell King <rmk+kernel@arm.linux.org.uk> 6905 + M: Russell King <rmk+kernel@armlinux.org.uk> 6906 6906 S: Maintained 6907 6907 F: drivers/gpu/drm/armada/ 6908 6908 ··· 7902 7902 F: drivers/nfc/nxp-nci 7903 7903 7904 7904 NXP TDA998X DRM DRIVER 7905 - M: Russell King 
<rmk+kernel@arm.linux.org.uk> 7905 + M: Russell King <rmk+kernel@armlinux.org.uk> 7906 7906 S: Supported 7907 7907 F: drivers/gpu/drm/i2c/tda998x_drv.c 7908 7908 F: include/drm/i2c/tda998x.h ··· 7975 7975 F: drivers/cpufreq/omap-cpufreq.c 7976 7976 7977 7977 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT 7978 - M: Rajendra Nayak <rnayak@ti.com> 7978 + M: Rajendra Nayak <rnayak@codeaurora.org> 7979 7979 M: Paul Walmsley <paul@pwsan.com> 7980 7980 L: linux-omap@vger.kernel.org 7981 7981 S: Maintained
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 6 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc7 5 5 NAME = Charred Weasel 6 6 7 7 # *DOCUMENTATION*
+13
arch/arc/Kconfig
··· 58 58 config RWSEM_GENERIC_SPINLOCK 59 59 def_bool y 60 60 61 + config ARCH_DISCONTIGMEM_ENABLE 62 + def_bool y 63 + 61 64 config ARCH_FLATMEM_ENABLE 62 65 def_bool y 63 66 ··· 350 347 351 348 endchoice 352 349 350 + config NODES_SHIFT 351 + int "Maximum NUMA Nodes (as a power of 2)" 352 + default "1" if !DISCONTIGMEM 353 + default "2" if DISCONTIGMEM 354 + depends on NEED_MULTIPLE_NODES 355 + ---help--- 356 + Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory 357 + zones. 358 + 353 359 if ISA_ARCOMPACT 354 360 355 361 config ARC_COMPACT_IRQ_LEVELS ··· 467 455 468 456 config HIGHMEM 469 457 bool "High Memory Support" 458 + select DISCONTIGMEM 470 459 help 471 460 With ARC 2G:2G address split, only upper 2G is directly addressable by 472 461 kernel. Enable this to potentially allow access to rest of 2G and PAE
+18 -9
arch/arc/include/asm/io.h
··· 13 13 #include <asm/byteorder.h> 14 14 #include <asm/page.h> 15 15 16 + #ifdef CONFIG_ISA_ARCV2 17 + #include <asm/barrier.h> 18 + #define __iormb() rmb() 19 + #define __iowmb() wmb() 20 + #else 21 + #define __iormb() do { } while (0) 22 + #define __iowmb() do { } while (0) 23 + #endif 24 + 16 25 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size); 17 26 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, 18 27 unsigned long flags); ··· 39 30 #define ioremap_nocache(phy, sz) ioremap(phy, sz) 40 31 #define ioremap_wc(phy, sz) ioremap(phy, sz) 41 32 #define ioremap_wt(phy, sz) ioremap(phy, sz) 33 + 34 + /* 35 + * io{read,write}{16,32}be() macros 36 + */ 37 + #define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) 38 + #define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) 39 + 40 + #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); }) 41 + #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); }) 42 42 43 43 /* Change struct page to physical address */ 44 44 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) ··· 125 107 : "memory"); 126 108 127 109 } 128 - 129 - #ifdef CONFIG_ISA_ARCV2 130 - #include <asm/barrier.h> 131 - #define __iormb() rmb() 132 - #define __iowmb() wmb() 133 - #else 134 - #define __iormb() do { } while (0) 135 - #define __iowmb() do { } while (0) 136 - #endif 137 110 138 111 /* 139 112 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+43
arch/arc/include/asm/mmzone.h
··· 1 + /* 2 + * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef _ASM_ARC_MMZONE_H 10 + #define _ASM_ARC_MMZONE_H 11 + 12 + #ifdef CONFIG_DISCONTIGMEM 13 + 14 + extern struct pglist_data node_data[]; 15 + #define NODE_DATA(nid) (&node_data[nid]) 16 + 17 + static inline int pfn_to_nid(unsigned long pfn) 18 + { 19 + int is_end_low = 1; 20 + 21 + if (IS_ENABLED(CONFIG_ARC_HAS_PAE40)) 22 + is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL); 23 + 24 + /* 25 + * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF 26 + * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF 27 + * HIGHMEM with PAE40: 0x1_0000_0000 to ... 28 + */ 29 + if (pfn >= ARCH_PFN_OFFSET && is_end_low) 30 + return 0; 31 + 32 + return 1; 33 + } 34 + 35 + static inline int pfn_valid(unsigned long pfn) 36 + { 37 + int nid = pfn_to_nid(pfn); 38 + 39 + return (pfn <= node_end_pfn(nid)); 40 + } 41 + #endif /* CONFIG_DISCONTIGMEM */ 42 + 43 + #endif
+11 -4
arch/arc/include/asm/page.h
··· 72 72 73 73 typedef pte_t * pgtable_t; 74 74 75 + /* 76 + * Use virt_to_pfn with caution: 77 + * If used in pte or paddr related macros, it could cause truncation 78 + * in PAE40 builds 79 + * As a rule of thumb, only use it in helpers starting with virt_ 80 + * You have been warned ! 81 + */ 75 82 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 76 83 77 84 #define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE) 78 85 86 + #ifdef CONFIG_FLATMEM 79 87 #define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr) 88 + #endif 80 89 81 90 /* 82 91 * __pa, __va, virt_to_page (ALERT: deprecated, don't use them) ··· 94 85 * virt here means link-address/program-address as embedded in object code. 95 86 * And for ARC, link-addr = physical address 96 87 */ 97 - #define __pa(vaddr) ((unsigned long)vaddr) 88 + #define __pa(vaddr) ((unsigned long)(vaddr)) 98 89 #define __va(paddr) ((void *)((unsigned long)(paddr))) 99 90 100 - #define virt_to_page(kaddr) \ 101 - (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE)) 102 - 91 + #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 103 92 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 104 93 105 94 /* Default Permissions for stack/heaps pages (Non Executable) */
+6 -7
arch/arc/include/asm/pgtable.h
··· 278 278 #define pmd_present(x) (pmd_val(x)) 279 279 #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) 280 280 281 - #define pte_page(pte) \ 282 - (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE)) 283 - 281 + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) 284 282 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 285 - #define pte_pfn(pte) virt_to_pfn(pte_val(pte)) 286 - #define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \ 287 - pgprot_val(prot))) 288 - #define __pte_index(addr) (virt_to_pfn(addr) & (PTRS_PER_PTE - 1)) 283 + #define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) 284 + 285 + /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ 286 + #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 287 + #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 289 288 290 289 /* 291 290 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+39 -15
arch/arc/mm/init.c
··· 30 30 static unsigned long low_mem_sz; 31 31 32 32 #ifdef CONFIG_HIGHMEM 33 - static unsigned long min_high_pfn; 33 + static unsigned long min_high_pfn, max_high_pfn; 34 34 static u64 high_mem_start; 35 35 static u64 high_mem_sz; 36 + #endif 37 + 38 + #ifdef CONFIG_DISCONTIGMEM 39 + struct pglist_data node_data[MAX_NUMNODES] __read_mostly; 40 + EXPORT_SYMBOL(node_data); 36 41 #endif 37 42 38 43 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */ ··· 114 109 /* Last usable page of low mem */ 115 110 max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz); 116 111 117 - #ifdef CONFIG_HIGHMEM 118 - min_high_pfn = PFN_DOWN(high_mem_start); 119 - max_pfn = PFN_DOWN(high_mem_start + high_mem_sz); 112 + #ifdef CONFIG_FLATMEM 113 + /* pfn_valid() uses this */ 114 + max_mapnr = max_low_pfn - min_low_pfn; 120 115 #endif 121 - 122 - max_mapnr = max_pfn - min_low_pfn; 123 116 124 117 /*------------- bootmem allocator setup -----------------------*/ 125 118 ··· 132 129 * the crash 133 130 */ 134 131 135 - memblock_add(low_mem_start, low_mem_sz); 132 + memblock_add_node(low_mem_start, low_mem_sz, 0); 136 133 memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); 137 134 138 135 #ifdef CONFIG_BLK_DEV_INITRD ··· 152 149 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; 153 150 zones_holes[ZONE_NORMAL] = 0; 154 151 155 - #ifdef CONFIG_HIGHMEM 156 - zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; 157 - 158 - /* This handles the peripheral address space hole */ 159 - zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn; 160 - #endif 161 - 162 152 /* 163 153 * We can't use the helper free_area_init(zones[]) because it uses 164 154 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong ··· 164 168 zones_holes); /* holes */ 165 169 166 170 #ifdef CONFIG_HIGHMEM 171 + /* 172 + * Populate a new node with highmem 173 + * 174 + * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based) 175 + * than addresses in normal ala low memory 
(0x8000_0000 based). 176 + * Even with PAE, the huge peripheral space hole would waste a lot of 177 + * mem with single mem_map[]. This warrants a mem_map per region design. 178 + * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM. 179 + * 180 + * DISCONTIGMEM in turns requires multiple nodes. node 0 above is 181 + * populated with normal memory zone while node 1 only has highmem 182 + */ 183 + node_set_online(1); 184 + 185 + min_high_pfn = PFN_DOWN(high_mem_start); 186 + max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); 187 + 188 + zones_size[ZONE_NORMAL] = 0; 189 + zones_holes[ZONE_NORMAL] = 0; 190 + 191 + zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn; 192 + zones_holes[ZONE_HIGHMEM] = 0; 193 + 194 + free_area_init_node(1, /* node-id */ 195 + zones_size, /* num pages per zone */ 196 + min_high_pfn, /* first pfn of node */ 197 + zones_holes); /* holes */ 198 + 167 199 high_memory = (void *)(min_high_pfn << PAGE_SHIFT); 168 200 kmap_init(); 169 201 #endif ··· 209 185 unsigned long tmp; 210 186 211 187 reset_all_zones_managed_pages(); 212 - for (tmp = min_high_pfn; tmp < max_pfn; tmp++) 188 + for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) 213 189 free_highmem_page(pfn_to_page(tmp)); 214 190 #endif 215 191
+9
arch/arm/boot/dts/omap3-n900.dts
··· 329 329 regulator-name = "V28"; 330 330 regulator-min-microvolt = <2800000>; 331 331 regulator-max-microvolt = <2800000>; 332 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 332 333 regulator-always-on; /* due to battery cover sensor */ 333 334 }; 334 335 ··· 337 336 regulator-name = "VCSI"; 338 337 regulator-min-microvolt = <1800000>; 339 338 regulator-max-microvolt = <1800000>; 339 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 340 340 }; 341 341 342 342 &vaux3 { 343 343 regulator-name = "VMMC2_30"; 344 344 regulator-min-microvolt = <2800000>; 345 345 regulator-max-microvolt = <3000000>; 346 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 346 347 }; 347 348 348 349 &vaux4 { 349 350 regulator-name = "VCAM_ANA_28"; 350 351 regulator-min-microvolt = <2800000>; 351 352 regulator-max-microvolt = <2800000>; 353 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 352 354 }; 353 355 354 356 &vmmc1 { 355 357 regulator-name = "VMMC1"; 356 358 regulator-min-microvolt = <1850000>; 357 359 regulator-max-microvolt = <3150000>; 360 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 358 361 }; 359 362 360 363 &vmmc2 { 361 364 regulator-name = "V28_A"; 362 365 regulator-min-microvolt = <2800000>; 363 366 regulator-max-microvolt = <3000000>; 367 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 364 368 regulator-always-on; /* due VIO leak to AIC34 VDDs */ 365 369 }; 366 370 ··· 373 367 regulator-name = "VPLL"; 374 368 regulator-min-microvolt = <1800000>; 375 369 regulator-max-microvolt = <1800000>; 370 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 376 371 regulator-always-on; 377 372 }; 378 373 ··· 381 374 regulator-name = "VSDI_CSI"; 382 375 regulator-min-microvolt = <1800000>; 383 376 regulator-max-microvolt = <1800000>; 377 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 384 378 regulator-always-on; 385 379 }; 386 380 ··· 389 381 regulator-name = "VMMC2_IO_18"; 390 382 regulator-min-microvolt = 
<1800000>; 391 383 regulator-max-microvolt = <1800000>; 384 + regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ 392 385 }; 393 386 394 387 &vio {
+1 -1
arch/arm/boot/dts/omap34xx.dtsi
··· 46 46 0x480bd800 0x017c>; 47 47 interrupts = <24>; 48 48 iommus = <&mmu_isp>; 49 - syscon = <&scm_conf 0xdc>; 49 + syscon = <&scm_conf 0x6c>; 50 50 ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>; 51 51 #clock-cells = <1>; 52 52 ports {
+2 -2
arch/arm/boot/dts/omap5-board-common.dtsi
··· 472 472 ldo1_reg: ldo1 { 473 473 /* VDDAPHY_CAM: vdda_csiport */ 474 474 regulator-name = "ldo1"; 475 - regulator-min-microvolt = <1500000>; 475 + regulator-min-microvolt = <1800000>; 476 476 regulator-max-microvolt = <1800000>; 477 477 }; 478 478 ··· 498 498 ldo4_reg: ldo4 { 499 499 /* VDDAPHY_DISP: vdda_dsiport/hdmi */ 500 500 regulator-name = "ldo4"; 501 - regulator-min-microvolt = <1500000>; 501 + regulator-min-microvolt = <1800000>; 502 502 regulator-max-microvolt = <1800000>; 503 503 }; 504 504
+2 -2
arch/arm/boot/dts/omap5-cm-t54.dts
··· 513 513 ldo1_reg: ldo1 { 514 514 /* VDDAPHY_CAM: vdda_csiport */ 515 515 regulator-name = "ldo1"; 516 - regulator-min-microvolt = <1500000>; 516 + regulator-min-microvolt = <1800000>; 517 517 regulator-max-microvolt = <1800000>; 518 518 }; 519 519 ··· 537 537 ldo4_reg: ldo4 { 538 538 /* VDDAPHY_DISP: vdda_dsiport/hdmi */ 539 539 regulator-name = "ldo4"; 540 - regulator-min-microvolt = <1500000>; 540 + regulator-min-microvolt = <1800000>; 541 541 regulator-max-microvolt = <1800000>; 542 542 }; 543 543
+1 -1
arch/arm/boot/dts/omap5.dtsi
··· 269 269 omap5_pmx_wkup: pinmux@c840 { 270 270 compatible = "ti,omap5-padconf", 271 271 "pinctrl-single"; 272 - reg = <0xc840 0x0038>; 272 + reg = <0xc840 0x003c>; 273 273 #address-cells = <1>; 274 274 #size-cells = <0>; 275 275 #interrupt-cells = <1>;
+2 -1
arch/arm/boot/dts/qcom-apq8064.dtsi
··· 666 666 }; 667 667 668 668 sata0: sata@29000000 { 669 - compatible = "generic-ahci"; 669 + compatible = "qcom,apq8064-ahci", "generic-ahci"; 670 670 status = "disabled"; 671 671 reg = <0x29000000 0x180>; 672 672 interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>; ··· 688 688 689 689 phys = <&sata_phy0>; 690 690 phy-names = "sata-phy"; 691 + ports-implemented = <0x1>; 691 692 }; 692 693 693 694 /* Temporary fixed regulator */
-2
arch/arm/boot/dts/sun8i-q8-common.dtsi
··· 125 125 }; 126 126 127 127 &reg_dc1sw { 128 - regulator-min-microvolt = <3000000>; 129 - regulator-max-microvolt = <3000000>; 130 128 regulator-name = "vcc-lcd"; 131 129 }; 132 130
+11
arch/arm/include/asm/domain.h
··· 84 84 85 85 #ifndef __ASSEMBLY__ 86 86 87 + #ifdef CONFIG_CPU_CP15_MMU 87 88 static inline unsigned int get_domain(void) 88 89 { 89 90 unsigned int domain; ··· 104 103 : : "r" (val) : "memory"); 105 104 isb(); 106 105 } 106 + #else 107 + static inline unsigned int get_domain(void) 108 + { 109 + return 0; 110 + } 111 + 112 + static inline void set_domain(unsigned val) 113 + { 114 + } 115 + #endif 107 116 108 117 #ifdef CONFIG_CPU_USE_DOMAINS 109 118 #define modify_domain(dom,type) \
+1 -1
arch/arm/kernel/head-nommu.S
··· 236 236 mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE 237 237 ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) 238 238 /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ 239 - mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) 239 + mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) 240 240 241 241 setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled 242 242 beq 3f @ Memory-map not unified
+1 -1
arch/arm/kvm/mmu.c
··· 1004 1004 kvm_pfn_t pfn = *pfnp; 1005 1005 gfn_t gfn = *ipap >> PAGE_SHIFT; 1006 1006 1007 - if (PageTransCompound(pfn_to_page(pfn))) { 1007 + if (PageTransCompoundMap(pfn_to_page(pfn))) { 1008 1008 unsigned long mask; 1009 1009 /* 1010 1010 * The address we faulted on is backed by a transparent huge
+5
arch/arm/mach-davinci/board-mityomapl138.c
··· 121 121 const char *partnum = NULL; 122 122 struct davinci_soc_info *soc_info = &davinci_soc_info; 123 123 124 + if (!IS_BUILTIN(CONFIG_NVMEM)) { 125 + pr_warn("Factory Config not available without CONFIG_NVMEM\n"); 126 + goto bad_config; 127 + } 128 + 124 129 ret = nvmem_device_read(nvmem, 0, sizeof(factory_config), 125 130 &factory_config); 126 131 if (ret != sizeof(struct factory_config)) {
+5
arch/arm/mach-davinci/common.c
··· 33 33 char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; 34 34 off_t offset = (off_t)context; 35 35 36 + if (!IS_BUILTIN(CONFIG_NVMEM)) { 37 + pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n"); 38 + return; 39 + } 40 + 36 41 /* Read MAC addr from EEPROM */ 37 42 if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) 38 43 pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
+1 -1
arch/arm/mach-exynos/pm_domains.c
··· 92 92 if (IS_ERR(pd->clk[i])) 93 93 break; 94 94 95 - if (IS_ERR(pd->clk[i])) 95 + if (IS_ERR(pd->pclk[i])) 96 96 continue; /* Skip on first power up */ 97 97 if (clk_set_parent(pd->clk[i], pd->pclk[i])) 98 98 pr_err("%s: error setting parent to clock%d\n",
+1
arch/arm/mach-socfpga/headsmp.S
··· 13 13 #include <asm/assembler.h> 14 14 15 15 .arch armv7-a 16 + .arm 16 17 17 18 ENTRY(secondary_trampoline) 18 19 /* CPU1 will always fetch from 0x0 when it is brought out of reset.
+8 -7
arch/arm/mm/nommu.c
··· 87 87 /* MPU initialisation functions */ 88 88 void __init sanity_check_meminfo_mpu(void) 89 89 { 90 - int i; 91 90 phys_addr_t phys_offset = PHYS_OFFSET; 92 91 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; 93 92 struct memblock_region *reg; ··· 109 110 } else { 110 111 /* 111 112 * memblock auto merges contiguous blocks, remove 112 - * all blocks afterwards 113 + * all blocks afterwards in one go (we can't remove 114 + * blocks separately while iterating) 113 115 */ 114 116 pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", 115 - &mem_start, &reg->base); 116 - memblock_remove(reg->base, reg->size); 117 + &mem_end, &reg->base); 118 + memblock_remove(reg->base, 0 - reg->base); 119 + break; 117 120 } 118 121 } 119 122 ··· 145 144 pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", 146 145 &specified_mem_size, &aligned_region_size); 147 146 memblock_remove(mem_start + aligned_region_size, 148 - specified_mem_size - aligned_round_size); 147 + specified_mem_size - aligned_region_size); 149 148 150 149 mem_end = mem_start + aligned_region_size; 151 150 } ··· 262 261 return; 263 262 264 263 region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, 265 - ilog2(meminfo.bank[0].size), 264 + ilog2(memblock.memory.regions[0].size), 266 265 MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); 267 266 if (region_err) { 268 267 panic("MPU region initialization failure! %d", region_err); ··· 286 285 * some architectures which the DRAM is the exception vector to trap, 287 286 * alloc_page breaks with error, although it is not NULL, but "0." 288 287 */ 289 - memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); 288 + memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); 290 289 #else /* ifndef CONFIG_CPU_V7M */ 291 290 /* 292 291 * There is no dedicated vector page on V7-M. So nothing needs to be
-1
arch/arm64/boot/dts/renesas/r8a7795.dtsi
··· 120 120 compatible = "fixed-clock"; 121 121 #clock-cells = <0>; 122 122 clock-frequency = <0>; 123 - status = "disabled"; 124 123 }; 125 124 126 125 soc {
+1 -1
arch/parisc/kernel/syscall.S
··· 344 344 #endif 345 345 346 346 cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ 347 - comiclr,>>= __NR_Linux_syscalls, %r20, %r0 347 + comiclr,>> __NR_Linux_syscalls, %r20, %r0 348 348 b,n .Ltracesys_nosys 349 349 350 350 LDREGX %r20(%r19), %r19
+1 -1
arch/powerpc/include/asm/word-at-a-time.h
··· 82 82 "andc %1,%1,%2\n\t" 83 83 "popcntd %0,%1" 84 84 : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) 85 - : "r" (bits)); 85 + : "b" (bits)); 86 86 87 87 return leading_zero_bits; 88 88 }
+1
arch/x86/events/amd/iommu.c
··· 474 474 475 475 static struct perf_amd_iommu __perf_iommu = { 476 476 .pmu = { 477 + .task_ctx_nr = perf_invalid_context, 477 478 .event_init = perf_iommu_event_init, 478 479 .add = perf_iommu_add, 479 480 .del = perf_iommu_del,
+2
arch/x86/events/intel/core.c
··· 3637 3637 pr_cont("Knights Landing events, "); 3638 3638 break; 3639 3639 3640 + case 142: /* 14nm Kabylake Mobile */ 3641 + case 158: /* 14nm Kabylake Desktop */ 3640 3642 case 78: /* 14nm Skylake Mobile */ 3641 3643 case 94: /* 14nm Skylake Desktop */ 3642 3644 case 85: /* 14nm Skylake Server */
+1 -3
arch/x86/kernel/apic/x2apic_uv_x.c
··· 891 891 } 892 892 pr_info("UV: Found %s hub\n", hub); 893 893 894 - /* We now only need to map the MMRs on UV1 */ 895 - if (is_uv1_hub()) 896 - map_low_mmrs(); 894 + map_low_mmrs(); 897 895 898 896 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); 899 897 m_val = m_n_config.s.m_skt;
+12 -2
arch/x86/kernel/sysfb_efi.c
··· 106 106 continue; 107 107 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 108 108 resource_size_t start, end; 109 + unsigned long flags; 110 + 111 + flags = pci_resource_flags(dev, i); 112 + if (!(flags & IORESOURCE_MEM)) 113 + continue; 114 + 115 + if (flags & IORESOURCE_UNSET) 116 + continue; 117 + 118 + if (pci_resource_len(dev, i) == 0) 119 + continue; 109 120 110 121 start = pci_resource_start(dev, i); 111 - if (start == 0) 112 - break; 113 122 end = pci_resource_end(dev, i); 114 123 if (screen_info.lfb_base >= start && 115 124 screen_info.lfb_base < end) { 116 125 found_bar = 1; 126 + break; 117 127 } 118 128 } 119 129 }
+1 -1
arch/x86/kernel/tsc_msr.c
··· 92 92 93 93 if (freq_desc_tables[cpu_index].msr_plat) { 94 94 rdmsr(MSR_PLATFORM_INFO, lo, hi); 95 - ratio = (lo >> 8) & 0x1f; 95 + ratio = (lo >> 8) & 0xff; 96 96 } else { 97 97 rdmsr(MSR_IA32_PERF_STATUS, lo, hi); 98 98 ratio = (hi >> 8) & 0x1f;
+2 -2
arch/x86/kvm/mmu.c
··· 2823 2823 */ 2824 2824 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && 2825 2825 level == PT_PAGE_TABLE_LEVEL && 2826 - PageTransCompound(pfn_to_page(pfn)) && 2826 + PageTransCompoundMap(pfn_to_page(pfn)) && 2827 2827 !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { 2828 2828 unsigned long mask; 2829 2829 /* ··· 4785 4785 */ 4786 4786 if (sp->role.direct && 4787 4787 !kvm_is_reserved_pfn(pfn) && 4788 - PageTransCompound(pfn_to_page(pfn))) { 4788 + PageTransCompoundMap(pfn_to_page(pfn))) { 4789 4789 drop_spte(kvm, sptep); 4790 4790 need_tlb_flush = 1; 4791 4791 goto restart;
+9 -9
arch/x86/platform/efi/efi-bgrt.c
··· 43 43 return; 44 44 45 45 if (bgrt_tab->header.length < sizeof(*bgrt_tab)) { 46 - pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n", 46 + pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", 47 47 bgrt_tab->header.length, sizeof(*bgrt_tab)); 48 48 return; 49 49 } 50 50 if (bgrt_tab->version != 1) { 51 - pr_err("Ignoring BGRT: invalid version %u (expected 1)\n", 51 + pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n", 52 52 bgrt_tab->version); 53 53 return; 54 54 } 55 55 if (bgrt_tab->status & 0xfe) { 56 - pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n", 56 + pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", 57 57 bgrt_tab->status); 58 58 return; 59 59 } 60 60 if (bgrt_tab->image_type != 0) { 61 - pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n", 61 + pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", 62 62 bgrt_tab->image_type); 63 63 return; 64 64 } 65 65 if (!bgrt_tab->image_address) { 66 - pr_err("Ignoring BGRT: null image address\n"); 66 + pr_notice("Ignoring BGRT: null image address\n"); 67 67 return; 68 68 } 69 69 70 70 image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); 71 71 if (!image) { 72 - pr_err("Ignoring BGRT: failed to map image header memory\n"); 72 + pr_notice("Ignoring BGRT: failed to map image header memory\n"); 73 73 return; 74 74 } 75 75 76 76 memcpy(&bmp_header, image, sizeof(bmp_header)); 77 77 memunmap(image); 78 78 if (bmp_header.id != 0x4d42) { 79 - pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", 79 + pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", 80 80 bmp_header.id); 81 81 return; 82 82 } ··· 84 84 85 85 bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); 86 86 if (!bgrt_image) { 87 - pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", 87 + pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", 88 88 bgrt_image_size); 89 89 return; 90 90 } 91 91 92 92 image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); 93 93 if (!image) { 94 - pr_err("Ignoring BGRT: failed to map image memory\n"); 94 + pr_notice("Ignoring BGRT: failed to map image memory\n"); 95 95 kfree(bgrt_image); 96 96 bgrt_image = NULL; 97 97 return;
+1
crypto/Kconfig
··· 96 96 config CRYPTO_RSA 97 97 tristate "RSA algorithm" 98 98 select CRYPTO_AKCIPHER 99 + select CRYPTO_MANAGER 99 100 select MPILIB 100 101 select ASN1 101 102 help
+2 -1
crypto/ahash.c
··· 69 69 struct scatterlist *sg; 70 70 71 71 sg = walk->sg; 72 - walk->pg = sg_page(sg); 73 72 walk->offset = sg->offset; 73 + walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); 74 + walk->offset = offset_in_page(walk->offset); 74 75 walk->entrylen = sg->length; 75 76 76 77 if (walk->entrylen > walk->total)
+3
drivers/acpi/acpica/dsmethod.c
··· 428 428 obj_desc->method.mutex->mutex. 429 429 original_sync_level = 430 430 obj_desc->method.mutex->mutex.sync_level; 431 + 432 + obj_desc->method.mutex->mutex.thread_id = 433 + acpi_os_get_thread_id(); 431 434 } 432 435 } 433 436
+4 -1
drivers/acpi/nfit.c
··· 287 287 offset); 288 288 rc = -ENXIO; 289 289 } 290 - } else 290 + } else { 291 291 rc = 0; 292 + if (cmd_rc) 293 + *cmd_rc = xlat_status(buf, cmd); 294 + } 292 295 293 296 out: 294 297 ACPI_FREE(out_obj);
+8
drivers/ata/Kconfig
··· 202 202 203 203 If unsure, say N. 204 204 205 + config SATA_AHCI_SEATTLE 206 + tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support" 207 + depends on ARCH_SEATTLE 208 + help 209 + This option enables support for AMD Seattle SATA host controller. 210 + 211 + If unsure, say N 212 + 205 213 config SATA_INIC162X 206 214 tristate "Initio 162x SATA support (Very Experimental)" 207 215 depends on PCI
+1
drivers/ata/Makefile
··· 4 4 # non-SFF interface 5 5 obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o 6 6 obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o 7 + obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o 7 8 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o 8 9 obj-$(CONFIG_SATA_FSL) += sata_fsl.o 9 10 obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+3
drivers/ata/ahci_platform.c
··· 51 51 if (rc) 52 52 return rc; 53 53 54 + of_property_read_u32(dev->of_node, 55 + "ports-implemented", &hpriv->force_port_map); 56 + 54 57 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) 55 58 hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; 56 59
+210
drivers/ata/ahci_seattle.c
··· 1 + /* 2 + * AMD Seattle AHCI SATA driver 3 + * 4 + * Copyright (c) 2015, Advanced Micro Devices 5 + * Author: Brijesh Singh <brijesh.singh@amd.com> 6 + * 7 + * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + 19 + #include <linux/kernel.h> 20 + #include <linux/module.h> 21 + #include <linux/pm.h> 22 + #include <linux/device.h> 23 + #include <linux/of_device.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/libata.h> 26 + #include <linux/ahci_platform.h> 27 + #include <linux/acpi.h> 28 + #include <linux/pci_ids.h> 29 + #include "ahci.h" 30 + 31 + /* SGPIO Control Register definition 32 + * 33 + * Bit Type Description 34 + * 31 RW OD7.2 (activity) 35 + * 30 RW OD7.1 (locate) 36 + * 29 RW OD7.0 (fault) 37 + * 28...8 RW OD6.2...OD0.0 (3bits per port, 1 bit per LED) 38 + * 7 RO SGPIO feature flag 39 + * 6:4 RO Reserved 40 + * 3:0 RO Number of ports (0 means no port supported) 41 + */ 42 + #define ACTIVITY_BIT_POS(x) (8 + (3 * x)) 43 + #define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1) 44 + #define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1) 45 + 46 + #define ACTIVITY_MASK 0x00010000 47 + #define LOCATE_MASK 0x00080000 48 + #define FAULT_MASK 0x00400000 49 + 50 + #define DRV_NAME "ahci-seattle" 51 + 52 + static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state, 53 + ssize_t size); 54 + 55 + struct seattle_plat_data { 56 + void __iomem *sgpio_ctrl; 57 + }; 58 + 59 + static struct ata_port_operations ahci_port_ops = { 60 + .inherits = &ahci_ops, 61 + }; 62 + 63 + static const struct ata_port_info ahci_port_info = { 64 + .flags = AHCI_FLAG_COMMON, 65 + .pio_mask = ATA_PIO4, 66 + .udma_mask = ATA_UDMA6, 67 + .port_ops = &ahci_port_ops, 68 + }; 69 + 70 + static struct ata_port_operations ahci_seattle_ops = { 71 + .inherits = &ahci_ops, 72 + .transmit_led_message = seattle_transmit_led_message, 73 + }; 74 + 75 + static const struct ata_port_info ahci_port_seattle_info = { 76 + .flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY, 77 + .link_flags = ATA_LFLAG_SW_ACTIVITY, 78 + .pio_mask = ATA_PIO4, 79 + .udma_mask = ATA_UDMA6, 80 + .port_ops = &ahci_seattle_ops, 81 + }; 82 + 83 + static struct scsi_host_template ahci_platform_sht = { 84 + AHCI_SHT(DRV_NAME), 85 + }; 86 + 87 + static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state, 88 + ssize_t size) 89 + { 90 + struct ahci_host_priv *hpriv = ap->host->private_data; 91 + struct ahci_port_priv *pp = ap->private_data; 92 + struct seattle_plat_data *plat_data = hpriv->plat_data; 93 + unsigned long flags; 94 + int pmp; 95 + struct ahci_em_priv *emp; 96 + u32 val; 97 + 98 + /* get the slot number from the message */ 99 + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; 100 + if (pmp >= EM_MAX_SLOTS) 101 + return -EINVAL; 102 + emp = &pp->em_priv[pmp]; 103 + 104 + val = ioread32(plat_data->sgpio_ctrl); 105 + if (state & ACTIVITY_MASK) 106 + val |= 1 << ACTIVITY_BIT_POS((ap->port_no)); 107 + else 108 + val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no))); 109 + 110 + if (state & LOCATE_MASK) 111 + val |= 1 << LOCATE_BIT_POS((ap->port_no)); 112 + else 113 + val &= ~(1 << LOCATE_BIT_POS((ap->port_no))); 114 + 115 + if (state & FAULT_MASK) 116 + val |= 1 << FAULT_BIT_POS((ap->port_no)); 117 + else 118 + val &= ~(1 << FAULT_BIT_POS((ap->port_no))); 119 + 120 + iowrite32(val, plat_data->sgpio_ctrl); 121 + 122 + spin_lock_irqsave(ap->lock, flags); 123 + 124 + /* save off new led state for port/slot */ 125 + emp->led_state = state; 126 + 127 + spin_unlock_irqrestore(ap->lock, flags); 128 + 129 + return size; 130 + } 131 + 132 + static const struct ata_port_info *ahci_seattle_get_port_info( 133 + struct platform_device *pdev, struct ahci_host_priv *hpriv) 134 + { 135 + struct device *dev = &pdev->dev; 136 + struct seattle_plat_data *plat_data; 137 + u32 val; 138 + 139 + plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL); 140 + if (IS_ERR(plat_data)) 141 + return &ahci_port_info; 142 + 143 + plat_data->sgpio_ctrl = devm_ioremap_resource(dev, 144 + platform_get_resource(pdev, IORESOURCE_MEM, 1)); 145 + if (IS_ERR(plat_data->sgpio_ctrl)) 146 + return &ahci_port_info; 147 + 148 + val = ioread32(plat_data->sgpio_ctrl); 149 + 150 + if (!(val & 0xf)) 151 + return &ahci_port_info; 152 + 153 + hpriv->em_loc = 0; 154 + hpriv->em_buf_sz = 4; 155 + hpriv->em_msg_type = EM_MSG_TYPE_LED; 156 + hpriv->plat_data = plat_data; 157 + 158 + dev_info(dev, "SGPIO LED control is enabled.\n"); 159 + return &ahci_port_seattle_info; 160 + } 161 + 162 + static int ahci_seattle_probe(struct platform_device *pdev) 163 + { 164 + int rc; 165 + struct ahci_host_priv *hpriv; 166 + 167 + hpriv = ahci_platform_get_resources(pdev); 168 + if (IS_ERR(hpriv)) 169 + return PTR_ERR(hpriv); 170 + 171 + rc = ahci_platform_enable_resources(hpriv); 172 + if (rc) 173 + return rc; 174 + 175 + rc = ahci_platform_init_host(pdev, hpriv, 176 + ahci_seattle_get_port_info(pdev, hpriv), 177 + &ahci_platform_sht); 178 + if (rc) 179 + goto disable_resources; 180 + 181 + return 0; 182 + disable_resources: 183 + ahci_platform_disable_resources(hpriv); 184 + return rc; 185 + } 186 + 187 + static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend, 188 + ahci_platform_resume); 189 + 190 + static const struct acpi_device_id ahci_acpi_match[] = { 191 + { "AMDI0600", 0 }, 192 + {} 193 + }; 194 + MODULE_DEVICE_TABLE(acpi, ahci_acpi_match); 195 + 196 + static struct platform_driver ahci_seattle_driver = { 197 + .probe = ahci_seattle_probe, 198 + .remove = ata_platform_remove_one, 199 + .driver = { 200 + .name = DRV_NAME, 201 + .acpi_match_table = ahci_acpi_match, 202 + .pm = &ahci_pm_ops, 203 + }, 204 + }; 205 + module_platform_driver(ahci_seattle_driver); 206 + 207 + MODULE_DESCRIPTION("Seattle AHCI SATA platform driver"); 208 + MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>"); 209 + MODULE_LICENSE("GPL"); 210 + MODULE_ALIAS("platform:" DRV_NAME);
+1
drivers/ata/libahci.c
··· 507 507 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", 508 508 port_map, hpriv->force_port_map); 509 509 port_map = hpriv->force_port_map; 510 + hpriv->saved_port_map = port_map; 510 511 } 511 512 512 513 if (hpriv->mask_port_map) {
-3
drivers/base/power/opp/core.c
··· 259 259 reg = opp_table->regulator; 260 260 if (IS_ERR(reg)) { 261 261 /* Regulator may not be required for device */ 262 - if (reg) 263 - dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__, 264 - PTR_ERR(reg)); 265 262 rcu_read_unlock(); 266 263 return 0; 267 264 }
+1 -1
drivers/base/property.c
··· 21 21 22 22 static inline bool is_pset_node(struct fwnode_handle *fwnode) 23 23 { 24 - return fwnode && fwnode->type == FWNODE_PDATA; 24 + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; 25 25 } 26 26 27 27 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
+15 -11
drivers/cpufreq/cpufreq.c
··· 1557 1557 if (!cpufreq_driver) 1558 1558 return; 1559 1559 1560 - if (!has_target()) 1560 + if (!has_target() && !cpufreq_driver->suspend) 1561 1561 goto suspend; 1562 1562 1563 1563 pr_debug("%s: Suspending Governors\n", __func__); 1564 1564 1565 1565 for_each_active_policy(policy) { 1566 - down_write(&policy->rwsem); 1567 - ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1568 - up_write(&policy->rwsem); 1566 + if (has_target()) { 1567 + down_write(&policy->rwsem); 1568 + ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1569 + up_write(&policy->rwsem); 1569 1570 1570 - if (ret) 1571 - pr_err("%s: Failed to stop governor for policy: %p\n", 1572 - __func__, policy); 1573 - else if (cpufreq_driver->suspend 1574 - && cpufreq_driver->suspend(policy)) 1571 + if (ret) { 1572 + pr_err("%s: Failed to stop governor for policy: %p\n", 1573 + __func__, policy); 1574 + continue; 1575 + } 1576 + } 1577 + 1578 + if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) 1575 1579 pr_err("%s: Failed to suspend driver: %p\n", __func__, 1576 1580 policy); 1577 1581 } ··· 1600 1596 1601 1597 cpufreq_suspended = false; 1602 1598 1603 - if (!has_target()) 1599 + if (!has_target() && !cpufreq_driver->resume) 1604 1600 return; 1605 1601 1606 1602 pr_debug("%s: Resuming Governors\n", __func__); ··· 1609 1605 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { 1610 1606 pr_err("%s: Failed to resume driver: %p\n", __func__, 1611 1607 policy); 1612 - } else { 1608 + } else if (has_target()) { 1613 1609 down_write(&policy->rwsem); 1614 1610 ret = cpufreq_start_governor(policy); 1615 1611 up_write(&policy->rwsem);
+18 -8
drivers/cpufreq/intel_pstate.c
··· 453 453 } 454 454 } 455 455 456 + static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy) 457 + { 458 + if (hwp_active) 459 + intel_pstate_hwp_set(policy->cpus); 460 + 461 + return 0; 462 + } 463 + 456 464 static void intel_pstate_hwp_set_online_cpus(void) 457 465 { 458 466 get_online_cpus(); ··· 1070 1062 1071 1063 static inline int32_t get_avg_frequency(struct cpudata *cpu) 1072 1064 { 1073 - return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf * 1074 - cpu->pstate.scaling, cpu->sample.mperf); 1065 + return fp_toint(mul_fp(cpu->sample.core_pct_busy, 1066 + int_tofp(cpu->pstate.max_pstate_physical * 1067 + cpu->pstate.scaling / 100))); 1075 1068 } 1076 1069 1077 1070 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) ··· 1114 1105 { 1115 1106 int32_t core_busy, max_pstate, current_pstate, sample_ratio; 1116 1107 u64 duration_ns; 1117 - 1118 - intel_pstate_calc_busy(cpu); 1119 1108 1120 1109 /* 1121 1110 * core_busy is the ratio of actual performance to max ··· 1198 1191 if ((s64)delta_ns >= pid_params.sample_rate_ns) { 1199 1192 bool sample_taken = intel_pstate_sample(cpu, time); 1200 1193 1201 - if (sample_taken && !hwp_active) 1202 - intel_pstate_adjust_busy_pstate(cpu); 1194 + if (sample_taken) { 1195 + intel_pstate_calc_busy(cpu); 1196 + if (!hwp_active) 1197 + intel_pstate_adjust_busy_pstate(cpu); 1198 + } 1203 1199 } 1204 1200 } 1205 1201 ··· 1356 1346 out: 1357 1347 intel_pstate_set_update_util_hook(policy->cpu); 1358 1348 1359 - if (hwp_active) 1360 - intel_pstate_hwp_set(policy->cpus); 1349 + intel_pstate_hwp_set_policy(policy); 1361 1350 1362 1351 return 0; 1363 1352 } ··· 1420 1411 .flags = CPUFREQ_CONST_LOOPS, 1421 1412 .verify = intel_pstate_verify_policy, 1422 1413 .setpolicy = intel_pstate_set_policy, 1414 + .resume = intel_pstate_hwp_set_policy, 1423 1415 .get = intel_pstate_get, 1424 1416 .init = intel_pstate_cpu_init, 1425 1417 .stop_cpu = intel_pstate_stop_cpu,
+4
drivers/cpufreq/sti-cpufreq.c
··· 259 259 { 260 260 int ret; 261 261 262 + if ((!of_machine_is_compatible("st,stih407")) && 263 + (!of_machine_is_compatible("st,stih410"))) 264 + return -ENODEV; 265 + 262 266 ddata.cpu = get_cpu_device(0); 263 267 if (!ddata.cpu) { 264 268 dev_err(ddata.cpu, "Failed to get device for CPU0\n");
+1 -1
drivers/cpuidle/cpuidle-arm.c
··· 50 50 * call the CPU ops suspend protocol with idle index as a 51 51 * parameter. 52 52 */ 53 - arm_cpuidle_suspend(idx); 53 + ret = arm_cpuidle_suspend(idx); 54 54 55 55 cpu_pm_exit(); 56 56 }
+11
drivers/crypto/qat/qat_common/adf_common_drv.h
··· 236 236 uint32_t vf_mask); 237 237 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); 238 238 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); 239 + int adf_init_pf_wq(void); 240 + void adf_exit_pf_wq(void); 239 241 #else 240 242 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) 241 243 { ··· 253 251 } 254 252 255 253 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) 254 + { 255 + } 256 + 257 + static inline int adf_init_pf_wq(void) 258 + { 259 + return 0; 260 + } 261 + 262 + static inline void adf_exit_pf_wq(void) 256 263 { 257 264 } 258 265 #endif
+6
drivers/crypto/qat/qat_common/adf_ctl_drv.c
··· 462 462 if (adf_init_aer()) 463 463 goto err_aer; 464 464 465 + if (adf_init_pf_wq()) 466 + goto err_pf_wq; 467 + 465 468 if (qat_crypto_register()) 466 469 goto err_crypto_register; 467 470 468 471 return 0; 469 472 470 473 err_crypto_register: 474 + adf_exit_pf_wq(); 475 + err_pf_wq: 471 476 adf_exit_aer(); 472 477 err_aer: 473 478 adf_chr_drv_destroy(); ··· 485 480 { 486 481 adf_chr_drv_destroy(); 487 482 adf_exit_aer(); 483 + adf_exit_pf_wq(); 488 484 qat_crypto_unregister(); 489 485 adf_clean_vf_map(false); 490 486 mutex_destroy(&adf_ctl_lock);
+16 -10
drivers/crypto/qat/qat_common/adf_sriov.c
··· 119 119 int i; 120 120 u32 reg; 121 121 122 - /* Workqueue for PF2VF responses */ 123 - pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); 124 - if (!pf2vf_resp_wq) 125 - return -ENOMEM; 126 - 127 122 for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs; 128 123 i++, vf_info++) { 129 124 /* This ptr will be populated when VFs will be created */ ··· 211 216 212 217 kfree(accel_dev->pf.vf_info); 213 218 accel_dev->pf.vf_info = NULL; 214 - 215 - if (pf2vf_resp_wq) { 216 - destroy_workqueue(pf2vf_resp_wq); 217 - pf2vf_resp_wq = NULL; 218 - } 219 219 } 220 220 EXPORT_SYMBOL_GPL(adf_disable_sriov); 221 221 ··· 294 304 return numvfs; 295 305 } 296 306 EXPORT_SYMBOL_GPL(adf_sriov_configure); 307 + 308 + int __init adf_init_pf_wq(void) 309 + { 310 + /* Workqueue for PF2VF responses */ 311 + pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); 312 + 313 + return !pf2vf_resp_wq ? -ENOMEM : 0; 314 + } 315 + 316 + void adf_exit_pf_wq(void) 317 + { 318 + if (pf2vf_resp_wq) { 319 + destroy_workqueue(pf2vf_resp_wq); 320 + pf2vf_resp_wq = NULL; 321 + } 322 + }
+1 -1
drivers/firmware/qemu_fw_cfg.c
··· 77 77 static inline void fw_cfg_read_blob(u16 key, 78 78 void *buf, loff_t pos, size_t count) 79 79 { 80 - u32 glk; 80 + u32 glk = -1U; 81 81 acpi_status status; 82 82 83 83 /* If we have ACPI, ensure mutual exclusion against any potential
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 541 541 if (!metadata_size) { 542 542 if (bo->metadata_size) { 543 543 kfree(bo->metadata); 544 + bo->metadata = NULL; 544 545 bo->metadata_size = 0; 545 546 } 546 547 return 0;
+4
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
··· 298 298 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 299 299 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 300 300 301 + /* vertical FP must be at least 1 */ 302 + if (mode->crtc_vsync_start == mode->crtc_vdisplay) 303 + adjusted_mode->crtc_vsync_start++; 304 + 301 305 /* get the native mode for scaling */ 302 306 if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) 303 307 amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+31 -1
drivers/gpu/drm/i915/i915_drv.c
··· 792 792 static int i915_drm_resume_early(struct drm_device *dev) 793 793 { 794 794 struct drm_i915_private *dev_priv = dev->dev_private; 795 - int ret = 0; 795 + int ret; 796 796 797 797 /* 798 798 * We have a resume ordering issue with the snd-hda driver also ··· 802 802 * 803 803 * FIXME: This should be solved with a special hdmi sink device or 804 804 * similar so that power domains can be employed. 805 + */ 806 + 807 + /* 808 + * Note that we need to set the power state explicitly, since we 809 + * powered off the device during freeze and the PCI core won't power 810 + * it back up for us during thaw. Powering off the device during 811 + * freeze is not a hard requirement though, and during the 812 + * suspend/resume phases the PCI core makes sure we get here with the 813 + * device powered on. So in case we change our freeze logic and keep 814 + * the device powered we can also remove the following set power state 815 + * call. 816 + */ 817 + ret = pci_set_power_state(dev->pdev, PCI_D0); 818 + if (ret) { 819 + DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); 820 + goto out; 821 + } 822 + 823 + /* 824 + * Note that pci_enable_device() first enables any parent bridge 825 + * device and only then sets the power state for this device. The 826 + * bridge enabling is a nop though, since bridge devices are resumed 827 + * first. The order of enabling power and enabling the device is 828 + * imposed by the PCI core as described above, so here we preserve the 829 + * same order for the freeze/thaw phases. 830 + * 831 + * TODO: eventually we should remove pci_disable_device() / 832 + * pci_enable_enable_device() from suspend/resume. Due to how they 833 + * depend on the device enable refcount we can't anyway depend on them 834 + * disabling/enabling the device. 805 835 */ 806 836 if (pci_enable_device(dev->pdev)) { 807 837 ret = -EIO;
+8 -1
drivers/gpu/drm/i915/i915_reg.h
··· 2907 2907 #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) 2908 2908 #define BXT_RP_STATE_CAP _MMIO(0x138170) 2909 2909 2910 - #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2910 + /* 2911 + * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS 2912 + * 8300) freezing up around GPU hangs. Looks as if even 2913 + * scheduling/timer interrupts start misbehaving if the RPS 2914 + * EI/thresholds are "bad", leading to a very sluggish or even 2915 + * frozen machine. 2916 + */ 2917 + #define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) 2911 2918 #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2912 2919 #define INTERVAL_0_833_US(us) (((us) * 6) / 5) 2913 2920 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+13 -9
drivers/gpu/drm/i915/intel_ddi.c
··· 443 443 } else if (IS_BROADWELL(dev_priv)) { 444 444 ddi_translations_fdi = bdw_ddi_translations_fdi; 445 445 ddi_translations_dp = bdw_ddi_translations_dp; 446 - ddi_translations_edp = bdw_ddi_translations_edp; 446 + 447 + if (dev_priv->edp_low_vswing) { 448 + ddi_translations_edp = bdw_ddi_translations_edp; 449 + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); 450 + } else { 451 + ddi_translations_edp = bdw_ddi_translations_dp; 452 + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); 453 + } 454 + 447 455 ddi_translations_hdmi = bdw_ddi_translations_hdmi; 448 - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); 456 + 449 457 n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); 450 458 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); 451 459 hdmi_default_entry = 7; ··· 3209 3201 intel_ddi_clock_get(encoder, pipe_config); 3210 3202 } 3211 3203 3212 - static void intel_ddi_destroy(struct drm_encoder *encoder) 3213 - { 3214 - /* HDMI has nothing special to destroy, so we can go with this. */ 3215 - intel_dp_encoder_destroy(encoder); 3216 - } 3217 - 3218 3204 static bool intel_ddi_compute_config(struct intel_encoder *encoder, 3219 3205 struct intel_crtc_state *pipe_config) 3220 3206 { ··· 3227 3225 } 3228 3226 3229 3227 static const struct drm_encoder_funcs intel_ddi_funcs = { 3230 - .destroy = intel_ddi_destroy, 3228 + .reset = intel_dp_encoder_reset, 3229 + .destroy = intel_dp_encoder_destroy, 3231 3230 }; 3232 3231 3233 3232 static struct intel_connector * ··· 3327 3324 intel_encoder->post_disable = intel_ddi_post_disable; 3328 3325 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 3329 3326 intel_encoder->get_config = intel_ddi_get_config; 3327 + intel_encoder->suspend = intel_dp_encoder_suspend; 3330 3328 3331 3329 intel_dig_port->port = port; 3332 3330 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+3
drivers/gpu/drm/i915/intel_display.c
··· 13351 13351 } 13352 13352 13353 13353 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13354 + if (state->legacy_cursor_update) 13355 + continue; 13356 + 13354 13357 ret = intel_crtc_wait_for_pending_flips(crtc); 13355 13358 if (ret) 13356 13359 return ret;
+2 -2
drivers/gpu/drm/i915/intel_dp.c
··· 4898 4898 kfree(intel_dig_port); 4899 4899 } 4900 4900 4901 - static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4901 + void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4902 4902 { 4903 4903 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 4904 4904 ··· 4940 4940 edp_panel_vdd_schedule_off(intel_dp); 4941 4941 } 4942 4942 4943 - static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4943 + void intel_dp_encoder_reset(struct drm_encoder *encoder) 4944 4944 { 4945 4945 struct intel_dp *intel_dp; 4946 4946
+2
drivers/gpu/drm/i915/intel_drv.h
··· 1238 1238 void intel_dp_start_link_train(struct intel_dp *intel_dp); 1239 1239 void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1240 1240 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1241 + void intel_dp_encoder_reset(struct drm_encoder *encoder); 1242 + void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); 1241 1243 void intel_dp_encoder_destroy(struct drm_encoder *encoder); 1242 1244 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); 1243 1245 bool intel_dp_compute_config(struct intel_encoder *encoder,
+10 -2
drivers/gpu/drm/i915/intel_hdmi.c
··· 1415 1415 hdmi_to_dig_port(intel_hdmi)); 1416 1416 } 1417 1417 1418 - if (!live_status) 1419 - DRM_DEBUG_KMS("Live status not up!"); 1418 + if (!live_status) { 1419 + DRM_DEBUG_KMS("HDMI live status down\n"); 1420 + /* 1421 + * Live status register is not reliable on all intel platforms. 1422 + * So consider live_status only for certain platforms, for 1423 + * others, read EDID to determine presence of sink. 1424 + */ 1425 + if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) 1426 + live_status = true; 1427 + } 1420 1428 1421 1429 intel_hdmi_unset_edid(connector); 1422 1430
+4
drivers/gpu/drm/radeon/atombios_encoders.c
··· 310 310 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 311 311 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 312 312 313 + /* vertical FP must be at least 1 */ 314 + if (mode->crtc_vsync_start == mode->crtc_vdisplay) 315 + adjusted_mode->crtc_vsync_start++; 316 + 313 317 /* get the native mode for scaling */ 314 318 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 315 319 radeon_panel_mode_fixup(encoder, adjusted_mode);
+6 -1
drivers/gpu/ipu-v3/ipu-common.c
··· 1068 1068 goto err_register; 1069 1069 } 1070 1070 1071 - pdev->dev.of_node = of_node; 1072 1071 pdev->dev.parent = dev; 1073 1072 1074 1073 ret = platform_device_add_data(pdev, &reg->pdata, ··· 1078 1079 platform_device_put(pdev); 1079 1080 goto err_register; 1080 1081 } 1082 + 1083 + /* 1084 + * Set of_node only after calling platform_device_add. Otherwise 1085 + * the platform:imx-ipuv3-crtc modalias won't be used. 1086 + */ 1087 + pdev->dev.of_node = of_node; 1081 1088 } 1082 1089 1083 1090 return 0;
+20 -6
drivers/hv/ring_buffer.c
··· 103 103 * there is room for the producer to send the pending packet. 104 104 */ 105 105 106 - static bool hv_need_to_signal_on_read(u32 prev_write_sz, 107 - struct hv_ring_buffer_info *rbi) 106 + static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) 108 107 { 109 108 u32 cur_write_sz; 110 109 u32 r_size; 111 - u32 write_loc = rbi->ring_buffer->write_index; 110 + u32 write_loc; 112 111 u32 read_loc = rbi->ring_buffer->read_index; 113 - u32 pending_sz = rbi->ring_buffer->pending_send_sz; 112 + u32 pending_sz; 114 113 114 + /* 115 + * Issue a full memory barrier before making the signaling decision. 116 + * Here is the reason for having this barrier: 117 + * If the reading of the pend_sz (in this function) 118 + * were to be reordered and read before we commit the new read 119 + * index (in the calling function) we could 120 + * have a problem. If the host were to set the pending_sz after we 121 + * have sampled pending_sz and go to sleep before we commit the 122 + * read index, we could miss sending the interrupt. Issue a full 123 + * memory barrier to address this. 124 + */ 125 + mb(); 126 + 127 + pending_sz = rbi->ring_buffer->pending_send_sz; 128 + write_loc = rbi->ring_buffer->write_index; 115 129 /* If the other end is not blocked on write don't bother. */ 116 130 if (pending_sz == 0) 117 131 return false; ··· 134 120 cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) : 135 121 read_loc - write_loc; 136 122 137 - if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz)) 123 + if (cur_write_sz >= pending_sz) 138 124 return true; 139 125 140 126 return false; ··· 469 455 /* Update the read index */ 470 456 hv_set_next_read_location(inring_info, next_read_location); 471 457 472 - *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); 458 + *signal = hv_need_to_signal_on_read(inring_info); 473 459 474 460 return ret; 475 461 }
+2
drivers/iio/adc/at91-sama5d2_adc.c
··· 451 451 if (ret) 452 452 goto vref_disable; 453 453 454 + platform_set_drvdata(pdev, indio_dev); 455 + 454 456 ret = iio_device_register(indio_dev); 455 457 if (ret < 0) 456 458 goto per_clk_disable_unprepare;
+27 -3
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
··· 104 104 return 0; 105 105 } 106 106 107 + static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id) 108 + { 109 + const struct acpi_device_id *id; 110 + 111 + id = acpi_match_device(dev->driver->acpi_match_table, dev); 112 + if (!id) 113 + return NULL; 114 + 115 + *chip_id = (int)id->driver_data; 116 + 117 + return dev_name(dev); 118 + } 119 + 107 120 /** 108 121 * inv_mpu_probe() - probe function. 109 122 * @client: i2c client. ··· 128 115 const struct i2c_device_id *id) 129 116 { 130 117 struct inv_mpu6050_state *st; 131 - int result; 132 - const char *name = id ? id->name : NULL; 118 + int result, chip_type; 133 119 struct regmap *regmap; 120 + const char *name; 134 121 135 122 if (!i2c_check_functionality(client->adapter, 136 123 I2C_FUNC_SMBUS_I2C_BLOCK)) 137 124 return -EOPNOTSUPP; 125 + 126 + if (id) { 127 + chip_type = (int)id->driver_data; 128 + name = id->name; 129 + } else if (ACPI_HANDLE(&client->dev)) { 130 + name = inv_mpu_match_acpi_device(&client->dev, &chip_type); 131 + if (!name) 132 + return -ENODEV; 133 + } else { 134 + return -ENOSYS; 135 + } 138 136 139 137 regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config); 140 138 if (IS_ERR(regmap)) { ··· 155 131 } 156 132 157 133 result = inv_mpu_core_probe(regmap, client->irq, name, 158 - NULL, id->driver_data); 134 + NULL, chip_type); 159 135 if (result < 0) 160 136 return result; 161 137
+2 -1
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
··· 46 46 struct regmap *regmap; 47 47 const struct spi_device_id *id = spi_get_device_id(spi); 48 48 const char *name = id ? id->name : NULL; 49 + const int chip_type = id ? id->driver_data : 0; 49 50 50 51 regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config); 51 52 if (IS_ERR(regmap)) { ··· 56 55 } 57 56 58 57 return inv_mpu_core_probe(regmap, spi->irq, name, 59 - inv_mpu_i2c_disable, id->driver_data); 58 + inv_mpu_i2c_disable, chip_type); 60 59 } 61 60 62 61 static int inv_mpu_remove(struct spi_device *spi)
+3 -3
drivers/iio/magnetometer/ak8975.c
··· 462 462 int rc; 463 463 int irq; 464 464 465 + init_waitqueue_head(&data->data_ready_queue); 466 + clear_bit(0, &data->flags); 465 467 if (client->irq) 466 468 irq = client->irq; 467 469 else ··· 479 477 return rc; 480 478 } 481 479 482 - init_waitqueue_head(&data->data_ready_queue); 483 - clear_bit(0, &data->flags); 484 480 data->eoc_irq = irq; 485 481 486 482 return rc; ··· 732 732 int eoc_gpio; 733 733 int err; 734 734 const char *name = NULL; 735 - enum asahi_compass_chipset chipset; 735 + enum asahi_compass_chipset chipset = AK_MAX_TYPE; 736 736 737 737 /* Grab and set up the supplied GPIO. */ 738 738 if (client->dev.platform_data)
+10 -4
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 612 612 struct Scsi_Host *shost; 613 613 struct iser_conn *iser_conn = NULL; 614 614 struct ib_conn *ib_conn; 615 + u32 max_fr_sectors; 615 616 u16 max_cmds; 616 617 617 618 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); ··· 633 632 iser_conn = ep->dd_data; 634 633 max_cmds = iser_conn->max_cmds; 635 634 shost->sg_tablesize = iser_conn->scsi_sg_tablesize; 636 - shost->max_sectors = iser_conn->scsi_max_sectors; 637 635 638 636 mutex_lock(&iser_conn->state_mutex); 639 637 if (iser_conn->state != ISER_CONN_UP) { ··· 657 657 */ 658 658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, 659 659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); 660 - shost->max_sectors = min_t(unsigned int, 661 - 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9); 662 660 663 661 if (iscsi_host_add(shost, 664 662 ib_conn->device->ib_device->dma_device)) { ··· 669 671 if (iscsi_host_add(shost, NULL)) 670 672 goto free_host; 671 673 } 674 + 675 + /* 676 + * FRs or FMRs can only map up to a (device) page per entry, but if the 677 + * first entry is misaligned we'll end up using using two entries 678 + * (head and tail) for a single page worth data, so we have to drop 679 + * one segment from the calculation. 680 + */ 681 + max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 682 + shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 672 683 673 684 if (cmds_max > max_cmds) { 674 685 iser_info("cmds_max changed from %u to %u\n", ··· 996 989 .queuecommand = iscsi_queuecommand, 997 990 .change_queue_depth = scsi_change_queue_depth, 998 991 .sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE, 999 - .max_sectors = ISER_DEF_MAX_SECTORS, 1000 992 .cmd_per_lun = ISER_DEF_CMD_PER_LUN, 1001 993 .eh_abort_handler = iscsi_eh_abort, 1002 994 .eh_device_reset_handler= iscsi_eh_device_reset,
+8 -8
drivers/input/misc/twl6040-vibra.c
··· 181 181 { 182 182 struct vibra_info *info = container_of(work, 183 183 struct vibra_info, play_work); 184 + int ret; 185 + 186 + /* Do not allow effect, while the routing is set to use audio */ 187 + ret = twl6040_get_vibralr_status(info->twl6040); 188 + if (ret & TWL6040_VIBSEL) { 189 + dev_info(info->dev, "Vibra is configured for audio\n"); 190 + return; 191 + } 184 192 185 193 mutex_lock(&info->mutex); 186 194 ··· 207 199 struct ff_effect *effect) 208 200 { 209 201 struct vibra_info *info = input_get_drvdata(input); 210 - int ret; 211 - 212 - /* Do not allow effect, while the routing is set to use audio */ 213 - ret = twl6040_get_vibralr_status(info->twl6040); 214 - if (ret & TWL6040_VIBSEL) { 215 - dev_info(&input->dev, "Vibra is configured for audio\n"); 216 - return -EBUSY; 217 - } 218 202 219 203 info->weak_speed = effect->u.rumble.weak_magnitude; 220 204 info->strong_speed = effect->u.rumble.strong_magnitude;
+14 -14
drivers/input/touchscreen/atmel_mxt_ts.c
··· 1093 1093 return 0; 1094 1094 } 1095 1095 1096 + static int mxt_acquire_irq(struct mxt_data *data) 1097 + { 1098 + int error; 1099 + 1100 + enable_irq(data->irq); 1101 + 1102 + error = mxt_process_messages_until_invalid(data); 1103 + if (error) 1104 + return error; 1105 + 1106 + return 0; 1107 + } 1108 + 1096 1109 static int mxt_soft_reset(struct mxt_data *data) 1097 1110 { 1098 1111 struct device *dev = &data->client->dev; ··· 1124 1111 /* Ignore CHG line for 100ms after reset */ 1125 1112 msleep(100); 1126 1113 1127 - enable_irq(data->irq); 1114 + mxt_acquire_irq(data); 1128 1115 1129 1116 ret = mxt_wait_for_completion(data, &data->reset_completion, 1130 1117 MXT_RESET_TIMEOUT); ··· 1477 1464 release_mem: 1478 1465 kfree(config_mem); 1479 1466 return ret; 1480 - } 1481 - 1482 - static int mxt_acquire_irq(struct mxt_data *data) 1483 - { 1484 - int error; 1485 - 1486 - enable_irq(data->irq); 1487 - 1488 - error = mxt_process_messages_until_invalid(data); 1489 - if (error) 1490 - return error; 1491 - 1492 - return 0; 1493 1467 } 1494 1468 1495 1469 static int mxt_get_info(struct mxt_data *data)
+2 -2
drivers/input/touchscreen/zforce_ts.c
··· 370 370 point.coord_x = point.coord_y = 0; 371 371 } 372 372 373 - point.state = payload[9 * i + 5] & 0x03; 374 - point.id = (payload[9 * i + 5] & 0xfc) >> 2; 373 + point.state = payload[9 * i + 5] & 0x0f; 374 + point.id = (payload[9 * i + 5] & 0xf0) >> 4; 375 375 376 376 /* determine touch major, minor and orientation */ 377 377 point.area_major = max(payload[9 * i + 6],
+4 -4
drivers/media/media-device.c
··· 846 846 } 847 847 EXPORT_SYMBOL_GPL(media_device_find_devres); 848 848 849 + #if IS_ENABLED(CONFIG_PCI) 849 850 void media_device_pci_init(struct media_device *mdev, 850 851 struct pci_dev *pci_dev, 851 852 const char *name) 852 853 { 853 - #ifdef CONFIG_PCI 854 854 mdev->dev = &pci_dev->dev; 855 855 856 856 if (name) ··· 866 866 mdev->driver_version = LINUX_VERSION_CODE; 867 867 868 868 media_device_init(mdev); 869 - #endif 870 869 } 871 870 EXPORT_SYMBOL_GPL(media_device_pci_init); 871 + #endif 872 872 873 + #if IS_ENABLED(CONFIG_USB) 873 874 void __media_device_usb_init(struct media_device *mdev, 874 875 struct usb_device *udev, 875 876 const char *board_name, 876 877 const char *driver_name) 877 878 { 878 - #ifdef CONFIG_USB 879 879 mdev->dev = &udev->dev; 880 880 881 881 if (driver_name) ··· 895 895 mdev->driver_version = LINUX_VERSION_CODE; 896 896 897 897 media_device_init(mdev); 898 - #endif 899 898 } 900 899 EXPORT_SYMBOL_GPL(__media_device_usb_init); 900 + #endif 901 901 902 902 903 903 #endif /* CONFIG_MEDIA_CONTROLLER */
+2 -11
drivers/media/platform/exynos4-is/media-dev.c
··· 1446 1446 1447 1447 platform_set_drvdata(pdev, fmd); 1448 1448 1449 - /* Protect the media graph while we're registering entities */ 1450 - mutex_lock(&fmd->media_dev.graph_mutex); 1451 - 1452 1449 ret = fimc_md_register_platform_entities(fmd, dev->of_node); 1453 - if (ret) { 1454 - mutex_unlock(&fmd->media_dev.graph_mutex); 1450 + if (ret) 1455 1451 goto err_clk; 1456 - } 1457 1452 1458 1453 ret = fimc_md_register_sensor_entities(fmd); 1459 - if (ret) { 1460 - mutex_unlock(&fmd->media_dev.graph_mutex); 1454 + if (ret) 1461 1455 goto err_m_ent; 1462 - } 1463 - 1464 - mutex_unlock(&fmd->media_dev.graph_mutex); 1465 1456 1466 1457 ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode); 1467 1458 if (ret)
+3 -9
drivers/media/platform/s3c-camif/camif-core.c
··· 493 493 if (ret < 0) 494 494 goto err_sens; 495 495 496 - mutex_lock(&camif->media_dev.graph_mutex); 497 - 498 496 ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); 499 497 if (ret < 0) 500 - goto err_unlock; 498 + goto err_sens; 501 499 502 500 ret = camif_register_video_nodes(camif); 503 501 if (ret < 0) 504 - goto err_unlock; 502 + goto err_sens; 505 503 506 504 ret = camif_create_media_links(camif); 507 505 if (ret < 0) 508 - goto err_unlock; 509 - 510 - mutex_unlock(&camif->media_dev.graph_mutex); 506 + goto err_sens; 511 507 512 508 ret = media_device_register(&camif->media_dev); 513 509 if (ret < 0) ··· 512 516 pm_runtime_put(dev); 513 517 return 0; 514 518 515 - err_unlock: 516 - mutex_unlock(&camif->media_dev.graph_mutex); 517 519 err_sens: 518 520 v4l2_device_unregister(&camif->v4l2_dev); 519 521 media_device_unregister(&camif->media_dev);
+5
drivers/misc/mic/vop/vop_vringh.c
··· 945 945 ret = -EFAULT; 946 946 goto free_ret; 947 947 } 948 + /* Ensure desc has not changed between the two reads */ 949 + if (memcmp(&dd, dd_config, sizeof(dd))) { 950 + ret = -EINVAL; 951 + goto free_ret; 952 + } 948 953 mutex_lock(&vdev->vdev_mutex); 949 954 mutex_lock(&vi->vop_mutex); 950 955 ret = vop_virtio_add_device(vdev, dd_config);
+4 -3
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1608 1608 1609 1609 ret = xgene_enet_init_hw(pdata); 1610 1610 if (ret) 1611 - goto err; 1611 + goto err_netdev; 1612 1612 1613 1613 mac_ops = pdata->mac_ops; 1614 1614 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { 1615 1615 ret = xgene_enet_mdio_config(pdata); 1616 1616 if (ret) 1617 - goto err; 1617 + goto err_netdev; 1618 1618 } else { 1619 1619 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); 1620 1620 } 1621 1621 1622 1622 xgene_enet_napi_add(pdata); 1623 1623 return 0; 1624 - err: 1624 + err_netdev: 1625 1625 unregister_netdev(ndev); 1626 + err: 1626 1627 free_netdev(ndev); 1627 1628 return ret; 1628 1629 }
+19 -4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1439 1439 if (!TX_CMP_VALID(txcmp, raw_cons)) 1440 1440 break; 1441 1441 1442 + /* The valid test of the entry must be done first before 1443 + * reading any further. 1444 + */ 1445 + rmb(); 1442 1446 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1443 1447 tx_pkts++; 1444 1448 /* return full budget so NAPI will complete. */ ··· 4100 4096 } 4101 4097 4102 4098 static int bnxt_cfg_rx_mode(struct bnxt *); 4099 + static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 4103 4100 4104 4101 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 4105 4102 { 4103 + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 4106 4104 int rc = 0; 4107 4105 4108 4106 if (irq_re_init) { ··· 4160 4154 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 4161 4155 goto err_out; 4162 4156 } 4163 - bp->vnic_info[0].uc_filter_count = 1; 4157 + vnic->uc_filter_count = 1; 4164 4158 4165 - bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4159 + vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4166 4160 4167 4161 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) 4168 - bp->vnic_info[0].rx_mask |= 4169 - CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4162 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4163 + 4164 + if (bp->dev->flags & IFF_ALLMULTI) { 4165 + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 4166 + vnic->mc_list_count = 0; 4167 + } else { 4168 + u32 mask = 0; 4169 + 4170 + bnxt_mc_list_updated(bp, &mask); 4171 + vnic->rx_mask |= mask; 4172 + } 4170 4173 4171 4174 rc = bnxt_cfg_rx_mode(bp); 4172 4175 if (rc)
+8 -2
drivers/net/ethernet/freescale/fec_main.c
··· 1521 1521 struct fec_enet_private *fep = netdev_priv(ndev); 1522 1522 1523 1523 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1524 - clear_bit(queue_id, &fep->work_rx); 1525 - pkt_received += fec_enet_rx_queue(ndev, 1524 + int ret; 1525 + 1526 + ret = fec_enet_rx_queue(ndev, 1526 1527 budget - pkt_received, queue_id); 1528 + 1529 + if (ret < budget - pkt_received) 1530 + clear_bit(queue_id, &fep->work_rx); 1531 + 1532 + pkt_received += ret; 1527 1533 } 1528 1534 return pkt_received; 1529 1535 }
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 698 698 699 699 if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) 700 700 return -1; 701 - hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); 701 + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 702 702 703 703 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 704 704 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+7 -1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 14 14 bool "Mellanox Technologies ConnectX-4 Ethernet support" 15 15 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 16 16 select PTP_1588_CLOCK 17 - select VXLAN if MLX5_CORE=y 18 17 default n 19 18 ---help--- 20 19 Ethernet support in Mellanox Technologies ConnectX-4 NIC. ··· 31 32 This flag is depended on the kernel's DCB support. 32 33 33 34 If unsure, set to Y 35 + 36 + config MLX5_CORE_EN_VXLAN 37 + bool "VXLAN offloads Support" 38 + default y 39 + depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m) 40 + ---help--- 41 + Say Y here if you want to use VXLAN offloads in the driver.
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 6 6 7 7 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ 8 8 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ 9 - en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o 9 + en_txrx.o en_clock.o en_tc.o en_arfs.o 10 10 11 + mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o 11 12 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+5
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 522 522 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; 523 523 524 524 struct mlx5e_flow_steering fs; 525 + struct mlx5e_flow_tables fts; 526 + struct mlx5e_eth_addr_db eth_addr; 527 + struct mlx5e_vlan_db vlan; 528 + #ifdef CONFIG_MLX5_CORE_EN_VXLAN 525 529 struct mlx5e_vxlan_db vxlan; 530 + #endif 526 531 527 532 struct mlx5e_params params; 528 533 struct workqueue_struct *wq;
+3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2509 2509 vf_stats); 2510 2510 } 2511 2511 2512 + #if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) 2512 2513 static void mlx5e_add_vxlan_port(struct net_device *netdev, 2513 2514 sa_family_t sa_family, __be16 port) 2514 2515 { ··· 2581 2580 2582 2581 return features; 2583 2582 } 2583 + #endif 2584 2584 2585 2585 static const struct net_device_ops mlx5e_netdev_ops_basic = { 2586 2586 .ndo_open = mlx5e_open, ··· 2616 2614 .ndo_set_features = mlx5e_set_features, 2617 2615 .ndo_change_mtu = mlx5e_change_mtu, 2618 2616 .ndo_do_ioctl = mlx5e_ioctl, 2617 + #ifdef CONFIG_MLX5_CORE_EN_VXLAN 2619 2618 .ndo_add_vxlan_port = mlx5e_add_vxlan_port, 2620 2619 .ndo_del_vxlan_port = mlx5e_del_vxlan_port, 2621 2620 .ndo_features_check = mlx5e_features_check,
+9 -2
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
··· 48 48 49 49 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) 50 50 { 51 - return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && 51 + return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) && 52 + (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && 52 53 mlx5_core_is_pf(mdev)); 53 54 } 54 55 56 + #ifdef CONFIG_MLX5_CORE_EN_VXLAN 55 57 void mlx5e_vxlan_init(struct mlx5e_priv *priv); 58 + void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); 59 + #else 60 + static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {} 61 + static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {} 62 + #endif 63 + 56 64 void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, 57 65 u16 port, int add); 58 66 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); 59 - void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); 60 67 61 68 #endif /* __MLX5_VXLAN_H__ */
+2 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 2843 2843 lag->ref_count++; 2844 2844 return 0; 2845 2845 2846 + err_col_port_enable: 2847 + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2846 2848 err_col_port_add: 2847 2849 if (!lag->ref_count) 2848 2850 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); 2849 - err_col_port_enable: 2850 - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); 2851 2851 return err; 2852 2852 } 2853 2853
+8
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 214 214 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, 215 215 table_type, range, local_port, set); 216 216 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 217 + if (err) 218 + goto err_flood_bm_set; 219 + else 220 + goto buffer_out; 217 221 222 + err_flood_bm_set: 223 + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, 224 + table_type, range, local_port, !set); 225 + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); 218 226 buffer_out: 219 227 kfree(sftr_pl); 220 228 return err;
+9 -5
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
··· 1015 1015 { 1016 1016 int i, v, addr; 1017 1017 __le32 *ptr32; 1018 + int ret; 1018 1019 1019 1020 addr = base; 1020 1021 ptr32 = buf; 1021 1022 for (i = 0; i < size / sizeof(u32); i++) { 1022 - if (netxen_rom_fast_read(adapter, addr, &v) == -1) 1023 - return -1; 1023 + ret = netxen_rom_fast_read(adapter, addr, &v); 1024 + if (ret) 1025 + return ret; 1026 + 1024 1027 *ptr32 = cpu_to_le32(v); 1025 1028 ptr32++; 1026 1029 addr += sizeof(u32); 1027 1030 } 1028 1031 if ((char *)buf + size > (char *)ptr32) { 1029 1032 __le32 local; 1030 - if (netxen_rom_fast_read(adapter, addr, &v) == -1) 1031 - return -1; 1033 + ret = netxen_rom_fast_read(adapter, addr, &v); 1034 + if (ret) 1035 + return ret; 1032 1036 local = cpu_to_le32(v); 1033 1037 memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); 1034 1038 } ··· 1944 1940 if (adapter->phy_read && 1945 1941 adapter->phy_read(adapter, 1946 1942 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 1947 - &autoneg) != 0) 1943 + &autoneg) == 0) 1948 1944 adapter->link_autoneg = autoneg; 1949 1945 } else 1950 1946 goto link_down;
+2 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 852 852 ptr32 = (__le32 *)&serial_num; 853 853 offset = NX_FW_SERIAL_NUM_OFFSET; 854 854 for (i = 0; i < 8; i++) { 855 - if (netxen_rom_fast_read(adapter, offset, &val) == -1) { 855 + err = netxen_rom_fast_read(adapter, offset, &val); 856 + if (err) { 856 857 dev_err(&pdev->dev, "error reading board info\n"); 857 858 adapter->driver_mismatch = 1; 858 859 return;
+3 -5
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 429 429 u8 xmit_type; 430 430 u16 idx; 431 431 u16 hlen; 432 - bool data_split; 432 + bool data_split = false; 433 433 434 434 /* Get tx-queue context and netdev index */ 435 435 txq_index = skb_get_queue_mapping(skb); ··· 2094 2094 edev->q_num_rx_buffers = NUM_RX_BDS_DEF; 2095 2095 edev->q_num_tx_buffers = NUM_TX_BDS_DEF; 2096 2096 2097 - DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n"); 2098 - 2099 2097 SET_NETDEV_DEV(ndev, &pdev->dev); 2100 2098 2101 2099 memset(&edev->stats, 0, sizeof(edev->stats)); ··· 2272 2274 { 2273 2275 struct qed_pf_params pf_params; 2274 2276 2275 - /* 16 rx + 16 tx */ 2277 + /* 64 rx + 64 tx */ 2276 2278 memset(&pf_params, 0, sizeof(struct qed_pf_params)); 2277 - pf_params.eth_pf_params.num_cons = 32; 2279 + pf_params.eth_pf_params.num_cons = 128; 2278 2280 qed_ops->common->update_pf_params(cdev, &pf_params); 2279 2281 } 2280 2282
+3 -2
drivers/net/geneve.c
··· 495 495 int gh_len; 496 496 int err = -ENOSYS; 497 497 498 - udp_tunnel_gro_complete(skb, nhoff); 499 - 500 498 gh = (struct genevehdr *)(skb->data + nhoff); 501 499 gh_len = geneve_hlen(gh); 502 500 type = gh->proto_type; ··· 505 507 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); 506 508 507 509 rcu_read_unlock(); 510 + 511 + skb_set_inner_mac_header(skb, nhoff + gh_len); 512 + 508 513 return err; 509 514 } 510 515
+13 -10
drivers/net/macsec.c
··· 85 85 * @tfm: crypto struct, key storage 86 86 */ 87 87 struct macsec_key { 88 - u64 id; 88 + u8 id[MACSEC_KEYID_LEN]; 89 89 struct crypto_aead *tfm; 90 90 }; 91 91 ··· 1530 1530 [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, 1531 1531 [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, 1532 1532 [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, 1533 - [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 }, 1533 + [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, 1534 + .len = MACSEC_KEYID_LEN, }, 1534 1535 [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, 1535 1536 .len = MACSEC_MAX_KEY_LEN, }, 1536 1537 }; ··· 1577 1576 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 1578 1577 return false; 1579 1578 } 1579 + 1580 + if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1581 + return false; 1580 1582 1581 1583 return true; 1582 1584 } ··· 1646 1642 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 1647 1643 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 1648 1644 1649 - rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); 1645 + nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN); 1650 1646 rx_sa->sc = rx_sc; 1651 1647 rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); 1652 1648 ··· 1727 1723 return false; 1728 1724 } 1729 1725 1726 + if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN) 1727 + return false; 1728 + 1730 1729 return true; 1731 1730 } 1732 1731 ··· 1785 1778 return -ENOMEM; 1786 1779 } 1787 1780 1788 - tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); 1781 + nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN); 1789 1782 1790 1783 spin_lock_bh(&tx_sa->lock); 1791 1784 tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); ··· 2372 2365 2373 2366 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2374 2367 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2375 - nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, 2376 - tx_sa->key.id, 2377 - MACSEC_SA_ATTR_PAD) || 2368 + nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, 
tx_sa->key.id) || 2378 2369 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2379 2370 nla_nest_cancel(skb, txsa_nest); 2380 2371 nla_nest_cancel(skb, txsa_list); ··· 2474 2469 2475 2470 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2476 2471 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2477 - nla_put_u64_64bit(skb, MACSEC_SA_ATTR_KEYID, 2478 - rx_sa->key.id, 2479 - MACSEC_SA_ATTR_PAD) || 2472 + nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 2480 2473 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2481 2474 nla_nest_cancel(skb, rxsa_nest); 2482 2475 nla_nest_cancel(skb, rxsc_nest);
+1 -1
drivers/net/macvtap.c
··· 384 384 goto wake_up; 385 385 } 386 386 387 - kfree_skb(skb); 387 + consume_skb(skb); 388 388 while (segs) { 389 389 struct sk_buff *nskb = segs->next; 390 390
+3 -2
drivers/net/vxlan.c
··· 613 613 614 614 static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 615 615 { 616 - udp_tunnel_gro_complete(skb, nhoff); 617 - 616 + /* Sets 'skb->inner_mac_header' since we are always called with 617 + * 'skb->encapsulation' set. 618 + */ 618 619 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 619 620 } 620 621
+10 -3
drivers/nvdimm/pmem.c
··· 397 397 */ 398 398 start += start_pad; 399 399 npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; 400 - if (nd_pfn->mode == PFN_MODE_PMEM) 401 - offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align) 400 + if (nd_pfn->mode == PFN_MODE_PMEM) { 401 + unsigned long memmap_size; 402 + 403 + /* 404 + * vmemmap_populate_hugepages() allocates the memmap array in 405 + * HPAGE_SIZE chunks. 406 + */ 407 + memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); 408 + offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align) 402 409 - start; 403 - else if (nd_pfn->mode == PFN_MODE_RAM) 410 + } else if (nd_pfn->mode == PFN_MODE_RAM) 404 411 offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; 405 412 else 406 413 goto err;
+2 -2
drivers/nvmem/mxs-ocotp.c
··· 94 94 if (ret) 95 95 goto close_banks; 96 96 97 - while (val_size) { 97 + while (val_size >= reg_size) { 98 98 if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { 99 99 /* fill up non-data register */ 100 100 *buf = 0; ··· 103 103 } 104 104 105 105 buf++; 106 - val_size--; 106 + val_size -= reg_size; 107 107 offset += reg_size; 108 108 } 109 109
+64 -51
drivers/rapidio/devices/rio_mport_cdev.c
··· 126 126 struct list_head node; 127 127 struct mport_dev *md; 128 128 enum rio_mport_map_dir dir; 129 - u32 rioid; 129 + u16 rioid; 130 130 u64 rio_addr; 131 131 dma_addr_t phys_addr; /* for mmap */ 132 132 void *virt_addr; /* kernel address, for dma_free_coherent */ ··· 137 137 138 138 struct rio_mport_dma_map { 139 139 int valid; 140 - uint64_t length; 140 + u64 length; 141 141 void *vaddr; 142 142 dma_addr_t paddr; 143 143 }; ··· 208 208 struct kfifo event_fifo; 209 209 wait_queue_head_t event_rx_wait; 210 210 spinlock_t fifo_lock; 211 - unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 211 + u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 212 212 #ifdef CONFIG_RAPIDIO_DMA_ENGINE 213 213 struct dma_chan *dmach; 214 214 struct list_head async_list; ··· 276 276 return -EFAULT; 277 277 278 278 if ((maint_io.offset % 4) || 279 - (maint_io.length == 0) || (maint_io.length % 4)) 279 + (maint_io.length == 0) || (maint_io.length % 4) || 280 + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) 280 281 return -EINVAL; 281 282 282 283 buffer = vmalloc(maint_io.length); ··· 299 298 offset += 4; 300 299 } 301 300 302 - if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) 301 + if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, 302 + buffer, maint_io.length))) 303 303 ret = -EFAULT; 304 304 out: 305 305 vfree(buffer); ··· 321 319 return -EFAULT; 322 320 323 321 if ((maint_io.offset % 4) || 324 - (maint_io.length == 0) || (maint_io.length % 4)) 322 + (maint_io.length == 0) || (maint_io.length % 4) || 323 + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) 325 324 return -EINVAL; 326 325 327 326 buffer = vmalloc(maint_io.length); ··· 330 327 return -ENOMEM; 331 328 length = maint_io.length; 332 329 333 - if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { 330 + if (unlikely(copy_from_user(buffer, 331 + (void __user *)(uintptr_t)maint_io.buffer, length))) { 334 332 ret = -EFAULT; 335 333 goto out; 336 
334 } ··· 364 360 */ 365 361 static int 366 362 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, 367 - u32 rioid, u64 raddr, u32 size, 363 + u16 rioid, u64 raddr, u32 size, 368 364 dma_addr_t *paddr) 369 365 { 370 366 struct rio_mport *mport = md->mport; ··· 373 369 374 370 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); 375 371 376 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 372 + map = kzalloc(sizeof(*map), GFP_KERNEL); 377 373 if (map == NULL) 378 374 return -ENOMEM; 379 375 ··· 398 394 399 395 static int 400 396 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, 401 - u32 rioid, u64 raddr, u32 size, 397 + u16 rioid, u64 raddr, u32 size, 402 398 dma_addr_t *paddr) 403 399 { 404 400 struct rio_mport_mapping *map; ··· 437 433 dma_addr_t paddr; 438 434 int ret; 439 435 440 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 436 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 441 437 return -EFAULT; 442 438 443 439 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", ··· 452 448 453 449 map.handle = paddr; 454 450 455 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) 451 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) 456 452 return -EFAULT; 457 453 return 0; 458 454 } ··· 473 469 if (!md->mport->ops->unmap_outb) 474 470 return -EPROTONOSUPPORT; 475 471 476 - if (copy_from_user(&handle, arg, sizeof(u64))) 472 + if (copy_from_user(&handle, arg, sizeof(handle))) 477 473 return -EFAULT; 478 474 479 475 rmcd_debug(OBW, "h=0x%llx", handle); ··· 502 498 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) 503 499 { 504 500 struct mport_dev *md = priv->md; 505 - uint16_t hdid; 501 + u16 hdid; 506 502 507 - if (copy_from_user(&hdid, arg, sizeof(uint16_t))) 503 + if (copy_from_user(&hdid, arg, sizeof(hdid))) 508 504 return -EFAULT; 509 505 510 506 md->mport->host_deviceid = hdid; ··· 524 520 static int maint_comptag_set(struct 
mport_cdev_priv *priv, void __user *arg) 525 521 { 526 522 struct mport_dev *md = priv->md; 527 - uint32_t comptag; 523 + u32 comptag; 528 524 529 - if (copy_from_user(&comptag, arg, sizeof(uint32_t))) 525 + if (copy_from_user(&comptag, arg, sizeof(comptag))) 530 526 return -EFAULT; 531 527 532 528 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); ··· 841 837 * @xfer: data transfer descriptor structure 842 838 */ 843 839 static int 844 - rio_dma_transfer(struct file *filp, uint32_t transfer_mode, 840 + rio_dma_transfer(struct file *filp, u32 transfer_mode, 845 841 enum rio_transfer_sync sync, enum dma_data_direction dir, 846 842 struct rio_transfer_io *xfer) 847 843 { ··· 879 875 unsigned long offset; 880 876 long pinned; 881 877 882 - offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; 878 + offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; 883 879 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; 884 880 885 881 page_list = kmalloc_array(nr_pages, ··· 1019 1015 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) 1020 1016 return -EFAULT; 1021 1017 1022 - if (transaction.count != 1) 1018 + if (transaction.count != 1) /* only single transfer for now */ 1023 1019 return -EINVAL; 1024 1020 1025 1021 if ((transaction.transfer_mode & 1026 1022 priv->md->properties.transfer_mode) == 0) 1027 1023 return -ENODEV; 1028 1024 1029 - transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); 1025 + transfer = vmalloc(transaction.count * sizeof(*transfer)); 1030 1026 if (!transfer) 1031 1027 return -ENOMEM; 1032 1028 1033 - if (unlikely(copy_from_user(transfer, transaction.block, 1034 - transaction.count * sizeof(struct rio_transfer_io)))) { 1029 + if (unlikely(copy_from_user(transfer, 1030 + (void __user *)(uintptr_t)transaction.block, 1031 + transaction.count * sizeof(*transfer)))) { 1035 1032 ret = -EFAULT; 1036 1033 goto out_free; 1037 1034 } ··· 1043 1038 ret = rio_dma_transfer(filp, 
transaction.transfer_mode, 1044 1039 transaction.sync, dir, &transfer[i]); 1045 1040 1046 - if (unlikely(copy_to_user(transaction.block, transfer, 1047 - transaction.count * sizeof(struct rio_transfer_io)))) 1041 + if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, 1042 + transfer, 1043 + transaction.count * sizeof(*transfer)))) 1048 1044 ret = -EFAULT; 1049 1045 1050 1046 out_free: ··· 1135 1129 } 1136 1130 1137 1131 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, 1138 - uint64_t size, struct rio_mport_mapping **mapping) 1132 + u64 size, struct rio_mport_mapping **mapping) 1139 1133 { 1140 1134 struct rio_mport_mapping *map; 1141 1135 1142 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1136 + map = kzalloc(sizeof(*map), GFP_KERNEL); 1143 1137 if (map == NULL) 1144 1138 return -ENOMEM; 1145 1139 ··· 1171 1165 struct rio_mport_mapping *mapping = NULL; 1172 1166 int ret; 1173 1167 1174 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) 1168 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 1175 1169 return -EFAULT; 1176 1170 1177 1171 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); ··· 1180 1174 1181 1175 map.dma_handle = mapping->phys_addr; 1182 1176 1183 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { 1177 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { 1184 1178 mutex_lock(&md->buf_mutex); 1185 1179 kref_put(&mapping->ref, mport_release_mapping); 1186 1180 mutex_unlock(&md->buf_mutex); ··· 1198 1192 int ret = -EFAULT; 1199 1193 struct rio_mport_mapping *map, *_map; 1200 1194 1201 - if (copy_from_user(&handle, arg, sizeof(u64))) 1195 + if (copy_from_user(&handle, arg, sizeof(handle))) 1202 1196 return -EFAULT; 1203 1197 rmcd_debug(EXIT, "filp=%p", filp); 1204 1198 ··· 1248 1242 1249 1243 static int 1250 1244 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, 1251 - u64 raddr, u32 size, 1245 + u64 
raddr, u64 size, 1252 1246 struct rio_mport_mapping **mapping) 1253 1247 { 1254 1248 struct rio_mport *mport = md->mport; 1255 1249 struct rio_mport_mapping *map; 1256 1250 int ret; 1257 1251 1258 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1252 + /* rio_map_inb_region() accepts u32 size */ 1253 + if (size > 0xffffffff) 1254 + return -EINVAL; 1255 + 1256 + map = kzalloc(sizeof(*map), GFP_KERNEL); 1259 1257 if (map == NULL) 1260 1258 return -ENOMEM; 1261 1259 ··· 1272 1262 1273 1263 if (raddr == RIO_MAP_ANY_ADDR) 1274 1264 raddr = map->phys_addr; 1275 - ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); 1265 + ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); 1276 1266 if (ret < 0) 1277 1267 goto err_map_inb; 1278 1268 ··· 1298 1288 1299 1289 static int 1300 1290 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, 1301 - u64 raddr, u32 size, 1291 + u64 raddr, u64 size, 1302 1292 struct rio_mport_mapping **mapping) 1303 1293 { 1304 1294 struct rio_mport_mapping *map; ··· 1341 1331 1342 1332 if (!md->mport->ops->map_inb) 1343 1333 return -EPROTONOSUPPORT; 1344 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 1334 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 1345 1335 return -EFAULT; 1346 1336 1347 1337 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); ··· 1354 1344 map.handle = mapping->phys_addr; 1355 1345 map.rio_addr = mapping->rio_addr; 1356 1346 1357 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { 1347 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { 1358 1348 /* Delete mapping if it was created by this request */ 1359 1349 if (ret == 0 && mapping->filp == filp) { 1360 1350 mutex_lock(&md->buf_mutex); ··· 1385 1375 if (!md->mport->ops->unmap_inb) 1386 1376 return -EPROTONOSUPPORT; 1387 1377 1388 - if (copy_from_user(&handle, arg, sizeof(u64))) 1378 + if (copy_from_user(&handle, arg, sizeof(handle))) 1389 1379 return 
-EFAULT; 1390 1380 1391 1381 mutex_lock(&md->buf_mutex); ··· 1411 1401 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) 1412 1402 { 1413 1403 struct mport_dev *md = priv->md; 1414 - uint32_t port_idx = md->mport->index; 1404 + u32 port_idx = md->mport->index; 1415 1405 1416 1406 rmcd_debug(MPORT, "port_index=%d", port_idx); 1417 1407 ··· 1461 1451 handled = 0; 1462 1452 spin_lock(&data->db_lock); 1463 1453 list_for_each_entry(db_filter, &data->doorbells, data_node) { 1464 - if (((db_filter->filter.rioid == 0xffffffff || 1454 + if (((db_filter->filter.rioid == RIO_INVALID_DESTID || 1465 1455 db_filter->filter.rioid == src)) && 1466 1456 info >= db_filter->filter.low && 1467 1457 info <= db_filter->filter.high) { ··· 1534 1524 1535 1525 if (copy_from_user(&filter, arg, sizeof(filter))) 1536 1526 return -EFAULT; 1527 + 1528 + if (filter.low > filter.high) 1529 + return -EINVAL; 1537 1530 1538 1531 spin_lock_irqsave(&priv->md->db_lock, flags); 1539 1532 list_for_each_entry(db_filter, &priv->db_filters, priv_node) { ··· 1750 1737 return -EEXIST; 1751 1738 } 1752 1739 1753 - size = sizeof(struct rio_dev); 1740 + size = sizeof(*rdev); 1754 1741 mport = md->mport; 1755 - destid = (u16)dev_info.destid; 1756 - hopcount = (u8)dev_info.hopcount; 1742 + destid = dev_info.destid; 1743 + hopcount = dev_info.hopcount; 1757 1744 1758 1745 if (rio_mport_read_config_32(mport, destid, hopcount, 1759 1746 RIO_PEF_CAR, &rval)) ··· 1885 1872 do { 1886 1873 rdev = rio_get_comptag(dev_info.comptag, rdev); 1887 1874 if (rdev && rdev->dev.parent == &mport->net->dev && 1888 - rdev->destid == (u16)dev_info.destid && 1889 - rdev->hopcount == (u8)dev_info.hopcount) 1875 + rdev->destid == dev_info.destid && 1876 + rdev->hopcount == dev_info.hopcount) 1890 1877 break; 1891 1878 } while (rdev); 1892 1879 } ··· 2159 2146 return maint_port_idx_get(data, (void __user *)arg); 2160 2147 case RIO_MPORT_GET_PROPERTIES: 2161 2148 md->properties.hdid = md->mport->host_deviceid; 
2162 - if (copy_to_user((void __user *)arg, &(data->md->properties), 2163 - sizeof(data->md->properties))) 2149 + if (copy_to_user((void __user *)arg, &(md->properties), 2150 + sizeof(md->properties))) 2164 2151 return -EFAULT; 2165 2152 return 0; 2166 2153 case RIO_ENABLE_DOORBELL_RANGE: ··· 2172 2159 case RIO_DISABLE_PORTWRITE_RANGE: 2173 2160 return rio_mport_remove_pw_filter(data, (void __user *)arg); 2174 2161 case RIO_SET_EVENT_MASK: 2175 - data->event_mask = arg; 2162 + data->event_mask = (u32)arg; 2176 2163 return 0; 2177 2164 case RIO_GET_EVENT_MASK: 2178 2165 if (copy_to_user((void __user *)arg, &data->event_mask, 2179 - sizeof(data->event_mask))) 2166 + sizeof(u32))) 2180 2167 return -EFAULT; 2181 2168 return 0; 2182 2169 case RIO_MAP_OUTBOUND: ··· 2387 2374 return -EINVAL; 2388 2375 2389 2376 ret = rio_mport_send_doorbell(mport, 2390 - (u16)event.u.doorbell.rioid, 2377 + event.u.doorbell.rioid, 2391 2378 event.u.doorbell.payload); 2392 2379 if (ret < 0) 2393 2380 return ret; ··· 2434 2421 struct mport_dev *md; 2435 2422 struct rio_mport_attr attr; 2436 2423 2437 - md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); 2424 + md = kzalloc(sizeof(*md), GFP_KERNEL); 2438 2425 if (!md) { 2439 2426 rmcd_error("Unable allocate a device object"); 2440 2427 return NULL; ··· 2483 2470 /* The transfer_mode property will be returned through mport query 2484 2471 * interface 2485 2472 */ 2486 - #ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ 2473 + #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ 2487 2474 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; 2488 2475 #else 2489 2476 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
-6
drivers/usb/core/port.c
··· 249 249 250 250 return retval; 251 251 } 252 - 253 - static int usb_port_prepare(struct device *dev) 254 - { 255 - return 1; 256 - } 257 252 #endif 258 253 259 254 static const struct dev_pm_ops usb_port_pm_ops = { 260 255 #ifdef CONFIG_PM 261 256 .runtime_suspend = usb_port_runtime_suspend, 262 257 .runtime_resume = usb_port_runtime_resume, 263 - .prepare = usb_port_prepare, 264 258 #endif 265 259 }; 266 260
+1 -7
drivers/usb/core/usb.c
··· 312 312 313 313 static int usb_dev_prepare(struct device *dev) 314 314 { 315 - struct usb_device *udev = to_usb_device(dev); 316 - 317 - /* Return 0 if the current wakeup setting is wrong, otherwise 1 */ 318 - if (udev->do_remote_wakeup != device_may_wakeup(dev)) 319 - return 0; 320 - 321 - return 1; 315 + return 0; /* Implement eventually? */ 322 316 } 323 317 324 318 static void usb_dev_complete(struct device *dev)
+2 -2
drivers/usb/musb/jz4740.c
··· 83 83 { 84 84 usb_phy_generic_register(); 85 85 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 86 - if (!musb->xceiv) { 86 + if (IS_ERR(musb->xceiv)) { 87 87 pr_err("HS UDC: no transceiver configured\n"); 88 - return -ENODEV; 88 + return PTR_ERR(musb->xceiv); 89 89 } 90 90 91 91 /* Silicon does not implement ConfigData register.
+3 -3
drivers/usb/musb/musb_gadget.c
··· 1164 1164 musb_writew(epio, MUSB_RXMAXP, 0); 1165 1165 } 1166 1166 1167 - musb_ep->desc = NULL; 1168 - musb_ep->end_point.desc = NULL; 1169 - 1170 1167 /* abort all pending DMA and requests */ 1171 1168 nuke(musb_ep, -ESHUTDOWN); 1169 + 1170 + musb_ep->desc = NULL; 1171 + musb_ep->end_point.desc = NULL; 1172 1172 1173 1173 schedule_work(&musb->irq_work); 1174 1174
+1 -1
drivers/usb/musb/musb_host.c
··· 2735 2735 .description = "musb-hcd", 2736 2736 .product_desc = "MUSB HDRC host driver", 2737 2737 .hcd_priv_size = sizeof(struct musb *), 2738 - .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 2738 + .flags = HCD_USB2 | HCD_MEMORY, 2739 2739 2740 2740 /* not using irq handler or reset hooks from usbcore, since 2741 2741 * those must be shared with peripheral code for OTG configs
+4
drivers/usb/serial/cp210x.c
··· 109 109 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ 110 110 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ 111 111 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ 112 + { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ 112 113 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 113 114 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 114 115 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ ··· 119 118 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ 120 119 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 121 120 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ 121 + { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ 122 122 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ 123 123 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ 124 124 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ ··· 143 141 { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ 144 142 { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ 145 143 { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ 144 + { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ 145 + { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ 146 146 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ 147 147 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ 148 148 { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+1 -1
drivers/virtio/virtio_ring.c
··· 1006 1006 const char *name) 1007 1007 { 1008 1008 struct virtqueue *vq; 1009 - void *queue; 1009 + void *queue = NULL; 1010 1010 dma_addr_t dma_addr; 1011 1011 size_t queue_size_in_bytes; 1012 1012 struct vring vring;
+16
drivers/xen/balloon.c
··· 151 151 static void balloon_process(struct work_struct *work); 152 152 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 153 153 154 + static void release_memory_resource(struct resource *resource); 155 + 154 156 /* When ballooning out (allocating memory to return to Xen) we don't really 155 157 want the kernel to try too hard since that can trigger the oom killer. */ 156 158 #define GFP_BALLOON \ ··· 268 266 kfree(res); 269 267 return NULL; 270 268 } 269 + 270 + #ifdef CONFIG_SPARSEMEM 271 + { 272 + unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); 273 + unsigned long pfn = res->start >> PAGE_SHIFT; 274 + 275 + if (pfn > limit) { 276 + pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", 277 + pfn, limit); 278 + release_memory_resource(res); 279 + return NULL; 280 + } 281 + } 282 + #endif 271 283 272 284 return res; 273 285 }
+8 -12
drivers/xen/evtchn.c
··· 316 316 { 317 317 unsigned int new_size; 318 318 evtchn_port_t *new_ring, *old_ring; 319 - unsigned int p, c; 320 319 321 320 /* 322 321 * Ensure the ring is large enough to capture all possible ··· 345 346 /* 346 347 * Copy the old ring contents to the new ring. 347 348 * 348 - * If the ring contents crosses the end of the current ring, 349 - * it needs to be copied in two chunks. 349 + * To take care of wrapping, a full ring, and the new index 350 + * pointing into the second half, simply copy the old contents 351 + * twice. 350 352 * 351 353 * +---------+ +------------------+ 352 - * |34567 12| -> | 1234567 | 353 - * +-----p-c-+ +------------------+ 354 + * |34567 12| -> |34567 1234567 12| 355 + * +-----p-c-+ +-------c------p---+ 354 356 */ 355 - p = evtchn_ring_offset(u, u->ring_prod); 356 - c = evtchn_ring_offset(u, u->ring_cons); 357 - if (p < c) { 358 - memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); 359 - memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); 360 - } else 361 - memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); 357 + memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring)); 358 + memcpy(new_ring + u->ring_size, old_ring, 359 + u->ring_size * sizeof(*u->ring)); 362 360 363 361 u->ring = new_ring; 364 362 u->ring_size = new_size;
+14 -11
fs/pnode.c
··· 198 198 199 199 /* all accesses are serialized by namespace_sem */ 200 200 static struct user_namespace *user_ns; 201 - static struct mount *last_dest, *last_source, *dest_master; 201 + static struct mount *last_dest, *first_source, *last_source, *dest_master; 202 202 static struct mountpoint *mp; 203 203 static struct hlist_head *list; 204 204 ··· 221 221 type = CL_MAKE_SHARED; 222 222 } else { 223 223 struct mount *n, *p; 224 + bool done; 224 225 for (n = m; ; n = p) { 225 226 p = n->mnt_master; 226 - if (p == dest_master || IS_MNT_MARKED(p)) { 227 - while (last_dest->mnt_master != p) { 228 - last_source = last_source->mnt_master; 229 - last_dest = last_source->mnt_parent; 230 - } 231 - if (!peers(n, last_dest)) { 232 - last_source = last_source->mnt_master; 233 - last_dest = last_source->mnt_parent; 234 - } 227 + if (p == dest_master || IS_MNT_MARKED(p)) 235 228 break; 236 - } 237 229 } 230 + do { 231 + struct mount *parent = last_source->mnt_parent; 232 + if (last_source == first_source) 233 + break; 234 + done = parent->mnt_master == p; 235 + if (done && peers(n, parent)) 236 + break; 237 + last_source = last_source->mnt_master; 238 + } while (!done); 239 + 238 240 type = CL_SLAVE; 239 241 /* beginning of peer group among the slaves? */ 240 242 if (IS_MNT_SHARED(m)) ··· 288 286 */ 289 287 user_ns = current->nsproxy->mnt_ns->user_ns; 290 288 last_dest = dest_mnt; 289 + first_source = source_mnt; 291 290 last_source = source_mnt; 292 291 mp = dest_mp; 293 292 list = tree_list;
+2 -1
fs/proc/base.c
··· 955 955 struct mm_struct *mm = file->private_data; 956 956 unsigned long env_start, env_end; 957 957 958 - if (!mm) 958 + /* Ensure the process spawned far enough to have an environment. */ 959 + if (!mm || !mm->env_end) 959 960 return 0; 960 961 961 962 page = (char *)__get_free_page(GFP_TEMPORARY);
+2 -2
include/acpi/acpi_bus.h
··· 394 394 395 395 static inline bool is_acpi_node(struct fwnode_handle *fwnode) 396 396 { 397 - return fwnode && (fwnode->type == FWNODE_ACPI 397 + return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI 398 398 || fwnode->type == FWNODE_ACPI_DATA); 399 399 } 400 400 401 401 static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) 402 402 { 403 - return fwnode && fwnode->type == FWNODE_ACPI; 403 + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI; 404 404 } 405 405 406 406 static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+1 -1
include/linux/compiler-gcc.h
··· 246 246 #define __HAVE_BUILTIN_BSWAP32__ 247 247 #define __HAVE_BUILTIN_BSWAP64__ 248 248 #endif 249 - #if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) 249 + #if GCC_VERSION >= 40800 250 250 #define __HAVE_BUILTIN_BSWAP16__ 251 251 #endif 252 252 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+1 -1
include/linux/of.h
··· 133 133 134 134 static inline bool is_of_node(struct fwnode_handle *fwnode) 135 135 { 136 - return fwnode && fwnode->type == FWNODE_OF; 136 + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; 137 137 } 138 138 139 139 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
+22
include/linux/page-flags.h
··· 517 517 } 518 518 519 519 /* 520 + * PageTransCompoundMap is the same as PageTransCompound, but it also 521 + * guarantees the primary MMU has the entire compound page mapped 522 + * through pmd_trans_huge, which in turn guarantees the secondary MMUs 523 + * can also map the entire compound page. This allows the secondary 524 + * MMUs to call get_user_pages() only once for each compound page and 525 + * to immediately map the entire compound page with a single secondary 526 + * MMU fault. If there will be a pmd split later, the secondary MMUs 527 + * will get an update through the MMU notifier invalidation through 528 + * split_huge_pmd(). 529 + * 530 + * Unlike PageTransCompound, this is safe to be called only while 531 + * split_huge_pmd() cannot run from under us, like if protected by the 532 + * MMU notifier, otherwise it may result in page->_mapcount < 0 false 533 + * positives. 534 + */ 535 + static inline int PageTransCompoundMap(struct page *page) 536 + { 537 + return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; 538 + } 539 + 540 + /* 520 541 * PageTransTail returns true for both transparent huge pages 521 542 * and hugetlbfs pages, so it should only be called when it's known 522 543 * that hugetlbfs pages aren't involved. ··· 580 559 #else 581 560 TESTPAGEFLAG_FALSE(TransHuge) 582 561 TESTPAGEFLAG_FALSE(TransCompound) 562 + TESTPAGEFLAG_FALSE(TransCompoundMap) 583 563 TESTPAGEFLAG_FALSE(TransTail) 584 564 TESTPAGEFLAG_FALSE(DoubleMap) 585 565 TESTSETFLAG_FALSE(DoubleMap)
+75 -69
include/linux/rio_mport_cdev.h include/uapi/linux/rio_mport_cdev.h
··· 39 39 #ifndef _RIO_MPORT_CDEV_H_ 40 40 #define _RIO_MPORT_CDEV_H_ 41 41 42 - #ifndef __user 43 - #define __user 44 - #endif 42 + #include <linux/ioctl.h> 43 + #include <linux/types.h> 45 44 46 45 struct rio_mport_maint_io { 47 - uint32_t rioid; /* destID of remote device */ 48 - uint32_t hopcount; /* hopcount to remote device */ 49 - uint32_t offset; /* offset in register space */ 50 - size_t length; /* length in bytes */ 51 - void __user *buffer; /* data buffer */ 46 + __u16 rioid; /* destID of remote device */ 47 + __u8 hopcount; /* hopcount to remote device */ 48 + __u8 pad0[5]; 49 + __u32 offset; /* offset in register space */ 50 + __u32 length; /* length in bytes */ 51 + __u64 buffer; /* pointer to data buffer */ 52 52 }; 53 53 54 54 /* ··· 66 66 #define RIO_CAP_MAP_INB (1 << 7) 67 67 68 68 struct rio_mport_properties { 69 - uint16_t hdid; 70 - uint8_t id; /* Physical port ID */ 71 - uint8_t index; 72 - uint32_t flags; 73 - uint32_t sys_size; /* Default addressing size */ 74 - uint8_t port_ok; 75 - uint8_t link_speed; 76 - uint8_t link_width; 77 - uint32_t dma_max_sge; 78 - uint32_t dma_max_size; 79 - uint32_t dma_align; 80 - uint32_t transfer_mode; /* Default transfer mode */ 81 - uint32_t cap_sys_size; /* Capable system sizes */ 82 - uint32_t cap_addr_size; /* Capable addressing sizes */ 83 - uint32_t cap_transfer_mode; /* Capable transfer modes */ 84 - uint32_t cap_mport; /* Mport capabilities */ 69 + __u16 hdid; 70 + __u8 id; /* Physical port ID */ 71 + __u8 index; 72 + __u32 flags; 73 + __u32 sys_size; /* Default addressing size */ 74 + __u8 port_ok; 75 + __u8 link_speed; 76 + __u8 link_width; 77 + __u8 pad0; 78 + __u32 dma_max_sge; 79 + __u32 dma_max_size; 80 + __u32 dma_align; 81 + __u32 transfer_mode; /* Default transfer mode */ 82 + __u32 cap_sys_size; /* Capable system sizes */ 83 + __u32 cap_addr_size; /* Capable addressing sizes */ 84 + __u32 cap_transfer_mode; /* Capable transfer modes */ 85 + __u32 cap_mport; /* Mport capabilities */ 85 86 }; 
86 87 87 88 /* ··· 94 93 #define RIO_PORTWRITE (1 << 1) 95 94 96 95 struct rio_doorbell { 97 - uint32_t rioid; 98 - uint16_t payload; 96 + __u16 rioid; 97 + __u16 payload; 99 98 }; 100 99 101 100 struct rio_doorbell_filter { 102 - uint32_t rioid; /* 0xffffffff to match all ids */ 103 - uint16_t low; 104 - uint16_t high; 101 + __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */ 102 + __u16 low; 103 + __u16 high; 104 + __u16 pad0; 105 105 }; 106 106 107 107 108 108 struct rio_portwrite { 109 - uint32_t payload[16]; 109 + __u32 payload[16]; 110 110 }; 111 111 112 112 struct rio_pw_filter { 113 - uint32_t mask; 114 - uint32_t low; 115 - uint32_t high; 113 + __u32 mask; 114 + __u32 low; 115 + __u32 high; 116 + __u32 pad0; 116 117 }; 117 118 118 119 /* RapidIO base address for inbound requests set to value defined below 119 120 * indicates that no specific RIO-to-local address translation is requested 120 121 * and driver should use direct (one-to-one) address mapping. 121 122 */ 122 - #define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) 123 + #define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0)) 123 124 124 125 struct rio_mmap { 125 - uint32_t rioid; 126 - uint64_t rio_addr; 127 - uint64_t length; 128 - uint64_t handle; 129 - void *address; 126 + __u16 rioid; 127 + __u16 pad0[3]; 128 + __u64 rio_addr; 129 + __u64 length; 130 + __u64 handle; 131 + __u64 address; 130 132 }; 131 133 132 134 struct rio_dma_mem { 133 - uint64_t length; /* length of DMA memory */ 134 - uint64_t dma_handle; /* handle associated with this memory */ 135 - void *buffer; /* pointer to this memory */ 135 + __u64 length; /* length of DMA memory */ 136 + __u64 dma_handle; /* handle associated with this memory */ 137 + __u64 address; 136 138 }; 137 139 138 - 139 140 struct rio_event { 140 - unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ 141 + __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ 141 142 union { 142 143 struct rio_doorbell doorbell; /* header for 
RIO_DOORBELL */ 143 144 struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ 144 145 } u; 146 + __u32 pad0; 145 147 }; 146 148 147 149 enum rio_transfer_sync { ··· 188 184 }; 189 185 190 186 struct rio_transfer_io { 191 - uint32_t rioid; /* Target destID */ 192 - uint64_t rio_addr; /* Address in target's RIO mem space */ 193 - enum rio_exchange method; /* Data exchange method */ 194 - void __user *loc_addr; 195 - uint64_t handle; 196 - uint64_t offset; /* Offset in buffer */ 197 - uint64_t length; /* Length in bytes */ 198 - uint32_t completion_code; /* Completion code for this transfer */ 187 + __u64 rio_addr; /* Address in target's RIO mem space */ 188 + __u64 loc_addr; 189 + __u64 handle; 190 + __u64 offset; /* Offset in buffer */ 191 + __u64 length; /* Length in bytes */ 192 + __u16 rioid; /* Target destID */ 193 + __u16 method; /* Data exchange method, one of rio_exchange enum */ 194 + __u32 completion_code; /* Completion code for this transfer */ 199 195 }; 200 196 201 197 struct rio_transaction { 202 - uint32_t transfer_mode; /* Data transfer mode */ 203 - enum rio_transfer_sync sync; /* Synchronization method */ 204 - enum rio_transfer_dir dir; /* Transfer direction */ 205 - size_t count; /* Number of transfers */ 206 - struct rio_transfer_io __user *block; /* Array of <count> transfers */ 198 + __u64 block; /* Pointer to array of <count> transfers */ 199 + __u32 count; /* Number of transfers */ 200 + __u32 transfer_mode; /* Data transfer mode */ 201 + __u16 sync; /* Synch method, one of rio_transfer_sync enum */ 202 + __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */ 203 + __u32 pad0; 207 204 }; 208 205 209 206 struct rio_async_tx_wait { 210 - uint32_t token; /* DMA transaction ID token */ 211 - uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ 207 + __u32 token; /* DMA transaction ID token */ 208 + __u32 timeout; /* Wait timeout in msec, if 0 use default TO */ 212 209 }; 213 210 214 211 #define RIO_MAX_DEVNAME_SZ 
20 215 212 216 213 struct rio_rdev_info { 217 - uint32_t destid; 218 - uint8_t hopcount; 219 - uint32_t comptag; 214 + __u16 destid; 215 + __u8 hopcount; 216 + __u8 pad0; 217 + __u32 comptag; 220 218 char name[RIO_MAX_DEVNAME_SZ + 1]; 221 219 }; 222 220 ··· 226 220 #define RIO_MPORT_DRV_MAGIC 'm' 227 221 228 222 #define RIO_MPORT_MAINT_HDID_SET \ 229 - _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) 223 + _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16) 230 224 #define RIO_MPORT_MAINT_COMPTAG_SET \ 231 - _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) 225 + _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32) 232 226 #define RIO_MPORT_MAINT_PORT_IDX_GET \ 233 - _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) 227 + _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32) 234 228 #define RIO_MPORT_GET_PROPERTIES \ 235 229 _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) 236 230 #define RIO_MPORT_MAINT_READ_LOCAL \ ··· 250 244 #define RIO_DISABLE_PORTWRITE_RANGE \ 251 245 _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) 252 246 #define RIO_SET_EVENT_MASK \ 253 - _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) 247 + _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32) 254 248 #define RIO_GET_EVENT_MASK \ 255 - _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) 249 + _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32) 256 250 #define RIO_MAP_OUTBOUND \ 257 251 _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) 258 252 #define RIO_UNMAP_OUTBOUND \ ··· 260 254 #define RIO_MAP_INBOUND \ 261 255 _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) 262 256 #define RIO_UNMAP_INBOUND \ 263 - _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) 257 + _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64) 264 258 #define RIO_ALLOC_DMA \ 265 259 _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) 266 260 #define RIO_FREE_DMA \ 267 - _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) 261 + _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64) 268 262 #define RIO_TRANSFER \ 269 263 _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) 270 264 #define RIO_WAIT_FOR_ASYNC \
+4
include/linux/swap.h
··· 533 533 #ifdef CONFIG_MEMCG 534 534 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) 535 535 { 536 + /* Cgroup2 doesn't have per-cgroup swappiness */ 537 + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 538 + return vm_swappiness; 539 + 536 540 /* root ? */ 537 541 if (mem_cgroup_disabled() || !memcg->css.parent) 538 542 return vm_swappiness;
+1
include/net/netns/xfrm.h
··· 80 80 struct flow_cache flow_cache_global; 81 81 atomic_t flow_cache_genid; 82 82 struct list_head flow_cache_gc_list; 83 + atomic_t flow_cache_gc_count; 83 84 spinlock_t flow_cache_gc_lock; 84 85 struct work_struct flow_cache_gc_work; 85 86 struct work_struct flow_cache_flush_work;
-9
include/net/udp_tunnel.h
··· 112 112 return iptunnel_handle_offloads(skb, type); 113 113 } 114 114 115 - static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) 116 - { 117 - struct udphdr *uh; 118 - 119 - uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr)); 120 - skb_shinfo(skb)->gso_type |= uh->check ? 121 - SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 122 - } 123 - 124 115 static inline void udp_tunnel_encap_enable(struct socket *sock) 125 116 { 126 117 #if IS_ENABLED(CONFIG_IPV6)
+2 -2
include/uapi/asm-generic/unistd.h
··· 718 718 #define __NR_copy_file_range 285 719 719 __SYSCALL(__NR_copy_file_range, sys_copy_file_range) 720 720 #define __NR_preadv2 286 721 - __SYSCALL(__NR_preadv2, sys_preadv2) 721 + __SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2) 722 722 #define __NR_pwritev2 287 723 - __SYSCALL(__NR_pwritev2, sys_pwritev2) 723 + __SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2) 724 724 725 725 #undef __NR_syscalls 726 726 #define __NR_syscalls 288
+3 -1
include/uapi/linux/if_macsec.h
··· 19 19 20 20 #define MACSEC_MAX_KEY_LEN 128 21 21 22 + #define MACSEC_KEYID_LEN 16 23 + 22 24 #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL 23 25 #define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 24 26 ··· 81 79 MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */ 82 80 MACSEC_SA_ATTR_PN, /* config/dump, u32 */ 83 81 MACSEC_SA_ATTR_KEY, /* config, data */ 84 - MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */ 82 + MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */ 85 83 MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ 86 84 MACSEC_SA_ATTR_PAD, 87 85 __MACSEC_SA_ATTR_END,
+15 -9
include/uapi/linux/swab.h
··· 45 45 46 46 static inline __attribute_const__ __u16 __fswab16(__u16 val) 47 47 { 48 - #ifdef __HAVE_BUILTIN_BSWAP16__ 49 - return __builtin_bswap16(val); 50 - #elif defined (__arch_swab16) 48 + #if defined (__arch_swab16) 51 49 return __arch_swab16(val); 52 50 #else 53 51 return ___constant_swab16(val); ··· 54 56 55 57 static inline __attribute_const__ __u32 __fswab32(__u32 val) 56 58 { 57 - #ifdef __HAVE_BUILTIN_BSWAP32__ 58 - return __builtin_bswap32(val); 59 - #elif defined(__arch_swab32) 59 + #if defined(__arch_swab32) 60 60 return __arch_swab32(val); 61 61 #else 62 62 return ___constant_swab32(val); ··· 63 67 64 68 static inline __attribute_const__ __u64 __fswab64(__u64 val) 65 69 { 66 - #ifdef __HAVE_BUILTIN_BSWAP64__ 67 - return __builtin_bswap64(val); 68 - #elif defined (__arch_swab64) 70 + #if defined (__arch_swab64) 69 71 return __arch_swab64(val); 70 72 #elif defined(__SWAB_64_THRU_32__) 71 73 __u32 h = val >> 32; ··· 96 102 * __swab16 - return a byteswapped 16-bit value 97 103 * @x: value to byteswap 98 104 */ 105 + #ifdef __HAVE_BUILTIN_BSWAP16__ 106 + #define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) 107 + #else 99 108 #define __swab16(x) \ 100 109 (__builtin_constant_p((__u16)(x)) ? \ 101 110 ___constant_swab16(x) : \ 102 111 __fswab16(x)) 112 + #endif 103 113 104 114 /** 105 115 * __swab32 - return a byteswapped 32-bit value 106 116 * @x: value to byteswap 107 117 */ 118 + #ifdef __HAVE_BUILTIN_BSWAP32__ 119 + #define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) 120 + #else 108 121 #define __swab32(x) \ 109 122 (__builtin_constant_p((__u32)(x)) ? \ 110 123 ___constant_swab32(x) : \ 111 124 __fswab32(x)) 125 + #endif 112 126 113 127 /** 114 128 * __swab64 - return a byteswapped 64-bit value 115 129 * @x: value to byteswap 116 130 */ 131 + #ifdef __HAVE_BUILTIN_BSWAP64__ 132 + #define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) 133 + #else 117 134 #define __swab64(x) \ 118 135 (__builtin_constant_p((__u64)(x)) ? 
\ 119 136 ___constant_swab64(x) : \ 120 137 __fswab64(x)) 138 + #endif 121 139 122 140 /** 123 141 * __swahw32 - return a word-swapped 32-bit value
+2 -2
include/xen/page.h
··· 15 15 */ 16 16 17 17 #define xen_pfn_to_page(xen_pfn) \ 18 - ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) 18 + (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT))) 19 19 #define page_to_xen_pfn(page) \ 20 - (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) 20 + ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT)) 21 21 22 22 #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) 23 23
+16 -13
kernel/sched/core.c
··· 596 596 return false; 597 597 598 598 /* 599 - * FIFO realtime policy runs the highest priority task (after DEADLINE). 600 - * Other runnable tasks are of a lower priority. The scheduler tick 601 - * isn't needed. 602 - */ 603 - fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 604 - if (fifo_nr_running) 605 - return true; 606 - 607 - /* 608 - * Round-robin realtime tasks time slice with other tasks at the same 609 - * realtime priority. 599 + * If there are more than one RR tasks, we need the tick to effect the 600 + * actual RR behaviour. 610 601 */ 611 602 if (rq->rt.rr_nr_running) { 612 603 if (rq->rt.rr_nr_running == 1) ··· 606 615 return false; 607 616 } 608 617 609 - /* Normal multitasking need periodic preemption checks */ 610 - if (rq->cfs.nr_running > 1) 618 + /* 619 + * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 620 + * forced preemption between FIFO tasks. 621 + */ 622 + fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 623 + if (fifo_nr_running) 624 + return true; 625 + 626 + /* 627 + * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; 628 + * if there's more than one we need the tick for involuntary 629 + * preemption. 630 + */ 631 + if (rq->nr_running > 1) 611 632 return false; 612 633 613 634 return true;
+7 -2
kernel/trace/trace_events.c
··· 2113 2113 trace_create_file("filter", 0644, file->dir, file, 2114 2114 &ftrace_event_filter_fops); 2115 2115 2116 - trace_create_file("trigger", 0644, file->dir, file, 2117 - &event_trigger_fops); 2116 + /* 2117 + * Only event directories that can be enabled should have 2118 + * triggers. 2119 + */ 2120 + if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 2121 + trace_create_file("trigger", 0644, file->dir, file, 2122 + &event_trigger_fops); 2118 2123 2119 2124 trace_create_file("format", 0444, file->dir, call, 2120 2125 &ftrace_event_format_fops);
+5 -1
lib/stackdepot.c
··· 42 42 43 43 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) 44 44 45 + #define STACK_ALLOC_NULL_PROTECTION_BITS 1 45 46 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ 46 47 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) 47 48 #define STACK_ALLOC_ALIGN 4 48 49 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ 49 50 STACK_ALLOC_ALIGN) 50 - #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS) 51 + #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ 52 + STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) 51 53 #define STACK_ALLOC_SLABS_CAP 1024 52 54 #define STACK_ALLOC_MAX_SLABS \ 53 55 (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ ··· 61 59 struct { 62 60 u32 slabindex : STACK_ALLOC_INDEX_BITS; 63 61 u32 offset : STACK_ALLOC_OFFSET_BITS; 62 + u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; 64 63 }; 65 64 }; 66 65 ··· 139 136 stack->size = size; 140 137 stack->handle.slabindex = depot_index; 141 138 stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; 139 + stack->handle.valid = 1; 142 140 memcpy(stack->entries, entries, size * sizeof(unsigned long)); 143 141 depot_offset += required_size; 144 142
+4 -10
mm/compaction.c
··· 852 852 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 853 853 ISOLATE_UNEVICTABLE); 854 854 855 - /* 856 - * In case of fatal failure, release everything that might 857 - * have been isolated in the previous iteration, and signal 858 - * the failure back to caller. 859 - */ 860 - if (!pfn) { 861 - putback_movable_pages(&cc->migratepages); 862 - cc->nr_migratepages = 0; 855 + if (!pfn) 863 856 break; 864 - } 865 857 866 858 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) 867 859 break; ··· 1733 1741 1734 1742 static inline bool kcompactd_work_requested(pg_data_t *pgdat) 1735 1743 { 1736 - return pgdat->kcompactd_max_order > 0; 1744 + return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 1737 1745 } 1738 1746 1739 1747 static bool kcompactd_node_suitable(pg_data_t *pgdat) ··· 1797 1805 INIT_LIST_HEAD(&cc.freepages); 1798 1806 INIT_LIST_HEAD(&cc.migratepages); 1799 1807 1808 + if (kthread_should_stop()) 1809 + return; 1800 1810 status = compact_zone(zone, &cc); 1801 1811 1802 1812 if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
+2 -2
mm/huge_memory.c
··· 3452 3452 } 3453 3453 } 3454 3454 3455 - pr_info("%lu of %lu THP split", split, total); 3455 + pr_info("%lu of %lu THP split\n", split, total); 3456 3456 3457 3457 return 0; 3458 3458 } ··· 3463 3463 { 3464 3464 void *ret; 3465 3465 3466 - ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL, 3466 + ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 3467 3467 &split_huge_pages_fops); 3468 3468 if (!ret) 3469 3469 pr_warn("Failed to create split_huge_pages in debugfs");
+2 -9
mm/memory.c
··· 1222 1222 next = pmd_addr_end(addr, end); 1223 1223 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 1224 1224 if (next - addr != HPAGE_PMD_SIZE) { 1225 - #ifdef CONFIG_DEBUG_VM 1226 - if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { 1227 - pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", 1228 - __func__, addr, end, 1229 - vma->vm_start, 1230 - vma->vm_end); 1231 - BUG(); 1232 - } 1233 - #endif 1225 + VM_BUG_ON_VMA(vma_is_anonymous(vma) && 1226 + !rwsem_is_locked(&tlb->mm->mmap_sem), vma); 1234 1227 split_huge_pmd(vma, pmd, addr); 1235 1228 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1236 1229 goto next;
+4 -2
mm/page-writeback.c
··· 1910 1910 if (gdtc->dirty > gdtc->bg_thresh) 1911 1911 return true; 1912 1912 1913 - if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc)) 1913 + if (wb_stat(wb, WB_RECLAIMABLE) > 1914 + wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) 1914 1915 return true; 1915 1916 1916 1917 if (mdtc) { ··· 1925 1924 if (mdtc->dirty > mdtc->bg_thresh) 1926 1925 return true; 1927 1926 1928 - if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc)) 1927 + if (wb_stat(wb, WB_RECLAIMABLE) > 1928 + wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) 1929 1929 return true; 1930 1930 } 1931 1931
+1 -1
mm/page_alloc.c
··· 6485 6485 setup_per_zone_inactive_ratio(); 6486 6486 return 0; 6487 6487 } 6488 - module_init(init_per_zone_wmark_min) 6488 + core_initcall(init_per_zone_wmark_min) 6489 6489 6490 6490 /* 6491 6491 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+7 -1
mm/zswap.c
··· 170 170 static LIST_HEAD(zswap_pools); 171 171 /* protects zswap_pools list modification */ 172 172 static DEFINE_SPINLOCK(zswap_pools_lock); 173 + /* pool counter to provide unique names to zpool */ 174 + static atomic_t zswap_pools_count = ATOMIC_INIT(0); 173 175 174 176 /* used by param callback function */ 175 177 static bool zswap_init_started; ··· 567 565 static struct zswap_pool *zswap_pool_create(char *type, char *compressor) 568 566 { 569 567 struct zswap_pool *pool; 568 + char name[38]; /* 'zswap' + 32 char (max) num + \0 */ 570 569 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; 571 570 572 571 pool = kzalloc(sizeof(*pool), GFP_KERNEL); ··· 576 573 return NULL; 577 574 } 578 575 579 - pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops); 576 + /* unique name for each pool specifically required by zsmalloc */ 577 + snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); 578 + 579 + pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); 580 580 if (!pool->zpool) { 581 581 pr_err("%s zpool not available\n", type); 582 582 goto error;
+3 -2
net/bridge/br_ioctl.c
··· 21 21 #include <asm/uaccess.h> 22 22 #include "br_private.h" 23 23 24 - /* called with RTNL */ 25 24 static int get_bridge_ifindices(struct net *net, int *indices, int num) 26 25 { 27 26 struct net_device *dev; 28 27 int i = 0; 29 28 30 - for_each_netdev(net, dev) { 29 + rcu_read_lock(); 30 + for_each_netdev_rcu(net, dev) { 31 31 if (i >= num) 32 32 break; 33 33 if (dev->priv_flags & IFF_EBRIDGE) 34 34 indices[i++] = dev->ifindex; 35 35 } 36 + rcu_read_unlock(); 36 37 37 38 return i; 38 39 }
+7 -5
net/bridge/br_multicast.c
··· 1279 1279 struct br_ip saddr; 1280 1280 unsigned long max_delay; 1281 1281 unsigned long now = jiffies; 1282 + unsigned int offset = skb_transport_offset(skb); 1282 1283 __be32 group; 1283 1284 int err = 0; 1284 1285 ··· 1290 1289 1291 1290 group = ih->group; 1292 1291 1293 - if (skb->len == sizeof(*ih)) { 1292 + if (skb->len == offset + sizeof(*ih)) { 1294 1293 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); 1295 1294 1296 1295 if (!max_delay) { 1297 1296 max_delay = 10 * HZ; 1298 1297 group = 0; 1299 1298 } 1300 - } else if (skb->len >= sizeof(*ih3)) { 1299 + } else if (skb->len >= offset + sizeof(*ih3)) { 1301 1300 ih3 = igmpv3_query_hdr(skb); 1302 1301 if (ih3->nsrcs) 1303 1302 goto out; ··· 1358 1357 struct br_ip saddr; 1359 1358 unsigned long max_delay; 1360 1359 unsigned long now = jiffies; 1360 + unsigned int offset = skb_transport_offset(skb); 1361 1361 const struct in6_addr *group = NULL; 1362 1362 bool is_general_query; 1363 1363 int err = 0; ··· 1368 1366 (port && port->state == BR_STATE_DISABLED)) 1369 1367 goto out; 1370 1368 1371 - if (skb->len == sizeof(*mld)) { 1372 - if (!pskb_may_pull(skb, sizeof(*mld))) { 1369 + if (skb->len == offset + sizeof(*mld)) { 1370 + if (!pskb_may_pull(skb, offset + sizeof(*mld))) { 1373 1371 err = -EINVAL; 1374 1372 goto out; 1375 1373 } ··· 1378 1376 if (max_delay) 1379 1377 group = &mld->mld_mca; 1380 1378 } else { 1381 - if (!pskb_may_pull(skb, sizeof(*mld2q))) { 1379 + if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) { 1382 1380 err = -EINVAL; 1383 1381 goto out; 1384 1382 }
+13 -1
net/core/flow.c
··· 92 92 list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list); 93 93 spin_unlock_bh(&xfrm->flow_cache_gc_lock); 94 94 95 - list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) 95 + list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { 96 96 flow_entry_kill(fce, xfrm); 97 + atomic_dec(&xfrm->flow_cache_gc_count); 98 + WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0); 99 + } 97 100 } 98 101 99 102 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, ··· 104 101 struct netns_xfrm *xfrm) 105 102 { 106 103 if (deleted) { 104 + atomic_add(deleted, &xfrm->flow_cache_gc_count); 107 105 fcp->hash_count -= deleted; 108 106 spin_lock_bh(&xfrm->flow_cache_gc_lock); 109 107 list_splice_tail(gc_list, &xfrm->flow_cache_gc_list); ··· 235 231 if (unlikely(!fle)) { 236 232 if (fcp->hash_count > fc->high_watermark) 237 233 flow_cache_shrink(fc, fcp); 234 + 235 + if (fcp->hash_count > 2 * fc->high_watermark || 236 + atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { 237 + atomic_inc(&net->xfrm.flow_cache_genid); 238 + flo = ERR_PTR(-ENOBUFS); 239 + goto ret_object; 240 + } 238 241 239 242 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 240 243 if (fle) { ··· 457 446 INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task); 458 447 INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task); 459 448 mutex_init(&net->xfrm.flow_flush_sem); 449 + atomic_set(&net->xfrm.flow_cache_gc_count, 0); 460 450 461 451 fc->hash_shift = 10; 462 452 fc->low_watermark = 2 * flow_cache_hash_size(fc);
+10 -8
net/core/rtnetlink.c
··· 1173 1173 1174 1174 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev) 1175 1175 { 1176 - struct rtnl_link_ifmap map = { 1177 - .mem_start = dev->mem_start, 1178 - .mem_end = dev->mem_end, 1179 - .base_addr = dev->base_addr, 1180 - .irq = dev->irq, 1181 - .dma = dev->dma, 1182 - .port = dev->if_port, 1183 - }; 1176 + struct rtnl_link_ifmap map; 1177 + 1178 + memset(&map, 0, sizeof(map)); 1179 + map.mem_start = dev->mem_start; 1180 + map.mem_end = dev->mem_end; 1181 + map.base_addr = dev->base_addr; 1182 + map.irq = dev->irq; 1183 + map.dma = dev->dma; 1184 + map.port = dev->if_port; 1185 + 1184 1186 if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) 1185 1187 return -EMSGSIZE; 1186 1188
+4 -2
net/ipv4/fou.c
··· 227 227 int err = -ENOSYS; 228 228 const struct net_offload **offloads; 229 229 230 - udp_tunnel_gro_complete(skb, nhoff); 231 - 232 230 rcu_read_lock(); 233 231 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 234 232 ops = rcu_dereference(offloads[proto]); ··· 234 236 goto out_unlock; 235 237 236 238 err = ops->callbacks.gro_complete(skb, nhoff); 239 + 240 + skb_set_inner_mac_header(skb, nhoff); 237 241 238 242 out_unlock: 239 243 rcu_read_unlock(); ··· 411 411 goto out_unlock; 412 412 413 413 err = ops->callbacks.gro_complete(skb, nhoff + guehlen); 414 + 415 + skb_set_inner_mac_header(skb, nhoff + guehlen); 414 416 415 417 out_unlock: 416 418 rcu_read_unlock();
+18
net/ipv4/ip_vti.c
··· 156 156 struct dst_entry *dst = skb_dst(skb); 157 157 struct net_device *tdev; /* Device to other host */ 158 158 int err; 159 + int mtu; 159 160 160 161 if (!dst) { 161 162 dev->stats.tx_carrier_errors++; ··· 191 190 dst_link_failure(skb); 192 191 } else 193 192 tunnel->err_count = 0; 193 + } 194 + 195 + mtu = dst_mtu(dst); 196 + if (skb->len > mtu) { 197 + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 198 + if (skb->protocol == htons(ETH_P_IP)) { 199 + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 200 + htonl(mtu)); 201 + } else { 202 + if (mtu < IPV6_MIN_MTU) 203 + mtu = IPV6_MIN_MTU; 204 + 205 + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 206 + } 207 + 208 + dst_release(dst); 209 + goto tx_error; 194 210 } 195 211 196 212 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+5 -3
net/ipv4/udp_offload.c
··· 350 350 351 351 uh->len = newlen; 352 352 353 + /* Set encapsulation before calling into inner gro_complete() functions 354 + * to make them set up the inner offsets. 355 + */ 356 + skb->encapsulation = 1; 357 + 353 358 rcu_read_lock(); 354 359 sk = (*lookup)(skb, uh->source, uh->dest); 355 360 if (sk && udp_sk(sk)->gro_complete) ··· 364 359 365 360 if (skb->remcsum_offload) 366 361 skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; 367 - 368 - skb->encapsulation = 1; 369 - skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); 370 362 371 363 return err; 372 364 }
+2 -3
net/ipv6/icmp.c
··· 446 446 447 447 if (__ipv6_addr_needs_scope_id(addr_type)) 448 448 iif = skb->dev->ifindex; 449 + else 450 + iif = l3mdev_master_ifindex(skb->dev); 449 451 450 452 /* 451 453 * Must not send error if the source does not uniquely ··· 501 499 fl6.flowi6_oif = np->mcast_oif; 502 500 else if (!fl6.flowi6_oif) 503 501 fl6.flowi6_oif = np->ucast_oif; 504 - 505 - if (!fl6.flowi6_oif) 506 - fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev); 507 502 508 503 dst = icmpv6_route_lookup(net, skb, sk, &fl6); 509 504 if (IS_ERR(dst))
+6 -1
net/ipv6/tcp_ipv6.c
··· 810 810 fl6.flowi6_proto = IPPROTO_TCP; 811 811 if (rt6_need_strict(&fl6.daddr) && !oif) 812 812 fl6.flowi6_oif = tcp_v6_iif(skb); 813 - else 813 + else { 814 + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) 815 + oif = skb->skb_iif; 816 + 814 817 fl6.flowi6_oif = oif; 818 + } 819 + 815 820 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 816 821 fl6.fl6_dport = t1->dest; 817 822 fl6.fl6_sport = t1->source;
+1
net/llc/af_llc.c
··· 626 626 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) { 627 627 struct llc_pktinfo info; 628 628 629 + memset(&info, 0, sizeof(info)); 629 630 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex; 630 631 llc_pdu_decode_dsap(skb, &info.lpi_sap); 631 632 llc_pdu_decode_da(skb, info.lpi_mac);
+1 -20
net/vmw_vsock/af_vsock.c
··· 1808 1808 else if (sk->sk_shutdown & RCV_SHUTDOWN) 1809 1809 err = 0; 1810 1810 1811 - if (copied > 0) { 1812 - /* We only do these additional bookkeeping/notification steps 1813 - * if we actually copied something out of the queue pair 1814 - * instead of just peeking ahead. 1815 - */ 1816 - 1817 - if (!(flags & MSG_PEEK)) { 1818 - /* If the other side has shutdown for sending and there 1819 - * is nothing more to read, then modify the socket 1820 - * state. 1821 - */ 1822 - if (vsk->peer_shutdown & SEND_SHUTDOWN) { 1823 - if (vsock_stream_has_data(vsk) <= 0) { 1824 - sk->sk_state = SS_UNCONNECTED; 1825 - sock_set_flag(sk, SOCK_DONE); 1826 - sk->sk_state_change(sk); 1827 - } 1828 - } 1829 - } 1811 + if (copied > 0) 1830 1812 err = copied; 1831 - } 1832 1813 1833 1814 out: 1834 1815 release_sock(sk);
+3
net/xfrm/xfrm_output.c
··· 99 99 100 100 skb_dst_force(skb); 101 101 102 + /* Inner headers are invalid now. */ 103 + skb->encapsulation = 0; 104 + 102 105 err = x->type->output(x, skb); 103 106 if (err == -EINPROGRESS) 104 107 goto out;
+45 -24
scripts/mod/file2alias.c
··· 371 371 do_usb_entry_multi(symval + i, mod); 372 372 } 373 373 374 + static void do_of_entry_multi(void *symval, struct module *mod) 375 + { 376 + char alias[500]; 377 + int len; 378 + char *tmp; 379 + 380 + DEF_FIELD_ADDR(symval, of_device_id, name); 381 + DEF_FIELD_ADDR(symval, of_device_id, type); 382 + DEF_FIELD_ADDR(symval, of_device_id, compatible); 383 + 384 + len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", 385 + (*type)[0] ? *type : "*"); 386 + 387 + if (compatible[0]) 388 + sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", 389 + *compatible); 390 + 391 + /* Replace all whitespace with underscores */ 392 + for (tmp = alias; tmp && *tmp; tmp++) 393 + if (isspace(*tmp)) 394 + *tmp = '_'; 395 + 396 + buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); 397 + strcat(alias, "C"); 398 + add_wildcard(alias); 399 + buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); 400 + } 401 + 402 + static void do_of_table(void *symval, unsigned long size, 403 + struct module *mod) 404 + { 405 + unsigned int i; 406 + const unsigned long id_size = SIZE_of_device_id; 407 + 408 + device_id_check(mod->name, "of", size, id_size, symval); 409 + 410 + /* Leave last one: it's the terminator. */ 411 + size -= id_size; 412 + 413 + for (i = 0; i < size; i += id_size) 414 + do_of_entry_multi(symval + i, mod); 415 + } 416 + 374 417 /* Looks like: hid:bNvNpN */ 375 418 static int do_hid_entry(const char *filename, 376 419 void *symval, char *alias) ··· 726 683 return 1; 727 684 } 728 685 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry); 729 - 730 - static int do_of_entry (const char *filename, void *symval, char *alias) 731 - { 732 - int len; 733 - char *tmp; 734 - DEF_FIELD_ADDR(symval, of_device_id, name); 735 - DEF_FIELD_ADDR(symval, of_device_id, type); 736 - DEF_FIELD_ADDR(symval, of_device_id, compatible); 737 - 738 - len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", 739 - (*type)[0] ? 
*type : "*"); 740 - 741 - if (compatible[0]) 742 - sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", 743 - *compatible); 744 - 745 - /* Replace all whitespace with underscores */ 746 - for (tmp = alias; tmp && *tmp; tmp++) 747 - if (isspace (*tmp)) 748 - *tmp = '_'; 749 - 750 - return 1; 751 - } 752 - ADD_TO_DEVTABLE("of", of_device_id, do_of_entry); 753 686 754 687 static int do_vio_entry(const char *filename, void *symval, 755 688 char *alias) ··· 1367 1348 /* First handle the "special" cases */ 1368 1349 if (sym_is(name, namelen, "usb")) 1369 1350 do_usb_table(symval, sym->st_size, mod); 1351 + if (sym_is(name, namelen, "of")) 1352 + do_of_table(symval, sym->st_size, mod); 1370 1353 else if (sym_is(name, namelen, "pnp")) 1371 1354 do_pnp_device_entry(symval, sym->st_size, mod); 1372 1355 else if (sym_is(name, namelen, "pnp_card"))
+2 -2
security/integrity/ima/ima_policy.c
··· 884 884 "BPRM_CHECK", 885 885 "MODULE_CHECK", 886 886 "FIRMWARE_CHECK", 887 + "POST_SETATTR", 887 888 "KEXEC_KERNEL_CHECK", 888 889 "KEXEC_INITRAMFS_CHECK", 889 - "POLICY_CHECK", 890 - "POST_SETATTR" 890 + "POLICY_CHECK" 891 891 }; 892 892 893 893 void *ima_policy_start(struct seq_file *m, loff_t *pos)
+3
tools/net/bpf_jit_disasm.c
··· 98 98 char *buff; 99 99 100 100 len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0); 101 + if (len < 0) 102 + return NULL; 103 + 101 104 buff = malloc(len); 102 105 if (!buff) 103 106 return NULL;