Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'v4.3-rc4' into next

Merge with mainline to sync up with changes to parkbd driver.

+2525 -1622
+1 -1
Documentation/Changes
··· 43 43 o grub 0.93 # grub --version || grub-install --version 44 44 o mcelog 0.6 # mcelog --version 45 45 o iptables 1.4.2 # iptables -V 46 - o openssl & libcrypto 1.0.1k # openssl version 46 + o openssl & libcrypto 1.0.0 # openssl version 47 47 48 48 49 49 Kernel compilation
+1 -1
Documentation/devicetree/bindings/input/cypress,cyapa.txt
··· 25 25 /* Cypress Gen3 touchpad */ 26 26 touchpad@67 { 27 27 compatible = "cypress,cyapa"; 28 - reg = <0x24>; 28 + reg = <0x67>; 29 29 interrupt-parent = <&gpio>; 30 30 interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ 31 31 wakeup-source;
+18 -2
Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
··· 4 4 interrupt. 5 5 6 6 Required Properties: 7 - - compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" 8 - as fallback 7 + - compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or 8 + "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc" 9 9 - reg: Base address and size of the controllers memory area 10 10 - interrupt-parent: phandle of the parent interrupt controller. 11 11 - interrupts: Interrupt specifier for the controllers interrupt. 12 12 - interrupt-controller : Identifies the node as an interrupt controller 13 13 - #interrupt-cells : Specifies the number of cells needed to encode interrupt 14 14 source, should be 1 15 + 16 + Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, 17 + use ar7240 for all other SoCs. 15 18 16 19 Please refer to interrupts.txt in this directory for details of the common 17 20 Interrupt Controllers bindings used by client devices. ··· 23 20 24 21 interrupt-controller@18060010 { 25 22 compatible = "qca,ar9132-misc-intc", qca,ar7100-misc-intc"; 23 + reg = <0x18060010 0x4>; 24 + 25 + interrupt-parent = <&cpuintc>; 26 + interrupts = <6>; 27 + 28 + interrupt-controller; 29 + #interrupt-cells = <1>; 30 + }; 31 + 32 + Another example: 33 + 34 + interrupt-controller@18060010 { 35 + compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc"; 26 36 reg = <0x18060010 0x4>; 27 37 28 38 interrupt-parent = <&cpuintc>;
+1 -1
Documentation/input/multi-touch-protocol.txt
··· 361 361 ABS_MT_POSITION_X := T_X 362 362 ABS_MT_POSITION_Y := T_Y 363 363 ABS_MT_TOOL_X := C_X 364 - ABS_MT_TOOL_X := C_Y 364 + ABS_MT_TOOL_Y := C_Y 365 365 366 366 Unfortunately, there is not enough information to specify both the touching 367 367 ellipse and the tool ellipse, so one has to resort to approximations. One
+38 -13
Documentation/power/pci.txt
··· 979 979 (alternatively, the runtime_suspend() callback will have to check if the 980 980 device should really be suspended and return -EAGAIN if that is not the case). 981 981 982 - The runtime PM of PCI devices is disabled by default. It is also blocked by 983 - pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI 984 - driver implements the runtime PM callbacks and intends to use the runtime PM 985 - framework provided by the PM core and the PCI subsystem, it should enable this 986 - feature by executing the pm_runtime_enable() helper function. However, the 987 - driver should not call the pm_runtime_allow() helper function unblocking 988 - the runtime PM of the device. Instead, it should allow user space or some 989 - platform-specific code to do that (user space can do it via sysfs), although 990 - once it has called pm_runtime_enable(), it must be prepared to handle the 982 + The runtime PM of PCI devices is enabled by default by the PCI core. PCI 983 + device drivers do not need to enable it and should not attempt to do so. 984 + However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() 985 + helper function. In addition to that, the runtime PM usage counter of 986 + each PCI device is incremented by local_pci_probe() before executing the 987 + probe callback provided by the device's driver. 988 + 989 + If a PCI driver implements the runtime PM callbacks and intends to use the 990 + runtime PM framework provided by the PM core and the PCI subsystem, it needs 991 + to decrement the device's runtime PM usage counter in its probe callback 992 + function. If it doesn't do that, the counter will always be different from 993 + zero for the device and it will never be runtime-suspended. The simplest 994 + way to do that is by calling pm_runtime_put_noidle(), but if the driver 995 + wants to schedule an autosuspend right away, for example, it may call 996 + pm_runtime_put_autosuspend() instead for this purpose. 
Generally, it 997 + just needs to call a function that decrements the device's usage counter 998 + from its probe routine to make runtime PM work for the device. 999 + 1000 + It is important to remember that the driver's runtime_suspend() callback 1001 + may be executed right after the usage counter has been decremented, because 1002 + user space may already have caused the pm_runtime_allow() helper function 1003 + unblocking the runtime PM of the device to run via sysfs, so the driver must 1004 + be prepared to cope with that. 1005 + 1006 + The driver itself should not call pm_runtime_allow(), though. Instead, it 1007 + should let user space or some platform-specific code do that (user space can 1008 + do it via sysfs as stated above), but it must be prepared to handle the 991 1009 runtime PM of the device correctly as soon as pm_runtime_allow() is called 992 - (which may happen at any time). [It also is possible that user space causes 993 - pm_runtime_allow() to be called via sysfs before the driver is loaded, so in 994 - fact the driver has to be prepared to handle the runtime PM of the device as 995 - soon as it calls pm_runtime_enable().] 1010 + (which may happen at any time, even before the driver is loaded). 1011 + 1012 + When the driver's remove callback runs, it has to balance the decrementation 1013 + of the device's runtime PM usage counter at the probe time. For this reason, 1014 + if it has decremented the counter in its probe callback, it must run 1015 + pm_runtime_get_noresume() in its remove callback. [Since the core carries 1016 + out a runtime resume of the device and bumps up the device's usage counter 1017 + before running the driver's remove callback, the runtime PM of the device 1018 + is effectively disabled for the duration of the remove execution and all 1019 + runtime PM helper functions incrementing the device's usage counter are 1020 + then effectively equivalent to pm_runtime_get_noresume().] 
996 1021 997 1022 The runtime PM framework works by processing requests to suspend or resume 998 1023 devices, or to check if they are idle (in which cases it is reasonable to
+1
Documentation/ptp/testptp.c
··· 18 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 19 */ 20 20 #define _GNU_SOURCE 21 + #define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ 21 22 #include <errno.h> 22 23 #include <fcntl.h> 23 24 #include <inttypes.h>
+2 -2
MAINTAINERS
··· 5957 5957 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V 5958 5958 M: Joerg Roedel <joro@8bytes.org> 5959 5959 L: kvm@vger.kernel.org 5960 - W: http://kvm.qumranet.com 5960 + W: http://www.linux-kvm.org/ 5961 5961 S: Maintained 5962 5962 F: arch/x86/include/asm/svm.h 5963 5963 F: arch/x86/kvm/svm.c ··· 5965 5965 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC 5966 5966 M: Alexander Graf <agraf@suse.com> 5967 5967 L: kvm-ppc@vger.kernel.org 5968 - W: http://kvm.qumranet.com 5968 + W: http://www.linux-kvm.org/ 5969 5969 T: git git://github.com/agraf/linux-2.6.git 5970 5970 S: Supported 5971 5971 F: arch/powerpc/include/asm/kvm*
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 3 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Hurr durr I'ma sheep 6 6 7 7 # *DOCUMENTATION*
+1
arch/arc/include/asm/Kbuild
··· 48 48 generic-y += ucontext.h 49 49 generic-y += user.h 50 50 generic-y += vga.h 51 + generic-y += word-at-a-time.h 51 52 generic-y += xor.h
+2 -2
arch/arm64/include/asm/pgtable.h
··· 79 79 #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) 80 80 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) 81 81 82 - #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) 82 + #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) 83 83 #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) 84 84 #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) 85 85 #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) ··· 496 496 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 497 497 { 498 498 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 499 - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; 499 + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; 500 500 /* preserve the hardware dirty information */ 501 501 if (pte_hw_dirty(pte)) 502 502 pte = pte_mkdirty(pte);
+2 -1
arch/arm64/kernel/efi.c
··· 258 258 */ 259 259 if (!is_normal_ram(md)) 260 260 prot = __pgprot(PROT_DEVICE_nGnRE); 261 - else if (md->type == EFI_RUNTIME_SERVICES_CODE) 261 + else if (md->type == EFI_RUNTIME_SERVICES_CODE || 262 + !PAGE_ALIGNED(md->phys_addr)) 262 263 prot = PAGE_KERNEL_EXEC; 263 264 else 264 265 prot = PAGE_KERNEL;
+20 -2
arch/arm64/kernel/entry-ftrace.S
··· 178 178 ENDPROC(ftrace_stub) 179 179 180 180 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 181 + /* save return value regs*/ 182 + .macro save_return_regs 183 + sub sp, sp, #64 184 + stp x0, x1, [sp] 185 + stp x2, x3, [sp, #16] 186 + stp x4, x5, [sp, #32] 187 + stp x6, x7, [sp, #48] 188 + .endm 189 + 190 + /* restore return value regs*/ 191 + .macro restore_return_regs 192 + ldp x0, x1, [sp] 193 + ldp x2, x3, [sp, #16] 194 + ldp x4, x5, [sp, #32] 195 + ldp x6, x7, [sp, #48] 196 + add sp, sp, #64 197 + .endm 198 + 181 199 /* 182 200 * void ftrace_graph_caller(void) 183 201 * ··· 222 204 * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. 223 205 */ 224 206 ENTRY(return_to_handler) 225 - str x0, [sp, #-16]! 207 + save_return_regs 226 208 mov x0, x29 // parent's fp 227 209 bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); 228 210 mov x30, x0 // restore the original return address 229 - ldr x0, [sp], #16 211 + restore_return_regs 230 212 ret 231 213 END(return_to_handler) 232 214 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+1
arch/avr32/include/asm/Kbuild
··· 20 20 generic-y += topology.h 21 21 generic-y += trace_clock.h 22 22 generic-y += vga.h 23 + generic-y += word-at-a-time.h 23 24 generic-y += xor.h
+1
arch/blackfin/include/asm/Kbuild
··· 46 46 generic-y += ucontext.h 47 47 generic-y += unaligned.h 48 48 generic-y += user.h 49 + generic-y += word-at-a-time.h 49 50 generic-y += xor.h
+1
arch/c6x/include/asm/Kbuild
··· 59 59 generic-y += ucontext.h 60 60 generic-y += user.h 61 61 generic-y += vga.h 62 + generic-y += word-at-a-time.h 62 63 generic-y += xor.h
+1
arch/cris/include/asm/Kbuild
··· 43 43 generic-y += trace_clock.h 44 44 generic-y += types.h 45 45 generic-y += vga.h 46 + generic-y += word-at-a-time.h 46 47 generic-y += xor.h
+1
arch/frv/include/asm/Kbuild
··· 7 7 generic-y += mm-arch-hooks.h 8 8 generic-y += preempt.h 9 9 generic-y += trace_clock.h 10 + generic-y += word-at-a-time.h
+1
arch/hexagon/include/asm/Kbuild
··· 58 58 generic-y += ucontext.h 59 59 generic-y += unaligned.h 60 60 generic-y += vga.h 61 + generic-y += word-at-a-time.h 61 62 generic-y += xor.h
+1
arch/ia64/include/asm/Kbuild
··· 8 8 generic-y += preempt.h 9 9 generic-y += trace_clock.h 10 10 generic-y += vtime.h 11 + generic-y += word-at-a-time.h
+1
arch/m32r/include/asm/Kbuild
··· 9 9 generic-y += preempt.h 10 10 generic-y += sections.h 11 11 generic-y += trace_clock.h 12 + generic-y += word-at-a-time.h
+8 -1
arch/m68k/configs/amiga_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 58 57 CONFIG_NET_IPGRE=m 59 58 CONFIG_NET_IPVTI=m 60 59 CONFIG_NET_FOU_IP_TUNNELS=y 61 - CONFIG_GENEVE_CORE=m 62 60 CONFIG_INET_AH=m 63 61 CONFIG_INET_ESP=m 64 62 CONFIG_INET_IPCOMP=m ··· 67 67 # CONFIG_INET_LRO is not set 68 68 CONFIG_INET_DIAG=m 69 69 CONFIG_INET_UDP_DIAG=m 70 + CONFIG_IPV6=m 70 71 CONFIG_IPV6_ROUTER_PREF=y 71 72 CONFIG_INET6_AH=m 72 73 CONFIG_INET6_ESP=m 73 74 CONFIG_INET6_IPCOMP=m 75 + CONFIG_IPV6_ILA=m 74 76 CONFIG_IPV6_VTI=m 75 77 CONFIG_IPV6_GRE=m 76 78 CONFIG_NETFILTER=y ··· 181 179 CONFIG_IP_SET_LIST_SET=m 182 180 CONFIG_NF_CONNTRACK_IPV4=m 183 181 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 182 + CONFIG_NFT_DUP_IPV4=m 184 183 CONFIG_NF_TABLES_ARP=m 185 184 CONFIG_NF_LOG_ARP=m 186 185 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 209 206 CONFIG_IP_NF_ARP_MANGLE=m 210 207 CONFIG_NF_CONNTRACK_IPV6=m 211 208 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 209 + CONFIG_NFT_DUP_IPV6=m 212 210 CONFIG_NFT_CHAIN_NAT_IPV6=m 213 211 CONFIG_NFT_MASQ_IPV6=m 214 212 CONFIG_NFT_REDIR_IPV6=m ··· 275 271 CONFIG_MPLS=y 276 272 CONFIG_NET_MPLS_GSO=m 277 273 CONFIG_MPLS_ROUTING=m 274 + CONFIG_MPLS_IPTUNNEL=m 278 275 # CONFIG_WIRELESS is not set 279 276 # CONFIG_UEVENT_HELPER is not set 280 277 CONFIG_DEVTMPFS=y ··· 375 370 # CONFIG_NET_VENDOR_SEEQ is not set 376 371 # CONFIG_NET_VENDOR_SMSC is not set 377 372 # CONFIG_NET_VENDOR_STMICRO is not set 373 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 378 374 # CONFIG_NET_VENDOR_VIA is not set 379 375 # CONFIG_NET_VENDOR_WIZNET is not set 380 376 CONFIG_PPP=m ··· 543 537 CONFIG_TEST_BPF=m 544 538 CONFIG_TEST_FIRMWARE=m 545 539 CONFIG_TEST_UDELAY=m 540 + CONFIG_TEST_STATIC_KEYS=m 546 541 CONFIG_EARLY_PRINTK=y 547 542 CONFIG_ENCRYPTED_KEYS=m 548 543 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/apollo_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 56 55 CONFIG_NET_IPGRE=m 57 56 CONFIG_NET_IPVTI=m 58 57 CONFIG_NET_FOU_IP_TUNNELS=y 59 - CONFIG_GENEVE_CORE=m 60 58 CONFIG_INET_AH=m 61 59 CONFIG_INET_ESP=m 62 60 CONFIG_INET_IPCOMP=m ··· 65 65 # CONFIG_INET_LRO is not set 66 66 CONFIG_INET_DIAG=m 67 67 CONFIG_INET_UDP_DIAG=m 68 + CONFIG_IPV6=m 68 69 CONFIG_IPV6_ROUTER_PREF=y 69 70 CONFIG_INET6_AH=m 70 71 CONFIG_INET6_ESP=m 71 72 CONFIG_INET6_IPCOMP=m 73 + CONFIG_IPV6_ILA=m 72 74 CONFIG_IPV6_VTI=m 73 75 CONFIG_IPV6_GRE=m 74 76 CONFIG_NETFILTER=y ··· 179 177 CONFIG_IP_SET_LIST_SET=m 180 178 CONFIG_NF_CONNTRACK_IPV4=m 181 179 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 180 + CONFIG_NFT_DUP_IPV4=m 182 181 CONFIG_NF_TABLES_ARP=m 183 182 CONFIG_NF_LOG_ARP=m 184 183 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 207 204 CONFIG_IP_NF_ARP_MANGLE=m 208 205 CONFIG_NF_CONNTRACK_IPV6=m 209 206 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 207 + CONFIG_NFT_DUP_IPV6=m 210 208 CONFIG_NFT_CHAIN_NAT_IPV6=m 211 209 CONFIG_NFT_MASQ_IPV6=m 212 210 CONFIG_NFT_REDIR_IPV6=m ··· 273 269 CONFIG_MPLS=y 274 270 CONFIG_NET_MPLS_GSO=m 275 271 CONFIG_MPLS_ROUTING=m 272 + CONFIG_MPLS_IPTUNNEL=m 276 273 # CONFIG_WIRELESS is not set 277 274 # CONFIG_UEVENT_HELPER is not set 278 275 CONFIG_DEVTMPFS=y ··· 349 344 # CONFIG_NET_VENDOR_SAMSUNG is not set 350 345 # CONFIG_NET_VENDOR_SEEQ is not set 351 346 # CONFIG_NET_VENDOR_STMICRO is not set 347 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 352 348 # CONFIG_NET_VENDOR_VIA is not set 353 349 # CONFIG_NET_VENDOR_WIZNET is not set 354 350 CONFIG_PPP=m ··· 501 495 CONFIG_TEST_BPF=m 502 496 CONFIG_TEST_FIRMWARE=m 503 497 CONFIG_TEST_UDELAY=m 498 + CONFIG_TEST_STATIC_KEYS=m 504 499 CONFIG_EARLY_PRINTK=y 505 500 CONFIG_ENCRYPTED_KEYS=m 506 501 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/atari_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 56 55 CONFIG_NET_IPGRE=m 57 56 CONFIG_NET_IPVTI=m 58 57 CONFIG_NET_FOU_IP_TUNNELS=y 59 - CONFIG_GENEVE_CORE=m 60 58 CONFIG_INET_AH=m 61 59 CONFIG_INET_ESP=m 62 60 CONFIG_INET_IPCOMP=m ··· 65 65 # CONFIG_INET_LRO is not set 66 66 CONFIG_INET_DIAG=m 67 67 CONFIG_INET_UDP_DIAG=m 68 + CONFIG_IPV6=m 68 69 CONFIG_IPV6_ROUTER_PREF=y 69 70 CONFIG_INET6_AH=m 70 71 CONFIG_INET6_ESP=m 71 72 CONFIG_INET6_IPCOMP=m 73 + CONFIG_IPV6_ILA=m 72 74 CONFIG_IPV6_VTI=m 73 75 CONFIG_IPV6_GRE=m 74 76 CONFIG_NETFILTER=y ··· 179 177 CONFIG_IP_SET_LIST_SET=m 180 178 CONFIG_NF_CONNTRACK_IPV4=m 181 179 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 180 + CONFIG_NFT_DUP_IPV4=m 182 181 CONFIG_NF_TABLES_ARP=m 183 182 CONFIG_NF_LOG_ARP=m 184 183 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 207 204 CONFIG_IP_NF_ARP_MANGLE=m 208 205 CONFIG_NF_CONNTRACK_IPV6=m 209 206 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 207 + CONFIG_NFT_DUP_IPV6=m 210 208 CONFIG_NFT_CHAIN_NAT_IPV6=m 211 209 CONFIG_NFT_MASQ_IPV6=m 212 210 CONFIG_NFT_REDIR_IPV6=m ··· 273 269 CONFIG_MPLS=y 274 270 CONFIG_NET_MPLS_GSO=m 275 271 CONFIG_MPLS_ROUTING=m 272 + CONFIG_MPLS_IPTUNNEL=m 276 273 # CONFIG_WIRELESS is not set 277 274 # CONFIG_UEVENT_HELPER is not set 278 275 CONFIG_DEVTMPFS=y ··· 360 355 # CONFIG_NET_VENDOR_SEEQ is not set 361 356 CONFIG_SMC91X=y 362 357 # CONFIG_NET_VENDOR_STMICRO is not set 358 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 363 359 # CONFIG_NET_VENDOR_VIA is not set 364 360 # CONFIG_NET_VENDOR_WIZNET is not set 365 361 CONFIG_PPP=m ··· 523 517 CONFIG_TEST_BPF=m 524 518 CONFIG_TEST_FIRMWARE=m 525 519 CONFIG_TEST_UDELAY=m 520 + CONFIG_TEST_STATIC_KEYS=m 526 521 CONFIG_EARLY_PRINTK=y 527 522 CONFIG_ENCRYPTED_KEYS=m 528 523 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/bvme6000_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 54 53 CONFIG_NET_IPGRE=m 55 54 CONFIG_NET_IPVTI=m 56 55 CONFIG_NET_FOU_IP_TUNNELS=y 57 - CONFIG_GENEVE_CORE=m 58 56 CONFIG_INET_AH=m 59 57 CONFIG_INET_ESP=m 60 58 CONFIG_INET_IPCOMP=m ··· 63 63 # CONFIG_INET_LRO is not set 64 64 CONFIG_INET_DIAG=m 65 65 CONFIG_INET_UDP_DIAG=m 66 + CONFIG_IPV6=m 66 67 CONFIG_IPV6_ROUTER_PREF=y 67 68 CONFIG_INET6_AH=m 68 69 CONFIG_INET6_ESP=m 69 70 CONFIG_INET6_IPCOMP=m 71 + CONFIG_IPV6_ILA=m 70 72 CONFIG_IPV6_VTI=m 71 73 CONFIG_IPV6_GRE=m 72 74 CONFIG_NETFILTER=y ··· 177 175 CONFIG_IP_SET_LIST_SET=m 178 176 CONFIG_NF_CONNTRACK_IPV4=m 179 177 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 178 + CONFIG_NFT_DUP_IPV4=m 180 179 CONFIG_NF_TABLES_ARP=m 181 180 CONFIG_NF_LOG_ARP=m 182 181 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 205 202 CONFIG_IP_NF_ARP_MANGLE=m 206 203 CONFIG_NF_CONNTRACK_IPV6=m 207 204 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205 + CONFIG_NFT_DUP_IPV6=m 208 206 CONFIG_NFT_CHAIN_NAT_IPV6=m 209 207 CONFIG_NFT_MASQ_IPV6=m 210 208 CONFIG_NFT_REDIR_IPV6=m ··· 271 267 CONFIG_MPLS=y 272 268 CONFIG_NET_MPLS_GSO=m 273 269 CONFIG_MPLS_ROUTING=m 270 + CONFIG_MPLS_IPTUNNEL=m 274 271 # CONFIG_WIRELESS is not set 275 272 # CONFIG_UEVENT_HELPER is not set 276 273 CONFIG_DEVTMPFS=y ··· 348 343 # CONFIG_NET_VENDOR_SAMSUNG is not set 349 344 # CONFIG_NET_VENDOR_SEEQ is not set 350 345 # CONFIG_NET_VENDOR_STMICRO is not set 346 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 351 347 # CONFIG_NET_VENDOR_VIA is not set 352 348 # CONFIG_NET_VENDOR_WIZNET is not set 353 349 CONFIG_PPP=m ··· 494 488 CONFIG_TEST_BPF=m 495 489 CONFIG_TEST_FIRMWARE=m 496 490 CONFIG_TEST_UDELAY=m 491 + CONFIG_TEST_STATIC_KEYS=m 497 492 CONFIG_EARLY_PRINTK=y 498 493 CONFIG_ENCRYPTED_KEYS=m 499 494 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/hp300_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 56 55 CONFIG_NET_IPGRE=m 57 56 CONFIG_NET_IPVTI=m 58 57 CONFIG_NET_FOU_IP_TUNNELS=y 59 - CONFIG_GENEVE_CORE=m 60 58 CONFIG_INET_AH=m 61 59 CONFIG_INET_ESP=m 62 60 CONFIG_INET_IPCOMP=m ··· 65 65 # CONFIG_INET_LRO is not set 66 66 CONFIG_INET_DIAG=m 67 67 CONFIG_INET_UDP_DIAG=m 68 + CONFIG_IPV6=m 68 69 CONFIG_IPV6_ROUTER_PREF=y 69 70 CONFIG_INET6_AH=m 70 71 CONFIG_INET6_ESP=m 71 72 CONFIG_INET6_IPCOMP=m 73 + CONFIG_IPV6_ILA=m 72 74 CONFIG_IPV6_VTI=m 73 75 CONFIG_IPV6_GRE=m 74 76 CONFIG_NETFILTER=y ··· 179 177 CONFIG_IP_SET_LIST_SET=m 180 178 CONFIG_NF_CONNTRACK_IPV4=m 181 179 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 180 + CONFIG_NFT_DUP_IPV4=m 182 181 CONFIG_NF_TABLES_ARP=m 183 182 CONFIG_NF_LOG_ARP=m 184 183 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 207 204 CONFIG_IP_NF_ARP_MANGLE=m 208 205 CONFIG_NF_CONNTRACK_IPV6=m 209 206 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 207 + CONFIG_NFT_DUP_IPV6=m 210 208 CONFIG_NFT_CHAIN_NAT_IPV6=m 211 209 CONFIG_NFT_MASQ_IPV6=m 212 210 CONFIG_NFT_REDIR_IPV6=m ··· 273 269 CONFIG_MPLS=y 274 270 CONFIG_NET_MPLS_GSO=m 275 271 CONFIG_MPLS_ROUTING=m 272 + CONFIG_MPLS_IPTUNNEL=m 276 273 # CONFIG_WIRELESS is not set 277 274 # CONFIG_UEVENT_HELPER is not set 278 275 CONFIG_DEVTMPFS=y ··· 350 345 # CONFIG_NET_VENDOR_SAMSUNG is not set 351 346 # CONFIG_NET_VENDOR_SEEQ is not set 352 347 # CONFIG_NET_VENDOR_STMICRO is not set 348 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 353 349 # CONFIG_NET_VENDOR_VIA is not set 354 350 # CONFIG_NET_VENDOR_WIZNET is not set 355 351 CONFIG_PPP=m ··· 503 497 CONFIG_TEST_BPF=m 504 498 CONFIG_TEST_FIRMWARE=m 505 499 CONFIG_TEST_UDELAY=m 500 + CONFIG_TEST_STATIC_KEYS=m 506 501 CONFIG_EARLY_PRINTK=y 507 502 CONFIG_ENCRYPTED_KEYS=m 508 503 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/mac_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 55 54 CONFIG_NET_IPGRE=m 56 55 CONFIG_NET_IPVTI=m 57 56 CONFIG_NET_FOU_IP_TUNNELS=y 58 - CONFIG_GENEVE_CORE=m 59 57 CONFIG_INET_AH=m 60 58 CONFIG_INET_ESP=m 61 59 CONFIG_INET_IPCOMP=m ··· 64 64 # CONFIG_INET_LRO is not set 65 65 CONFIG_INET_DIAG=m 66 66 CONFIG_INET_UDP_DIAG=m 67 + CONFIG_IPV6=m 67 68 CONFIG_IPV6_ROUTER_PREF=y 68 69 CONFIG_INET6_AH=m 69 70 CONFIG_INET6_ESP=m 70 71 CONFIG_INET6_IPCOMP=m 72 + CONFIG_IPV6_ILA=m 71 73 CONFIG_IPV6_VTI=m 72 74 CONFIG_IPV6_GRE=m 73 75 CONFIG_NETFILTER=y ··· 178 176 CONFIG_IP_SET_LIST_SET=m 179 177 CONFIG_NF_CONNTRACK_IPV4=m 180 178 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 179 + CONFIG_NFT_DUP_IPV4=m 181 180 CONFIG_NF_TABLES_ARP=m 182 181 CONFIG_NF_LOG_ARP=m 183 182 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 206 203 CONFIG_IP_NF_ARP_MANGLE=m 207 204 CONFIG_NF_CONNTRACK_IPV6=m 208 205 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 206 + CONFIG_NFT_DUP_IPV6=m 209 207 CONFIG_NFT_CHAIN_NAT_IPV6=m 210 208 CONFIG_NFT_MASQ_IPV6=m 211 209 CONFIG_NFT_REDIR_IPV6=m ··· 275 271 CONFIG_MPLS=y 276 272 CONFIG_NET_MPLS_GSO=m 277 273 CONFIG_MPLS_ROUTING=m 274 + CONFIG_MPLS_IPTUNNEL=m 278 275 # CONFIG_WIRELESS is not set 279 276 # CONFIG_UEVENT_HELPER is not set 280 277 CONFIG_DEVTMPFS=y ··· 369 364 # CONFIG_NET_VENDOR_SEEQ is not set 370 365 # CONFIG_NET_VENDOR_SMSC is not set 371 366 # CONFIG_NET_VENDOR_STMICRO is not set 367 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 372 368 # CONFIG_NET_VENDOR_VIA is not set 373 369 # CONFIG_NET_VENDOR_WIZNET is not set 374 370 CONFIG_PPP=m ··· 525 519 CONFIG_TEST_BPF=m 526 520 CONFIG_TEST_FIRMWARE=m 527 521 CONFIG_TEST_UDELAY=m 522 + CONFIG_TEST_STATIC_KEYS=m 528 523 CONFIG_EARLY_PRINTK=y 529 524 CONFIG_ENCRYPTED_KEYS=m 530 525 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/multi_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 65 64 CONFIG_NET_IPGRE=m 66 65 CONFIG_NET_IPVTI=m 67 66 CONFIG_NET_FOU_IP_TUNNELS=y 68 - CONFIG_GENEVE_CORE=m 69 67 CONFIG_INET_AH=m 70 68 CONFIG_INET_ESP=m 71 69 CONFIG_INET_IPCOMP=m ··· 74 74 # CONFIG_INET_LRO is not set 75 75 CONFIG_INET_DIAG=m 76 76 CONFIG_INET_UDP_DIAG=m 77 + CONFIG_IPV6=m 77 78 CONFIG_IPV6_ROUTER_PREF=y 78 79 CONFIG_INET6_AH=m 79 80 CONFIG_INET6_ESP=m 80 81 CONFIG_INET6_IPCOMP=m 82 + CONFIG_IPV6_ILA=m 81 83 CONFIG_IPV6_VTI=m 82 84 CONFIG_IPV6_GRE=m 83 85 CONFIG_NETFILTER=y ··· 188 186 CONFIG_IP_SET_LIST_SET=m 189 187 CONFIG_NF_CONNTRACK_IPV4=m 190 188 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 189 + CONFIG_NFT_DUP_IPV4=m 191 190 CONFIG_NF_TABLES_ARP=m 192 191 CONFIG_NF_LOG_ARP=m 193 192 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 216 213 CONFIG_IP_NF_ARP_MANGLE=m 217 214 CONFIG_NF_CONNTRACK_IPV6=m 218 215 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 216 + CONFIG_NFT_DUP_IPV6=m 219 217 CONFIG_NFT_CHAIN_NAT_IPV6=m 220 218 CONFIG_NFT_MASQ_IPV6=m 221 219 CONFIG_NFT_REDIR_IPV6=m ··· 285 281 CONFIG_MPLS=y 286 282 CONFIG_NET_MPLS_GSO=m 287 283 CONFIG_MPLS_ROUTING=m 284 + CONFIG_MPLS_IPTUNNEL=m 288 285 # CONFIG_WIRELESS is not set 289 286 # CONFIG_UEVENT_HELPER is not set 290 287 CONFIG_DEVTMPFS=y ··· 415 410 # CONFIG_NET_VENDOR_SEEQ is not set 416 411 CONFIG_SMC91X=y 417 412 # CONFIG_NET_VENDOR_STMICRO is not set 413 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 418 414 # CONFIG_NET_VENDOR_VIA is not set 419 415 # CONFIG_NET_VENDOR_WIZNET is not set 420 416 CONFIG_PLIP=m ··· 605 599 CONFIG_TEST_BPF=m 606 600 CONFIG_TEST_FIRMWARE=m 607 601 CONFIG_TEST_UDELAY=m 602 + CONFIG_TEST_STATIC_KEYS=m 608 603 CONFIG_EARLY_PRINTK=y 609 604 CONFIG_ENCRYPTED_KEYS=m 610 605 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/mvme147_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 53 52 CONFIG_NET_IPGRE=m 54 53 CONFIG_NET_IPVTI=m 55 54 CONFIG_NET_FOU_IP_TUNNELS=y 56 - CONFIG_GENEVE_CORE=m 57 55 CONFIG_INET_AH=m 58 56 CONFIG_INET_ESP=m 59 57 CONFIG_INET_IPCOMP=m ··· 62 62 # CONFIG_INET_LRO is not set 63 63 CONFIG_INET_DIAG=m 64 64 CONFIG_INET_UDP_DIAG=m 65 + CONFIG_IPV6=m 65 66 CONFIG_IPV6_ROUTER_PREF=y 66 67 CONFIG_INET6_AH=m 67 68 CONFIG_INET6_ESP=m 68 69 CONFIG_INET6_IPCOMP=m 70 + CONFIG_IPV6_ILA=m 69 71 CONFIG_IPV6_VTI=m 70 72 CONFIG_IPV6_GRE=m 71 73 CONFIG_NETFILTER=y ··· 176 174 CONFIG_IP_SET_LIST_SET=m 177 175 CONFIG_NF_CONNTRACK_IPV4=m 178 176 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 177 + CONFIG_NFT_DUP_IPV4=m 179 178 CONFIG_NF_TABLES_ARP=m 180 179 CONFIG_NF_LOG_ARP=m 181 180 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 204 201 CONFIG_IP_NF_ARP_MANGLE=m 205 202 CONFIG_NF_CONNTRACK_IPV6=m 206 203 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 204 + CONFIG_NFT_DUP_IPV6=m 207 205 CONFIG_NFT_CHAIN_NAT_IPV6=m 208 206 CONFIG_NFT_MASQ_IPV6=m 209 207 CONFIG_NFT_REDIR_IPV6=m ··· 270 266 CONFIG_MPLS=y 271 267 CONFIG_NET_MPLS_GSO=m 272 268 CONFIG_MPLS_ROUTING=m 269 + CONFIG_MPLS_IPTUNNEL=m 273 270 # CONFIG_WIRELESS is not set 274 271 # CONFIG_UEVENT_HELPER is not set 275 272 CONFIG_DEVTMPFS=y ··· 348 343 # CONFIG_NET_VENDOR_SAMSUNG is not set 349 344 # CONFIG_NET_VENDOR_SEEQ is not set 350 345 # CONFIG_NET_VENDOR_STMICRO is not set 346 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 351 347 # CONFIG_NET_VENDOR_VIA is not set 352 348 # CONFIG_NET_VENDOR_WIZNET is not set 353 349 CONFIG_PPP=m ··· 494 488 CONFIG_TEST_BPF=m 495 489 CONFIG_TEST_FIRMWARE=m 496 490 CONFIG_TEST_UDELAY=m 491 + CONFIG_TEST_STATIC_KEYS=m 497 492 CONFIG_EARLY_PRINTK=y 498 493 CONFIG_ENCRYPTED_KEYS=m 499 494 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/mvme16x_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 54 53 CONFIG_NET_IPGRE=m 55 54 CONFIG_NET_IPVTI=m 56 55 CONFIG_NET_FOU_IP_TUNNELS=y 57 - CONFIG_GENEVE_CORE=m 58 56 CONFIG_INET_AH=m 59 57 CONFIG_INET_ESP=m 60 58 CONFIG_INET_IPCOMP=m ··· 63 63 # CONFIG_INET_LRO is not set 64 64 CONFIG_INET_DIAG=m 65 65 CONFIG_INET_UDP_DIAG=m 66 + CONFIG_IPV6=m 66 67 CONFIG_IPV6_ROUTER_PREF=y 67 68 CONFIG_INET6_AH=m 68 69 CONFIG_INET6_ESP=m 69 70 CONFIG_INET6_IPCOMP=m 71 + CONFIG_IPV6_ILA=m 70 72 CONFIG_IPV6_VTI=m 71 73 CONFIG_IPV6_GRE=m 72 74 CONFIG_NETFILTER=y ··· 177 175 CONFIG_IP_SET_LIST_SET=m 178 176 CONFIG_NF_CONNTRACK_IPV4=m 179 177 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 178 + CONFIG_NFT_DUP_IPV4=m 180 179 CONFIG_NF_TABLES_ARP=m 181 180 CONFIG_NF_LOG_ARP=m 182 181 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 205 202 CONFIG_IP_NF_ARP_MANGLE=m 206 203 CONFIG_NF_CONNTRACK_IPV6=m 207 204 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205 + CONFIG_NFT_DUP_IPV6=m 208 206 CONFIG_NFT_CHAIN_NAT_IPV6=m 209 207 CONFIG_NFT_MASQ_IPV6=m 210 208 CONFIG_NFT_REDIR_IPV6=m ··· 271 267 CONFIG_MPLS=y 272 268 CONFIG_NET_MPLS_GSO=m 273 269 CONFIG_MPLS_ROUTING=m 270 + CONFIG_MPLS_IPTUNNEL=m 274 271 # CONFIG_WIRELESS is not set 275 272 # CONFIG_UEVENT_HELPER is not set 276 273 CONFIG_DEVTMPFS=y ··· 348 343 # CONFIG_NET_VENDOR_SAMSUNG is not set 349 344 # CONFIG_NET_VENDOR_SEEQ is not set 350 345 # CONFIG_NET_VENDOR_STMICRO is not set 346 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 351 347 # CONFIG_NET_VENDOR_VIA is not set 352 348 # CONFIG_NET_VENDOR_WIZNET is not set 353 349 CONFIG_PPP=m ··· 494 488 CONFIG_TEST_BPF=m 495 489 CONFIG_TEST_FIRMWARE=m 496 490 CONFIG_TEST_UDELAY=m 491 + CONFIG_TEST_STATIC_KEYS=m 497 492 CONFIG_EARLY_PRINTK=y 498 493 CONFIG_ENCRYPTED_KEYS=m 499 494 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/q40_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 54 53 CONFIG_NET_IPGRE=m 55 54 CONFIG_NET_IPVTI=m 56 55 CONFIG_NET_FOU_IP_TUNNELS=y 57 - CONFIG_GENEVE_CORE=m 58 56 CONFIG_INET_AH=m 59 57 CONFIG_INET_ESP=m 60 58 CONFIG_INET_IPCOMP=m ··· 63 63 # CONFIG_INET_LRO is not set 64 64 CONFIG_INET_DIAG=m 65 65 CONFIG_INET_UDP_DIAG=m 66 + CONFIG_IPV6=m 66 67 CONFIG_IPV6_ROUTER_PREF=y 67 68 CONFIG_INET6_AH=m 68 69 CONFIG_INET6_ESP=m 69 70 CONFIG_INET6_IPCOMP=m 71 + CONFIG_IPV6_ILA=m 70 72 CONFIG_IPV6_VTI=m 71 73 CONFIG_IPV6_GRE=m 72 74 CONFIG_NETFILTER=y ··· 177 175 CONFIG_IP_SET_LIST_SET=m 178 176 CONFIG_NF_CONNTRACK_IPV4=m 179 177 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 178 + CONFIG_NFT_DUP_IPV4=m 180 179 CONFIG_NF_TABLES_ARP=m 181 180 CONFIG_NF_LOG_ARP=m 182 181 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 205 202 CONFIG_IP_NF_ARP_MANGLE=m 206 203 CONFIG_NF_CONNTRACK_IPV6=m 207 204 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 205 + CONFIG_NFT_DUP_IPV6=m 208 206 CONFIG_NFT_CHAIN_NAT_IPV6=m 209 207 CONFIG_NFT_MASQ_IPV6=m 210 208 CONFIG_NFT_REDIR_IPV6=m ··· 271 267 CONFIG_MPLS=y 272 268 CONFIG_NET_MPLS_GSO=m 273 269 CONFIG_MPLS_ROUTING=m 270 + CONFIG_MPLS_IPTUNNEL=m 274 271 # CONFIG_WIRELESS is not set 275 272 # CONFIG_UEVENT_HELPER is not set 276 273 CONFIG_DEVTMPFS=y ··· 359 354 # CONFIG_NET_VENDOR_SEEQ is not set 360 355 # CONFIG_NET_VENDOR_SMSC is not set 361 356 # CONFIG_NET_VENDOR_STMICRO is not set 357 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 362 358 # CONFIG_NET_VENDOR_VIA is not set 363 359 # CONFIG_NET_VENDOR_WIZNET is not set 364 360 CONFIG_PLIP=m ··· 516 510 CONFIG_TEST_BPF=m 517 511 CONFIG_TEST_FIRMWARE=m 518 512 CONFIG_TEST_UDELAY=m 513 + CONFIG_TEST_STATIC_KEYS=m 519 514 CONFIG_EARLY_PRINTK=y 520 515 CONFIG_ENCRYPTED_KEYS=m 521 516 CONFIG_CRYPTO_RSA=m
+8 -1
arch/m68k/configs/sun3_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 51 50 CONFIG_NET_IPGRE=m 52 51 CONFIG_NET_IPVTI=m 53 52 CONFIG_NET_FOU_IP_TUNNELS=y 54 - CONFIG_GENEVE_CORE=m 55 53 CONFIG_INET_AH=m 56 54 CONFIG_INET_ESP=m 57 55 CONFIG_INET_IPCOMP=m ··· 60 60 # CONFIG_INET_LRO is not set 61 61 CONFIG_INET_DIAG=m 62 62 CONFIG_INET_UDP_DIAG=m 63 + CONFIG_IPV6=m 63 64 CONFIG_IPV6_ROUTER_PREF=y 64 65 CONFIG_INET6_AH=m 65 66 CONFIG_INET6_ESP=m 66 67 CONFIG_INET6_IPCOMP=m 68 + CONFIG_IPV6_ILA=m 67 69 CONFIG_IPV6_VTI=m 68 70 CONFIG_IPV6_GRE=m 69 71 CONFIG_NETFILTER=y ··· 174 172 CONFIG_IP_SET_LIST_SET=m 175 173 CONFIG_NF_CONNTRACK_IPV4=m 176 174 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 175 + CONFIG_NFT_DUP_IPV4=m 177 176 CONFIG_NF_TABLES_ARP=m 178 177 CONFIG_NF_LOG_ARP=m 179 178 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 202 199 CONFIG_IP_NF_ARP_MANGLE=m 203 200 CONFIG_NF_CONNTRACK_IPV6=m 204 201 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 202 + CONFIG_NFT_DUP_IPV6=m 205 203 CONFIG_NFT_CHAIN_NAT_IPV6=m 206 204 CONFIG_NFT_MASQ_IPV6=m 207 205 CONFIG_NFT_REDIR_IPV6=m ··· 268 264 CONFIG_MPLS=y 269 265 CONFIG_NET_MPLS_GSO=m 270 266 CONFIG_MPLS_ROUTING=m 267 + CONFIG_MPLS_IPTUNNEL=m 271 268 # CONFIG_WIRELESS is not set 272 269 # CONFIG_UEVENT_HELPER is not set 273 270 CONFIG_DEVTMPFS=y ··· 346 341 # CONFIG_NET_VENDOR_SEEQ is not set 347 342 # CONFIG_NET_VENDOR_STMICRO is not set 348 343 # CONFIG_NET_VENDOR_SUN is not set 344 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 349 345 # CONFIG_NET_VENDOR_VIA is not set 350 346 # CONFIG_NET_VENDOR_WIZNET is not set 351 347 CONFIG_PPP=m ··· 495 489 CONFIG_TEST_BPF=m 496 490 CONFIG_TEST_FIRMWARE=m 497 491 CONFIG_TEST_UDELAY=m 492 + CONFIG_TEST_STATIC_KEYS=m 498 493 CONFIG_ENCRYPTED_KEYS=m 499 494 CONFIG_CRYPTO_RSA=m 500 495 CONFIG_CRYPTO_MANAGER=y
+8 -1
arch/m68k/configs/sun3x_defconfig
··· 10 10 # CONFIG_PID_NS is not set 11 11 # CONFIG_NET_NS is not set 12 12 CONFIG_BLK_DEV_INITRD=y 13 + CONFIG_USERFAULTFD=y 13 14 CONFIG_SLAB=y 14 15 CONFIG_MODULES=y 15 16 CONFIG_MODULE_UNLOAD=y ··· 51 50 CONFIG_NET_IPGRE=m 52 51 CONFIG_NET_IPVTI=m 53 52 CONFIG_NET_FOU_IP_TUNNELS=y 54 - CONFIG_GENEVE_CORE=m 55 53 CONFIG_INET_AH=m 56 54 CONFIG_INET_ESP=m 57 55 CONFIG_INET_IPCOMP=m ··· 60 60 # CONFIG_INET_LRO is not set 61 61 CONFIG_INET_DIAG=m 62 62 CONFIG_INET_UDP_DIAG=m 63 + CONFIG_IPV6=m 63 64 CONFIG_IPV6_ROUTER_PREF=y 64 65 CONFIG_INET6_AH=m 65 66 CONFIG_INET6_ESP=m 66 67 CONFIG_INET6_IPCOMP=m 68 + CONFIG_IPV6_ILA=m 67 69 CONFIG_IPV6_VTI=m 68 70 CONFIG_IPV6_GRE=m 69 71 CONFIG_NETFILTER=y ··· 174 172 CONFIG_IP_SET_LIST_SET=m 175 173 CONFIG_NF_CONNTRACK_IPV4=m 176 174 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 175 + CONFIG_NFT_DUP_IPV4=m 177 176 CONFIG_NF_TABLES_ARP=m 178 177 CONFIG_NF_LOG_ARP=m 179 178 CONFIG_NFT_CHAIN_NAT_IPV4=m ··· 202 199 CONFIG_IP_NF_ARP_MANGLE=m 203 200 CONFIG_NF_CONNTRACK_IPV6=m 204 201 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 202 + CONFIG_NFT_DUP_IPV6=m 205 203 CONFIG_NFT_CHAIN_NAT_IPV6=m 206 204 CONFIG_NFT_MASQ_IPV6=m 207 205 CONFIG_NFT_REDIR_IPV6=m ··· 268 264 CONFIG_MPLS=y 269 265 CONFIG_NET_MPLS_GSO=m 270 266 CONFIG_MPLS_ROUTING=m 267 + CONFIG_MPLS_IPTUNNEL=m 271 268 # CONFIG_WIRELESS is not set 272 269 # CONFIG_UEVENT_HELPER is not set 273 270 CONFIG_DEVTMPFS=y ··· 346 341 # CONFIG_NET_VENDOR_SAMSUNG is not set 347 342 # CONFIG_NET_VENDOR_SEEQ is not set 348 343 # CONFIG_NET_VENDOR_STMICRO is not set 344 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 349 345 # CONFIG_NET_VENDOR_VIA is not set 350 346 # CONFIG_NET_VENDOR_WIZNET is not set 351 347 CONFIG_PPP=m ··· 495 489 CONFIG_TEST_BPF=m 496 490 CONFIG_TEST_FIRMWARE=m 497 491 CONFIG_TEST_UDELAY=m 492 + CONFIG_TEST_STATIC_KEYS=m 498 493 CONFIG_EARLY_PRINTK=y 499 494 CONFIG_ENCRYPTED_KEYS=m 500 495 CONFIG_CRYPTO_RSA=m
+30
arch/m68k/include/asm/linkage.h
··· 4 4 #define __ALIGN .align 4 5 5 #define __ALIGN_STR ".align 4" 6 6 7 + /* 8 + * Make sure the compiler doesn't do anything stupid with the 9 + * arguments on the stack - they are owned by the *caller*, not 10 + * the callee. This just fools gcc into not spilling into them, 11 + * and keeps it from doing tailcall recursion and/or using the 12 + * stack slots for temporaries, since they are live and "used" 13 + * all the way to the end of the function. 14 + */ 15 + #define asmlinkage_protect(n, ret, args...) \ 16 + __asmlinkage_protect##n(ret, ##args) 17 + #define __asmlinkage_protect_n(ret, args...) \ 18 + __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) 19 + #define __asmlinkage_protect0(ret) \ 20 + __asmlinkage_protect_n(ret) 21 + #define __asmlinkage_protect1(ret, arg1) \ 22 + __asmlinkage_protect_n(ret, "m" (arg1)) 23 + #define __asmlinkage_protect2(ret, arg1, arg2) \ 24 + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) 25 + #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ 26 + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) 27 + #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ 28 + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ 29 + "m" (arg4)) 30 + #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ 31 + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ 32 + "m" (arg4), "m" (arg5)) 33 + #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ 34 + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ 35 + "m" (arg4), "m" (arg5), "m" (arg6)) 36 + 7 37 #endif
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 356 7 + #define NR_syscalls 375 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+19
arch/m68k/include/uapi/asm/unistd.h
··· 361 361 #define __NR_memfd_create 353 362 362 #define __NR_bpf 354 363 363 #define __NR_execveat 355 364 + #define __NR_socket 356 365 + #define __NR_socketpair 357 366 + #define __NR_bind 358 367 + #define __NR_connect 359 368 + #define __NR_listen 360 369 + #define __NR_accept4 361 370 + #define __NR_getsockopt 362 371 + #define __NR_setsockopt 363 372 + #define __NR_getsockname 364 373 + #define __NR_getpeername 365 374 + #define __NR_sendto 366 375 + #define __NR_sendmsg 367 376 + #define __NR_recvfrom 368 377 + #define __NR_recvmsg 369 378 + #define __NR_shutdown 370 379 + #define __NR_recvmmsg 371 380 + #define __NR_sendmmsg 372 381 + #define __NR_userfaultfd 373 382 + #define __NR_membarrier 374 364 383 365 384 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+19 -1
arch/m68k/kernel/syscalltable.S
··· 376 376 .long sys_memfd_create 377 377 .long sys_bpf 378 378 .long sys_execveat /* 355 */ 379 - 379 + .long sys_socket 380 + .long sys_socketpair 381 + .long sys_bind 382 + .long sys_connect 383 + .long sys_listen /* 360 */ 384 + .long sys_accept4 385 + .long sys_getsockopt 386 + .long sys_setsockopt 387 + .long sys_getsockname 388 + .long sys_getpeername /* 365 */ 389 + .long sys_sendto 390 + .long sys_sendmsg 391 + .long sys_recvfrom 392 + .long sys_recvmsg 393 + .long sys_shutdown /* 370 */ 394 + .long sys_recvmmsg 395 + .long sys_sendmmsg 396 + .long sys_userfaultfd 397 + .long sys_membarrier
+1
arch/metag/include/asm/Kbuild
··· 54 54 generic-y += unaligned.h 55 55 generic-y += user.h 56 56 generic-y += vga.h 57 + generic-y += word-at-a-time.h 57 58 generic-y += xor.h
+1
arch/microblaze/include/asm/Kbuild
··· 10 10 generic-y += preempt.h 11 11 generic-y += syscalls.h 12 12 generic-y += trace_clock.h 13 + generic-y += word-at-a-time.h
+20 -2
arch/mips/ath79/irq.c
··· 293 293 294 294 return 0; 295 295 } 296 - IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", 297 - ath79_misc_intc_of_init); 296 + 297 + static int __init ar7100_misc_intc_of_init( 298 + struct device_node *node, struct device_node *parent) 299 + { 300 + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; 301 + return ath79_misc_intc_of_init(node, parent); 302 + } 303 + 304 + IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", 305 + ar7100_misc_intc_of_init); 306 + 307 + static int __init ar7240_misc_intc_of_init( 308 + struct device_node *node, struct device_node *parent) 309 + { 310 + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; 311 + return ath79_misc_intc_of_init(node, parent); 312 + } 313 + 314 + IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", 315 + ar7240_misc_intc_of_init); 298 316 299 317 static int __init ar79_cpu_intc_of_init( 300 318 struct device_node *node, struct device_node *parent)
+1 -1
arch/mips/cavium-octeon/setup.c
··· 933 933 while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) 934 934 && (total < MAX_MEMORY)) { 935 935 memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 936 - __pa_symbol(&__init_end), -1, 936 + __pa_symbol(&_end), -1, 937 937 0x100000, 938 938 CVMX_BOOTMEM_FLAG_NO_LOCKING); 939 939 if (memory >= 0) {
+1
arch/mips/include/asm/Kbuild
··· 17 17 generic-y += serial.h 18 18 generic-y += trace_clock.h 19 19 generic-y += user.h 20 + generic-y += word-at-a-time.h 20 21 generic-y += xor.h
+3
arch/mips/include/asm/cpu-features.h
··· 20 20 #ifndef cpu_has_tlb 21 21 #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) 22 22 #endif 23 + #ifndef cpu_has_ftlb 24 + #define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) 25 + #endif 23 26 #ifndef cpu_has_tlbinv 24 27 #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) 25 28 #endif
+1
arch/mips/include/asm/cpu.h
··· 385 385 #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ 386 386 #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ 387 387 #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ 388 + #define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ 388 389 389 390 /* 390 391 * CPU ASE encodings
+9
arch/mips/include/asm/maar.h
··· 66 66 } 67 67 68 68 /** 69 + * maar_init() - initialise MAARs 70 + * 71 + * Performs initialisation of MAARs for the current CPU, making use of the 72 + * platforms implementation of platform_maar_init where necessary and 73 + * duplicating the setup it provides on secondary CPUs. 74 + */ 75 + extern void maar_init(void); 76 + 77 + /** 69 78 * struct maar_config - MAAR configuration data 70 79 * @lower: The lowest address that the MAAR pair will affect. Must be 71 80 * aligned to a 2^16 byte boundary.
+39
arch/mips/include/asm/mips-cm.h
··· 194 194 BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) 195 195 BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) 196 196 BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) 197 + BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) 197 198 198 199 /* Core Local & Core Other register accessor functions */ 199 200 BUILD_CM_Cx_RW(reset_release, 0x00) ··· 317 316 #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 318 317 #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) 319 318 319 + /* GCR_SYS_CONFIG2 register fields */ 320 + #define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 321 + #define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) 322 + 320 323 /* GCR_Cx_COHERENCE register fields */ 321 324 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 322 325 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) ··· 408 403 return 0; 409 404 410 405 return read_gcr_rev(); 406 + } 407 + 408 + /** 409 + * mips_cm_max_vp_width() - return the width in bits of VP indices 410 + * 411 + * Return: the width, in bits, of VP indices in fields that combine core & VP 412 + * indices. 413 + */ 414 + static inline unsigned int mips_cm_max_vp_width(void) 415 + { 416 + extern int smp_num_siblings; 417 + 418 + if (mips_cm_revision() >= CM_REV_CM3) 419 + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; 420 + 421 + return smp_num_siblings; 422 + } 423 + 424 + /** 425 + * mips_cm_vp_id() - calculate the hardware VP ID for a CPU 426 + * @cpu: the CPU whose VP ID to calculate 427 + * 428 + * Hardware such as the GIC uses identifiers for VPs which may not match the 429 + * CPU numbers used by Linux. This function calculates the hardware VP 430 + * identifier corresponding to a given CPU. 431 + * 432 + * Return: the VP ID for the CPU. 
433 + */ 434 + static inline unsigned int mips_cm_vp_id(unsigned int cpu) 435 + { 436 + unsigned int core = cpu_data[cpu].core; 437 + unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); 438 + 439 + return (core * mips_cm_max_vp_width()) + vp; 411 440 } 412 441 413 442 #endif /* __MIPS_ASM_MIPS_CM_H__ */
+2
arch/mips/include/asm/mipsregs.h
··· 487 487 488 488 /* Bits specific to the MIPS32/64 PRA. */ 489 489 #define MIPS_CONF_MT (_ULCAST_(7) << 7) 490 + #define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) 491 + #define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) 490 492 #define MIPS_CONF_AR (_ULCAST_(7) << 10) 491 493 #define MIPS_CONF_AT (_ULCAST_(3) << 13) 492 494 #define MIPS_CONF_M (_ULCAST_(1) << 31)
+12 -6
arch/mips/include/uapi/asm/unistd.h
··· 377 377 #define __NR_memfd_create (__NR_Linux + 354) 378 378 #define __NR_bpf (__NR_Linux + 355) 379 379 #define __NR_execveat (__NR_Linux + 356) 380 + #define __NR_userfaultfd (__NR_Linux + 357) 381 + #define __NR_membarrier (__NR_Linux + 358) 380 382 381 383 /* 382 384 * Offset of the last Linux o32 flavoured syscall 383 385 */ 384 - #define __NR_Linux_syscalls 356 386 + #define __NR_Linux_syscalls 358 385 387 386 388 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 387 389 388 390 #define __NR_O32_Linux 4000 389 - #define __NR_O32_Linux_syscalls 356 391 + #define __NR_O32_Linux_syscalls 358 390 392 391 393 #if _MIPS_SIM == _MIPS_SIM_ABI64 392 394 ··· 713 711 #define __NR_memfd_create (__NR_Linux + 314) 714 712 #define __NR_bpf (__NR_Linux + 315) 715 713 #define __NR_execveat (__NR_Linux + 316) 714 + #define __NR_userfaultfd (__NR_Linux + 317) 715 + #define __NR_membarrier (__NR_Linux + 318) 716 716 717 717 /* 718 718 * Offset of the last Linux 64-bit flavoured syscall 719 719 */ 720 - #define __NR_Linux_syscalls 316 720 + #define __NR_Linux_syscalls 318 721 721 722 722 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 723 723 724 724 #define __NR_64_Linux 5000 725 - #define __NR_64_Linux_syscalls 316 725 + #define __NR_64_Linux_syscalls 318 726 726 727 727 #if _MIPS_SIM == _MIPS_SIM_NABI32 728 728 ··· 1053 1049 #define __NR_memfd_create (__NR_Linux + 318) 1054 1050 #define __NR_bpf (__NR_Linux + 319) 1055 1051 #define __NR_execveat (__NR_Linux + 320) 1052 + #define __NR_userfaultfd (__NR_Linux + 321) 1053 + #define __NR_membarrier (__NR_Linux + 322) 1056 1054 1057 1055 /* 1058 1056 * Offset of the last N32 flavoured syscall 1059 1057 */ 1060 - #define __NR_Linux_syscalls 320 1058 + #define __NR_Linux_syscalls 322 1061 1059 1062 1060 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1063 1061 1064 1062 #define __NR_N32_Linux 6000 1065 - #define __NR_N32_Linux_syscalls 320 1063 + #define __NR_N32_Linux_syscalls 322 1066 1064 1067 1065 #endif /* _UAPI_ASM_UNISTD_H */
+1
arch/mips/jz4740/board-qi_lb60.c
··· 26 26 #include <linux/power/jz4740-battery.h> 27 27 #include <linux/power/gpio-charger.h> 28 28 29 + #include <asm/mach-jz4740/gpio.h> 29 30 #include <asm/mach-jz4740/jz4740_fb.h> 30 31 #include <asm/mach-jz4740/jz4740_mmc.h> 31 32 #include <asm/mach-jz4740/jz4740_nand.h>
+1
arch/mips/jz4740/gpio.c
··· 28 28 #include <linux/seq_file.h> 29 29 30 30 #include <asm/mach-jz4740/base.h> 31 + #include <asm/mach-jz4740/gpio.h> 31 32 32 33 #define JZ4740_GPIO_BASE_A (32*0) 33 34 #define JZ4740_GPIO_BASE_B (32*1)
+7 -5
arch/mips/kernel/cps-vec.S
··· 39 39 mfc0 \dest, CP0_CONFIG, 3 40 40 andi \dest, \dest, MIPS_CONF3_MT 41 41 beqz \dest, \nomt 42 + nop 42 43 .endm 43 44 44 45 .section .text.cps-vec ··· 224 223 END(excep_ejtag) 225 224 226 225 LEAF(mips_cps_core_init) 227 - #ifdef CONFIG_MIPS_MT 226 + #ifdef CONFIG_MIPS_MT_SMP 228 227 /* Check that the core implements the MT ASE */ 229 228 has_mt t0, 3f 230 - nop 231 229 232 230 .set push 233 231 .set mips64r2 ··· 310 310 PTR_ADDU t0, t0, t1 311 311 312 312 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ 313 + li t9, 0 314 + #ifdef CONFIG_MIPS_MT_SMP 313 315 has_mt ta2, 1f 314 - li t9, 0 315 316 316 317 /* Find the number of VPEs present in the core */ 317 318 mfc0 t1, CP0_MVPCONF0 ··· 331 330 /* Retrieve the VPE ID from EBase.CPUNum */ 332 331 mfc0 t9, $15, 1 333 332 and t9, t9, t1 333 + #endif 334 334 335 335 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ 336 336 li t1, VPEBOOTCFG_SIZE ··· 339 337 PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) 340 338 PTR_ADDU v0, v0, ta3 341 339 342 - #ifdef CONFIG_MIPS_MT 340 + #ifdef CONFIG_MIPS_MT_SMP 343 341 344 342 /* If the core doesn't support MT then return */ 345 343 bnez ta2, 1f ··· 453 451 454 452 2: .set pop 455 453 456 - #endif /* CONFIG_MIPS_MT */ 454 + #endif /* CONFIG_MIPS_MT_SMP */ 457 455 458 456 /* Return */ 459 457 jr ra
+13 -8
arch/mips/kernel/cpu-probe.c
··· 410 410 static inline unsigned int decode_config0(struct cpuinfo_mips *c) 411 411 { 412 412 unsigned int config0; 413 - int isa; 413 + int isa, mt; 414 414 415 415 config0 = read_c0_config(); 416 416 417 417 /* 418 418 * Look for Standard TLB or Dual VTLB and FTLB 419 419 */ 420 - if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || 421 - (((config0 & MIPS_CONF_MT) >> 7) == 4)) 420 + mt = config0 & MIPS_CONF_MT; 421 + if (mt == MIPS_CONF_MT_TLB) 422 422 c->options |= MIPS_CPU_TLB; 423 + else if (mt == MIPS_CONF_MT_FTLB) 424 + c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; 423 425 424 426 isa = (config0 & MIPS_CONF_AT) >> 13; 425 427 switch (isa) { ··· 561 559 if (cpu_has_tlb) { 562 560 if (((config4 & MIPS_CONF4_IE) >> 29) == 2) 563 561 c->options |= MIPS_CPU_TLBINV; 562 + 564 563 /* 565 - * This is a bit ugly. R6 has dropped that field from 566 - * config4 and the only valid configuration is VTLB+FTLB so 567 - * set a good value for mmuextdef for that case. 564 + * R6 has dropped the MMUExtDef field from config4. 565 + * On R6 the fields always describe the FTLB, and only if it is 566 + * present according to Config.MT. 568 567 */ 569 - if (cpu_has_mips_r6) 568 + if (!cpu_has_mips_r6) 569 + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; 570 + else if (cpu_has_ftlb) 570 571 mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; 571 572 else 572 - mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; 573 + mmuextdef = 0; 573 574 574 575 switch (mmuextdef) { 575 576 case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+1 -25
arch/mips/kernel/octeon_switch.S
··· 18 18 .set pop 19 19 /* 20 20 * task_struct *resume(task_struct *prev, task_struct *next, 21 - * struct thread_info *next_ti, int usedfpu) 21 + * struct thread_info *next_ti) 22 22 */ 23 23 .align 7 24 24 LEAF(resume) ··· 27 27 LONG_S t1, THREAD_STATUS(a0) 28 28 cpu_save_nonscratch a0 29 29 LONG_S ra, THREAD_REG31(a0) 30 - 31 - /* 32 - * check if we need to save FPU registers 33 - */ 34 - .set push 35 - .set noreorder 36 - beqz a3, 1f 37 - PTR_L t3, TASK_THREAD_INFO(a0) 38 - .set pop 39 - 40 - /* 41 - * clear saved user stack CU1 bit 42 - */ 43 - LONG_L t0, ST_OFF(t3) 44 - li t1, ~ST0_CU1 45 - and t0, t0, t1 46 - LONG_S t0, ST_OFF(t3) 47 - 48 - .set push 49 - .set arch=mips64r2 50 - fpu_save_double a0 t0 t1 # c0_status passed in t0 51 - # clobbers t1 52 - .set pop 53 - 1: 54 30 55 31 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 56 32 /* Check if we need to store CVMSEG state */
+1 -27
arch/mips/kernel/r2300_switch.S
··· 31 31 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 32 32 33 33 /* 34 - * FPU context is saved iff the process has used it's FPU in the current 35 - * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user 36 - * space STATUS register should be 0, so that a process *always* starts its 37 - * userland with FPU disabled after each context switch. 38 - * 39 - * FPU will be enabled as soon as the process accesses FPU again, through 40 - * do_cpu() trap. 41 - */ 42 - 43 - /* 44 34 * task_struct *resume(task_struct *prev, task_struct *next, 45 - * struct thread_info *next_ti, int usedfpu) 35 + * struct thread_info *next_ti) 46 36 */ 47 37 LEAF(resume) 48 38 mfc0 t1, CP0_STATUS 49 39 sw t1, THREAD_STATUS(a0) 50 40 cpu_save_nonscratch a0 51 41 sw ra, THREAD_REG31(a0) 52 - 53 - beqz a3, 1f 54 - 55 - PTR_L t3, TASK_THREAD_INFO(a0) 56 - 57 - /* 58 - * clear saved user stack CU1 bit 59 - */ 60 - lw t0, ST_OFF(t3) 61 - li t1, ~ST0_CU1 62 - and t0, t0, t1 63 - sw t0, ST_OFF(t3) 64 - 65 - fpu_save_single a0, t0 # clobbers t0 66 - 67 - 1: 68 42 69 43 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 70 44 PTR_LA t8, __stack_chk_guard
+18 -23
arch/mips/kernel/scall32-o32.S
··· 36 36 lw t1, PT_EPC(sp) # skip syscall on return 37 37 38 38 subu v0, v0, __NR_O32_Linux # check syscall number 39 - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 40 39 addiu t1, 4 # skip to next instruction 41 40 sw t1, PT_EPC(sp) 42 - beqz t0, illegal_syscall 43 - 44 - sll t0, v0, 2 45 - la t1, sys_call_table 46 - addu t1, t0 47 - lw t2, (t1) # syscall routine 48 - beqz t2, illegal_syscall 49 41 50 42 sw a3, PT_R26(sp) # save a3 for syscall restarting 51 43 ··· 88 96 li t1, _TIF_WORK_SYSCALL_ENTRY 89 97 and t0, t1 90 98 bnez t0, syscall_trace_entry # -> yes 99 + syscall_common: 100 + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 101 + beqz t0, illegal_syscall 102 + 103 + sll t0, v0, 2 104 + la t1, sys_call_table 105 + addu t1, t0 106 + lw t2, (t1) # syscall routine 107 + 108 + beqz t2, illegal_syscall 91 109 92 110 jalr t2 # Do The Real Thing (TM) 93 111 ··· 118 116 119 117 syscall_trace_entry: 120 118 SAVE_STATIC 121 - move s0, t2 119 + move s0, v0 122 120 move a0, sp 123 121 124 122 /* ··· 131 129 132 130 1: jal syscall_trace_enter 133 131 134 - bltz v0, 2f # seccomp failed? Skip syscall 132 + bltz v0, 1f # seccomp failed? Skip syscall 135 133 136 - move t0, s0 134 + move v0, s0 # restore syscall 135 + 137 136 RESTORE_STATIC 138 137 lw a0, PT_R4(sp) # Restore argument registers 139 138 lw a1, PT_R5(sp) 140 139 lw a2, PT_R6(sp) 141 140 lw a3, PT_R7(sp) 142 - jalr t0 141 + j syscall_common 143 142 144 - li t0, -EMAXERRNO - 1 # error? 145 - sltu t0, t0, v0 146 - sw t0, PT_R7(sp) # set error flag 147 - beqz t0, 1f 148 - 149 - lw t1, PT_R2(sp) # syscall number 150 - negu v0 # error 151 - sw t1, PT_R0(sp) # save it for syscall restarting 152 - 1: sw v0, PT_R2(sp) # result 153 - 154 - 2: j syscall_exit 143 + 1: j syscall_exit 155 144 156 145 /* ------------------------------------------------------------------------ */ 157 146 ··· 592 599 PTR sys_memfd_create 593 600 PTR sys_bpf /* 4355 */ 594 601 PTR sys_execveat 602 + PTR sys_userfaultfd 603 + PTR sys_membarrier
+18 -22
arch/mips/kernel/scall64-64.S
··· 39 39 .set at 40 40 #endif 41 41 42 - dsubu t0, v0, __NR_64_Linux # check syscall number 43 - sltiu t0, t0, __NR_64_Linux_syscalls + 1 44 42 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) 45 43 ld t1, PT_EPC(sp) # skip syscall on return 46 44 daddiu t1, 4 # skip to next instruction 47 45 sd t1, PT_EPC(sp) 48 46 #endif 49 - beqz t0, illegal_syscall 50 - 51 - dsll t0, v0, 3 # offset into table 52 - ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) 53 - # syscall routine 54 47 55 48 sd a3, PT_R26(sp) # save a3 for syscall restarting 56 49 ··· 51 58 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 52 59 and t0, t1, t0 53 60 bnez t0, syscall_trace_entry 61 + 62 + syscall_common: 63 + dsubu t2, v0, __NR_64_Linux 64 + sltiu t0, t2, __NR_64_Linux_syscalls + 1 65 + beqz t0, illegal_syscall 66 + 67 + dsll t0, t2, 3 # offset into table 68 + dla t2, sys_call_table 69 + daddu t0, t2, t0 70 + ld t2, (t0) # syscall routine 71 + beqz t2, illegal_syscall 54 72 55 73 jalr t2 # Do The Real Thing (TM) 56 74 ··· 82 78 83 79 syscall_trace_entry: 84 80 SAVE_STATIC 85 - move s0, t2 81 + move s0, v0 86 82 move a0, sp 87 83 move a1, v0 88 84 jal syscall_trace_enter 89 85 90 - bltz v0, 2f # seccomp failed? Skip syscall 86 + bltz v0, 1f # seccomp failed? Skip syscall 91 87 92 - move t0, s0 88 + move v0, s0 93 89 RESTORE_STATIC 94 90 ld a0, PT_R4(sp) # Restore argument registers 95 91 ld a1, PT_R5(sp) ··· 97 93 ld a3, PT_R7(sp) 98 94 ld a4, PT_R8(sp) 99 95 ld a5, PT_R9(sp) 100 - jalr t0 96 + j syscall_common 101 97 102 - li t0, -EMAXERRNO - 1 # error? 103 - sltu t0, t0, v0 104 - sd t0, PT_R7(sp) # set error flag 105 - beqz t0, 1f 106 - 107 - ld t1, PT_R2(sp) # syscall number 108 - dnegu v0 # error 109 - sd t1, PT_R0(sp) # save it for syscall restarting 110 - 1: sd v0, PT_R2(sp) # result 111 - 112 - 2: j syscall_exit 98 + 1: j syscall_exit 113 99 114 100 illegal_syscall: 115 101 /* This also isn't a 64-bit syscall, throw an error. 
*/ ··· 430 436 PTR sys_memfd_create 431 437 PTR sys_bpf /* 5315 */ 432 438 PTR sys_execveat 439 + PTR sys_userfaultfd 440 + PTR sys_membarrier 433 441 .size sys_call_table,.-sys_call_table
+7 -14
arch/mips/kernel/scall64-n32.S
··· 52 52 and t0, t1, t0 53 53 bnez t0, n32_syscall_trace_entry 54 54 55 + syscall_common: 55 56 jalr t2 # Do The Real Thing (TM) 56 57 57 58 li t0, -EMAXERRNO - 1 # error? ··· 76 75 move a1, v0 77 76 jal syscall_trace_enter 78 77 79 - bltz v0, 2f # seccomp failed? Skip syscall 78 + bltz v0, 1f # seccomp failed? Skip syscall 80 79 81 - move t0, s0 80 + move t2, s0 82 81 RESTORE_STATIC 83 82 ld a0, PT_R4(sp) # Restore argument registers 84 83 ld a1, PT_R5(sp) ··· 86 85 ld a3, PT_R7(sp) 87 86 ld a4, PT_R8(sp) 88 87 ld a5, PT_R9(sp) 89 - jalr t0 88 + j syscall_common 90 89 91 - li t0, -EMAXERRNO - 1 # error? 92 - sltu t0, t0, v0 93 - sd t0, PT_R7(sp) # set error flag 94 - beqz t0, 1f 95 - 96 - ld t1, PT_R2(sp) # syscall number 97 - dnegu v0 # error 98 - sd t1, PT_R0(sp) # save it for syscall restarting 99 - 1: sd v0, PT_R2(sp) # result 100 - 101 - 2: j syscall_exit 90 + 1: j syscall_exit 102 91 103 92 not_n32_scall: 104 93 /* This is not an n32 compatibility syscall, pass it on to ··· 420 429 PTR sys_memfd_create 421 430 PTR sys_bpf 422 431 PTR compat_sys_execveat /* 6320 */ 432 + PTR sys_userfaultfd 433 + PTR sys_membarrier 423 434 .size sysn32_call_table,.-sysn32_call_table
+7 -14
arch/mips/kernel/scall64-o32.S
··· 87 87 and t0, t1, t0 88 88 bnez t0, trace_a_syscall 89 89 90 + syscall_common: 90 91 jalr t2 # Do The Real Thing (TM) 91 92 92 93 li t0, -EMAXERRNO - 1 # error? ··· 131 130 132 131 1: jal syscall_trace_enter 133 132 134 - bltz v0, 2f # seccomp failed? Skip syscall 133 + bltz v0, 1f # seccomp failed? Skip syscall 135 134 136 - move t0, s0 135 + move t2, s0 137 136 RESTORE_STATIC 138 137 ld a0, PT_R4(sp) # Restore argument registers 139 138 ld a1, PT_R5(sp) ··· 143 142 ld a5, PT_R9(sp) 144 143 ld a6, PT_R10(sp) 145 144 ld a7, PT_R11(sp) # For indirect syscalls 146 - jalr t0 145 + j syscall_common 147 146 148 - li t0, -EMAXERRNO - 1 # error? 149 - sltu t0, t0, v0 150 - sd t0, PT_R7(sp) # set error flag 151 - beqz t0, 1f 152 - 153 - ld t1, PT_R2(sp) # syscall number 154 - dnegu v0 # error 155 - sd t1, PT_R0(sp) # save it for syscall restarting 156 - 1: sd v0, PT_R2(sp) # result 157 - 158 - 2: j syscall_exit 147 + 1: j syscall_exit 159 148 160 149 /* ------------------------------------------------------------------------ */ 161 150 ··· 575 584 PTR sys_memfd_create 576 585 PTR sys_bpf /* 4355 */ 577 586 PTR compat_sys_execveat 587 + PTR sys_userfaultfd 588 + PTR sys_membarrier 578 589 .size sys32_call_table,.-sys32_call_table
+9 -1
arch/mips/kernel/setup.c
··· 338 338 if (end <= reserved_end) 339 339 continue; 340 340 #ifdef CONFIG_BLK_DEV_INITRD 341 - /* mapstart should be after initrd_end */ 341 + /* Skip zones before initrd and initrd itself */ 342 342 if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) 343 343 continue; 344 344 #endif ··· 370 370 #endif 371 371 max_low_pfn = PFN_DOWN(HIGHMEM_START); 372 372 } 373 + 374 + #ifdef CONFIG_BLK_DEV_INITRD 375 + /* 376 + * mapstart should be after initrd_end 377 + */ 378 + if (initrd_end) 379 + mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); 380 + #endif 373 381 374 382 /* 375 383 * Initialize the boot-time allocator with low memory only.
+2
arch/mips/kernel/smp.c
··· 42 42 #include <asm/mmu_context.h> 43 43 #include <asm/time.h> 44 44 #include <asm/setup.h> 45 + #include <asm/maar.h> 45 46 46 47 cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ 47 48 ··· 158 157 mips_clockevent_init(); 159 158 mp_ops->init_secondary(); 160 159 cpu_report(); 160 + maar_init(); 161 161 162 162 /* 163 163 * XXX parity protection should be folded in here when it's converted
+3
arch/mips/loongson64/common/env.c
··· 64 64 } 65 65 if (memsize == 0) 66 66 memsize = 256; 67 + 68 + loongson_sysconf.nr_uarts = 1; 69 + 67 70 pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); 68 71 #else 69 72 struct boot_params *boot_p;
+1 -1
arch/mips/mm/dma-default.c
··· 100 100 else 101 101 #endif 102 102 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) 103 - if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) 103 + if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) 104 104 dma_flag = __GFP_DMA; 105 105 else 106 106 #endif
+114 -63
arch/mips/mm/init.c
··· 44 44 #include <asm/pgalloc.h> 45 45 #include <asm/tlb.h> 46 46 #include <asm/fixmap.h> 47 + #include <asm/maar.h> 47 48 48 49 /* 49 50 * We have up to 8 empty zeroed pages so we can map one of the right colour ··· 253 252 #endif 254 253 } 255 254 255 + unsigned __weak platform_maar_init(unsigned num_pairs) 256 + { 257 + struct maar_config cfg[BOOT_MEM_MAP_MAX]; 258 + unsigned i, num_configured, num_cfg = 0; 259 + phys_addr_t skip; 260 + 261 + for (i = 0; i < boot_mem_map.nr_map; i++) { 262 + switch (boot_mem_map.map[i].type) { 263 + case BOOT_MEM_RAM: 264 + case BOOT_MEM_INIT_RAM: 265 + break; 266 + default: 267 + continue; 268 + } 269 + 270 + skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); 271 + 272 + cfg[num_cfg].lower = boot_mem_map.map[i].addr; 273 + cfg[num_cfg].lower += skip; 274 + 275 + cfg[num_cfg].upper = cfg[num_cfg].lower; 276 + cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; 277 + cfg[num_cfg].upper -= skip; 278 + 279 + cfg[num_cfg].attrs = MIPS_MAAR_S; 280 + num_cfg++; 281 + } 282 + 283 + num_configured = maar_config(cfg, num_cfg, num_pairs); 284 + if (num_configured < num_cfg) 285 + pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", 286 + num_pairs, num_cfg); 287 + 288 + return num_configured; 289 + } 290 + 291 + void maar_init(void) 292 + { 293 + unsigned num_maars, used, i; 294 + phys_addr_t lower, upper, attr; 295 + static struct { 296 + struct maar_config cfgs[3]; 297 + unsigned used; 298 + } recorded = { { { 0 } }, 0 }; 299 + 300 + if (!cpu_has_maar) 301 + return; 302 + 303 + /* Detect the number of MAARs */ 304 + write_c0_maari(~0); 305 + back_to_back_c0_hazard(); 306 + num_maars = read_c0_maari() + 1; 307 + 308 + /* MAARs should be in pairs */ 309 + WARN_ON(num_maars % 2); 310 + 311 + /* Set MAARs using values we recorded already */ 312 + if (recorded.used) { 313 + used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); 314 + BUG_ON(used != recorded.used); 315 + } else { 316 + /* Configure the required 
MAARs */ 317 + used = platform_maar_init(num_maars / 2); 318 + } 319 + 320 + /* Disable any further MAARs */ 321 + for (i = (used * 2); i < num_maars; i++) { 322 + write_c0_maari(i); 323 + back_to_back_c0_hazard(); 324 + write_c0_maar(0); 325 + back_to_back_c0_hazard(); 326 + } 327 + 328 + if (recorded.used) 329 + return; 330 + 331 + pr_info("MAAR configuration:\n"); 332 + for (i = 0; i < num_maars; i += 2) { 333 + write_c0_maari(i); 334 + back_to_back_c0_hazard(); 335 + upper = read_c0_maar(); 336 + 337 + write_c0_maari(i + 1); 338 + back_to_back_c0_hazard(); 339 + lower = read_c0_maar(); 340 + 341 + attr = lower & upper; 342 + lower = (lower & MIPS_MAAR_ADDR) << 4; 343 + upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; 344 + 345 + pr_info(" [%d]: ", i / 2); 346 + if (!(attr & MIPS_MAAR_V)) { 347 + pr_cont("disabled\n"); 348 + continue; 349 + } 350 + 351 + pr_cont("%pa-%pa", &lower, &upper); 352 + 353 + if (attr & MIPS_MAAR_S) 354 + pr_cont(" speculate"); 355 + 356 + pr_cont("\n"); 357 + 358 + /* Record the setup for use on secondary CPUs */ 359 + if (used <= ARRAY_SIZE(recorded.cfgs)) { 360 + recorded.cfgs[recorded.used].lower = lower; 361 + recorded.cfgs[recorded.used].upper = upper; 362 + recorded.cfgs[recorded.used].attrs = attr; 363 + recorded.used++; 364 + } 365 + } 366 + } 367 + 256 368 #ifndef CONFIG_NEED_MULTIPLE_NODES 257 369 int page_is_ram(unsigned long pagenr) 258 370 { ··· 446 332 free_highmem_page(page); 447 333 } 448 334 #endif 449 - } 450 - 451 - unsigned __weak platform_maar_init(unsigned num_pairs) 452 - { 453 - struct maar_config cfg[BOOT_MEM_MAP_MAX]; 454 - unsigned i, num_configured, num_cfg = 0; 455 - phys_addr_t skip; 456 - 457 - for (i = 0; i < boot_mem_map.nr_map; i++) { 458 - switch (boot_mem_map.map[i].type) { 459 - case BOOT_MEM_RAM: 460 - case BOOT_MEM_INIT_RAM: 461 - break; 462 - default: 463 - continue; 464 - } 465 - 466 - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); 467 - 468 - cfg[num_cfg].lower = 
boot_mem_map.map[i].addr; 469 - cfg[num_cfg].lower += skip; 470 - 471 - cfg[num_cfg].upper = cfg[num_cfg].lower; 472 - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; 473 - cfg[num_cfg].upper -= skip; 474 - 475 - cfg[num_cfg].attrs = MIPS_MAAR_S; 476 - num_cfg++; 477 - } 478 - 479 - num_configured = maar_config(cfg, num_cfg, num_pairs); 480 - if (num_configured < num_cfg) 481 - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", 482 - num_pairs, num_cfg); 483 - 484 - return num_configured; 485 - } 486 - 487 - static void maar_init(void) 488 - { 489 - unsigned num_maars, used, i; 490 - 491 - if (!cpu_has_maar) 492 - return; 493 - 494 - /* Detect the number of MAARs */ 495 - write_c0_maari(~0); 496 - back_to_back_c0_hazard(); 497 - num_maars = read_c0_maari() + 1; 498 - 499 - /* MAARs should be in pairs */ 500 - WARN_ON(num_maars % 2); 501 - 502 - /* Configure the required MAARs */ 503 - used = platform_maar_init(num_maars / 2); 504 - 505 - /* Disable any further MAARs */ 506 - for (i = (used * 2); i < num_maars; i++) { 507 - write_c0_maari(i); 508 - back_to_back_c0_hazard(); 509 - write_c0_maar(0); 510 - back_to_back_c0_hazard(); 511 - } 512 335 } 513 336 514 337 void __init mem_init(void)
+54 -9
arch/mips/net/bpf_jit_asm.S
··· 57 57 58 58 LEAF(sk_load_word) 59 59 is_offset_negative(word) 60 - .globl sk_load_word_positive 61 - sk_load_word_positive: 60 + FEXPORT(sk_load_word_positive) 62 61 is_offset_in_header(4, word) 63 62 /* Offset within header boundaries */ 64 63 PTR_ADDU t1, $r_skb_data, offset 64 + .set reorder 65 65 lw $r_A, 0(t1) 66 + .set noreorder 66 67 #ifdef CONFIG_CPU_LITTLE_ENDIAN 68 + # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) 67 69 wsbh t0, $r_A 68 70 rotr $r_A, t0, 16 71 + # else 72 + sll t0, $r_A, 24 73 + srl t1, $r_A, 24 74 + srl t2, $r_A, 8 75 + or t0, t0, t1 76 + andi t2, t2, 0xff00 77 + andi t1, $r_A, 0xff00 78 + or t0, t0, t2 79 + sll t1, t1, 8 80 + or $r_A, t0, t1 81 + # endif 69 82 #endif 70 83 jr $r_ra 71 84 move $r_ret, zero ··· 86 73 87 74 LEAF(sk_load_half) 88 75 is_offset_negative(half) 89 - .globl sk_load_half_positive 90 - sk_load_half_positive: 76 + FEXPORT(sk_load_half_positive) 91 77 is_offset_in_header(2, half) 92 78 /* Offset within header boundaries */ 93 79 PTR_ADDU t1, $r_skb_data, offset 80 + .set reorder 94 81 lh $r_A, 0(t1) 82 + .set noreorder 95 83 #ifdef CONFIG_CPU_LITTLE_ENDIAN 84 + # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) 96 85 wsbh t0, $r_A 97 86 seh $r_A, t0 87 + # else 88 + sll t0, $r_A, 24 89 + andi t1, $r_A, 0xff00 90 + sra t0, t0, 16 91 + srl t1, t1, 8 92 + or $r_A, t0, t1 93 + # endif 98 94 #endif 99 95 jr $r_ra 100 96 move $r_ret, zero ··· 111 89 112 90 LEAF(sk_load_byte) 113 91 is_offset_negative(byte) 114 - .globl sk_load_byte_positive 115 - sk_load_byte_positive: 92 + FEXPORT(sk_load_byte_positive) 116 93 is_offset_in_header(1, byte) 117 94 /* Offset within header boundaries */ 118 95 PTR_ADDU t1, $r_skb_data, offset ··· 169 148 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) 170 149 bpf_slow_path_common(4) 171 150 #ifdef CONFIG_CPU_LITTLE_ENDIAN 151 + # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) 172 152 wsbh t0, $r_s0 173 153 jr $r_ra 174 154 rotr $r_A, t0, 16 175 - #endif 155 + # else 156 + 
sll t0, $r_s0, 24 157 + srl t1, $r_s0, 24 158 + srl t2, $r_s0, 8 159 + or t0, t0, t1 160 + andi t2, t2, 0xff00 161 + andi t1, $r_s0, 0xff00 162 + or t0, t0, t2 163 + sll t1, t1, 8 176 164 jr $r_ra 177 - move $r_A, $r_s0 165 + or $r_A, t0, t1 166 + # endif 167 + #else 168 + jr $r_ra 169 + move $r_A, $r_s0 170 + #endif 178 171 179 172 END(bpf_slow_path_word) 180 173 181 174 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) 182 175 bpf_slow_path_common(2) 183 176 #ifdef CONFIG_CPU_LITTLE_ENDIAN 177 + # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) 184 178 jr $r_ra 185 179 wsbh $r_A, $r_s0 186 - #endif 180 + # else 181 + sll t0, $r_s0, 8 182 + andi t1, $r_s0, 0xff00 183 + andi t0, t0, 0xff00 184 + srl t1, t1, 8 185 + jr $r_ra 186 + or $r_A, t0, t1 187 + # endif 188 + #else 187 189 jr $r_ra 188 190 move $r_A, $r_s0 191 + #endif 189 192 190 193 END(bpf_slow_path_half) 191 194
+1
arch/mn10300/include/asm/Kbuild
··· 9 9 generic-y += preempt.h 10 10 generic-y += sections.h 11 11 generic-y += trace_clock.h 12 + generic-y += word-at-a-time.h
+1
arch/nios2/include/asm/Kbuild
··· 61 61 generic-y += unaligned.h 62 62 generic-y += user.h 63 63 generic-y += vga.h 64 + generic-y += word-at-a-time.h 64 65 generic-y += xor.h
+1
arch/powerpc/include/asm/Kbuild
··· 7 7 generic-y += preempt.h 8 8 generic-y += rwsem.h 9 9 generic-y += vtime.h 10 + generic-y += word-at-a-time.h
+1
arch/s390/include/asm/Kbuild
··· 6 6 generic-y += mm-arch-hooks.h 7 7 generic-y += preempt.h 8 8 generic-y += trace_clock.h 9 + generic-y += word-at-a-time.h
+1
arch/score/include/asm/Kbuild
··· 13 13 generic-y += trace_clock.h 14 14 generic-y += xor.h 15 15 generic-y += serial.h 16 + generic-y += word-at-a-time.h
+4 -29
arch/tile/gxio/mpipe.c
··· 19 19 #include <linux/errno.h> 20 20 #include <linux/io.h> 21 21 #include <linux/module.h> 22 + #include <linux/string.h> 22 23 23 24 #include <gxio/iorpc_globals.h> 24 25 #include <gxio/iorpc_mpipe.h> ··· 29 28 30 29 /* HACK: Avoid pointless "shadow" warnings. */ 31 30 #define link link_shadow 32 - 33 - /** 34 - * strscpy - Copy a C-string into a sized buffer, but only if it fits 35 - * @dest: Where to copy the string to 36 - * @src: Where to copy the string from 37 - * @size: size of destination buffer 38 - * 39 - * Use this routine to avoid copying too-long strings. 40 - * The routine returns the total number of bytes copied 41 - * (including the trailing NUL) or zero if the buffer wasn't 42 - * big enough. To ensure that programmers pay attention 43 - * to the return code, the destination has a single NUL 44 - * written at the front (if size is non-zero) when the 45 - * buffer is not big enough. 46 - */ 47 - static size_t strscpy(char *dest, const char *src, size_t size) 48 - { 49 - size_t len = strnlen(src, size) + 1; 50 - if (len > size) { 51 - if (size) 52 - dest[0] = '\0'; 53 - return 0; 54 - } 55 - memcpy(dest, src, len); 56 - return len; 57 - } 58 31 59 32 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) 60 33 { ··· 515 540 if (!context) 516 541 return GXIO_ERR_NO_DEVICE; 517 542 518 - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) 543 + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) 519 544 return GXIO_ERR_NO_DEVICE; 520 545 521 546 return gxio_mpipe_info_instance_aux(context, name); ··· 534 559 535 560 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); 536 561 if (rv >= 0) { 537 - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) 562 + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) 538 563 return GXIO_ERR_INVAL_MEMORY_SIZE; 539 564 memcpy(link_mac, mac.mac, sizeof(mac.mac)); 540 565 } ··· 551 576 _gxio_mpipe_link_name_t name; 552 577 int rv; 553 578 554 - if 
(strscpy(name.name, link_name, sizeof(name.name)) == 0) 579 + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) 555 580 return GXIO_ERR_NO_DEVICE; 556 581 557 582 rv = gxio_mpipe_link_open_aux(context, name, flags);
+1
arch/tile/include/asm/Kbuild
··· 40 40 generic-y += termios.h 41 41 generic-y += trace_clock.h 42 42 generic-y += types.h 43 + generic-y += word-at-a-time.h 43 44 generic-y += xor.h
+1
arch/tile/kernel/usb.c
··· 22 22 #include <linux/platform_device.h> 23 23 #include <linux/usb/tilegx.h> 24 24 #include <linux/init.h> 25 + #include <linux/module.h> 25 26 #include <linux/types.h> 26 27 27 28 static u64 ehci_dmamask = DMA_BIT_MASK(32);
+1
arch/um/include/asm/Kbuild
··· 25 25 generic-y += switch_to.h 26 26 generic-y += topology.h 27 27 generic-y += trace_clock.h 28 + generic-y += word-at-a-time.h 28 29 generic-y += xor.h
+1
arch/unicore32/include/asm/Kbuild
··· 62 62 generic-y += unaligned.h 63 63 generic-y += user.h 64 64 generic-y += vga.h 65 + generic-y += word-at-a-time.h 65 66 generic-y += xor.h
+1 -1
arch/x86/include/asm/cpufeature.h
··· 193 193 #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 194 194 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 195 195 #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ 196 - #define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ 196 + #define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ 197 197 #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ 198 198 #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ 199 199 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+2
arch/x86/include/asm/efi.h
··· 86 86 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, 87 87 u32 type, u64 attribute); 88 88 89 + #ifdef CONFIG_KASAN 89 90 /* 90 91 * CONFIG_KASAN may redefine memset to __memset. __memset function is present 91 92 * only in kernel binary. Since the EFI stub linked into a separate binary it ··· 96 95 #undef memcpy 97 96 #undef memset 98 97 #undef memmove 98 + #endif 99 99 100 100 #endif /* CONFIG_X86_32 */ 101 101
+2
arch/x86/include/asm/msr-index.h
··· 141 141 #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) 142 142 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) 143 143 144 + #define MSR_PEBS_FRONTEND 0x000003f7 145 + 144 146 #define MSR_IA32_POWER_CTL 0x000001fc 145 147 146 148 #define MSR_IA32_MC0_CTL 0x00000400
+1
arch/x86/include/asm/pvclock-abi.h
··· 41 41 42 42 #define PVCLOCK_TSC_STABLE_BIT (1 << 0) 43 43 #define PVCLOCK_GUEST_STOPPED (1 << 1) 44 + /* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ 44 45 #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) 45 46 #endif /* __ASSEMBLY__ */ 46 47 #endif /* _ASM_X86_PVCLOCK_ABI_H */
+1 -1
arch/x86/include/uapi/asm/bitsperlong.h
··· 1 1 #ifndef __ASM_X86_BITSPERLONG_H 2 2 #define __ASM_X86_BITSPERLONG_H 3 3 4 - #ifdef __x86_64__ 4 + #if defined(__x86_64__) && !defined(__ILP32__) 5 5 # define __BITS_PER_LONG 64 6 6 #else 7 7 # define __BITS_PER_LONG 32
+7 -5
arch/x86/kernel/cpu/mshyperv.c
··· 34 34 struct ms_hyperv_info ms_hyperv; 35 35 EXPORT_SYMBOL_GPL(ms_hyperv); 36 36 37 - static void (*hv_kexec_handler)(void); 38 - static void (*hv_crash_handler)(struct pt_regs *regs); 39 - 40 37 #if IS_ENABLED(CONFIG_HYPERV) 41 38 static void (*vmbus_handler)(void); 39 + static void (*hv_kexec_handler)(void); 40 + static void (*hv_crash_handler)(struct pt_regs *regs); 42 41 43 42 void hyperv_vector_handler(struct pt_regs *regs) 44 43 { ··· 95 96 hv_crash_handler = NULL; 96 97 } 97 98 EXPORT_SYMBOL_GPL(hv_remove_crash_handler); 98 - #endif 99 99 100 + #ifdef CONFIG_KEXEC_CORE 100 101 static void hv_machine_shutdown(void) 101 102 { 102 103 if (kexec_in_progress && hv_kexec_handler) ··· 110 111 hv_crash_handler(regs); 111 112 native_machine_crash_shutdown(regs); 112 113 } 113 - 114 + #endif /* CONFIG_KEXEC_CORE */ 115 + #endif /* CONFIG_HYPERV */ 114 116 115 117 static uint32_t __init ms_hyperv_platform(void) 116 118 { ··· 186 186 no_timer_check = 1; 187 187 #endif 188 188 189 + #if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) 189 190 machine_ops.shutdown = hv_machine_shutdown; 190 191 machine_ops.crash_shutdown = hv_machine_crash_shutdown; 192 + #endif 191 193 mark_tsc_unstable("running on Hyper-V"); 192 194 } 193 195
+1
arch/x86/kernel/cpu/perf_event.h
··· 47 47 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ 48 48 EXTRA_REG_LBR = 2, /* lbr_select */ 49 49 EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ 50 + EXTRA_REG_FE = 4, /* fe_* */ 50 51 51 52 EXTRA_REG_MAX /* number of entries needed */ 52 53 };
+15 -2
arch/x86/kernel/cpu/perf_event_intel.c
··· 205 205 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 206 206 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 207 207 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 208 + /* 209 + * Note the low 8 bits eventsel code is not a continuous field, containing 210 + * some #GPing bits. These are masked out. 211 + */ 212 + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), 208 213 EVENT_EXTRA_END 209 214 }; 210 215 ··· 255 250 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 256 251 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 257 252 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ 258 - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ 253 + INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ 259 254 EVENT_CONSTRAINT_END 260 255 }; 261 256 ··· 2896 2891 2897 2892 PMU_FORMAT_ATTR(ldlat, "config1:0-15"); 2898 2893 2894 + PMU_FORMAT_ATTR(frontend, "config1:0-23"); 2895 + 2899 2896 static struct attribute *intel_arch3_formats_attr[] = { 2900 2897 &format_attr_event.attr, 2901 2898 &format_attr_umask.attr, ··· 2911 2904 2912 2905 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ 2913 2906 &format_attr_ldlat.attr, /* PEBS load latency */ 2907 + NULL, 2908 + }; 2909 + 2910 + static struct attribute *skl_format_attr[] = { 2911 + &format_attr_frontend.attr, 2914 2912 NULL, 2915 2913 }; 2916 2914 ··· 3528 3516 3529 3517 x86_pmu.hw_config = hsw_hw_config; 3530 3518 x86_pmu.get_event_constraints = hsw_get_event_constraints; 3531 - x86_pmu.cpu_events = hsw_events_attrs; 3519 + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, 3520 + skl_format_attr); 3532 3521 WARN_ON(!x86_pmu.format_attrs); 3533 3522 x86_pmu.cpu_events = hsw_events_attrs; 3534 3523 pr_cont("Skylake events, ");
+2 -2
arch/x86/kernel/cpu/perf_event_msr.c
··· 10 10 PERF_MSR_EVENT_MAX, 11 11 }; 12 12 13 - bool test_aperfmperf(int idx) 13 + static bool test_aperfmperf(int idx) 14 14 { 15 15 return boot_cpu_has(X86_FEATURE_APERFMPERF); 16 16 } 17 17 18 - bool test_intel(int idx) 18 + static bool test_intel(int idx) 19 19 { 20 20 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || 21 21 boot_cpu_data.x86 != 6)
+1 -1
arch/x86/kernel/cpu/scattered.c
··· 37 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 38 38 { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, 39 39 { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, 40 - { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, 40 + { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, 41 41 { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, 42 42 { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, 43 43 { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
+3 -4
arch/x86/kernel/crash.c
··· 185 185 } 186 186 187 187 #ifdef CONFIG_KEXEC_FILE 188 - static int get_nr_ram_ranges_callback(unsigned long start_pfn, 189 - unsigned long nr_pfn, void *arg) 188 + static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) 190 189 { 191 - int *nr_ranges = arg; 190 + unsigned int *nr_ranges = arg; 192 191 193 192 (*nr_ranges)++; 194 193 return 0; ··· 213 214 214 215 ced->image = image; 215 216 216 - walk_system_ram_range(0, -1, &nr_ranges, 217 + walk_system_ram_res(0, -1, &nr_ranges, 217 218 get_nr_ram_ranges_callback); 218 219 219 220 ced->max_nr_ranges = nr_ranges;
+55
arch/x86/kernel/process.c
··· 506 506 return randomize_range(mm->brk, range_end, 0) ? : mm->brk; 507 507 } 508 508 509 + /* 510 + * Called from fs/proc with a reference on @p to find the function 511 + * which called into schedule(). This needs to be done carefully 512 + * because the task might wake up and we might look at a stack 513 + * changing under us. 514 + */ 515 + unsigned long get_wchan(struct task_struct *p) 516 + { 517 + unsigned long start, bottom, top, sp, fp, ip; 518 + int count = 0; 519 + 520 + if (!p || p == current || p->state == TASK_RUNNING) 521 + return 0; 522 + 523 + start = (unsigned long)task_stack_page(p); 524 + if (!start) 525 + return 0; 526 + 527 + /* 528 + * Layout of the stack page: 529 + * 530 + * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) 531 + * PADDING 532 + * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING 533 + * stack 534 + * ----------- bottom = start + sizeof(thread_info) 535 + * thread_info 536 + * ----------- start 537 + * 538 + * The tasks stack pointer points at the location where the 539 + * framepointer is stored. The data on the stack is: 540 + * ... IP FP ... IP FP 541 + * 542 + * We need to read FP and IP, so we need to adjust the upper 543 + * bound by another unsigned long. 544 + */ 545 + top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; 546 + top -= 2 * sizeof(unsigned long); 547 + bottom = start + sizeof(struct thread_info); 548 + 549 + sp = READ_ONCE(p->thread.sp); 550 + if (sp < bottom || sp > top) 551 + return 0; 552 + 553 + fp = READ_ONCE(*(unsigned long *)sp); 554 + do { 555 + if (fp < bottom || fp > top) 556 + return 0; 557 + ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); 558 + if (!in_sched_functions(ip)) 559 + return ip; 560 + fp = READ_ONCE(*(unsigned long *)fp); 561 + } while (count++ < 16 && p->state != TASK_RUNNING); 562 + return 0; 563 + }
-28
arch/x86/kernel/process_32.c
··· 324 324 325 325 return prev_p; 326 326 } 327 - 328 - #define top_esp (THREAD_SIZE - sizeof(unsigned long)) 329 - #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) 330 - 331 - unsigned long get_wchan(struct task_struct *p) 332 - { 333 - unsigned long bp, sp, ip; 334 - unsigned long stack_page; 335 - int count = 0; 336 - if (!p || p == current || p->state == TASK_RUNNING) 337 - return 0; 338 - stack_page = (unsigned long)task_stack_page(p); 339 - sp = p->thread.sp; 340 - if (!stack_page || sp < stack_page || sp > top_esp+stack_page) 341 - return 0; 342 - /* include/asm-i386/system.h:switch_to() pushes bp last. */ 343 - bp = *(unsigned long *) sp; 344 - do { 345 - if (bp < stack_page || bp > top_ebp+stack_page) 346 - return 0; 347 - ip = *(unsigned long *) (bp+4); 348 - if (!in_sched_functions(ip)) 349 - return ip; 350 - bp = *(unsigned long *) bp; 351 - } while (count++ < 16); 352 - return 0; 353 - } 354 -
-24
arch/x86/kernel/process_64.c
··· 499 499 } 500 500 EXPORT_SYMBOL_GPL(set_personality_ia32); 501 501 502 - unsigned long get_wchan(struct task_struct *p) 503 - { 504 - unsigned long stack; 505 - u64 fp, ip; 506 - int count = 0; 507 - 508 - if (!p || p == current || p->state == TASK_RUNNING) 509 - return 0; 510 - stack = (unsigned long)task_stack_page(p); 511 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) 512 - return 0; 513 - fp = *(u64 *)(p->thread.sp); 514 - do { 515 - if (fp < (unsigned long)stack || 516 - fp >= (unsigned long)stack+THREAD_SIZE) 517 - return 0; 518 - ip = *(u64 *)(fp+8); 519 - if (!in_sched_functions(ip)) 520 - return ip; 521 - fp = *(u64 *)fp; 522 - } while (count++ < 16); 523 - return 0; 524 - } 525 - 526 502 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 527 503 { 528 504 int ret = 0;
+13 -112
arch/x86/kvm/svm.c
··· 514 514 struct vcpu_svm *svm = to_svm(vcpu); 515 515 516 516 if (svm->vmcb->control.next_rip != 0) { 517 - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); 517 + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); 518 518 svm->next_rip = svm->vmcb->control.next_rip; 519 519 } 520 520 ··· 866 866 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 867 867 } 868 868 869 - #define MTRR_TYPE_UC_MINUS 7 870 - #define MTRR2PROTVAL_INVALID 0xff 871 - 872 - static u8 mtrr2protval[8]; 873 - 874 - static u8 fallback_mtrr_type(int mtrr) 875 - { 876 - /* 877 - * WT and WP aren't always available in the host PAT. Treat 878 - * them as UC and UC- respectively. Everything else should be 879 - * there. 880 - */ 881 - switch (mtrr) 882 - { 883 - case MTRR_TYPE_WRTHROUGH: 884 - return MTRR_TYPE_UNCACHABLE; 885 - case MTRR_TYPE_WRPROT: 886 - return MTRR_TYPE_UC_MINUS; 887 - default: 888 - BUG(); 889 - } 890 - } 891 - 892 - static void build_mtrr2protval(void) 893 - { 894 - int i; 895 - u64 pat; 896 - 897 - for (i = 0; i < 8; i++) 898 - mtrr2protval[i] = MTRR2PROTVAL_INVALID; 899 - 900 - /* Ignore the invalid MTRR types. */ 901 - mtrr2protval[2] = 0; 902 - mtrr2protval[3] = 0; 903 - 904 - /* 905 - * Use host PAT value to figure out the mapping from guest MTRR 906 - * values to nested page table PAT/PCD/PWT values. We do not 907 - * want to change the host PAT value every time we enter the 908 - * guest. 
909 - */ 910 - rdmsrl(MSR_IA32_CR_PAT, pat); 911 - for (i = 0; i < 8; i++) { 912 - u8 mtrr = pat >> (8 * i); 913 - 914 - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) 915 - mtrr2protval[mtrr] = __cm_idx2pte(i); 916 - } 917 - 918 - for (i = 0; i < 8; i++) { 919 - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { 920 - u8 fallback = fallback_mtrr_type(i); 921 - mtrr2protval[i] = mtrr2protval[fallback]; 922 - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); 923 - } 924 - } 925 - } 926 - 927 869 static __init int svm_hardware_setup(void) 928 870 { 929 871 int cpu; ··· 932 990 } else 933 991 kvm_disable_tdp(); 934 992 935 - build_mtrr2protval(); 936 993 return 0; 937 994 938 995 err: ··· 1086 1145 return target_tsc - tsc; 1087 1146 } 1088 1147 1089 - static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) 1090 - { 1091 - struct kvm_vcpu *vcpu = &svm->vcpu; 1092 - 1093 - /* Unlike Intel, AMD takes the guest's CR0.CD into account. 1094 - * 1095 - * AMD does not have IPAT. To emulate it for the case of guests 1096 - * with no assigned devices, just set everything to WB. If guests 1097 - * have assigned devices, however, we cannot force WB for RAM 1098 - * pages only, so use the guest PAT directly. 1099 - */ 1100 - if (!kvm_arch_has_assigned_device(vcpu->kvm)) 1101 - *g_pat = 0x0606060606060606; 1102 - else 1103 - *g_pat = vcpu->arch.pat; 1104 - } 1105 - 1106 - static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 1107 - { 1108 - u8 mtrr; 1109 - 1110 - /* 1111 - * 1. MMIO: trust guest MTRR, so same as item 3. 1112 - * 2. No passthrough: always map as WB, and force guest PAT to WB as well 1113 - * 3. Passthrough: can't guarantee the result, try to trust guest. 
1114 - */ 1115 - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) 1116 - return 0; 1117 - 1118 - if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && 1119 - kvm_read_cr0(vcpu) & X86_CR0_CD) 1120 - return _PAGE_NOCACHE; 1121 - 1122 - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); 1123 - return mtrr2protval[mtrr]; 1124 - } 1125 - 1126 1148 static void init_vmcb(struct vcpu_svm *svm, bool init_event) 1127 1149 { 1128 1150 struct vmcb_control_area *control = &svm->vmcb->control; ··· 1182 1278 clr_cr_intercept(svm, INTERCEPT_CR3_READ); 1183 1279 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); 1184 1280 save->g_pat = svm->vcpu.arch.pat; 1185 - svm_set_guest_pat(svm, &save->g_pat); 1186 1281 save->cr3 = 0; 1187 1282 save->cr4 = 0; 1188 1283 } ··· 1576 1673 1577 1674 if (!vcpu->fpu_active) 1578 1675 cr0 |= X86_CR0_TS; 1579 - 1580 - /* These are emulated via page tables. */ 1581 - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1582 - 1676 + /* 1677 + * re-enable caching here because the QEMU bios 1678 + * does not do it - this results in some delay at 1679 + * reboot 1680 + */ 1681 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) 1682 + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1583 1683 svm->vmcb->save.cr0 = cr0; 1584 1684 mark_dirty(svm->vmcb, VMCB_CR); 1585 1685 update_cr0_intercept(svm); ··· 3257 3351 case MSR_VM_IGNNE: 3258 3352 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 3259 3353 break; 3260 - case MSR_IA32_CR_PAT: 3261 - if (npt_enabled) { 3262 - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) 3263 - return 1; 3264 - vcpu->arch.pat = data; 3265 - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); 3266 - mark_dirty(svm->vmcb, VMCB_NPT); 3267 - break; 3268 - } 3269 - /* fall through */ 3270 3354 default: 3271 3355 return kvm_set_msr_common(vcpu, msr); 3272 3356 } ··· 4089 4193 static bool svm_has_high_real_mode_segbase(void) 4090 4194 { 4091 4195 return true; 4196 + } 4197 + 4198 + static u64 svm_get_mt_mask(struct 
kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) 4199 + { 4200 + return 0; 4092 4201 } 4093 4202 4094 4203 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+8 -3
arch/x86/kvm/vmx.c
··· 8617 8617 u64 ipat = 0; 8618 8618 8619 8619 /* For VT-d and EPT combination 8620 - * 1. MMIO: guest may want to apply WC, trust it. 8620 + * 1. MMIO: always map as UC 8621 8621 * 2. EPT with VT-d: 8622 8622 * a. VT-d without snooping control feature: can't guarantee the 8623 - * result, try to trust guest. So the same as item 1. 8623 + * result, try to trust guest. 8624 8624 * b. VT-d with snooping control feature: snooping control feature of 8625 8625 * VT-d engine can guarantee the cache correctness. Just set it 8626 8626 * to WB to keep consistent with host. So the same as item 3. 8627 8627 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 8628 8628 * consistent with host MTRR 8629 8629 */ 8630 - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 8630 + if (is_mmio) { 8631 + cache = MTRR_TYPE_UNCACHABLE; 8632 + goto exit; 8633 + } 8634 + 8635 + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { 8631 8636 ipat = VMX_EPT_IPAT_BIT; 8632 8637 cache = MTRR_TYPE_WRBACK; 8633 8638 goto exit;
-4
arch/x86/kvm/x86.c
··· 1708 1708 vcpu->pvclock_set_guest_stopped_request = false; 1709 1709 } 1710 1710 1711 - pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; 1712 - 1713 1711 /* If the host uses TSC clocksource, then it is stable */ 1714 1712 if (use_master_clock) 1715 1713 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; ··· 2005 2007 &vcpu->requests); 2006 2008 2007 2009 ka->boot_vcpu_runs_old_kvmclock = tmp; 2008 - 2009 - ka->kvmclock_offset = -get_kernel_ns(); 2010 2010 } 2011 2011 2012 2012 vcpu->arch.time = data;
+1 -1
arch/x86/mm/init_64.c
··· 1132 1132 * has been zapped already via cleanup_highmem(). 1133 1133 */ 1134 1134 all_end = roundup((unsigned long)_brk_end, PMD_SIZE); 1135 - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); 1135 + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); 1136 1136 1137 1137 rodata_test(); 1138 1138
+66 -1
arch/x86/platform/efi/efi.c
··· 705 705 } 706 706 707 707 /* 708 + * Iterate the EFI memory map in reverse order because the regions 709 + * will be mapped top-down. The end result is the same as if we had 710 + * mapped things forward, but doesn't require us to change the 711 + * existing implementation of efi_map_region(). 712 + */ 713 + static inline void *efi_map_next_entry_reverse(void *entry) 714 + { 715 + /* Initial call */ 716 + if (!entry) 717 + return memmap.map_end - memmap.desc_size; 718 + 719 + entry -= memmap.desc_size; 720 + if (entry < memmap.map) 721 + return NULL; 722 + 723 + return entry; 724 + } 725 + 726 + /* 727 + * efi_map_next_entry - Return the next EFI memory map descriptor 728 + * @entry: Previous EFI memory map descriptor 729 + * 730 + * This is a helper function to iterate over the EFI memory map, which 731 + * we do in different orders depending on the current configuration. 732 + * 733 + * To begin traversing the memory map @entry must be %NULL. 734 + * 735 + * Returns %NULL when we reach the end of the memory map. 736 + */ 737 + static void *efi_map_next_entry(void *entry) 738 + { 739 + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { 740 + /* 741 + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE 742 + * config table feature requires us to map all entries 743 + * in the same order as they appear in the EFI memory 744 + * map. That is to say, entry N must have a lower 745 + * virtual address than entry N+1. This is because the 746 + * firmware toolchain leaves relative references in 747 + * the code/data sections, which are split and become 748 + * separate EFI memory regions. Mapping things 749 + * out-of-order leads to the firmware accessing 750 + * unmapped addresses. 751 + * 752 + * Since we need to map things this way whether or not 753 + * the kernel actually makes use of 754 + * EFI_PROPERTIES_TABLE, let's just switch to this 755 + * scheme by default for 64-bit. 
756 + */ 757 + return efi_map_next_entry_reverse(entry); 758 + } 759 + 760 + /* Initial call */ 761 + if (!entry) 762 + return memmap.map; 763 + 764 + entry += memmap.desc_size; 765 + if (entry >= memmap.map_end) 766 + return NULL; 767 + 768 + return entry; 769 + } 770 + 771 + /* 708 772 * Map the efi memory ranges of the runtime services and update new_mmap with 709 773 * virtual addresses. 710 774 */ ··· 778 714 unsigned long left = 0; 779 715 efi_memory_desc_t *md; 780 716 781 - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 717 + p = NULL; 718 + while ((p = efi_map_next_entry(p))) { 782 719 md = p; 783 720 if (!(md->attribute & EFI_MEMORY_RUNTIME)) { 784 721 #ifdef CONFIG_X86_64
+1
arch/xtensa/include/asm/Kbuild
··· 28 28 generic-y += termios.h 29 29 generic-y += topology.h 30 30 generic-y += trace_clock.h 31 + generic-y += word-at-a-time.h 31 32 generic-y += xor.h
+5 -4
block/blk-mq-cpumap.c
··· 31 31 return cpu; 32 32 } 33 33 34 - int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) 34 + int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, 35 + const struct cpumask *online_mask) 35 36 { 36 37 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; 37 38 cpumask_var_t cpus; ··· 42 41 43 42 cpumask_clear(cpus); 44 43 nr_cpus = nr_uniq_cpus = 0; 45 - for_each_online_cpu(i) { 44 + for_each_cpu(i, online_mask) { 46 45 nr_cpus++; 47 46 first_sibling = get_first_sibling(i); 48 47 if (!cpumask_test_cpu(first_sibling, cpus)) ··· 52 51 53 52 queue = 0; 54 53 for_each_possible_cpu(i) { 55 - if (!cpu_online(i)) { 54 + if (!cpumask_test_cpu(i, online_mask)) { 56 55 map[i] = 0; 57 56 continue; 58 57 } ··· 96 95 if (!map) 97 96 return NULL; 98 97 99 - if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) 98 + if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) 100 99 return map; 101 100 102 101 kfree(map);
+22 -12
block/blk-mq-sysfs.c
··· 229 229 unsigned int i, first = 1; 230 230 ssize_t ret = 0; 231 231 232 - blk_mq_disable_hotplug(); 233 - 234 232 for_each_cpu(i, hctx->cpumask) { 235 233 if (first) 236 234 ret += sprintf(ret + page, "%u", i); ··· 237 239 238 240 first = 0; 239 241 } 240 - 241 - blk_mq_enable_hotplug(); 242 242 243 243 ret += sprintf(ret + page, "\n"); 244 244 return ret; ··· 339 343 struct blk_mq_ctx *ctx; 340 344 int i; 341 345 342 - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) 346 + if (!hctx->nr_ctx) 343 347 return; 344 348 345 349 hctx_for_each_ctx(hctx, ctx, i) ··· 354 358 struct blk_mq_ctx *ctx; 355 359 int i, ret; 356 360 357 - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) 361 + if (!hctx->nr_ctx) 358 362 return 0; 359 363 360 364 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); ··· 377 381 struct blk_mq_ctx *ctx; 378 382 int i, j; 379 383 384 + blk_mq_disable_hotplug(); 385 + 380 386 queue_for_each_hw_ctx(q, hctx, i) { 381 387 blk_mq_unregister_hctx(hctx); 382 388 ··· 393 395 kobject_put(&q->mq_kobj); 394 396 395 397 kobject_put(&disk_to_dev(disk)->kobj); 398 + 399 + q->mq_sysfs_init_done = false; 400 + blk_mq_enable_hotplug(); 396 401 } 397 402 398 403 static void blk_mq_sysfs_init(struct request_queue *q) ··· 426 425 struct blk_mq_hw_ctx *hctx; 427 426 int ret, i; 428 427 428 + blk_mq_disable_hotplug(); 429 + 429 430 blk_mq_sysfs_init(q); 430 431 431 432 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); 432 433 if (ret < 0) 433 - return ret; 434 + goto out; 434 435 435 436 kobject_uevent(&q->mq_kobj, KOBJ_ADD); 436 437 437 438 queue_for_each_hw_ctx(q, hctx, i) { 438 - hctx->flags |= BLK_MQ_F_SYSFS_UP; 439 439 ret = blk_mq_register_hctx(hctx); 440 440 if (ret) 441 441 break; 442 442 } 443 443 444 - if (ret) { 444 + if (ret) 445 445 blk_mq_unregister_disk(disk); 446 - return ret; 447 - } 446 + else 447 + q->mq_sysfs_init_done = true; 448 + out: 449 + blk_mq_enable_hotplug(); 448 450 449 - return 0; 451 + 
return ret; 450 452 } 451 453 EXPORT_SYMBOL_GPL(blk_mq_register_disk); 452 454 ··· 457 453 { 458 454 struct blk_mq_hw_ctx *hctx; 459 455 int i; 456 + 457 + if (!q->mq_sysfs_init_done) 458 + return; 460 459 461 460 queue_for_each_hw_ctx(q, hctx, i) 462 461 blk_mq_unregister_hctx(hctx); ··· 469 462 { 470 463 struct blk_mq_hw_ctx *hctx; 471 464 int i, ret = 0; 465 + 466 + if (!q->mq_sysfs_init_done) 467 + return ret; 472 468 473 469 queue_for_each_hw_ctx(q, hctx, i) { 474 470 ret = blk_mq_register_hctx(hctx);
+20 -7
block/blk-mq-tag.c
··· 471 471 } 472 472 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); 473 473 474 - void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 474 + void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, 475 475 void *priv) 476 476 { 477 - struct blk_mq_tags *tags = hctx->tags; 477 + struct blk_mq_hw_ctx *hctx; 478 + int i; 478 479 479 - if (tags->nr_reserved_tags) 480 - bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); 481 - bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, 482 - false); 480 + 481 + queue_for_each_hw_ctx(q, hctx, i) { 482 + struct blk_mq_tags *tags = hctx->tags; 483 + 484 + /* 485 + * If not software queues are currently mapped to this 486 + * hardware queue, there's nothing to check 487 + */ 488 + if (!blk_mq_hw_queue_mapped(hctx)) 489 + continue; 490 + 491 + if (tags->nr_reserved_tags) 492 + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); 493 + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, 494 + false); 495 + } 496 + 483 497 } 484 - EXPORT_SYMBOL(blk_mq_tag_busy_iter); 485 498 486 499 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) 487 500 {
+2
block/blk-mq-tag.h
··· 58 58 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); 59 59 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); 60 60 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); 61 + void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, 62 + void *priv); 61 63 62 64 enum { 63 65 BLK_MQ_TAG_CACHE_MIN = 1,
+77 -41
block/blk-mq.c
··· 393 393 * Ends all I/O on a request. It does not handle partial completions. 394 394 * The actual completion happens out-of-order, through a IPI handler. 395 395 **/ 396 - void blk_mq_complete_request(struct request *rq) 396 + void blk_mq_complete_request(struct request *rq, int error) 397 397 { 398 398 struct request_queue *q = rq->q; 399 399 400 400 if (unlikely(blk_should_fake_timeout(q))) 401 401 return; 402 - if (!blk_mark_rq_complete(rq)) 402 + if (!blk_mark_rq_complete(rq)) { 403 + rq->errors = error; 403 404 __blk_mq_complete_request(rq); 405 + } 404 406 } 405 407 EXPORT_SYMBOL(blk_mq_complete_request); 406 408 ··· 618 616 * If a request wasn't started before the queue was 619 617 * marked dying, kill it here or it'll go unnoticed. 620 618 */ 621 - if (unlikely(blk_queue_dying(rq->q))) { 622 - rq->errors = -EIO; 623 - blk_mq_complete_request(rq); 624 - } 619 + if (unlikely(blk_queue_dying(rq->q))) 620 + blk_mq_complete_request(rq, -EIO); 625 621 return; 626 622 } 627 623 if (rq->cmd_flags & REQ_NO_TIMEOUT) ··· 641 641 .next = 0, 642 642 .next_set = 0, 643 643 }; 644 - struct blk_mq_hw_ctx *hctx; 645 644 int i; 646 645 647 - queue_for_each_hw_ctx(q, hctx, i) { 648 - /* 649 - * If not software queues are currently mapped to this 650 - * hardware queue, there's nothing to check 651 - */ 652 - if (!blk_mq_hw_queue_mapped(hctx)) 653 - continue; 654 - 655 - blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); 656 - } 646 + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); 657 647 658 648 if (data.next_set) { 659 649 data.next = blk_rq_timeout(round_jiffies_up(data.next)); 660 650 mod_timer(&q->timeout, data.next); 661 651 } else { 652 + struct blk_mq_hw_ctx *hctx; 653 + 662 654 queue_for_each_hw_ctx(q, hctx, i) { 663 655 /* the hctx may be unmapped, so check it here */ 664 656 if (blk_mq_hw_queue_mapped(hctx)) ··· 1781 1789 } 1782 1790 } 1783 1791 1784 - static void blk_mq_map_swqueue(struct request_queue *q) 1792 + static void 
blk_mq_map_swqueue(struct request_queue *q, 1793 + const struct cpumask *online_mask) 1785 1794 { 1786 1795 unsigned int i; 1787 1796 struct blk_mq_hw_ctx *hctx; 1788 1797 struct blk_mq_ctx *ctx; 1789 1798 struct blk_mq_tag_set *set = q->tag_set; 1799 + 1800 + /* 1801 + * Avoid others reading imcomplete hctx->cpumask through sysfs 1802 + */ 1803 + mutex_lock(&q->sysfs_lock); 1790 1804 1791 1805 queue_for_each_hw_ctx(q, hctx, i) { 1792 1806 cpumask_clear(hctx->cpumask); ··· 1804 1806 */ 1805 1807 queue_for_each_ctx(q, ctx, i) { 1806 1808 /* If the cpu isn't online, the cpu is mapped to first hctx */ 1807 - if (!cpu_online(i)) 1809 + if (!cpumask_test_cpu(i, online_mask)) 1808 1810 continue; 1809 1811 1810 1812 hctx = q->mq_ops->map_queue(q, i); 1811 1813 cpumask_set_cpu(i, hctx->cpumask); 1812 - cpumask_set_cpu(i, hctx->tags->cpumask); 1813 1814 ctx->index_hw = hctx->nr_ctx; 1814 1815 hctx->ctxs[hctx->nr_ctx++] = ctx; 1815 1816 } 1817 + 1818 + mutex_unlock(&q->sysfs_lock); 1816 1819 1817 1820 queue_for_each_hw_ctx(q, hctx, i) { 1818 1821 struct blk_mq_ctxmap *map = &hctx->ctx_map; ··· 1849 1850 */ 1850 1851 hctx->next_cpu = cpumask_first(hctx->cpumask); 1851 1852 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; 1853 + } 1854 + 1855 + queue_for_each_ctx(q, ctx, i) { 1856 + if (!cpumask_test_cpu(i, online_mask)) 1857 + continue; 1858 + 1859 + hctx = q->mq_ops->map_queue(q, i); 1860 + cpumask_set_cpu(i, hctx->tags->cpumask); 1852 1861 } 1853 1862 } 1854 1863 ··· 1924 1917 kfree(hctx->ctxs); 1925 1918 kfree(hctx); 1926 1919 } 1920 + 1921 + kfree(q->mq_map); 1922 + q->mq_map = NULL; 1927 1923 1928 1924 kfree(q->queue_hw_ctx); 1929 1925 ··· 2037 2027 if (blk_mq_init_hw_queues(q, set)) 2038 2028 goto err_hctxs; 2039 2029 2030 + get_online_cpus(); 2040 2031 mutex_lock(&all_q_mutex); 2032 + 2041 2033 list_add_tail(&q->all_q_node, &all_q_list); 2042 - mutex_unlock(&all_q_mutex); 2043 - 2044 2034 blk_mq_add_queue_tag_set(set, q); 2035 + blk_mq_map_swqueue(q, cpu_online_mask); 
2045 2036 2046 - blk_mq_map_swqueue(q); 2037 + mutex_unlock(&all_q_mutex); 2038 + put_online_cpus(); 2047 2039 2048 2040 return q; 2049 2041 ··· 2069 2057 { 2070 2058 struct blk_mq_tag_set *set = q->tag_set; 2071 2059 2060 + mutex_lock(&all_q_mutex); 2061 + list_del_init(&q->all_q_node); 2062 + mutex_unlock(&all_q_mutex); 2063 + 2072 2064 blk_mq_del_queue_tag_set(q); 2073 2065 2074 2066 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); 2075 2067 blk_mq_free_hw_queues(q, set); 2076 2068 2077 2069 percpu_ref_exit(&q->mq_usage_counter); 2078 - 2079 - kfree(q->mq_map); 2080 - 2081 - q->mq_map = NULL; 2082 - 2083 - mutex_lock(&all_q_mutex); 2084 - list_del_init(&q->all_q_node); 2085 - mutex_unlock(&all_q_mutex); 2086 2070 } 2087 2071 2088 2072 /* Basically redo blk_mq_init_queue with queue frozen */ 2089 - static void blk_mq_queue_reinit(struct request_queue *q) 2073 + static void blk_mq_queue_reinit(struct request_queue *q, 2074 + const struct cpumask *online_mask) 2090 2075 { 2091 2076 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); 2092 2077 2093 2078 blk_mq_sysfs_unregister(q); 2094 2079 2095 - blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); 2080 + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); 2096 2081 2097 2082 /* 2098 2083 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe ··· 2097 2088 * involves free and re-allocate memory, worthy doing?) 2098 2089 */ 2099 2090 2100 - blk_mq_map_swqueue(q); 2091 + blk_mq_map_swqueue(q, online_mask); 2101 2092 2102 2093 blk_mq_sysfs_register(q); 2103 2094 } ··· 2106 2097 unsigned long action, void *hcpu) 2107 2098 { 2108 2099 struct request_queue *q; 2100 + int cpu = (unsigned long)hcpu; 2101 + /* 2102 + * New online cpumask which is going to be set in this hotplug event. 2103 + * Declare this cpumasks as global as cpu-hotplug operation is invoked 2104 + * one-by-one and dynamically allocating this could result in a failure. 
2105 + */ 2106 + static struct cpumask online_new; 2109 2107 2110 2108 /* 2111 - * Before new mappings are established, hotadded cpu might already 2112 - * start handling requests. This doesn't break anything as we map 2113 - * offline CPUs to first hardware queue. We will re-init the queue 2114 - * below to get optimal settings. 2109 + * Before hotadded cpu starts handling requests, new mappings must 2110 + * be established. Otherwise, these requests in hw queue might 2111 + * never be dispatched. 2112 + * 2113 + * For example, there is a single hw queue (hctx) and two CPU queues 2114 + * (ctx0 for CPU0, and ctx1 for CPU1). 2115 + * 2116 + * Now CPU1 is just onlined and a request is inserted into 2117 + * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is 2118 + * still zero. 2119 + * 2120 + * And then while running hw queue, flush_busy_ctxs() finds bit0 is 2121 + * set in pending bitmap and tries to retrieve requests in 2122 + * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0, 2123 + * so the request in ctx1->rq_list is ignored. 2115 2124 */ 2116 - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && 2117 - action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) 2125 + switch (action & ~CPU_TASKS_FROZEN) { 2126 + case CPU_DEAD: 2127 + case CPU_UP_CANCELED: 2128 + cpumask_copy(&online_new, cpu_online_mask); 2129 + break; 2130 + case CPU_UP_PREPARE: 2131 + cpumask_copy(&online_new, cpu_online_mask); 2132 + cpumask_set_cpu(cpu, &online_new); 2133 + break; 2134 + default: 2118 2135 return NOTIFY_OK; 2136 + } 2119 2137 2120 2138 mutex_lock(&all_q_mutex); 2121 2139 ··· 2166 2130 } 2167 2131 2168 2132 list_for_each_entry(q, &all_q_list, all_q_node) 2169 - blk_mq_queue_reinit(q); 2133 + blk_mq_queue_reinit(q, &online_new); 2170 2134 2171 2135 list_for_each_entry(q, &all_q_list, all_q_node) 2172 2136 blk_mq_unfreeze_queue(q);
+2 -1
block/blk-mq.h
··· 51 51 * CPU -> queue mappings 52 52 */ 53 53 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); 54 - extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); 54 + extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, 55 + const struct cpumask *online_mask); 55 56 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); 56 57 57 58 /*
-4
crypto/asymmetric_keys/x509_public_key.c
··· 332 332 srlen = cert->raw_serial_size; 333 333 q = cert->raw_serial; 334 334 } 335 - if (srlen > 1 && *q == 0) { 336 - srlen--; 337 - q++; 338 - } 339 335 340 336 ret = -ENOMEM; 341 337 desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
+2
drivers/acpi/ec.c
··· 1044 1044 goto err_exit; 1045 1045 1046 1046 mutex_lock(&ec->mutex); 1047 + result = -ENODATA; 1047 1048 list_for_each_entry(handler, &ec->list, node) { 1048 1049 if (value == handler->query_bit) { 1050 + result = 0; 1049 1051 q->handler = acpi_ec_get_query_handler(handler); 1050 1052 ec_dbg_evt("Query(0x%02x) scheduled", 1051 1053 q->handler->query_bit);
+1
drivers/acpi/pci_irq.c
··· 372 372 373 373 /* Interrupt Line values above 0xF are forbidden */ 374 374 if (dev->irq > 0 && (dev->irq <= 0xF) && 375 + acpi_isa_irq_available(dev->irq) && 375 376 (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { 376 377 dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", 377 378 pin_name(dev->pin), dev->irq);
+14 -2
drivers/acpi/pci_link.c
··· 498 498 PIRQ_PENALTY_PCI_POSSIBLE; 499 499 } 500 500 } 501 - /* Add a penalty for the SCI */ 502 - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; 501 + 503 502 return 0; 504 503 } 505 504 ··· 551 552 acpi_irq_penalty[link->irq.possible[i]]) 552 553 irq = link->irq.possible[i]; 553 554 } 555 + } 556 + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { 557 + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " 558 + "Try pci=noacpi or acpi=off\n", 559 + acpi_device_name(link->device), 560 + acpi_device_bid(link->device)); 561 + return -ENODEV; 554 562 } 555 563 556 564 /* Attempt to enable the link device at this IRQ. */ ··· 825 819 else 826 820 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; 827 821 } 822 + } 823 + 824 + bool acpi_isa_irq_available(int irq) 825 + { 826 + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || 827 + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); 828 828 } 829 829 830 830 /*
+12 -5
drivers/base/power/opp.c
··· 892 892 u32 microvolt[3] = {0}; 893 893 int count, ret; 894 894 895 - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); 896 - if (!count) 895 + /* Missing property isn't a problem, but an invalid entry is */ 896 + if (!of_find_property(opp->np, "opp-microvolt", NULL)) 897 897 return 0; 898 + 899 + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); 900 + if (count < 0) { 901 + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", 902 + __func__, count); 903 + return count; 904 + } 898 905 899 906 /* There can be one or three elements here */ 900 907 if (count != 1 && count != 3) { ··· 1070 1063 * share a common logic which is isolated here. 1071 1064 * 1072 1065 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1073 - * copy operation, returns 0 if no modifcation was done OR modification was 1066 + * copy operation, returns 0 if no modification was done OR modification was 1074 1067 * successful. 1075 1068 * 1076 1069 * Locking: The internal device_opp and opp structures are RCU protected. ··· 1158 1151 * mutex locking or synchronize_rcu() blocking calls cannot be used. 1159 1152 * 1160 1153 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1161 - * copy operation, returns 0 if no modifcation was done OR modification was 1154 + * copy operation, returns 0 if no modification was done OR modification was 1162 1155 * successful. 1163 1156 */ 1164 1157 int dev_pm_opp_enable(struct device *dev, unsigned long freq) ··· 1184 1177 * mutex locking or synchronize_rcu() blocking calls cannot be used. 1185 1178 * 1186 1179 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the 1187 - * copy operation, returns 0 if no modifcation was done OR modification was 1180 + * copy operation, returns 0 if no modification was done OR modification was 1188 1181 * successful. 1189 1182 */ 1190 1183 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+5 -6
drivers/block/loop.c
··· 1486 1486 { 1487 1487 const bool write = cmd->rq->cmd_flags & REQ_WRITE; 1488 1488 struct loop_device *lo = cmd->rq->q->queuedata; 1489 - int ret = -EIO; 1489 + int ret = 0; 1490 1490 1491 - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) 1491 + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1492 + ret = -EIO; 1492 1493 goto failed; 1494 + } 1493 1495 1494 1496 ret = do_req_filebacked(lo, cmd->rq); 1495 - 1496 1497 failed: 1497 - if (ret) 1498 - cmd->rq->errors = -EIO; 1499 - blk_mq_complete_request(cmd->rq); 1498 + blk_mq_complete_request(cmd->rq, ret ? -EIO : 0); 1500 1499 } 1501 1500 1502 1501 static void loop_queue_write_work(struct work_struct *work)
+1 -1
drivers/block/null_blk.c
··· 289 289 case NULL_IRQ_SOFTIRQ: 290 290 switch (queue_mode) { 291 291 case NULL_Q_MQ: 292 - blk_mq_complete_request(cmd->rq); 292 + blk_mq_complete_request(cmd->rq, cmd->rq->errors); 293 293 break; 294 294 case NULL_Q_RQ: 295 295 blk_complete_request(cmd->rq);
+24 -28
drivers/block/nvme-core.c
··· 618 618 spin_unlock_irqrestore(req->q->queue_lock, flags); 619 619 return; 620 620 } 621 + 621 622 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 622 623 if (cmd_rq->ctx == CMD_CTX_CANCELLED) 623 - req->errors = -EINTR; 624 - else 625 - req->errors = status; 624 + status = -EINTR; 626 625 } else { 627 - req->errors = nvme_error_status(status); 626 + status = nvme_error_status(status); 628 627 } 629 - } else 630 - req->errors = 0; 628 + } 629 + 631 630 if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 632 631 u32 result = le32_to_cpup(&cqe->result); 633 632 req->special = (void *)(uintptr_t)result; ··· 649 650 } 650 651 nvme_free_iod(nvmeq->dev, iod); 651 652 652 - blk_mq_complete_request(req); 653 + blk_mq_complete_request(req, status); 653 654 } 654 655 655 656 /* length is in bytes. gfp flags indicates whether we may sleep. */ ··· 862 863 if (ns && ns->ms && !blk_integrity_rq(req)) { 863 864 if (!(ns->pi_type && ns->ms == 8) && 864 865 req->cmd_type != REQ_TYPE_DRV_PRIV) { 865 - req->errors = -EFAULT; 866 - blk_mq_complete_request(req); 866 + blk_mq_complete_request(req, -EFAULT); 867 867 return BLK_MQ_RQ_QUEUE_OK; 868 868 } 869 869 } ··· 2437 2439 list_sort(NULL, &dev->namespaces, ns_cmp); 2438 2440 } 2439 2441 2442 + static void nvme_set_irq_hints(struct nvme_dev *dev) 2443 + { 2444 + struct nvme_queue *nvmeq; 2445 + int i; 2446 + 2447 + for (i = 0; i < dev->online_queues; i++) { 2448 + nvmeq = dev->queues[i]; 2449 + 2450 + if (!nvmeq->tags || !(*nvmeq->tags)) 2451 + continue; 2452 + 2453 + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, 2454 + blk_mq_tags_cpumask(*nvmeq->tags)); 2455 + } 2456 + } 2457 + 2440 2458 static void nvme_dev_scan(struct work_struct *work) 2441 2459 { 2442 2460 struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); ··· 2464 2450 return; 2465 2451 nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); 2466 2452 kfree(ctrl); 2453 + nvme_set_irq_hints(dev); 2467 2454 } 2468 2455 2469 2456 /* ··· 2968 2953 .compat_ioctl = 
nvme_dev_ioctl, 2969 2954 }; 2970 2955 2971 - static void nvme_set_irq_hints(struct nvme_dev *dev) 2972 - { 2973 - struct nvme_queue *nvmeq; 2974 - int i; 2975 - 2976 - for (i = 0; i < dev->online_queues; i++) { 2977 - nvmeq = dev->queues[i]; 2978 - 2979 - if (!nvmeq->tags || !(*nvmeq->tags)) 2980 - continue; 2981 - 2982 - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, 2983 - blk_mq_tags_cpumask(*nvmeq->tags)); 2984 - } 2985 - } 2986 - 2987 2956 static int nvme_dev_start(struct nvme_dev *dev) 2988 2957 { 2989 2958 int result; ··· 3008 3009 result = nvme_setup_io_queues(dev); 3009 3010 if (result) 3010 3011 goto free_tags; 3011 - 3012 - nvme_set_irq_hints(dev); 3013 3012 3014 3013 dev->event_limit = 1; 3015 3014 return result; ··· 3059 3062 } else { 3060 3063 nvme_unfreeze_queues(dev); 3061 3064 nvme_dev_add(dev); 3062 - nvme_set_irq_hints(dev); 3063 3065 } 3064 3066 return 0; 3065 3067 }
+1 -1
drivers/block/virtio_blk.c
··· 144 144 do { 145 145 virtqueue_disable_cb(vq); 146 146 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 147 - blk_mq_complete_request(vbr->req); 147 + blk_mq_complete_request(vbr->req, vbr->req->errors); 148 148 req_done = true; 149 149 } 150 150 if (unlikely(virtqueue_is_broken(vq)))
+21 -19
drivers/block/xen-blkback/xenbus.c
··· 212 212 213 213 static int xen_blkif_disconnect(struct xen_blkif *blkif) 214 214 { 215 + struct pending_req *req, *n; 216 + int i = 0, j; 217 + 215 218 if (blkif->xenblkd) { 216 219 kthread_stop(blkif->xenblkd); 217 220 wake_up(&blkif->shutdown_wq); ··· 241 238 /* Remove all persistent grants and the cache of ballooned pages. */ 242 239 xen_blkbk_free_caches(blkif); 243 240 244 - return 0; 245 - } 246 - 247 - static void xen_blkif_free(struct xen_blkif *blkif) 248 - { 249 - struct pending_req *req, *n; 250 - int i = 0, j; 251 - 252 - xen_blkif_disconnect(blkif); 253 - xen_vbd_free(&blkif->vbd); 254 - 255 - /* Make sure everything is drained before shutting down */ 256 - BUG_ON(blkif->persistent_gnt_c != 0); 257 - BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0); 258 - BUG_ON(blkif->free_pages_num != 0); 259 - BUG_ON(!list_empty(&blkif->persistent_purge_list)); 260 - BUG_ON(!list_empty(&blkif->free_pages)); 261 - BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); 262 - 263 241 /* Check that there is no request in use */ 264 242 list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { 265 243 list_del(&req->free_list); ··· 256 272 } 257 273 258 274 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 275 + blkif->nr_ring_pages = 0; 276 + 277 + return 0; 278 + } 279 + 280 + static void xen_blkif_free(struct xen_blkif *blkif) 281 + { 282 + 283 + xen_blkif_disconnect(blkif); 284 + xen_vbd_free(&blkif->vbd); 285 + 286 + /* Make sure everything is drained before shutting down */ 287 + BUG_ON(blkif->persistent_gnt_c != 0); 288 + BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0); 289 + BUG_ON(blkif->free_pages_num != 0); 290 + BUG_ON(!list_empty(&blkif->persistent_purge_list)); 291 + BUG_ON(!list_empty(&blkif->free_pages)); 292 + BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); 259 293 260 294 kmem_cache_free(xen_blkif_cachep, blkif); 261 295 }
+10 -9
drivers/block/xen-blkfront.c
··· 1142 1142 RING_IDX i, rp; 1143 1143 unsigned long flags; 1144 1144 struct blkfront_info *info = (struct blkfront_info *)dev_id; 1145 + int error; 1145 1146 1146 1147 spin_lock_irqsave(&info->io_lock, flags); 1147 1148 ··· 1183 1182 continue; 1184 1183 } 1185 1184 1186 - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 1185 + error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 1187 1186 switch (bret->operation) { 1188 1187 case BLKIF_OP_DISCARD: 1189 1188 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 1190 1189 struct request_queue *rq = info->rq; 1191 1190 printk(KERN_WARNING "blkfront: %s: %s op failed\n", 1192 1191 info->gd->disk_name, op_name(bret->operation)); 1193 - req->errors = -EOPNOTSUPP; 1192 + error = -EOPNOTSUPP; 1194 1193 info->feature_discard = 0; 1195 1194 info->feature_secdiscard = 0; 1196 1195 queue_flag_clear(QUEUE_FLAG_DISCARD, rq); 1197 1196 queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); 1198 1197 } 1199 - blk_mq_complete_request(req); 1198 + blk_mq_complete_request(req, error); 1200 1199 break; 1201 1200 case BLKIF_OP_FLUSH_DISKCACHE: 1202 1201 case BLKIF_OP_WRITE_BARRIER: 1203 1202 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 1204 1203 printk(KERN_WARNING "blkfront: %s: %s op failed\n", 1205 1204 info->gd->disk_name, op_name(bret->operation)); 1206 - req->errors = -EOPNOTSUPP; 1205 + error = -EOPNOTSUPP; 1207 1206 } 1208 1207 if (unlikely(bret->status == BLKIF_RSP_ERROR && 1209 1208 info->shadow[id].req.u.rw.nr_segments == 0)) { 1210 1209 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", 1211 1210 info->gd->disk_name, op_name(bret->operation)); 1212 - req->errors = -EOPNOTSUPP; 1211 + error = -EOPNOTSUPP; 1213 1212 } 1214 - if (unlikely(req->errors)) { 1215 - if (req->errors == -EOPNOTSUPP) 1216 - req->errors = 0; 1213 + if (unlikely(error)) { 1214 + if (error == -EOPNOTSUPP) 1215 + error = 0; 1217 1216 info->feature_flush = 0; 1218 1217 xlvbd_flush(info); 1219 1218 } ··· 1224 1223 
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 1225 1224 "request: %x\n", bret->status); 1226 1225 1227 - blk_mq_complete_request(req); 1226 + blk_mq_complete_request(req, error); 1228 1227 break; 1229 1228 default: 1230 1229 BUG();
+1 -1
drivers/clocksource/rockchip_timer.c
··· 148 148 bc_timer.freq = clk_get_rate(timer_clk); 149 149 150 150 irq = irq_of_parse_and_map(np, 0); 151 - if (irq == NO_IRQ) { 151 + if (!irq) { 152 152 pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); 153 153 return; 154 154 }
+1 -1
drivers/clocksource/timer-keystone.c
··· 152 152 int irq, error; 153 153 154 154 irq = irq_of_parse_and_map(np, 0); 155 - if (irq == NO_IRQ) { 155 + if (!irq) { 156 156 pr_err("%s: failed to map interrupts\n", __func__); 157 157 return; 158 158 }
+12 -3
drivers/dma/at_xdmac.c
··· 455 455 return desc; 456 456 } 457 457 458 + void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) 459 + { 460 + memset(&desc->lld, 0, sizeof(desc->lld)); 461 + INIT_LIST_HEAD(&desc->descs_list); 462 + desc->direction = DMA_TRANS_NONE; 463 + desc->xfer_size = 0; 464 + desc->active_xfer = false; 465 + } 466 + 458 467 /* Call must be protected by lock. */ 459 468 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) 460 469 { ··· 475 466 desc = list_first_entry(&atchan->free_descs_list, 476 467 struct at_xdmac_desc, desc_node); 477 468 list_del(&desc->desc_node); 478 - desc->active_xfer = false; 469 + at_xdmac_init_used_desc(desc); 479 470 } 480 471 481 472 return desc; ··· 884 875 885 876 if (xt->src_inc) { 886 877 if (xt->src_sgl) 887 - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; 878 + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; 888 879 else 889 880 chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; 890 881 } 891 882 892 883 if (xt->dst_inc) { 893 884 if (xt->dst_sgl) 894 - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; 885 + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; 895 886 else 896 887 chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; 897 888 }
+9 -1
drivers/dma/dmaengine.c
··· 554 554 mutex_lock(&dma_list_mutex); 555 555 556 556 if (chan->client_count == 0) { 557 + struct dma_device *device = chan->device; 558 + 559 + dma_cap_set(DMA_PRIVATE, device->cap_mask); 560 + device->privatecnt++; 557 561 err = dma_chan_get(chan); 558 - if (err) 562 + if (err) { 559 563 pr_debug("%s: failed to get %s: (%d)\n", 560 564 __func__, dma_chan_name(chan), err); 565 + chan = NULL; 566 + if (--device->privatecnt == 0) 567 + dma_cap_clear(DMA_PRIVATE, device->cap_mask); 568 + } 561 569 } else 562 570 chan = NULL; 563 571
+2 -2
drivers/dma/dw/core.c
··· 1591 1591 INIT_LIST_HEAD(&dw->dma.channels); 1592 1592 for (i = 0; i < nr_channels; i++) { 1593 1593 struct dw_dma_chan *dwc = &dw->chan[i]; 1594 - int r = nr_channels - i - 1; 1595 1594 1596 1595 dwc->chan.device = &dw->dma; 1597 1596 dma_cookie_init(&dwc->chan); ··· 1602 1603 1603 1604 /* 7 is highest priority & 0 is lowest. */ 1604 1605 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1605 - dwc->priority = r; 1606 + dwc->priority = nr_channels - i - 1; 1606 1607 else 1607 1608 dwc->priority = i; 1608 1609 ··· 1621 1622 /* Hardware configuration */ 1622 1623 if (autocfg) { 1623 1624 unsigned int dwc_params; 1625 + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; 1624 1626 void __iomem *addr = chip->regs + r * sizeof(u32); 1625 1627 1626 1628 dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+8 -8
drivers/dma/idma64.c
··· 355 355 struct idma64_desc *desc = idma64c->desc; 356 356 struct idma64_hw_desc *hw; 357 357 size_t bytes = desc->length; 358 - u64 llp; 359 - u32 ctlhi; 358 + u64 llp = channel_readq(idma64c, LLP); 359 + u32 ctlhi = channel_readl(idma64c, CTL_HI); 360 360 unsigned int i = 0; 361 361 362 - llp = channel_readq(idma64c, LLP); 363 362 do { 364 363 hw = &desc->hw[i]; 365 - } while ((hw->llp != llp) && (++i < desc->ndesc)); 364 + if (hw->llp == llp) 365 + break; 366 + bytes -= hw->len; 367 + } while (++i < desc->ndesc); 366 368 367 369 if (!i) 368 370 return bytes; 369 371 370 - do { 371 - bytes -= desc->hw[--i].len; 372 - } while (i); 372 + /* The current chunk is not fully transfered yet */ 373 + bytes += desc->hw[--i].len; 373 374 374 - ctlhi = channel_readl(idma64c, CTL_HI); 375 375 return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); 376 376 } 377 377
+25 -6
drivers/dma/pxa_dma.c
··· 473 473 return; 474 474 475 475 /* clear the channel mapping in DRCMR */ 476 - reg = pxad_drcmr(chan->drcmr); 477 - writel_relaxed(0, chan->phy->base + reg); 476 + if (chan->drcmr <= DRCMR_CHLNUM) { 477 + reg = pxad_drcmr(chan->drcmr); 478 + writel_relaxed(0, chan->phy->base + reg); 479 + } 478 480 479 481 spin_lock_irqsave(&pdev->phy_lock, flags); 480 482 for (i = 0; i < 32; i++) ··· 518 516 "%s(); phy=%p(%d) misaligned=%d\n", __func__, 519 517 phy, phy->idx, misaligned); 520 518 521 - reg = pxad_drcmr(phy->vchan->drcmr); 522 - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); 519 + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { 520 + reg = pxad_drcmr(phy->vchan->drcmr); 521 + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); 522 + } 523 523 524 524 dalgn = phy_readl_relaxed(phy, DALGN); 525 525 if (misaligned) ··· 891 887 struct dma_async_tx_descriptor *tx; 892 888 struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); 893 889 890 + INIT_LIST_HEAD(&vd->node); 894 891 tx = vchan_tx_prep(vc, vd, tx_flags); 895 892 tx->tx_submit = pxad_tx_submit; 896 893 dev_dbg(&chan->vc.chan.dev->device, ··· 915 910 width = chan->cfg.src_addr_width; 916 911 dev_addr = chan->cfg.src_addr; 917 912 *dev_src = dev_addr; 918 - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; 913 + *dcmd |= PXA_DCMD_INCTRGADDR; 914 + if (chan->drcmr <= DRCMR_CHLNUM) 915 + *dcmd |= PXA_DCMD_FLOWSRC; 919 916 } 920 917 if (dir == DMA_MEM_TO_DEV) { 921 918 maxburst = chan->cfg.dst_maxburst; 922 919 width = chan->cfg.dst_addr_width; 923 920 dev_addr = chan->cfg.dst_addr; 924 921 *dev_dst = dev_addr; 925 - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; 922 + *dcmd |= PXA_DCMD_INCSRCADDR; 923 + if (chan->drcmr <= DRCMR_CHLNUM) 924 + *dcmd |= PXA_DCMD_FLOWTRG; 926 925 } 927 926 if (dir == DMA_MEM_TO_MEM) 928 927 *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | ··· 1185 1176 curr = phy_readl_relaxed(chan->phy, DSADR); 1186 1177 else 1187 1178 curr = phy_readl_relaxed(chan->phy, DTADR); 
1179 + 1180 + /* 1181 + * curr has to be actually read before checking descriptor 1182 + * completion, so that a curr inside a status updater 1183 + * descriptor implies the following test returns true, and 1184 + * preventing reordering of curr load and the test. 1185 + */ 1186 + rmb(); 1187 + if (is_desc_completed(vd)) 1188 + goto out; 1188 1189 1189 1190 for (i = 0; i < sw_desc->nb_desc - 1; i++) { 1190 1191 hw_desc = sw_desc->hw_desc[i];
+3 -3
drivers/dma/sun4i-dma.c
··· 599 599 static void sun4i_dma_free_contract(struct virt_dma_desc *vd) 600 600 { 601 601 struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); 602 - struct sun4i_dma_promise *promise; 602 + struct sun4i_dma_promise *promise, *tmp; 603 603 604 604 /* Free all the demands and completed demands */ 605 - list_for_each_entry(promise, &contract->demands, list) 605 + list_for_each_entry_safe(promise, tmp, &contract->demands, list) 606 606 kfree(promise); 607 607 608 - list_for_each_entry(promise, &contract->completed_demands, list) 608 + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) 609 609 kfree(promise); 610 610 611 611 kfree(contract);
+17 -29
drivers/dma/xgene-dma.c
··· 59 59 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 60 60 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 61 61 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF 62 - #define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) 63 62 #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) 64 63 #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) 65 64 #define XGENE_DMA_RING_CMD_OFFSET 0x2C ··· 378 379 return flyby_type[src_cnt]; 379 380 } 380 381 381 - static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) 382 - { 383 - u32 __iomem *cmd_base = ring->cmd_base; 384 - u32 ring_state = ioread32(&cmd_base[1]); 385 - 386 - return XGENE_DMA_RING_DESC_CNT(ring_state); 387 - } 388 - 389 382 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, 390 383 dma_addr_t *paddr) 391 384 { ··· 650 659 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); 651 660 } 652 661 653 - static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, 654 - struct xgene_dma_desc_sw *desc_sw) 662 + static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, 663 + struct xgene_dma_desc_sw *desc_sw) 655 664 { 665 + struct xgene_dma_ring *ring = &chan->tx_ring; 656 666 struct xgene_dma_desc_hw *desc_hw; 657 - 658 - /* Check if can push more descriptor to hw for execution */ 659 - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) 660 - return -EBUSY; 661 667 662 668 /* Get hw descriptor from DMA tx ring */ 663 669 desc_hw = &ring->desc_hw[ring->head]; ··· 682 694 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); 683 695 } 684 696 697 + /* Increment the pending transaction count */ 698 + chan->pending += ((desc_sw->flags & 699 + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); 700 + 685 701 /* Notify the hw that we have descriptor ready for execution */ 686 702 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
687 703 2 : 1, ring->cmd); 688 - 689 - return 0; 690 704 } 691 705 692 706 /** ··· 700 710 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) 701 711 { 702 712 struct xgene_dma_desc_sw *desc_sw, *_desc_sw; 703 - int ret; 704 713 705 714 /* 706 715 * If the list of pending descriptors is empty, then we ··· 724 735 if (chan->pending >= chan->max_outstanding) 725 736 return; 726 737 727 - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); 728 - if (ret) 729 - return; 738 + xgene_chan_xfer_request(chan, desc_sw); 730 739 731 740 /* 732 741 * Delete this element from ld pending queue and append it to 733 742 * ld running queue 734 743 */ 735 744 list_move_tail(&desc_sw->node, &chan->ld_running); 736 - 737 - /* Increment the pending transaction count */ 738 - chan->pending++; 739 745 } 740 746 } 741 747 ··· 805 821 * Decrement the pending transaction count 806 822 * as we have processed one 807 823 */ 808 - chan->pending--; 824 + chan->pending -= ((desc_sw->flags & 825 + XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1); 809 826 810 827 /* 811 828 * Delete this node from ld running queue and append it to ··· 1406 1421 struct xgene_dma_ring *ring, 1407 1422 enum xgene_dma_ring_cfgsize cfgsize) 1408 1423 { 1424 + int ret; 1425 + 1409 1426 /* Setup DMA ring descriptor variables */ 1410 1427 ring->pdma = chan->pdma; 1411 1428 ring->cfgsize = cfgsize; 1412 1429 ring->num = chan->pdma->ring_num++; 1413 1430 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); 1414 1431 1415 - ring->size = xgene_dma_get_ring_size(chan, cfgsize); 1416 - if (ring->size <= 0) 1417 - return ring->size; 1432 + ret = xgene_dma_get_ring_size(chan, cfgsize); 1433 + if (ret <= 0) 1434 + return ret; 1435 + ring->size = ret; 1418 1436 1419 1437 /* Allocate memory for DMA ring descriptor */ 1420 1438 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, ··· 1470 1482 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); 1471 1483 1472 1484 /* Set the max outstanding request possible to this channel */ 1473 - chan->max_outstanding = rx_ring->slots; 1485 + chan->max_outstanding = tx_ring->slots; 1474 1486 1475 1487 return ret; 1476 1488 }
+1 -1
drivers/dma/zx296702_dma.c
··· 739 739 struct dma_chan *chan; 740 740 struct zx_dma_chan *c; 741 741 742 - if (request > d->dma_requests) 742 + if (request >= d->dma_requests) 743 743 return NULL; 744 744 745 745 chan = dma_get_any_slave_channel(&d->slave);
+72 -14
drivers/firmware/efi/libstub/arm-stub.c
··· 13 13 */ 14 14 15 15 #include <linux/efi.h> 16 + #include <linux/sort.h> 16 17 #include <asm/efi.h> 17 18 18 19 #include "efistub.h" ··· 306 305 */ 307 306 #define EFI_RT_VIRTUAL_BASE 0x40000000 308 307 308 + static int cmp_mem_desc(const void *l, const void *r) 309 + { 310 + const efi_memory_desc_t *left = l, *right = r; 311 + 312 + return (left->phys_addr > right->phys_addr) ? 1 : -1; 313 + } 314 + 315 + /* 316 + * Returns whether region @left ends exactly where region @right starts, 317 + * or false if either argument is NULL. 318 + */ 319 + static bool regions_are_adjacent(efi_memory_desc_t *left, 320 + efi_memory_desc_t *right) 321 + { 322 + u64 left_end; 323 + 324 + if (left == NULL || right == NULL) 325 + return false; 326 + 327 + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; 328 + 329 + return left_end == right->phys_addr; 330 + } 331 + 332 + /* 333 + * Returns whether region @left and region @right have compatible memory type 334 + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. 
335 + */ 336 + static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, 337 + efi_memory_desc_t *right) 338 + { 339 + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | 340 + EFI_MEMORY_WC | EFI_MEMORY_UC | 341 + EFI_MEMORY_RUNTIME; 342 + 343 + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; 344 + } 345 + 309 346 /* 310 347 * efi_get_virtmap() - create a virtual mapping for the EFI memory map 311 348 * ··· 356 317 int *count) 357 318 { 358 319 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; 359 - efi_memory_desc_t *out = runtime_map; 320 + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; 360 321 int l; 361 322 362 - for (l = 0; l < map_size; l += desc_size) { 363 - efi_memory_desc_t *in = (void *)memory_map + l; 323 + /* 324 + * To work around potential issues with the Properties Table feature 325 + * introduced in UEFI 2.5, which may split PE/COFF executable images 326 + * in memory into several RuntimeServicesCode and RuntimeServicesData 327 + * regions, we need to preserve the relative offsets between adjacent 328 + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. 329 + * The easiest way to find adjacent regions is to sort the memory map 330 + * before traversing it. 331 + */ 332 + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); 333 + 334 + for (l = 0; l < map_size; l += desc_size, prev = in) { 364 335 u64 paddr, size; 365 336 337 + in = (void *)memory_map + l; 366 338 if (!(in->attribute & EFI_MEMORY_RUNTIME)) 367 339 continue; 340 + 341 + paddr = in->phys_addr; 342 + size = in->num_pages * EFI_PAGE_SIZE; 368 343 369 344 /* 370 345 * Make the mapping compatible with 64k pages: this allows 371 346 * a 4k page size kernel to kexec a 64k page size kernel and 372 347 * vice versa. 
373 348 */ 374 - paddr = round_down(in->phys_addr, SZ_64K); 375 - size = round_up(in->num_pages * EFI_PAGE_SIZE + 376 - in->phys_addr - paddr, SZ_64K); 349 + if (!regions_are_adjacent(prev, in) || 350 + !regions_have_compatible_memory_type_attrs(prev, in)) { 377 351 378 - /* 379 - * Avoid wasting memory on PTEs by choosing a virtual base that 380 - * is compatible with section mappings if this region has the 381 - * appropriate size and physical alignment. (Sections are 2 MB 382 - * on 4k granule kernels) 383 - */ 384 - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) 385 - efi_virt_base = round_up(efi_virt_base, SZ_2M); 352 + paddr = round_down(in->phys_addr, SZ_64K); 353 + size += in->phys_addr - paddr; 354 + 355 + /* 356 + * Avoid wasting memory on PTEs by choosing a virtual 357 + * base that is compatible with section mappings if this 358 + * region has the appropriate size and physical 359 + * alignment. (Sections are 2 MB on 4k granule kernels) 360 + */ 361 + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) 362 + efi_virt_base = round_up(efi_virt_base, SZ_2M); 363 + else 364 + efi_virt_base = round_up(efi_virt_base, SZ_64K); 365 + } 386 366 387 367 in->virt_addr = efi_virt_base + in->phys_addr - paddr; 388 368 efi_virt_base += size;
-39
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
··· 208 208 return ret; 209 209 } 210 210 211 - static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, 212 - cgs_handle_t *handle) 213 - { 214 - CGS_FUNC_ADEV; 215 - int r; 216 - uint32_t dma_handle; 217 - struct drm_gem_object *obj; 218 - struct amdgpu_bo *bo; 219 - struct drm_device *dev = adev->ddev; 220 - struct drm_file *file_priv = NULL, *priv; 221 - 222 - mutex_lock(&dev->struct_mutex); 223 - list_for_each_entry(priv, &dev->filelist, lhead) { 224 - rcu_read_lock(); 225 - if (priv->pid == get_pid(task_pid(current))) 226 - file_priv = priv; 227 - rcu_read_unlock(); 228 - if (file_priv) 229 - break; 230 - } 231 - mutex_unlock(&dev->struct_mutex); 232 - r = dev->driver->prime_fd_to_handle(dev, 233 - file_priv, dmabuf_fd, 234 - &dma_handle); 235 - spin_lock(&file_priv->table_lock); 236 - 237 - /* Check if we currently have a reference on the object */ 238 - obj = idr_find(&file_priv->object_idr, dma_handle); 239 - if (obj == NULL) { 240 - spin_unlock(&file_priv->table_lock); 241 - return -EINVAL; 242 - } 243 - spin_unlock(&file_priv->table_lock); 244 - bo = gem_to_amdgpu_bo(obj); 245 - *handle = (cgs_handle_t)bo; 246 - return 0; 247 - } 248 - 249 211 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) 250 212 { 251 213 struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; ··· 772 810 }; 773 811 774 812 static const struct cgs_os_ops amdgpu_cgs_os_ops = { 775 - amdgpu_cgs_import_gpu_mem, 776 813 amdgpu_cgs_add_irq_source, 777 814 amdgpu_cgs_irq_get, 778 815 amdgpu_cgs_irq_put
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 156 156 uint64_t *chunk_array_user; 157 157 uint64_t *chunk_array; 158 158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 159 - unsigned size, i; 159 + unsigned size; 160 + int i; 160 161 int ret; 161 162 162 163 if (cs->in.num_chunks == 0)
+1 -2
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
··· 1279 1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1280 1280 } 1281 1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1282 - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, 1283 - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); 1282 + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); 1284 1283 if (ext_encoder) 1285 1284 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); 1286 1285 } else {
+6 -2
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 1262 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1263 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1264 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1265 + /* reset addr and status */ 1266 + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); 1267 + 1268 + if (!addr && !status) 1269 + return 0; 1270 + 1265 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1266 1272 entry->src_id, entry->src_data); 1267 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", ··· 1275 1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1276 1270 status); 1277 1271 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); 1278 - /* reset addr and status */ 1279 - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); 1280 1272 1281 1273 return 0; 1282 1274 }
+6 -2
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 1262 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1263 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1264 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1265 + /* reset addr and status */ 1266 + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); 1267 + 1268 + if (!addr && !status) 1269 + return 0; 1270 + 1265 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1266 1272 entry->src_id, entry->src_data); 1267 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", ··· 1275 1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1276 1270 status); 1277 1271 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); 1278 - /* reset addr and status */ 1279 - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); 1280 1272 1281 1273 return 0; 1282 1274 }
-17
drivers/gpu/drm/amd/include/cgs_linux.h
··· 27 27 #include "cgs_common.h" 28 28 29 29 /** 30 - * cgs_import_gpu_mem() - Import dmabuf handle 31 - * @cgs_device: opaque device handle 32 - * @dmabuf_fd: DMABuf file descriptor 33 - * @handle: memory handle (output) 34 - * 35 - * Must be called in the process context that dmabuf_fd belongs to. 36 - * 37 - * Return: 0 on success, -errno otherwise 38 - */ 39 - typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, 40 - cgs_handle_t *handle); 41 - 42 - /** 43 30 * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources 44 31 * @private_data: private data provided to cgs_add_irq_source 45 32 * @src_id: interrupt source ID ··· 101 114 typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); 102 115 103 116 struct cgs_os_ops { 104 - cgs_import_gpu_mem_t import_gpu_mem; 105 - 106 117 /* IRQ handling */ 107 118 cgs_add_irq_source_t add_irq_source; 108 119 cgs_irq_get_t irq_get; 109 120 cgs_irq_put_t irq_put; 110 121 }; 111 122 112 - #define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ 113 - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) 114 123 #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ 115 124 CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ 116 125 private_data)
+53 -34
drivers/gpu/drm/drm_dp_mst_topology.c
··· 53 53 struct drm_dp_mst_port *port, 54 54 int offset, int size, u8 *bytes); 55 55 56 - static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 57 - struct drm_dp_mst_branch *mstb); 56 + static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 57 + struct drm_dp_mst_branch *mstb); 58 58 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 59 59 struct drm_dp_mst_branch *mstb, 60 60 struct drm_dp_mst_port *port); ··· 804 804 struct drm_dp_mst_port *port, *tmp; 805 805 bool wake_tx = false; 806 806 807 - cancel_work_sync(&mstb->mgr->work); 808 - 809 807 /* 810 808 * destroy all ports - don't need lock 811 809 * as there are no more references to the mst branch ··· 861 863 { 862 864 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 863 865 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 866 + 864 867 if (!port->input) { 865 868 port->vcpi.num_slots = 0; 866 869 867 870 kfree(port->cached_edid); 868 871 869 - /* we can't destroy the connector here, as 870 - we might be holding the mode_config.mutex 871 - from an EDID retrieval */ 872 + /* 873 + * The only time we don't have a connector 874 + * on an output port is if the connector init 875 + * fails. 
876 + */ 872 877 if (port->connector) { 878 + /* we can't destroy the connector here, as 879 + * we might be holding the mode_config.mutex 880 + * from an EDID retrieval */ 881 + 873 882 mutex_lock(&mgr->destroy_connector_lock); 874 883 list_add(&port->next, &mgr->destroy_connector_list); 875 884 mutex_unlock(&mgr->destroy_connector_lock); 876 885 schedule_work(&mgr->destroy_connector_work); 877 886 return; 878 887 } 888 + /* no need to clean up vcpi 889 + * as if we have no connector we never setup a vcpi */ 879 890 drm_dp_port_teardown_pdt(port, port->pdt); 880 - 881 - if (!port->input && port->vcpi.vcpi > 0) 882 - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 883 891 } 884 892 kfree(port); 885 - 886 - (*mgr->cbs->hotplug)(mgr); 887 893 } 888 894 889 895 static void drm_dp_put_port(struct drm_dp_mst_port *port) ··· 1029 1027 } 1030 1028 } 1031 1029 1032 - static void build_mst_prop_path(struct drm_dp_mst_port *port, 1033 - struct drm_dp_mst_branch *mstb, 1030 + static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 1031 + int pnum, 1034 1032 char *proppath, 1035 1033 size_t proppath_size) 1036 1034 { ··· 1043 1041 snprintf(temp, sizeof(temp), "-%d", port_num); 1044 1042 strlcat(proppath, temp, proppath_size); 1045 1043 } 1046 - snprintf(temp, sizeof(temp), "-%d", port->port_num); 1044 + snprintf(temp, sizeof(temp), "-%d", pnum); 1047 1045 strlcat(proppath, temp, proppath_size); 1048 1046 } 1049 1047 ··· 1107 1105 drm_dp_port_teardown_pdt(port, old_pdt); 1108 1106 1109 1107 ret = drm_dp_port_setup_pdt(port); 1110 - if (ret == true) { 1108 + if (ret == true) 1111 1109 drm_dp_send_link_address(mstb->mgr, port->mstb); 1112 - port->mstb->link_address_sent = true; 1113 - } 1114 1110 } 1115 1111 1116 1112 if (created && !port->input) { 1117 1113 char proppath[255]; 1118 - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); 1119 - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1120 1114 1121 - if (port->port_num 
>= 8) { 1122 - port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1115 + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 1116 + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1117 + if (!port->connector) { 1118 + /* remove it from the port list */ 1119 + mutex_lock(&mstb->mgr->lock); 1120 + list_del(&port->next); 1121 + mutex_unlock(&mstb->mgr->lock); 1122 + /* drop port list reference */ 1123 + drm_dp_put_port(port); 1124 + goto out; 1123 1125 } 1126 + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { 1127 + port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1128 + drm_mode_connector_set_tile_property(port->connector); 1129 + } 1130 + (*mstb->mgr->cbs->register_connector)(port->connector); 1124 1131 } 1125 1132 1133 + out: 1126 1134 /* put reference to this port */ 1127 1135 drm_dp_put_port(port); 1128 1136 } ··· 1214 1202 { 1215 1203 struct drm_dp_mst_port *port; 1216 1204 struct drm_dp_mst_branch *mstb_child; 1217 - if (!mstb->link_address_sent) { 1205 + if (!mstb->link_address_sent) 1218 1206 drm_dp_send_link_address(mgr, mstb); 1219 - mstb->link_address_sent = true; 1220 - } 1207 + 1221 1208 list_for_each_entry(port, &mstb->ports, next) { 1222 1209 if (port->input) 1223 1210 continue; ··· 1469 1458 mutex_unlock(&mgr->qlock); 1470 1459 } 1471 1460 1472 - static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1473 - struct drm_dp_mst_branch *mstb) 1461 + static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1462 + struct drm_dp_mst_branch *mstb) 1474 1463 { 1475 1464 int len; 1476 1465 struct drm_dp_sideband_msg_tx *txmsg; ··· 1478 1467 1479 1468 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1480 1469 if (!txmsg) 1481 - return -ENOMEM; 1470 + return; 1482 1471 1483 1472 txmsg->dst = mstb; 1484 1473 len = build_link_address(txmsg); 1485 1474 1475 + mstb->link_address_sent = true; 1486 1476 drm_dp_queue_down_tx(mgr, txmsg); 1487 1477 1488 1478 
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); ··· 1511 1499 } 1512 1500 (*mgr->cbs->hotplug)(mgr); 1513 1501 } 1514 - } else 1502 + } else { 1503 + mstb->link_address_sent = false; 1515 1504 DRM_DEBUG_KMS("link address failed %d\n", ret); 1505 + } 1516 1506 1517 1507 kfree(txmsg); 1518 - return 0; 1519 1508 } 1520 1509 1521 1510 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, ··· 1991 1978 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 1992 1979 DP_MST_EN | DP_UPSTREAM_IS_SRC); 1993 1980 mutex_unlock(&mgr->lock); 1981 + flush_work(&mgr->work); 1982 + flush_work(&mgr->destroy_connector_work); 1994 1983 } 1995 1984 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 1996 1985 ··· 2278 2263 2279 2264 if (port->cached_edid) 2280 2265 edid = drm_edid_duplicate(port->cached_edid); 2281 - else 2266 + else { 2282 2267 edid = drm_get_edid(connector, &port->aux.ddc); 2283 - 2284 - drm_mode_connector_set_tile_property(connector); 2268 + drm_mode_connector_set_tile_property(connector); 2269 + } 2285 2270 drm_dp_put_port(port); 2286 2271 return edid; 2287 2272 } ··· 2686 2671 { 2687 2672 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2688 2673 struct drm_dp_mst_port *port; 2689 - 2674 + bool send_hotplug = false; 2690 2675 /* 2691 2676 * Not a regular list traverse as we have to drop the destroy 2692 2677 * connector lock before destroying the connector, to avoid AB->BA ··· 2709 2694 if (!port->input && port->vcpi.vcpi > 0) 2710 2695 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2711 2696 kfree(port); 2697 + send_hotplug = true; 2712 2698 } 2699 + if (send_hotplug) 2700 + (*mgr->cbs->hotplug)(mgr); 2713 2701 } 2714 2702 2715 2703 /** ··· 2765 2747 */ 2766 2748 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2767 2749 { 2750 + flush_work(&mgr->work); 2768 2751 flush_work(&mgr->destroy_connector_work); 2769 2752 mutex_lock(&mgr->payload_lock); 2770 2753 
kfree(mgr->payloads);
+5 -1
drivers/gpu/drm/drm_fb_helper.c
··· 345 345 struct drm_crtc *crtc = mode_set->crtc; 346 346 int ret; 347 347 348 - if (crtc->funcs->cursor_set) { 348 + if (crtc->funcs->cursor_set2) { 349 + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); 350 + if (ret) 351 + error = true; 352 + } else if (crtc->funcs->cursor_set) { 349 353 ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); 350 354 if (ret) 351 355 error = true;
+16 -3
drivers/gpu/drm/drm_probe_helper.c
··· 94 94 } 95 95 96 96 #define DRM_OUTPUT_POLL_PERIOD (10*HZ) 97 - static void __drm_kms_helper_poll_enable(struct drm_device *dev) 97 + /** 98 + * drm_kms_helper_poll_enable_locked - re-enable output polling. 99 + * @dev: drm_device 100 + * 101 + * This function re-enables the output polling work without 102 + * locking the mode_config mutex. 103 + * 104 + * This is like drm_kms_helper_poll_enable() however it is to be 105 + * called from a context where the mode_config mutex is locked 106 + * already. 107 + */ 108 + void drm_kms_helper_poll_enable_locked(struct drm_device *dev) 98 109 { 99 110 bool poll = false; 100 111 struct drm_connector *connector; ··· 124 113 if (poll) 125 114 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); 126 115 } 116 + EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); 117 + 127 118 128 119 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, 129 120 uint32_t maxX, uint32_t maxY, bool merge_type_bits) ··· 187 174 188 175 /* Re-enable polling in case the global poll config changed. */ 189 176 if (drm_kms_helper_poll != dev->mode_config.poll_running) 190 - __drm_kms_helper_poll_enable(dev); 177 + drm_kms_helper_poll_enable_locked(dev); 191 178 192 179 dev->mode_config.poll_running = drm_kms_helper_poll; 193 180 ··· 441 428 void drm_kms_helper_poll_enable(struct drm_device *dev) 442 429 { 443 430 mutex_lock(&dev->mode_config.mutex); 444 - __drm_kms_helper_poll_enable(dev); 431 + drm_kms_helper_poll_enable_locked(dev); 445 432 mutex_unlock(&dev->mode_config.mutex); 446 433 } 447 434 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-12
drivers/gpu/drm/exynos/exynos7_drm_decon.c
··· 37 37 * DECON stands for Display and Enhancement controller. 38 38 */ 39 39 40 - #define DECON_DEFAULT_FRAMERATE 60 41 40 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 42 41 43 42 #define WINDOWS_NR 2 ··· 162 163 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk); 163 164 164 165 return (clkdiv < 0x100) ? clkdiv : 0xff; 165 - } 166 - 167 - static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, 168 - const struct drm_display_mode *mode, 169 - struct drm_display_mode *adjusted_mode) 170 - { 171 - if (adjusted_mode->vrefresh == 0) 172 - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; 173 - 174 - return true; 175 166 } 176 167 177 168 static void decon_commit(struct exynos_drm_crtc *crtc) ··· 626 637 static const struct exynos_drm_crtc_ops decon_crtc_ops = { 627 638 .enable = decon_enable, 628 639 .disable = decon_disable, 629 - .mode_fixup = decon_mode_fixup, 630 640 .commit = decon_commit, 631 641 .enable_vblank = decon_enable_vblank, 632 642 .disable_vblank = decon_disable_vblank,
-23
drivers/gpu/drm/exynos/exynos_dp_core.c
··· 1383 1383 return 0; 1384 1384 } 1385 1385 1386 - #ifdef CONFIG_PM_SLEEP 1387 - static int exynos_dp_suspend(struct device *dev) 1388 - { 1389 - struct exynos_dp_device *dp = dev_get_drvdata(dev); 1390 - 1391 - exynos_dp_disable(&dp->encoder); 1392 - return 0; 1393 - } 1394 - 1395 - static int exynos_dp_resume(struct device *dev) 1396 - { 1397 - struct exynos_dp_device *dp = dev_get_drvdata(dev); 1398 - 1399 - exynos_dp_enable(&dp->encoder); 1400 - return 0; 1401 - } 1402 - #endif 1403 - 1404 - static const struct dev_pm_ops exynos_dp_pm_ops = { 1405 - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) 1406 - }; 1407 - 1408 1386 static const struct of_device_id exynos_dp_match[] = { 1409 1387 { .compatible = "samsung,exynos5-dp" }, 1410 1388 {}, ··· 1395 1417 .driver = { 1396 1418 .name = "exynos-dp", 1397 1419 .owner = THIS_MODULE, 1398 - .pm = &exynos_dp_pm_ops, 1399 1420 .of_match_table = exynos_dp_match, 1400 1421 }, 1401 1422 };
-6
drivers/gpu/drm/exynos/exynos_drm_core.c
··· 28 28 29 29 return 0; 30 30 } 31 - EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); 32 31 33 32 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) 34 33 { ··· 38 39 39 40 return 0; 40 41 } 41 - EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); 42 42 43 43 int exynos_drm_device_subdrv_probe(struct drm_device *dev) 44 44 { ··· 67 69 68 70 return 0; 69 71 } 70 - EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); 71 72 72 73 int exynos_drm_device_subdrv_remove(struct drm_device *dev) 73 74 { ··· 84 87 85 88 return 0; 86 89 } 87 - EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); 88 90 89 91 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) 90 92 { ··· 107 111 } 108 112 return ret; 109 113 } 110 - EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); 111 114 112 115 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) 113 116 { ··· 117 122 subdrv->close(dev, subdrv->dev, file); 118 123 } 119 124 } 120 - EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
-15
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 41 41 exynos_crtc->ops->disable(exynos_crtc); 42 42 } 43 43 44 - static bool 45 - exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, 46 - const struct drm_display_mode *mode, 47 - struct drm_display_mode *adjusted_mode) 48 - { 49 - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 50 - 51 - if (exynos_crtc->ops->mode_fixup) 52 - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, 53 - adjusted_mode); 54 - 55 - return true; 56 - } 57 - 58 44 static void 59 45 exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 60 46 { ··· 85 99 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { 86 100 .enable = exynos_drm_crtc_enable, 87 101 .disable = exynos_drm_crtc_disable, 88 - .mode_fixup = exynos_drm_crtc_mode_fixup, 89 102 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, 90 103 .atomic_begin = exynos_crtc_atomic_begin, 91 104 .atomic_flush = exynos_crtc_atomic_flush,
+2
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 304 304 return 0; 305 305 } 306 306 307 + #ifdef CONFIG_PM_SLEEP 307 308 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) 308 309 { 309 310 struct drm_connector *connector; ··· 341 340 342 341 return 0; 343 342 } 343 + #endif 344 344 345 345 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) 346 346 {
-4
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 82 82 * 83 83 * @enable: enable the device 84 84 * @disable: disable the device 85 - * @mode_fixup: fix mode data before applying it 86 85 * @commit: set current hw specific display mode to hw. 87 86 * @enable_vblank: specific driver callback for enabling vblank interrupt. 88 87 * @disable_vblank: specific driver callback for disabling vblank interrupt. ··· 102 103 struct exynos_drm_crtc_ops { 103 104 void (*enable)(struct exynos_drm_crtc *crtc); 104 105 void (*disable)(struct exynos_drm_crtc *crtc); 105 - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, 106 - const struct drm_display_mode *mode, 107 - struct drm_display_mode *adjusted_mode); 108 106 void (*commit)(struct exynos_drm_crtc *crtc); 109 107 int (*enable_vblank)(struct exynos_drm_crtc *crtc); 110 108 void (*disable_vblank)(struct exynos_drm_crtc *crtc);
+18 -18
drivers/gpu/drm/exynos/exynos_drm_fimc.c
··· 1206 1206 .set_addr = fimc_dst_set_addr, 1207 1207 }; 1208 1208 1209 - static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) 1210 - { 1211 - DRM_DEBUG_KMS("enable[%d]\n", enable); 1212 - 1213 - if (enable) { 1214 - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); 1215 - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); 1216 - ctx->suspended = false; 1217 - } else { 1218 - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); 1219 - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); 1220 - ctx->suspended = true; 1221 - } 1222 - 1223 - return 0; 1224 - } 1225 - 1226 1209 static irqreturn_t fimc_irq_handler(int irq, void *dev_id) 1227 1210 { 1228 1211 struct fimc_context *ctx = dev_id; ··· 1763 1780 return 0; 1764 1781 } 1765 1782 1783 + #ifdef CONFIG_PM 1784 + static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) 1785 + { 1786 + DRM_DEBUG_KMS("enable[%d]\n", enable); 1787 + 1788 + if (enable) { 1789 + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); 1790 + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); 1791 + ctx->suspended = false; 1792 + } else { 1793 + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); 1794 + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); 1795 + ctx->suspended = true; 1796 + } 1797 + 1798 + return 0; 1799 + } 1800 + 1766 1801 #ifdef CONFIG_PM_SLEEP 1767 1802 static int fimc_suspend(struct device *dev) 1768 1803 { ··· 1807 1806 } 1808 1807 #endif 1809 1808 1810 - #ifdef CONFIG_PM 1811 1809 static int fimc_runtime_suspend(struct device *dev) 1812 1810 { 1813 1811 struct fimc_context *ctx = get_fimc_context(dev);
+1 -13
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 41 41 * CPU Interface. 42 42 */ 43 43 44 - #define FIMD_DEFAULT_FRAMERATE 60 45 44 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 46 45 47 46 /* position control register for hardware window 0, 2 ~ 4.*/ ··· 374 375 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); 375 376 376 377 return (clkdiv < 0x100) ? clkdiv : 0xff; 377 - } 378 - 379 - static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, 380 - const struct drm_display_mode *mode, 381 - struct drm_display_mode *adjusted_mode) 382 - { 383 - if (adjusted_mode->vrefresh == 0) 384 - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; 385 - 386 - return true; 387 378 } 388 379 389 380 static void fimd_commit(struct exynos_drm_crtc *crtc) ··· 871 882 return; 872 883 873 884 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; 874 - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); 885 + writel(val, ctx->regs + DP_MIE_CLKCON); 875 886 } 876 887 877 888 static const struct exynos_drm_crtc_ops fimd_crtc_ops = { 878 889 .enable = fimd_enable, 879 890 .disable = fimd_disable, 880 - .mode_fixup = fimd_mode_fixup, 881 891 .commit = fimd_commit, 882 892 .enable_vblank = fimd_enable_vblank, 883 893 .disable_vblank = fimd_disable_vblank,
-3
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 1059 1059 1060 1060 return 0; 1061 1061 } 1062 - EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); 1063 1062 1064 1063 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, 1065 1064 struct drm_file *file) ··· 1229 1230 g2d_put_cmdlist(g2d, node); 1230 1231 return ret; 1231 1232 } 1232 - EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); 1233 1233 1234 1234 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 1235 1235 struct drm_file *file) ··· 1291 1293 out: 1292 1294 return 0; 1293 1295 } 1294 - EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); 1295 1296 1296 1297 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 1297 1298 {
+41 -53
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 56 56 nr_pages = obj->size >> PAGE_SHIFT; 57 57 58 58 if (!is_drm_iommu_supported(dev)) { 59 - dma_addr_t start_addr; 60 - unsigned int i = 0; 61 - 62 59 obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); 63 60 if (!obj->pages) { 64 61 DRM_ERROR("failed to allocate pages.\n"); 65 62 return -ENOMEM; 66 63 } 64 + } 67 65 68 - obj->cookie = dma_alloc_attrs(dev->dev, 69 - obj->size, 70 - &obj->dma_addr, GFP_KERNEL, 71 - &obj->dma_attrs); 72 - if (!obj->cookie) { 73 - DRM_ERROR("failed to allocate buffer.\n"); 66 + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, 67 + GFP_KERNEL, &obj->dma_attrs); 68 + if (!obj->cookie) { 69 + DRM_ERROR("failed to allocate buffer.\n"); 70 + if (obj->pages) 74 71 drm_free_large(obj->pages); 75 - return -ENOMEM; 76 - } 72 + return -ENOMEM; 73 + } 74 + 75 + if (obj->pages) { 76 + dma_addr_t start_addr; 77 + unsigned int i = 0; 77 78 78 79 start_addr = obj->dma_addr; 79 80 while (i < nr_pages) { 80 - obj->pages[i] = phys_to_page(start_addr); 81 + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, 82 + start_addr)); 81 83 start_addr += PAGE_SIZE; 82 84 i++; 83 85 } 84 86 } else { 85 - obj->pages = dma_alloc_attrs(dev->dev, obj->size, 86 - &obj->dma_addr, GFP_KERNEL, 87 - &obj->dma_attrs); 88 - if (!obj->pages) { 89 - DRM_ERROR("failed to allocate buffer.\n"); 90 - return -ENOMEM; 91 - } 87 + obj->pages = obj->cookie; 92 88 } 93 89 94 90 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", ··· 106 110 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 107 111 (unsigned long)obj->dma_addr, obj->size); 108 112 109 - if (!is_drm_iommu_supported(dev)) { 110 - dma_free_attrs(dev->dev, obj->size, obj->cookie, 111 - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); 112 - drm_free_large(obj->pages); 113 - } else 114 - dma_free_attrs(dev->dev, obj->size, obj->pages, 115 - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); 113 + dma_free_attrs(dev->dev, obj->size, obj->cookie, 114 + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); 116 115 117 
- obj->dma_addr = (dma_addr_t)NULL; 116 + if (!is_drm_iommu_supported(dev)) 117 + drm_free_large(obj->pages); 118 118 } 119 119 120 120 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, ··· 148 156 * once dmabuf's refcount becomes 0. 149 157 */ 150 158 if (obj->import_attach) 151 - goto out; 152 - 153 - exynos_drm_free_buf(exynos_gem_obj); 154 - 155 - out: 156 - drm_gem_free_mmap_offset(obj); 159 + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); 160 + else 161 + exynos_drm_free_buf(exynos_gem_obj); 157 162 158 163 /* release file pointer to gem object. */ 159 164 drm_gem_object_release(obj); 160 165 161 166 kfree(exynos_gem_obj); 162 - exynos_gem_obj = NULL; 163 167 } 164 168 165 169 unsigned long exynos_drm_gem_get_size(struct drm_device *dev, ··· 178 190 return exynos_gem_obj->size; 179 191 } 180 192 181 - 182 - struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 193 + static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 183 194 unsigned long size) 184 195 { 185 196 struct exynos_drm_gem_obj *exynos_gem_obj; ··· 195 208 ret = drm_gem_object_init(dev, obj, size); 196 209 if (ret < 0) { 197 210 DRM_ERROR("failed to initialize gem object\n"); 211 + kfree(exynos_gem_obj); 212 + return ERR_PTR(ret); 213 + } 214 + 215 + ret = drm_gem_create_mmap_offset(obj); 216 + if (ret < 0) { 217 + drm_gem_object_release(obj); 198 218 kfree(exynos_gem_obj); 199 219 return ERR_PTR(ret); 200 220 } ··· 307 313 drm_gem_object_unreference_unlocked(obj); 308 314 } 309 315 310 - int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, 316 + static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, 311 317 struct vm_area_struct *vma) 312 318 { 313 319 struct drm_device *drm_dev = exynos_gem_obj->base.dev; ··· 336 342 337 343 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 338 344 struct drm_file *file_priv) 339 - { struct exynos_drm_gem_obj *exynos_gem_obj; 345 + { 346 + 
struct exynos_drm_gem_obj *exynos_gem_obj; 340 347 struct drm_exynos_gem_info *args = data; 341 348 struct drm_gem_object *obj; 342 349 ··· 397 402 struct drm_mode_create_dumb *args) 398 403 { 399 404 struct exynos_drm_gem_obj *exynos_gem_obj; 405 + unsigned int flags; 400 406 int ret; 401 407 402 408 /* ··· 409 413 args->pitch = args->width * ((args->bpp + 7) / 8); 410 414 args->size = args->pitch * args->height; 411 415 412 - if (is_drm_iommu_supported(dev)) { 413 - exynos_gem_obj = exynos_drm_gem_create(dev, 414 - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, 415 - args->size); 416 - } else { 417 - exynos_gem_obj = exynos_drm_gem_create(dev, 418 - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, 419 - args->size); 420 - } 416 + if (is_drm_iommu_supported(dev)) 417 + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; 418 + else 419 + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; 421 420 421 + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); 422 422 if (IS_ERR(exynos_gem_obj)) { 423 423 dev_warn(dev->dev, "FB allocation failed.\n"); 424 424 return PTR_ERR(exynos_gem_obj); ··· 452 460 goto unlock; 453 461 } 454 462 455 - ret = drm_gem_create_mmap_offset(obj); 456 - if (ret) 457 - goto out; 458 - 459 463 *offset = drm_vma_node_offset_addr(&obj->vma_node); 460 464 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 461 465 462 - out: 463 466 drm_gem_object_unreference(obj); 464 467 unlock: 465 468 mutex_unlock(&dev->struct_mutex); ··· 530 543 531 544 err_close_vm: 532 545 drm_gem_vm_close(vma); 533 - drm_gem_free_mmap_offset(obj); 534 546 535 547 return ret; 536 548 } ··· 573 587 npages); 574 588 if (ret < 0) 575 589 goto err_free_large; 590 + 591 + exynos_gem_obj->sgt = sgt; 576 592 577 593 if (sgt->nents == 1) { 578 594 /* always physically continuous memory if sgt->nents is 1. */
+2 -4
drivers/gpu/drm/exynos/exynos_drm_gem.h
··· 39 39 * - this address could be physical address without IOMMU and 40 40 * device address with IOMMU. 41 41 * @pages: Array of backing pages. 42 + * @sgt: Imported sg_table. 42 43 * 43 44 * P.S. this object would be transferred to user as kms_bo.handle so 44 45 * user can access the buffer through kms_bo.handle. ··· 53 52 dma_addr_t dma_addr; 54 53 struct dma_attrs dma_attrs; 55 54 struct page **pages; 55 + struct sg_table *sgt; 56 56 }; 57 57 58 58 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 59 59 60 60 /* destroy a buffer with gem object */ 61 61 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); 62 - 63 - /* create a private gem object and initialize it. */ 64 - struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, 65 - unsigned long size); 66 62 67 63 /* create a new buffer with gem object */ 68 64 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+1 -1
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 786 786 return 0; 787 787 } 788 788 789 + #ifdef CONFIG_PM 789 790 static int rotator_clk_crtl(struct rot_context *rot, bool enable) 790 791 { 791 792 if (enable) { ··· 823 822 } 824 823 #endif 825 824 826 - #ifdef CONFIG_PM 827 825 static int rotator_runtime_suspend(struct device *dev) 828 826 { 829 827 struct rot_context *rot = dev_get_drvdata(dev);
+8 -1
drivers/gpu/drm/i915/intel_dp_mst.c
··· 462 462 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); 463 463 464 464 drm_mode_connector_set_path_property(connector, pathprop); 465 + return connector; 466 + } 467 + 468 + static void intel_dp_register_mst_connector(struct drm_connector *connector) 469 + { 470 + struct intel_connector *intel_connector = to_intel_connector(connector); 471 + struct drm_device *dev = connector->dev; 465 472 drm_modeset_lock_all(dev); 466 473 intel_connector_add_to_fbdev(intel_connector); 467 474 drm_modeset_unlock_all(dev); 468 475 drm_connector_register(&intel_connector->base); 469 - return connector; 470 476 } 471 477 472 478 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, ··· 518 512 519 513 static struct drm_dp_mst_topology_cbs mst_cbs = { 520 514 .add_connector = intel_dp_add_mst_connector, 515 + .register_connector = intel_dp_register_mst_connector, 521 516 .destroy_connector = intel_dp_destroy_mst_connector, 522 517 .hotplug = intel_dp_mst_hotplug, 523 518 };
+1 -1
drivers/gpu/drm/i915/intel_hotplug.c
··· 180 180 181 181 /* Enable polling and queue hotplug re-enabling. */ 182 182 if (hpd_disabled) { 183 - drm_kms_helper_poll_enable(dev); 183 + drm_kms_helper_poll_enable_locked(dev); 184 184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, 185 185 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); 186 186 }
+32 -7
drivers/gpu/drm/i915/intel_lrc.c
··· 484 484 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 485 485 486 486 read_pointer = ring->next_context_status_buffer; 487 - write_pointer = status_pointer & 0x07; 487 + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; 488 488 if (read_pointer > write_pointer) 489 - write_pointer += 6; 489 + write_pointer += GEN8_CSB_ENTRIES; 490 490 491 491 spin_lock(&ring->execlist_lock); 492 492 493 493 while (read_pointer < write_pointer) { 494 494 read_pointer++; 495 495 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 496 - (read_pointer % 6) * 8); 496 + (read_pointer % GEN8_CSB_ENTRIES) * 8); 497 497 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 498 - (read_pointer % 6) * 8 + 4); 498 + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); 499 499 500 500 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 501 501 continue; ··· 521 521 spin_unlock(&ring->execlist_lock); 522 522 523 523 WARN(submit_contexts > 2, "More than two context complete events?\n"); 524 - ring->next_context_status_buffer = write_pointer % 6; 524 + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; 525 525 526 526 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), 527 - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); 527 + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, 528 + ((u32)ring->next_context_status_buffer & 529 + GEN8_CSB_PTR_MASK) << 8)); 528 530 } 529 531 530 532 static int execlists_context_queue(struct drm_i915_gem_request *request) ··· 1424 1422 { 1425 1423 struct drm_device *dev = ring->dev; 1426 1424 struct drm_i915_private *dev_priv = dev->dev_private; 1425 + u8 next_context_status_buffer_hw; 1427 1426 1428 1427 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); 1429 1428 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); ··· 1439 1436 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | 1440 1437 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 1441 1438 POSTING_READ(RING_MODE_GEN7(ring)); 1442 - ring->next_context_status_buffer = 0; 1439 + 1440 
+ /* 1441 + * Instead of resetting the Context Status Buffer (CSB) read pointer to 1442 + * zero, we need to read the write pointer from hardware and use its 1443 + * value because "this register is power context save restored". 1444 + * Effectively, these states have been observed: 1445 + * 1446 + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | 1447 + * BDW | CSB regs not reset | CSB regs reset | 1448 + * CHT | CSB regs not reset | CSB regs not reset | 1449 + */ 1450 + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) 1451 + & GEN8_CSB_PTR_MASK); 1452 + 1453 + /* 1454 + * When the CSB registers are reset (also after power-up / gpu reset), 1455 + * CSB write pointer is set to all 1's, which is not valid, use '5' in 1456 + * this special case, so the first element read is CSB[0]. 1457 + */ 1458 + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) 1459 + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); 1460 + 1461 + ring->next_context_status_buffer = next_context_status_buffer_hw; 1443 1462 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); 1444 1463 1445 1464 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+2
drivers/gpu/drm/i915/intel_lrc.h
··· 25 25 #define _INTEL_LRC_H_ 26 26 27 27 #define GEN8_LR_CONTEXT_ALIGN 4096 28 + #define GEN8_CSB_ENTRIES 6 29 + #define GEN8_CSB_PTR_MASK 0x07 28 30 29 31 /* Execlists regs */ 30 32 #define RING_ELSP(ring) ((ring)->mmio_base+0x230)
+2 -1
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 246 246 } 247 247 248 248 if (power_well->data == SKL_DISP_PW_1) { 249 - intel_prepare_ddi(dev); 249 + if (!dev_priv->power_domains.initializing) 250 + intel_prepare_ddi(dev); 250 251 gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); 251 252 } 252 253 }
+1 -1
drivers/gpu/drm/qxl/qxl_display.c
··· 618 618 adjusted_mode->hdisplay, 619 619 adjusted_mode->vdisplay); 620 620 621 - if (qcrtc->index == 0) 621 + if (bo->is_primary == false) 622 622 recreate_primary = true; 623 623 624 624 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+4 -4
drivers/gpu/drm/radeon/atombios_encoders.c
··· 1624 1624 } else 1625 1625 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1626 1626 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1627 - args.ucAction = ATOM_LCD_BLON; 1628 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1627 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 1628 + 1629 + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); 1629 1630 } 1630 1631 break; 1631 1632 case DRM_MODE_DPMS_STANDBY: ··· 1707 1706 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1708 1707 } 1709 1708 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1710 - atombios_dig_transmitter_setup(encoder, 1711 - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); 1709 + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); 1712 1710 if (ext_encoder) 1713 1711 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1714 1712 break;
+9 -2
drivers/gpu/drm/radeon/radeon_dp_mst.c
··· 265 265 { 266 266 struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); 267 267 struct drm_device *dev = master->base.dev; 268 - struct radeon_device *rdev = dev->dev_private; 269 268 struct radeon_connector *radeon_connector; 270 269 struct drm_connector *connector; 271 270 ··· 285 286 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); 286 287 drm_mode_connector_set_path_property(connector, pathprop); 287 288 289 + return connector; 290 + } 291 + 292 + static void radeon_dp_register_mst_connector(struct drm_connector *connector) 293 + { 294 + struct drm_device *dev = connector->dev; 295 + struct radeon_device *rdev = dev->dev_private; 296 + 288 297 drm_modeset_lock_all(dev); 289 298 radeon_fb_add_connector(rdev, connector); 290 299 drm_modeset_unlock_all(dev); 291 300 292 301 drm_connector_register(connector); 293 - return connector; 294 302 } 295 303 296 304 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, ··· 330 324 331 325 struct drm_dp_mst_topology_cbs mst_cbs = { 332 326 .add_connector = radeon_dp_add_mst_connector, 327 + .register_connector = radeon_dp_register_mst_connector, 333 328 .destroy_connector = radeon_dp_destroy_mst_connector, 334 329 .hotplug = radeon_dp_mst_hotplug, 335 330 };
+1 -31
drivers/gpu/drm/radeon/radeon_fb.c
··· 48 48 struct radeon_device *rdev; 49 49 }; 50 50 51 - /** 52 - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. 53 - * 54 - * @info: fbdev info 55 - * 56 - * This function hides the cursor on all CRTCs used by fbdev. 57 - */ 58 - static int radeon_fb_helper_set_par(struct fb_info *info) 59 - { 60 - int ret; 61 - 62 - ret = drm_fb_helper_set_par(info); 63 - 64 - /* XXX: with universal plane support fbdev will automatically disable 65 - * all non-primary planes (including the cursor) 66 - */ 67 - if (ret == 0) { 68 - struct drm_fb_helper *fb_helper = info->par; 69 - int i; 70 - 71 - for (i = 0; i < fb_helper->crtc_count; i++) { 72 - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; 73 - 74 - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); 75 - } 76 - } 77 - 78 - return ret; 79 - } 80 - 81 51 static struct fb_ops radeonfb_ops = { 82 52 .owner = THIS_MODULE, 83 53 .fb_check_var = drm_fb_helper_check_var, 84 - .fb_set_par = radeon_fb_helper_set_par, 54 + .fb_set_par = drm_fb_helper_set_par, 85 55 .fb_fillrect = drm_fb_helper_cfb_fillrect, 86 56 .fb_copyarea = drm_fb_helper_cfb_copyarea, 87 57 .fb_imageblit = drm_fb_helper_cfb_imageblit,
+8
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 681 681 0, 0, 682 682 DRM_MM_SEARCH_DEFAULT, 683 683 DRM_MM_CREATE_DEFAULT); 684 + if (ret) { 685 + (void) vmw_cmdbuf_man_process(man); 686 + ret = drm_mm_insert_node_generic(&man->mm, info->node, 687 + info->page_size, 0, 0, 688 + DRM_MM_SEARCH_DEFAULT, 689 + DRM_MM_CREATE_DEFAULT); 690 + } 691 + 684 692 spin_unlock_bh(&man->lock); 685 693 info->done = !ret; 686 694
+1
drivers/hwmon/abx500.c
··· 470 470 { .compatible = "stericsson,abx500-temp" }, 471 471 {}, 472 472 }; 473 + MODULE_DEVICE_TABLE(of, abx500_temp_match); 473 474 #endif 474 475 475 476 static struct platform_driver abx500_temp_driver = {
+1
drivers/hwmon/gpio-fan.c
··· 539 539 { .compatible = "gpio-fan", }, 540 540 {}, 541 541 }; 542 + MODULE_DEVICE_TABLE(of, of_gpio_fan_match); 542 543 #endif /* CONFIG_OF_GPIO */ 543 544 544 545 static int gpio_fan_probe(struct platform_device *pdev)
+1
drivers/hwmon/pwm-fan.c
··· 323 323 { .compatible = "pwm-fan", }, 324 324 {}, 325 325 }; 326 + MODULE_DEVICE_TABLE(of, of_pwm_fan_match); 326 327 327 328 static struct platform_driver pwm_fan_driver = { 328 329 .probe = pwm_fan_probe,
+10 -2
drivers/idle/intel_idle.c
··· 620 620 .name = "C6-SKL", 621 621 .desc = "MWAIT 0x20", 622 622 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 623 - .exit_latency = 75, 623 + .exit_latency = 85, 624 624 .target_residency = 200, 625 625 .enter = &intel_idle, 626 626 .enter_freeze = intel_idle_freeze, }, ··· 636 636 .name = "C8-SKL", 637 637 .desc = "MWAIT 0x40", 638 638 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 639 - .exit_latency = 174, 639 + .exit_latency = 200, 640 640 .target_residency = 800, 641 + .enter = &intel_idle, 642 + .enter_freeze = intel_idle_freeze, }, 643 + { 644 + .name = "C9-SKL", 645 + .desc = "MWAIT 0x50", 646 + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, 647 + .exit_latency = 480, 648 + .target_residency = 5000, 641 649 .enter = &intel_idle, 642 650 .enter_freeze = intel_idle_freeze, }, 643 651 {
+1 -66
drivers/infiniband/hw/mlx5/main.c
··· 245 245 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 246 246 if (MLX5_CAP_GEN(mdev, apm)) 247 247 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 248 - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; 249 248 if (MLX5_CAP_GEN(mdev, xrc)) 250 249 props->device_cap_flags |= IB_DEVICE_XRC; 251 250 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; ··· 794 795 return 0; 795 796 } 796 797 797 - static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) 798 - { 799 - struct mlx5_create_mkey_mbox_in *in; 800 - struct mlx5_mkey_seg *seg; 801 - struct mlx5_core_mr mr; 802 - int err; 803 - 804 - in = kzalloc(sizeof(*in), GFP_KERNEL); 805 - if (!in) 806 - return -ENOMEM; 807 - 808 - seg = &in->seg; 809 - seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; 810 - seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); 811 - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 812 - seg->start_addr = 0; 813 - 814 - err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), 815 - NULL, NULL, NULL); 816 - if (err) { 817 - mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); 818 - goto err_in; 819 - } 820 - 821 - kfree(in); 822 - *key = mr.key; 823 - 824 - return 0; 825 - 826 - err_in: 827 - kfree(in); 828 - 829 - return err; 830 - } 831 - 832 - static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) 833 - { 834 - struct mlx5_core_mr mr; 835 - int err; 836 - 837 - memset(&mr, 0, sizeof(mr)); 838 - mr.key = key; 839 - err = mlx5_core_destroy_mkey(dev->mdev, &mr); 840 - if (err) 841 - mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); 842 - } 843 - 844 798 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 845 799 struct ib_ucontext *context, 846 800 struct ib_udata *udata) ··· 819 867 kfree(pd); 820 868 return ERR_PTR(-EFAULT); 821 869 } 822 - } else { 823 - err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); 824 - if (err) { 825 - mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 826 - kfree(pd); 827 - return 
ERR_PTR(err); 828 - } 829 870 } 830 871 831 872 return &pd->ibpd; ··· 828 883 { 829 884 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 830 885 struct mlx5_ib_pd *mpd = to_mpd(pd); 831 - 832 - if (!pd->uobject) 833 - free_pa_mkey(mdev, mpd->pa_lkey); 834 886 835 887 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 836 888 kfree(mpd); ··· 1187 1245 struct ib_srq_init_attr attr; 1188 1246 struct mlx5_ib_dev *dev; 1189 1247 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 1190 - u32 rsvd_lkey; 1191 1248 int ret = 0; 1192 1249 1193 1250 dev = container_of(devr, struct mlx5_ib_dev, devr); 1194 - 1195 - ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); 1196 - if (ret) { 1197 - pr_err("Failed to query special context %d\n", ret); 1198 - return ret; 1199 - } 1200 - dev->ib_dev.local_dma_lkey = rsvd_lkey; 1201 1251 1202 1252 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 1203 1253 if (IS_ERR(devr->p0)) { ··· 1352 1418 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); 1353 1419 dev->ib_dev.owner = THIS_MODULE; 1354 1420 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1421 + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 1355 1422 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 1356 1423 dev->ib_dev.phys_port_cnt = dev->num_ports; 1357 1424 dev->ib_dev.num_comp_vectors =
-2
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 103 103 struct mlx5_ib_pd { 104 104 struct ib_pd ibpd; 105 105 u32 pdn; 106 - u32 pa_lkey; 107 106 }; 108 107 109 108 /* Use macros here so that don't have to duplicate ··· 212 213 int uuarn; 213 214 214 215 int create_type; 215 - u32 pa_lkey; 216 216 217 217 /* Store signature errors */ 218 218 bool signature_en;
+1 -3
drivers/infiniband/hw/mlx5/qp.c
··· 925 925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); 926 926 if (err) 927 927 mlx5_ib_dbg(dev, "err %d\n", err); 928 - else 929 - qp->pa_lkey = to_mpd(pd)->pa_lkey; 930 928 } 931 929 932 930 if (err) ··· 2043 2045 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); 2044 2046 dseg->addr = cpu_to_be64(mfrpl->map); 2045 2047 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); 2046 - dseg->lkey = cpu_to_be32(pd->pa_lkey); 2048 + dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); 2047 2049 } 2048 2050 2049 2051 static __be32 send_ieth(struct ib_send_wr *wr)
+3 -1
drivers/infiniband/ulp/ipoib/ipoib.h
··· 80 80 IPOIB_NUM_WC = 4, 81 81 82 82 IPOIB_MAX_PATH_REC_QUEUE = 3, 83 - IPOIB_MAX_MCAST_QUEUE = 3, 83 + IPOIB_MAX_MCAST_QUEUE = 64, 84 84 85 85 IPOIB_FLAG_OPER_UP = 0, 86 86 IPOIB_FLAG_INITIALIZED = 1, ··· 548 548 549 549 int ipoib_mcast_attach(struct net_device *dev, u16 mlid, 550 550 union ib_gid *mgid, int set_qkey); 551 + int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); 552 + struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); 551 553 552 554 int ipoib_init_qp(struct net_device *dev); 553 555 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
+18
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1149 1149 unsigned long dt; 1150 1150 unsigned long flags; 1151 1151 int i; 1152 + LIST_HEAD(remove_list); 1153 + struct ipoib_mcast *mcast, *tmcast; 1154 + struct net_device *dev = priv->dev; 1152 1155 1153 1156 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1154 1157 return; ··· 1179 1176 lockdep_is_held(&priv->lock))) != NULL) { 1180 1177 /* was the neigh idle for two GC periods */ 1181 1178 if (time_after(neigh_obsolete, neigh->alive)) { 1179 + u8 *mgid = neigh->daddr + 4; 1180 + 1181 + /* Is this multicast ? */ 1182 + if (*mgid == 0xff) { 1183 + mcast = __ipoib_mcast_find(dev, mgid); 1184 + 1185 + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 1186 + list_del(&mcast->list); 1187 + rb_erase(&mcast->rb_node, &priv->multicast_tree); 1188 + list_add_tail(&mcast->list, &remove_list); 1189 + } 1190 + } 1191 + 1182 1192 rcu_assign_pointer(*np, 1183 1193 rcu_dereference_protected(neigh->hnext, 1184 1194 lockdep_is_held(&priv->lock))); ··· 1207 1191 1208 1192 out_unlock: 1209 1193 spin_unlock_irqrestore(&priv->lock, flags); 1194 + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 1195 + ipoib_mcast_leave(dev, mcast); 1210 1196 } 1211 1197 1212 1198 static void ipoib_reap_neigh(struct work_struct *work)
+14 -12
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 153 153 return mcast; 154 154 } 155 155 156 - static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) 156 + struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) 157 157 { 158 158 struct ipoib_dev_priv *priv = netdev_priv(dev); 159 159 struct rb_node *n = priv->multicast_tree.rb_node; ··· 508 508 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 509 509 510 510 /* 511 - * Historically Linux IPoIB has never properly supported SEND 512 - * ONLY join. It emulated it by not providing all the required 513 - * attributes, which is enough to prevent group creation and 514 - * detect if there are full members or not. A major problem 515 - * with supporting SEND ONLY is detecting when the group is 516 - * auto-destroyed as IPoIB will cache the MLID.. 511 + * Send-only IB Multicast joins do not work at the core 512 + * IB layer yet, so we can't use them here. However, 513 + * we are emulating an Ethernet multicast send, which 514 + * does not require a multicast subscription and will 515 + * still send properly. The most appropriate thing to 516 + * do is to create the group if it doesn't exist as that 517 + * most closely emulates the behavior, from a user space 518 + * application perspecitive, of Ethernet multicast 519 + * operation. For now, we do a full join, maybe later 520 + * when the core IB layers support send only joins we 521 + * will use them. 517 522 */ 518 - #if 1 519 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 520 - comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 521 - #else 523 + #if 0 522 524 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 523 525 rec.join_state = 4; 524 526 #endif ··· 677 675 return 0; 678 676 } 679 677 680 - static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) 678 + int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) 681 679 { 682 680 struct ipoib_dev_priv *priv = netdev_priv(dev); 683 681 int ret = 0;
+5
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 97 97 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); 98 98 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); 99 99 100 + bool iser_always_reg = true; 101 + module_param_named(always_register, iser_always_reg, bool, S_IRUGO); 102 + MODULE_PARM_DESC(always_register, 103 + "Always register memory, even for continuous memory regions (default:true)"); 104 + 100 105 bool iser_pi_enable = false; 101 106 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); 102 107 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
+1
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 611 611 extern bool iser_pi_enable; 612 612 extern int iser_pi_guard; 613 613 extern unsigned int iser_max_sectors; 614 + extern bool iser_always_reg; 614 615 615 616 int iser_assign_reg_ops(struct iser_device *device); 616 617
+12 -6
drivers/infiniband/ulp/iser/iser_memory.c
··· 803 803 iser_reg_prot_sg(struct iscsi_iser_task *task, 804 804 struct iser_data_buf *mem, 805 805 struct iser_fr_desc *desc, 806 + bool use_dma_key, 806 807 struct iser_mem_reg *reg) 807 808 { 808 809 struct iser_device *device = task->iser_conn->ib_conn.device; 809 810 810 - if (mem->dma_nents == 1) 811 + if (use_dma_key) 811 812 return iser_reg_dma(device, mem, reg); 812 813 813 814 return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); ··· 818 817 iser_reg_data_sg(struct iscsi_iser_task *task, 819 818 struct iser_data_buf *mem, 820 819 struct iser_fr_desc *desc, 820 + bool use_dma_key, 821 821 struct iser_mem_reg *reg) 822 822 { 823 823 struct iser_device *device = task->iser_conn->ib_conn.device; 824 824 825 - if (mem->dma_nents == 1) 825 + if (use_dma_key) 826 826 return iser_reg_dma(device, mem, reg); 827 827 828 828 return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); ··· 838 836 struct iser_mem_reg *reg = &task->rdma_reg[dir]; 839 837 struct iser_mem_reg *data_reg; 840 838 struct iser_fr_desc *desc = NULL; 839 + bool use_dma_key; 841 840 int err; 842 841 843 842 err = iser_handle_unaligned_buf(task, mem, dir); 844 843 if (unlikely(err)) 845 844 return err; 846 845 847 - if (mem->dma_nents != 1 || 848 - scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { 846 + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && 847 + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); 848 + 849 + if (!use_dma_key) { 849 850 desc = device->reg_ops->reg_desc_get(ib_conn); 850 851 reg->mem_h = desc; 851 852 } ··· 858 853 else 859 854 data_reg = &task->desc.data_reg; 860 855 861 - err = iser_reg_data_sg(task, mem, desc, data_reg); 856 + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); 862 857 if (unlikely(err)) 863 858 goto err_reg; 864 859 ··· 871 866 if (unlikely(err)) 872 867 goto err_reg; 873 868 874 - err = iser_reg_prot_sg(task, mem, desc, prot_reg); 869 + err = iser_reg_prot_sg(task, mem, desc, 870 + use_dma_key, prot_reg); 875 
871 if (unlikely(err)) 876 872 goto err_reg; 877 873 }
+13 -8
drivers/infiniband/ulp/iser/iser_verbs.c
··· 133 133 (unsigned long)comp); 134 134 } 135 135 136 - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | 137 - IB_ACCESS_REMOTE_WRITE | 138 - IB_ACCESS_REMOTE_READ); 139 - if (IS_ERR(device->mr)) 140 - goto dma_mr_err; 136 + if (!iser_always_reg) { 137 + int access = IB_ACCESS_LOCAL_WRITE | 138 + IB_ACCESS_REMOTE_WRITE | 139 + IB_ACCESS_REMOTE_READ; 140 + 141 + device->mr = ib_get_dma_mr(device->pd, access); 142 + if (IS_ERR(device->mr)) 143 + goto dma_mr_err; 144 + } 141 145 142 146 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, 143 147 iser_event_handler); ··· 151 147 return 0; 152 148 153 149 handler_err: 154 - ib_dereg_mr(device->mr); 150 + if (device->mr) 151 + ib_dereg_mr(device->mr); 155 152 dma_mr_err: 156 153 for (i = 0; i < device->comps_used; i++) 157 154 tasklet_kill(&device->comps[i].tasklet); ··· 178 173 static void iser_free_device_ib_res(struct iser_device *device) 179 174 { 180 175 int i; 181 - BUG_ON(device->mr == NULL); 182 176 183 177 for (i = 0; i < device->comps_used; i++) { 184 178 struct iser_comp *comp = &device->comps[i]; ··· 188 184 } 189 185 190 186 (void)ib_unregister_event_handler(&device->event_handler); 191 - (void)ib_dereg_mr(device->mr); 187 + if (device->mr) 188 + (void)ib_dereg_mr(device->mr); 192 189 ib_dealloc_pd(device->pd); 193 190 194 191 kfree(device->comps);
+1
drivers/input/joystick/Kconfig
··· 196 196 config JOYSTICK_ZHENHUA 197 197 tristate "5-byte Zhenhua RC transmitter" 198 198 select SERIO 199 + select BITREVERSE 199 200 help 200 201 Say Y here if you have a Zhen Hua PPM-4CH transmitter which is 201 202 supplied with a ready to fly micro electric indoor helicopters
+2 -2
drivers/input/joystick/walkera0701.c
··· 150 150 if (w->counter == 24) { /* full frame */ 151 151 walkera0701_parse_frame(w); 152 152 w->counter = NO_SYNC; 153 - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ 153 + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ 154 154 w->counter = 0; 155 155 } else { 156 156 if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) ··· 161 161 } else 162 162 w->counter = NO_SYNC; 163 163 } 164 - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < 164 + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < 165 165 RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ 166 166 w->counter = 0; 167 167
+1 -1
drivers/input/misc/pm8941-pwrkey.c
··· 93 93 default: 94 94 reset_type = PON_PS_HOLD_TYPE_HARD_RESET; 95 95 break; 96 - }; 96 + } 97 97 98 98 error = regmap_update_bits(pwrkey->regmap, 99 99 pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
+1 -1
drivers/input/misc/uinput.c
··· 414 414 dev->id.product = user_dev->id.product; 415 415 dev->id.version = user_dev->id.version; 416 416 417 - for_each_set_bit(i, dev->absbit, ABS_CNT) { 417 + for (i = 0; i < ABS_CNT; i++) { 418 418 input_abs_set_max(dev, i, user_dev->absmax[i]); 419 419 input_abs_set_min(dev, i, user_dev->absmin[i]); 420 420 input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
+1 -1
drivers/input/mouse/elan_i2c.h
··· 60 60 int (*get_sm_version)(struct i2c_client *client, 61 61 u8* ic_type, u8 *version); 62 62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); 63 - int (*get_product_id)(struct i2c_client *client, u8 *id); 63 + int (*get_product_id)(struct i2c_client *client, u16 *id); 64 64 65 65 int (*get_max)(struct i2c_client *client, 66 66 unsigned int *max_x, unsigned int *max_y);
+19 -7
drivers/input/mouse/elan_i2c_core.c
··· 40 40 #include "elan_i2c.h" 41 41 42 42 #define DRIVER_NAME "elan_i2c" 43 - #define ELAN_DRIVER_VERSION "1.6.0" 43 + #define ELAN_DRIVER_VERSION "1.6.1" 44 44 #define ETP_MAX_PRESSURE 255 45 45 #define ETP_FWIDTH_REDUCE 90 46 46 #define ETP_FINGER_WIDTH 15 ··· 76 76 unsigned int x_res; 77 77 unsigned int y_res; 78 78 79 - u8 product_id; 79 + u16 product_id; 80 80 u8 fw_version; 81 81 u8 sm_version; 82 82 u8 iap_version; ··· 98 98 u16 *signature_address) 99 99 { 100 100 switch (iap_version) { 101 + case 0x00: 102 + case 0x06: 101 103 case 0x08: 102 104 *validpage_count = 512; 103 105 break; 106 + case 0x03: 107 + case 0x07: 104 108 case 0x09: 109 + case 0x0A: 110 + case 0x0B: 111 + case 0x0C: 105 112 *validpage_count = 768; 106 113 break; 107 114 case 0x0D: 108 115 *validpage_count = 896; 116 + break; 117 + case 0x0E: 118 + *validpage_count = 640; 109 119 break; 110 120 default: 111 121 /* unknown ic type clear value */ ··· 276 266 277 267 error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, 278 268 &data->fw_signature_address); 279 - if (error) { 280 - dev_err(&data->client->dev, 281 - "unknown iap version %d\n", data->iap_version); 282 - return error; 283 - } 269 + if (error) 270 + dev_warn(&data->client->dev, 271 + "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", 272 + data->iap_version, data->ic_type); 284 273 285 274 return 0; 286 275 } ··· 494 485 int error; 495 486 const u8 *fw_signature; 496 487 static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; 488 + 489 + if (data->fw_validpage_count == 0) 490 + return -EINVAL; 497 491 498 492 /* Look for a firmware with the product id appended. */ 499 493 fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
+2 -2
drivers/input/mouse/elan_i2c_i2c.c
··· 276 276 return 0; 277 277 } 278 278 279 - static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) 279 + static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) 280 280 { 281 281 int error; 282 282 u8 val[3]; ··· 287 287 return error; 288 288 } 289 289 290 - *id = val[0]; 290 + *id = le16_to_cpup((__le16 *)val); 291 291 return 0; 292 292 } 293 293
+2 -2
drivers/input/mouse/elan_i2c_smbus.c
··· 183 183 return 0; 184 184 } 185 185 186 - static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) 186 + static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) 187 187 { 188 188 int error; 189 189 u8 val[3]; ··· 195 195 return error; 196 196 } 197 197 198 - *id = val[1]; 198 + *id = be16_to_cpup((__be16 *)val); 199 199 return 0; 200 200 } 201 201
+4 -8
drivers/input/mouse/synaptics.c
··· 519 519 struct synaptics_data *priv = psmouse->private; 520 520 521 521 priv->mode = 0; 522 - 523 - if (priv->absolute_mode) { 522 + if (priv->absolute_mode) 524 523 priv->mode |= SYN_BIT_ABSOLUTE_MODE; 525 - if (SYN_CAP_EXTENDED(priv->capabilities)) 526 - priv->mode |= SYN_BIT_W_MODE; 527 - } 528 - 529 - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) 524 + if (priv->disable_gesture) 530 525 priv->mode |= SYN_BIT_DISABLE_GESTURE; 531 - 532 526 if (psmouse->rate >= 80) 533 527 priv->mode |= SYN_BIT_HIGH_RATE; 528 + if (SYN_CAP_EXTENDED(priv->capabilities)) 529 + priv->mode |= SYN_BIT_W_MODE; 534 530 535 531 if (synaptics_mode_cmd(psmouse, priv->mode)) 536 532 return -1;
+14 -8
drivers/input/serio/libps2.c
··· 212 212 * time before the ACK arrives. 213 213 */ 214 214 if (ps2_sendbyte(ps2dev, command & 0xff, 215 - command == PS2_CMD_RESET_BAT ? 1000 : 200)) 216 - goto out; 215 + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { 216 + serio_pause_rx(ps2dev->serio); 217 + goto out_reset_flags; 218 + } 217 219 218 - for (i = 0; i < send; i++) 219 - if (ps2_sendbyte(ps2dev, param[i], 200)) 220 - goto out; 220 + for (i = 0; i < send; i++) { 221 + if (ps2_sendbyte(ps2dev, param[i], 200)) { 222 + serio_pause_rx(ps2dev->serio); 223 + goto out_reset_flags; 224 + } 225 + } 221 226 222 227 /* 223 228 * The reset command takes a long time to execute. ··· 239 234 !(ps2dev->flags & PS2_FLAG_CMD), timeout); 240 235 } 241 236 237 + serio_pause_rx(ps2dev->serio); 238 + 242 239 if (param) 243 240 for (i = 0; i < receive; i++) 244 241 param[i] = ps2dev->cmdbuf[(receive - 1) - i]; 245 242 246 243 if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) 247 - goto out; 244 + goto out_reset_flags; 248 245 249 246 rc = 0; 250 247 251 - out: 252 - serio_pause_rx(ps2dev->serio); 248 + out_reset_flags: 253 249 ps2dev->flags = 0; 254 250 serio_continue_rx(ps2dev->serio); 255 251
+1
drivers/input/serio/parkbd.c
··· 194 194 parkbd_port = parkbd_allocate_serio(); 195 195 if (!parkbd_port) { 196 196 parport_release(parkbd_dev); 197 + parport_unregister_device(parkbd_dev); 197 198 return -ENOMEM; 198 199 } 199 200
+22 -12
drivers/input/touchscreen/imx6ul_tsc.c
··· 94 94 * TSC module need ADC to get the measure value. So 95 95 * before config TSC, we should initialize ADC module. 96 96 */ 97 - static void imx6ul_adc_init(struct imx6ul_tsc *tsc) 97 + static int imx6ul_adc_init(struct imx6ul_tsc *tsc) 98 98 { 99 99 int adc_hc = 0; 100 100 int adc_gc; ··· 122 122 123 123 timeout = wait_for_completion_timeout 124 124 (&tsc->completion, ADC_TIMEOUT); 125 - if (timeout == 0) 125 + if (timeout == 0) { 126 126 dev_err(tsc->dev, "Timeout for adc calibration\n"); 127 + return -ETIMEDOUT; 128 + } 127 129 128 130 adc_gs = readl(tsc->adc_regs + REG_ADC_GS); 129 - if (adc_gs & ADC_CALF) 131 + if (adc_gs & ADC_CALF) { 130 132 dev_err(tsc->dev, "ADC calibration failed\n"); 133 + return -EINVAL; 134 + } 131 135 132 136 /* TSC need the ADC work in hardware trigger */ 133 137 adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); 134 138 adc_cfg |= ADC_HARDWARE_TRIGGER; 135 139 writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); 140 + 141 + return 0; 136 142 } 137 143 138 144 /* ··· 194 188 writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); 195 189 } 196 190 197 - static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) 191 + static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) 198 192 { 199 - imx6ul_adc_init(tsc); 193 + int err; 194 + 195 + err = imx6ul_adc_init(tsc); 196 + if (err) 197 + return err; 200 198 imx6ul_tsc_channel_config(tsc); 201 199 imx6ul_tsc_set(tsc); 200 + 201 + return 0; 202 202 } 203 203 204 204 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) ··· 323 311 return err; 324 312 } 325 313 326 - imx6ul_tsc_init(tsc); 327 - 328 - return 0; 314 + return imx6ul_tsc_init(tsc); 329 315 } 330 316 331 317 static void imx6ul_tsc_close(struct input_dev *input_dev) ··· 347 337 int tsc_irq; 348 338 int adc_irq; 349 339 350 - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); 340 + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); 351 341 if (!tsc) 352 342 return -ENOMEM; 353 343 ··· 355 345 if (!input_dev) 356 346 return 
-ENOMEM; 357 347 358 - input_dev->name = "iMX6UL TouchScreen Controller"; 348 + input_dev->name = "iMX6UL Touchscreen Controller"; 359 349 input_dev->id.bustype = BUS_HOST; 360 350 361 351 input_dev->open = imx6ul_tsc_open; ··· 416 406 } 417 407 418 408 adc_irq = platform_get_irq(pdev, 1); 419 - if (adc_irq <= 0) { 409 + if (adc_irq < 0) { 420 410 dev_err(&pdev->dev, "no adc irq resource?\n"); 421 411 return adc_irq; 422 412 } ··· 501 491 goto out; 502 492 } 503 493 504 - imx6ul_tsc_init(tsc); 494 + retval = imx6ul_tsc_init(tsc); 505 495 } 506 496 507 497 out:
+2 -2
drivers/input/touchscreen/mms114.c
··· 394 394 if (of_property_read_u32(np, "x-size", &pdata->x_size)) { 395 395 dev_err(dev, "failed to get x-size property\n"); 396 396 return NULL; 397 - }; 397 + } 398 398 399 399 if (of_property_read_u32(np, "y-size", &pdata->y_size)) { 400 400 dev_err(dev, "failed to get y-size property\n"); 401 401 return NULL; 402 - }; 402 + } 403 403 404 404 of_property_read_u32(np, "contact-threshold", 405 405 &pdata->contact_threshold);
+1 -1
drivers/iommu/Kconfig
··· 43 43 endmenu 44 44 45 45 config IOMMU_IOVA 46 - bool 46 + tristate 47 47 48 48 config OF_IOMMU 49 49 def_bool y
+5 -3
drivers/iommu/intel-iommu.c
··· 3215 3215 3216 3216 /* Restrict dma_mask to the width that the iommu can handle */ 3217 3217 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); 3218 + /* Ensure we reserve the whole size-aligned region */ 3219 + nrpages = __roundup_pow_of_two(nrpages); 3218 3220 3219 3221 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { 3220 3222 /* ··· 3713 3711 static int __init iommu_init_mempool(void) 3714 3712 { 3715 3713 int ret; 3716 - ret = iommu_iova_cache_init(); 3714 + ret = iova_cache_get(); 3717 3715 if (ret) 3718 3716 return ret; 3719 3717 ··· 3727 3725 3728 3726 kmem_cache_destroy(iommu_domain_cache); 3729 3727 domain_error: 3730 - iommu_iova_cache_destroy(); 3728 + iova_cache_put(); 3731 3729 3732 3730 return -ENOMEM; 3733 3731 } ··· 3736 3734 { 3737 3735 kmem_cache_destroy(iommu_devinfo_cache); 3738 3736 kmem_cache_destroy(iommu_domain_cache); 3739 - iommu_iova_cache_destroy(); 3737 + iova_cache_put(); 3740 3738 } 3741 3739 3742 3740 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+69 -51
drivers/iommu/iova.c
··· 18 18 */ 19 19 20 20 #include <linux/iova.h> 21 + #include <linux/module.h> 21 22 #include <linux/slab.h> 22 - 23 - static struct kmem_cache *iommu_iova_cache; 24 - 25 - int iommu_iova_cache_init(void) 26 - { 27 - int ret = 0; 28 - 29 - iommu_iova_cache = kmem_cache_create("iommu_iova", 30 - sizeof(struct iova), 31 - 0, 32 - SLAB_HWCACHE_ALIGN, 33 - NULL); 34 - if (!iommu_iova_cache) { 35 - pr_err("Couldn't create iova cache\n"); 36 - ret = -ENOMEM; 37 - } 38 - 39 - return ret; 40 - } 41 - 42 - void iommu_iova_cache_destroy(void) 43 - { 44 - kmem_cache_destroy(iommu_iova_cache); 45 - } 46 - 47 - struct iova *alloc_iova_mem(void) 48 - { 49 - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); 50 - } 51 - 52 - void free_iova_mem(struct iova *iova) 53 - { 54 - kmem_cache_free(iommu_iova_cache, iova); 55 - } 56 23 57 24 void 58 25 init_iova_domain(struct iova_domain *iovad, unsigned long granule, ··· 39 72 iovad->start_pfn = start_pfn; 40 73 iovad->dma_32bit_pfn = pfn_32bit; 41 74 } 75 + EXPORT_SYMBOL_GPL(init_iova_domain); 42 76 43 77 static struct rb_node * 44 78 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) ··· 88 120 } 89 121 } 90 122 91 - /* Computes the padding size required, to make the 92 - * the start address naturally aligned on its size 123 + /* 124 + * Computes the padding size required, to make the start address 125 + * naturally aligned on the power-of-two order of its size 93 126 */ 94 - static int 95 - iova_get_pad_size(int size, unsigned int limit_pfn) 127 + static unsigned int 128 + iova_get_pad_size(unsigned int size, unsigned int limit_pfn) 96 129 { 97 - unsigned int pad_size = 0; 98 - unsigned int order = ilog2(size); 99 - 100 - if (order) 101 - pad_size = (limit_pfn + 1) % (1 << order); 102 - 103 - return pad_size; 130 + return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); 104 131 } 105 132 106 133 static int __alloc_and_insert_iova_range(struct iova_domain *iovad, ··· 205 242 
rb_insert_color(&iova->node, root); 206 243 } 207 244 245 + static struct kmem_cache *iova_cache; 246 + static unsigned int iova_cache_users; 247 + static DEFINE_MUTEX(iova_cache_mutex); 248 + 249 + struct iova *alloc_iova_mem(void) 250 + { 251 + return kmem_cache_alloc(iova_cache, GFP_ATOMIC); 252 + } 253 + EXPORT_SYMBOL(alloc_iova_mem); 254 + 255 + void free_iova_mem(struct iova *iova) 256 + { 257 + kmem_cache_free(iova_cache, iova); 258 + } 259 + EXPORT_SYMBOL(free_iova_mem); 260 + 261 + int iova_cache_get(void) 262 + { 263 + mutex_lock(&iova_cache_mutex); 264 + if (!iova_cache_users) { 265 + iova_cache = kmem_cache_create( 266 + "iommu_iova", sizeof(struct iova), 0, 267 + SLAB_HWCACHE_ALIGN, NULL); 268 + if (!iova_cache) { 269 + mutex_unlock(&iova_cache_mutex); 270 + printk(KERN_ERR "Couldn't create iova cache\n"); 271 + return -ENOMEM; 272 + } 273 + } 274 + 275 + iova_cache_users++; 276 + mutex_unlock(&iova_cache_mutex); 277 + 278 + return 0; 279 + } 280 + EXPORT_SYMBOL_GPL(iova_cache_get); 281 + 282 + void iova_cache_put(void) 283 + { 284 + mutex_lock(&iova_cache_mutex); 285 + if (WARN_ON(!iova_cache_users)) { 286 + mutex_unlock(&iova_cache_mutex); 287 + return; 288 + } 289 + iova_cache_users--; 290 + if (!iova_cache_users) 291 + kmem_cache_destroy(iova_cache); 292 + mutex_unlock(&iova_cache_mutex); 293 + } 294 + EXPORT_SYMBOL_GPL(iova_cache_put); 295 + 208 296 /** 209 297 * alloc_iova - allocates an iova 210 298 * @iovad: - iova domain in question ··· 279 265 if (!new_iova) 280 266 return NULL; 281 267 282 - /* If size aligned is set then round the size to 283 - * to next power of two. 
284 - */ 285 - if (size_aligned) 286 - size = __roundup_pow_of_two(size); 287 - 288 268 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, 289 269 new_iova, size_aligned); 290 270 ··· 289 281 290 282 return new_iova; 291 283 } 284 + EXPORT_SYMBOL_GPL(alloc_iova); 292 285 293 286 /** 294 287 * find_iova - find's an iova for a given pfn ··· 330 321 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 331 322 return NULL; 332 323 } 324 + EXPORT_SYMBOL_GPL(find_iova); 333 325 334 326 /** 335 327 * __free_iova - frees the given iova ··· 349 339 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 350 340 free_iova_mem(iova); 351 341 } 342 + EXPORT_SYMBOL_GPL(__free_iova); 352 343 353 344 /** 354 345 * free_iova - finds and frees the iova for a given pfn ··· 367 356 __free_iova(iovad, iova); 368 357 369 358 } 359 + EXPORT_SYMBOL_GPL(free_iova); 370 360 371 361 /** 372 362 * put_iova_domain - destroys the iova doamin ··· 390 378 } 391 379 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 392 380 } 381 + EXPORT_SYMBOL_GPL(put_iova_domain); 393 382 394 383 static int 395 384 __is_range_overlap(struct rb_node *node, ··· 480 467 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 481 468 return iova; 482 469 } 470 + EXPORT_SYMBOL_GPL(reserve_iova); 483 471 484 472 /** 485 473 * copy_reserved_iova - copies the reserved between domains ··· 507 493 } 508 494 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); 509 495 } 496 + EXPORT_SYMBOL_GPL(copy_reserved_iova); 510 497 511 498 struct iova * 512 499 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, ··· 549 534 free_iova_mem(prev); 550 535 return NULL; 551 536 } 537 + 538 + MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); 539 + MODULE_LICENSE("GPL");
+1 -1
drivers/irqchip/irq-gic-v3-its-pci-msi.c
··· 62 62 63 63 dev_alias->dev_id = alias; 64 64 if (pdev != dev_alias->pdev) 65 - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); 65 + dev_alias->count += its_pci_msi_vec_count(pdev); 66 66 67 67 return 0; 68 68 }
+3
drivers/irqchip/irq-gic-v3-its.c
··· 719 719 out: 720 720 spin_unlock(&lpi_lock); 721 721 722 + if (!bitmap) 723 + *base = *nr_ids = 0; 724 + 722 725 return bitmap; 723 726 } 724 727
+10 -2
drivers/irqchip/irq-mips-gic.c
··· 320 320 intrmask[i] = gic_read(intrmask_reg); 321 321 pending_reg += gic_reg_step; 322 322 intrmask_reg += gic_reg_step; 323 + 324 + if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) 325 + continue; 326 + 327 + pending[i] |= (u64)gic_read(pending_reg) << 32; 328 + intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; 329 + pending_reg += gic_reg_step; 330 + intrmask_reg += gic_reg_step; 323 331 } 324 332 325 333 bitmap_and(pending, pending, intrmask, gic_shared_intrs); ··· 434 426 spin_lock_irqsave(&gic_lock, flags); 435 427 436 428 /* Re-route this IRQ */ 437 - gic_map_to_vpe(irq, cpumask_first(&tmp)); 429 + gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 438 430 439 431 /* Update the pcpu_masks */ 440 432 for (i = 0; i < NR_CPUS; i++) ··· 607 599 GIC_SHARED_TO_HWIRQ(intr)); 608 600 int i; 609 601 610 - gic_map_to_vpe(intr, cpu); 602 + gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); 611 603 for (i = 0; i < NR_CPUS; i++) 612 604 clear_bit(intr, pcpu_masks[i].pcpu_mask); 613 605 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+2 -1
drivers/md/bitmap.c
··· 1997 1997 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) 1998 1998 ret = bitmap_storage_alloc(&store, chunks, 1999 1999 !bitmap->mddev->bitmap_info.external, 2000 - bitmap->cluster_slot); 2000 + mddev_is_clustered(bitmap->mddev) 2001 + ? bitmap->cluster_slot : 0); 2001 2002 if (ret) 2002 2003 goto err; 2003 2004
+5
drivers/md/md.c
··· 5409 5409 * which will now never happen */ 5410 5410 wake_up_process(mddev->sync_thread->tsk); 5411 5411 5412 + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) 5413 + return -EBUSY; 5412 5414 mddev_unlock(mddev); 5413 5415 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 5414 5416 &mddev->recovery)); 5417 + wait_event(mddev->sb_wait, 5418 + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 5415 5419 mddev_lock_nointr(mddev); 5416 5420 5417 5421 mutex_lock(&mddev->open_mutex); ··· 8164 8160 md_reap_sync_thread(mddev); 8165 8161 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8166 8162 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8163 + clear_bit(MD_CHANGE_PENDING, &mddev->flags); 8167 8164 goto unlock; 8168 8165 } 8169 8166
+1 -2
drivers/md/multipath.c
··· 470 470 return 0; 471 471 472 472 out_free_conf: 473 - if (conf->pool) 474 - mempool_destroy(conf->pool); 473 + mempool_destroy(conf->pool); 475 474 kfree(conf->multipaths); 476 475 kfree(conf); 477 476 mddev->private = NULL;
+6 -6
drivers/md/raid0.c
··· 376 376 struct md_rdev *rdev; 377 377 bool discard_supported = false; 378 378 379 - rdev_for_each(rdev, mddev) { 380 - disk_stack_limits(mddev->gendisk, rdev->bdev, 381 - rdev->data_offset << 9); 382 - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 383 - discard_supported = true; 384 - } 385 379 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 386 380 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); 387 381 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); ··· 384 390 blk_queue_io_opt(mddev->queue, 385 391 (mddev->chunk_sectors << 9) * mddev->raid_disks); 386 392 393 + rdev_for_each(rdev, mddev) { 394 + disk_stack_limits(mddev->gendisk, rdev->bdev, 395 + rdev->data_offset << 9); 396 + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 397 + discard_supported = true; 398 + } 387 399 if (!discard_supported) 388 400 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); 389 401 else
+4 -7
drivers/md/raid1.c
··· 881 881 } 882 882 883 883 if (bio && bio_data_dir(bio) == WRITE) { 884 - if (bio->bi_iter.bi_sector >= 885 - conf->mddev->curr_resync_completed) { 884 + if (bio->bi_iter.bi_sector >= conf->next_resync) { 886 885 if (conf->start_next_window == MaxSector) 887 886 conf->start_next_window = 888 887 conf->next_resync + ··· 1515 1516 conf->r1buf_pool = NULL; 1516 1517 1517 1518 spin_lock_irq(&conf->resync_lock); 1518 - conf->next_resync = 0; 1519 + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; 1519 1520 conf->start_next_window = MaxSector; 1520 1521 conf->current_window_requests += 1521 1522 conf->next_window_requests; ··· 2842 2843 2843 2844 abort: 2844 2845 if (conf) { 2845 - if (conf->r1bio_pool) 2846 - mempool_destroy(conf->r1bio_pool); 2846 + mempool_destroy(conf->r1bio_pool); 2847 2847 kfree(conf->mirrors); 2848 2848 safe_put_page(conf->tmppage); 2849 2849 kfree(conf->poolinfo); ··· 2944 2946 { 2945 2947 struct r1conf *conf = priv; 2946 2948 2947 - if (conf->r1bio_pool) 2948 - mempool_destroy(conf->r1bio_pool); 2949 + mempool_destroy(conf->r1bio_pool); 2949 2950 kfree(conf->mirrors); 2950 2951 safe_put_page(conf->tmppage); 2951 2952 kfree(conf->poolinfo);
+3 -6
drivers/md/raid10.c
··· 3486 3486 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", 3487 3487 mdname(mddev)); 3488 3488 if (conf) { 3489 - if (conf->r10bio_pool) 3490 - mempool_destroy(conf->r10bio_pool); 3489 + mempool_destroy(conf->r10bio_pool); 3491 3490 kfree(conf->mirrors); 3492 3491 safe_put_page(conf->tmppage); 3493 3492 kfree(conf); ··· 3681 3682 3682 3683 out_free_conf: 3683 3684 md_unregister_thread(&mddev->thread); 3684 - if (conf->r10bio_pool) 3685 - mempool_destroy(conf->r10bio_pool); 3685 + mempool_destroy(conf->r10bio_pool); 3686 3686 safe_put_page(conf->tmppage); 3687 3687 kfree(conf->mirrors); 3688 3688 kfree(conf); ··· 3694 3696 { 3695 3697 struct r10conf *conf = priv; 3696 3698 3697 - if (conf->r10bio_pool) 3698 - mempool_destroy(conf->r10bio_pool); 3699 + mempool_destroy(conf->r10bio_pool); 3699 3700 safe_put_page(conf->tmppage); 3700 3701 kfree(conf->mirrors); 3701 3702 kfree(conf->mirrors_old);
+7 -4
drivers/md/raid5.c
··· 2271 2271 drop_one_stripe(conf)) 2272 2272 ; 2273 2273 2274 - if (conf->slab_cache) 2275 - kmem_cache_destroy(conf->slab_cache); 2274 + kmem_cache_destroy(conf->slab_cache); 2276 2275 conf->slab_cache = NULL; 2277 2276 } 2278 2277 ··· 3149 3150 spin_unlock_irq(&sh->stripe_lock); 3150 3151 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3151 3152 wake_up(&conf->wait_for_overlap); 3153 + if (bi) 3154 + s->to_read--; 3152 3155 while (bi && bi->bi_iter.bi_sector < 3153 3156 sh->dev[i].sector + STRIPE_SECTORS) { 3154 3157 struct bio *nextbi = ··· 3170 3169 */ 3171 3170 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3172 3171 } 3172 + s->to_write = 0; 3173 + s->written = 0; 3173 3174 3174 3175 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3175 3176 if (atomic_dec_and_test(&conf->pending_full_writes)) ··· 3303 3300 */ 3304 3301 return 0; 3305 3302 3306 - for (i = 0; i < s->failed; i++) { 3303 + for (i = 0; i < s->failed && i < 2; i++) { 3307 3304 if (fdev[i]->towrite && 3308 3305 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3309 3306 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) ··· 3327 3324 sh->sector < sh->raid_conf->mddev->recovery_cp) 3328 3325 /* reconstruct-write isn't being forced */ 3329 3326 return 0; 3330 - for (i = 0; i < s->failed; i++) { 3327 + for (i = 0; i < s->failed && i < 2; i++) { 3331 3328 if (s->failed_num[i] != sh->pd_idx && 3332 3329 s->failed_num[i] != sh->qd_idx && 3333 3330 !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+4 -2
drivers/mmc/core/core.c
··· 134 134 int err = cmd->error; 135 135 136 136 /* Flag re-tuning needed on CRC errors */ 137 - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || 137 + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && 138 + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && 139 + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || 138 140 (mrq->data && mrq->data->error == -EILSEQ) || 139 - (mrq->stop && mrq->stop->error == -EILSEQ)) 141 + (mrq->stop && mrq->stop->error == -EILSEQ))) 140 142 mmc_retune_needed(host); 141 143 142 144 if (err && cmd->retries && mmc_host_is_spi(host)) {
+2 -2
drivers/mmc/core/host.c
··· 457 457 0, &cd_gpio_invert); 458 458 if (!ret) 459 459 dev_info(host->parent, "Got CD GPIO\n"); 460 - else if (ret != -ENOENT) 460 + else if (ret != -ENOENT && ret != -ENOSYS) 461 461 return ret; 462 462 463 463 /* ··· 481 481 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); 482 482 if (!ret) 483 483 dev_info(host->parent, "Got WP GPIO\n"); 484 - else if (ret != -ENOENT) 484 + else if (ret != -ENOENT && ret != -ENOSYS) 485 485 return ret; 486 486 487 487 if (of_property_read_bool(np, "disable-wp"))
+22 -44
drivers/mmc/host/pxamci.c
··· 28 28 #include <linux/clk.h> 29 29 #include <linux/err.h> 30 30 #include <linux/mmc/host.h> 31 + #include <linux/mmc/slot-gpio.h> 31 32 #include <linux/io.h> 32 33 #include <linux/regulator/consumer.h> 33 34 #include <linux/gpio.h> ··· 455 454 { 456 455 struct pxamci_host *host = mmc_priv(mmc); 457 456 458 - if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { 459 - if (host->pdata->gpio_card_ro_invert) 460 - return !gpio_get_value(host->pdata->gpio_card_ro); 461 - else 462 - return gpio_get_value(host->pdata->gpio_card_ro); 463 - } 457 + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) 458 + return mmc_gpio_get_ro(mmc); 464 459 if (host->pdata && host->pdata->get_ro) 465 460 return !!host->pdata->get_ro(mmc_dev(mmc)); 466 461 /* ··· 548 551 549 552 static const struct mmc_host_ops pxamci_ops = { 550 553 .request = pxamci_request, 554 + .get_cd = mmc_gpio_get_cd, 551 555 .get_ro = pxamci_get_ro, 552 556 .set_ios = pxamci_set_ios, 553 557 .enable_sdio_irq = pxamci_enable_sdio_irq, ··· 788 790 gpio_power = host->pdata->gpio_power; 789 791 } 790 792 if (gpio_is_valid(gpio_power)) { 791 - ret = gpio_request(gpio_power, "mmc card power"); 793 + ret = devm_gpio_request(&pdev->dev, gpio_power, 794 + "mmc card power"); 792 795 if (ret) { 793 - dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); 796 + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", 797 + gpio_power); 794 798 goto out; 795 799 } 796 800 gpio_direction_output(gpio_power, 797 801 host->pdata->gpio_power_invert); 798 802 } 799 - if (gpio_is_valid(gpio_ro)) { 800 - ret = gpio_request(gpio_ro, "mmc card read only"); 801 - if (ret) { 802 - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 803 - goto err_gpio_ro; 804 - } 805 - gpio_direction_input(gpio_ro); 803 + if (gpio_is_valid(gpio_ro)) 804 + ret = mmc_gpio_request_ro(mmc, gpio_ro); 805 + if (ret) { 806 + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 807 + goto out; 808 + } 
else { 809 + mmc->caps |= host->pdata->gpio_card_ro_invert ? 810 + MMC_CAP2_RO_ACTIVE_HIGH : 0; 806 811 } 807 - if (gpio_is_valid(gpio_cd)) { 808 - ret = gpio_request(gpio_cd, "mmc card detect"); 809 - if (ret) { 810 - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); 811 - goto err_gpio_cd; 812 - } 813 - gpio_direction_input(gpio_cd); 814 812 815 - ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, 816 - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 817 - "mmc card detect", mmc); 818 - if (ret) { 819 - dev_err(&pdev->dev, "failed to request card detect IRQ\n"); 820 - goto err_request_irq; 821 - } 813 + if (gpio_is_valid(gpio_cd)) 814 + ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); 815 + if (ret) { 816 + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); 817 + goto out; 822 818 } 823 819 824 820 if (host->pdata && host->pdata->init) ··· 827 835 828 836 return 0; 829 837 830 - err_request_irq: 831 - gpio_free(gpio_cd); 832 - err_gpio_cd: 833 - gpio_free(gpio_ro); 834 - err_gpio_ro: 835 - gpio_free(gpio_power); 836 - out: 838 + out: 837 839 if (host) { 838 840 if (host->dma_chan_rx) 839 841 dma_release_channel(host->dma_chan_rx); ··· 859 873 gpio_ro = host->pdata->gpio_card_ro; 860 874 gpio_power = host->pdata->gpio_power; 861 875 } 862 - if (gpio_is_valid(gpio_cd)) { 863 - free_irq(gpio_to_irq(gpio_cd), mmc); 864 - gpio_free(gpio_cd); 865 - } 866 - if (gpio_is_valid(gpio_ro)) 867 - gpio_free(gpio_ro); 868 - if (gpio_is_valid(gpio_power)) 869 - gpio_free(gpio_power); 870 876 if (host->vcc) 871 877 regulator_put(host->vcc); 872 878
+39 -14
drivers/mmc/host/sunxi-mmc.c
··· 210 210 #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ 211 211 #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ 212 212 213 + #define SDXC_CLK_400K 0 214 + #define SDXC_CLK_25M 1 215 + #define SDXC_CLK_50M 2 216 + #define SDXC_CLK_50M_DDR 3 217 + 218 + struct sunxi_mmc_clk_delay { 219 + u32 output; 220 + u32 sample; 221 + }; 222 + 213 223 struct sunxi_idma_des { 214 224 u32 config; 215 225 u32 buf_size; ··· 239 229 struct clk *clk_mmc; 240 230 struct clk *clk_sample; 241 231 struct clk *clk_output; 232 + const struct sunxi_mmc_clk_delay *clk_delays; 242 233 243 234 /* irq */ 244 235 spinlock_t lock; ··· 665 654 666 655 /* determine delays */ 667 656 if (rate <= 400000) { 668 - oclk_dly = 180; 669 - sclk_dly = 42; 657 + oclk_dly = host->clk_delays[SDXC_CLK_400K].output; 658 + sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; 670 659 } else if (rate <= 25000000) { 671 - oclk_dly = 180; 672 - sclk_dly = 75; 660 + oclk_dly = host->clk_delays[SDXC_CLK_25M].output; 661 + sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; 673 662 } else if (rate <= 50000000) { 674 663 if (ios->timing == MMC_TIMING_UHS_DDR50) { 675 - oclk_dly = 60; 676 - sclk_dly = 120; 664 + oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; 665 + sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; 677 666 } else { 678 - oclk_dly = 90; 679 - sclk_dly = 150; 667 + oclk_dly = host->clk_delays[SDXC_CLK_50M].output; 668 + sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; 680 669 } 681 - } else if (rate <= 100000000) { 682 - oclk_dly = 6; 683 - sclk_dly = 24; 684 - } else if (rate <= 200000000) { 685 - oclk_dly = 3; 686 - sclk_dly = 12; 687 670 } else { 688 671 return -EINVAL; 689 672 } ··· 876 871 static const struct of_device_id sunxi_mmc_of_match[] = { 877 872 { .compatible = "allwinner,sun4i-a10-mmc", }, 878 873 { .compatible = "allwinner,sun5i-a13-mmc", }, 874 + { .compatible = "allwinner,sun9i-a80-mmc", }, 879 875 { /* sentinel */ } 880 876 }; 881 877 
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); ··· 890 884 .hw_reset = sunxi_mmc_hw_reset, 891 885 }; 892 886 887 + static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { 888 + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, 889 + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, 890 + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, 891 + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, 892 + }; 893 + 894 + static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { 895 + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, 896 + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, 897 + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, 898 + [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, 899 + }; 900 + 893 901 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, 894 902 struct platform_device *pdev) 895 903 { ··· 914 894 host->idma_des_size_bits = 13; 915 895 else 916 896 host->idma_des_size_bits = 16; 897 + 898 + if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) 899 + host->clk_delays = sun9i_mmc_clk_delays; 900 + else 901 + host->clk_delays = sunxi_mmc_clk_delays; 917 902 918 903 ret = mmc_regulator_get_supply(host->mmc); 919 904 if (ret) {
+5
drivers/mtd/ubi/io.c
··· 926 926 goto bad; 927 927 } 928 928 929 + if (data_size > ubi->leb_size) { 930 + ubi_err(ubi, "bad data_size"); 931 + goto bad; 932 + } 933 + 929 934 if (vol_type == UBI_VID_STATIC) { 930 935 /* 931 936 * Although from high-level point of view static volumes may
+1
drivers/mtd/ubi/vtbl.c
··· 649 649 if (ubi->corr_peb_count) 650 650 ubi_err(ubi, "%d PEBs are corrupted and not used", 651 651 ubi->corr_peb_count); 652 + return -ENOSPC; 652 653 } 653 654 ubi->rsvd_pebs += reserved_pebs; 654 655 ubi->avail_pebs -= reserved_pebs;
+1
drivers/mtd/ubi/wl.c
··· 1601 1601 if (ubi->corr_peb_count) 1602 1602 ubi_err(ubi, "%d PEBs are corrupted and not used", 1603 1603 ubi->corr_peb_count); 1604 + err = -ENOSPC; 1604 1605 goto out_free; 1605 1606 } 1606 1607 ubi->avail_pebs -= reserved_pebs;
+2
drivers/net/dsa/mv88e6xxx.c
··· 2051 2051 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; 2052 2052 else 2053 2053 reg |= PORT_CONTROL_FRAME_MODE_DSA; 2054 + reg |= PORT_CONTROL_FORWARD_UNKNOWN | 2055 + PORT_CONTROL_FORWARD_UNKNOWN_MC; 2054 2056 } 2055 2057 2056 2058 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+7 -6
drivers/net/ethernet/brocade/bna/bfa_ioc.c
··· 1543 1543 } 1544 1544 1545 1545 /* Flush FLI data fifo. */ 1546 - static u32 1546 + static int 1547 1547 bfa_flash_fifo_flush(void __iomem *pci_bar) 1548 1548 { 1549 1549 u32 i; ··· 1573 1573 } 1574 1574 1575 1575 /* Read flash status. */ 1576 - static u32 1576 + static int 1577 1577 bfa_flash_status_read(void __iomem *pci_bar) 1578 1578 { 1579 1579 union bfa_flash_dev_status_reg dev_status; 1580 - u32 status; 1580 + int status; 1581 1581 u32 ret_status; 1582 1582 int i; 1583 1583 ··· 1611 1611 } 1612 1612 1613 1613 /* Start flash read operation. */ 1614 - static u32 1614 + static int 1615 1615 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, 1616 1616 char *buf) 1617 1617 { 1618 - u32 status; 1618 + int status; 1619 1619 1620 1620 /* len must be mutiple of 4 and not exceeding fifo size */ 1621 1621 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) ··· 1703 1703 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, 1704 1704 u32 len) 1705 1705 { 1706 - u32 n, status; 1706 + u32 n; 1707 + int status; 1707 1708 u32 off, l, s, residue, fifo_sz; 1708 1709 1709 1710 residue = len;
+1 -1
drivers/net/ethernet/hisilicon/hip04_eth.c
··· 816 816 struct net_device *ndev; 817 817 struct hip04_priv *priv; 818 818 struct resource *res; 819 - unsigned int irq; 819 + int irq; 820 820 int ret; 821 821 822 822 ndev = alloc_etherdev(sizeof(struct hip04_priv));
+3 -3
drivers/net/ethernet/ibm/emac/core.h
··· 460 460 u32 index; 461 461 }; 462 462 463 - #define EMAC_ETHTOOL_REGS_VER 0 464 - #define EMAC4_ETHTOOL_REGS_VER 1 465 - #define EMAC4SYNC_ETHTOOL_REGS_VER 2 463 + #define EMAC_ETHTOOL_REGS_VER 3 464 + #define EMAC4_ETHTOOL_REGS_VER 4 465 + #define EMAC4SYNC_ETHTOOL_REGS_VER 5 466 466 467 467 #endif /* __IBM_NEWEMAC_CORE_H */
+9
drivers/net/ethernet/intel/i40e/i40e_adminq.c
··· 946 946 /* take the lock before we start messing with the ring */ 947 947 mutex_lock(&hw->aq.arq_mutex); 948 948 949 + if (hw->aq.arq.count == 0) { 950 + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 951 + "AQRX: Admin queue not initialized.\n"); 952 + ret_code = I40E_ERR_QUEUE_EMPTY; 953 + goto clean_arq_element_err; 954 + } 955 + 949 956 /* set next_to_use to head */ 950 957 ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); 951 958 if (ntu == ntc) { ··· 1014 1007 /* Set pending if needed, unlock and return */ 1015 1008 if (pending != NULL) 1016 1009 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 1010 + 1011 + clean_arq_element_err: 1017 1012 mutex_unlock(&hw->aq.arq_mutex); 1018 1013 1019 1014 if (i40e_is_nvm_update_op(&e->desc)) {
+2 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 2672 2672 rx_ctx.lrxqthresh = 2; 2673 2673 rx_ctx.crcstrip = 1; 2674 2674 rx_ctx.l2tsel = 1; 2675 - rx_ctx.showiv = 1; 2675 + /* this controls whether VLAN is stripped from inner headers */ 2676 + rx_ctx.showiv = 0; 2676 2677 #ifdef I40E_FCOE 2677 2678 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); 2678 2679 #endif
+9
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
··· 887 887 /* take the lock before we start messing with the ring */ 888 888 mutex_lock(&hw->aq.arq_mutex); 889 889 890 + if (hw->aq.arq.count == 0) { 891 + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 892 + "AQRX: Admin queue not initialized.\n"); 893 + ret_code = I40E_ERR_QUEUE_EMPTY; 894 + goto clean_arq_element_err; 895 + } 896 + 890 897 /* set next_to_use to head */ 891 898 ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); 892 899 if (ntu == ntc) { ··· 955 948 /* Set pending if needed, unlock and return */ 956 949 if (pending != NULL) 957 950 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); 951 + 952 + clean_arq_element_err: 958 953 mutex_unlock(&hw->aq.arq_mutex); 959 954 960 955 return ret_code;
+4 -3
drivers/net/ethernet/mellanox/mlx4/mcg.c
··· 1184 1184 if (prot == MLX4_PROT_ETH) { 1185 1185 /* manage the steering entry for promisc mode */ 1186 1186 if (new_entry) 1187 - new_steering_entry(dev, port, steer, index, qp->qpn); 1187 + err = new_steering_entry(dev, port, steer, 1188 + index, qp->qpn); 1188 1189 else 1189 - existing_steering_entry(dev, port, steer, 1190 - index, qp->qpn); 1190 + err = existing_steering_entry(dev, port, steer, 1191 + index, qp->qpn); 1191 1192 } 1192 1193 if (err && link && index != -1) { 1193 1194 if (index < dev->caps.num_mgms)
-22
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 200 200 201 201 return err; 202 202 } 203 - 204 - int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) 205 - { 206 - struct mlx5_cmd_query_special_contexts_mbox_in in; 207 - struct mlx5_cmd_query_special_contexts_mbox_out out; 208 - int err; 209 - 210 - memset(&in, 0, sizeof(in)); 211 - memset(&out, 0, sizeof(out)); 212 - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); 213 - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 214 - if (err) 215 - return err; 216 - 217 - if (out.hdr.status) 218 - err = mlx5_cmd_status_to_err(&out.hdr); 219 - 220 - *rsvd_lkey = be32_to_cpu(out.resd_lkey); 221 - 222 - return err; 223 - } 224 - EXPORT_SYMBOL(mlx5_core_query_special_context);
+1 -1
drivers/net/ethernet/realtek/r8169.c
··· 6081 6081 { 6082 6082 void __iomem *ioaddr = tp->mmio_addr; 6083 6083 struct pci_dev *pdev = tp->pci_dev; 6084 - u16 rg_saw_cnt; 6084 + int rg_saw_cnt; 6085 6085 u32 data; 6086 6086 static const struct ephy_info e_info_8168h_1[] = { 6087 6087 { 0x1e, 0x0800, 0x0001 },
+4 -3
drivers/pci/pci-driver.c
··· 299 299 * Unbound PCI devices are always put in D0, regardless of 300 300 * runtime PM status. During probe, the device is set to 301 301 * active and the usage count is incremented. If the driver 302 - * supports runtime PM, it should call pm_runtime_put_noidle() 303 - * in its probe routine and pm_runtime_get_noresume() in its 304 - * remove routine. 302 + * supports runtime PM, it should call pm_runtime_put_noidle(), 303 + * or any other runtime PM helper function decrementing the usage 304 + * count, in its probe routine and pm_runtime_get_noresume() in 305 + * its remove routine. 305 306 */ 306 307 pm_runtime_get_sync(dev); 307 308 pci_dev->driver = pci_drv;
+1 -1
drivers/scsi/scsi_lib.c
··· 1957 1957 static void scsi_mq_done(struct scsi_cmnd *cmd) 1958 1958 { 1959 1959 trace_scsi_dispatch_cmd_done(cmd); 1960 - blk_mq_complete_request(cmd->request); 1960 + blk_mq_complete_request(cmd->request, cmd->request->errors); 1961 1961 } 1962 1962 1963 1963 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+10
drivers/thermal/power_allocator.c
··· 144 144 switch_on_temp = 0; 145 145 146 146 temperature_threshold = control_temp - switch_on_temp; 147 + /* 148 + * estimate_pid_constants() tries to find appropriate default 149 + * values for thermal zones that don't provide them. If a 150 + * system integrator has configured a thermal zone with two 151 + * passive trip points at the same temperature, that person 152 + * hasn't put any effort to set up the thermal zone properly 153 + * so just give up. 154 + */ 155 + if (!temperature_threshold) 156 + return; 147 157 148 158 if (!tz->tzp->k_po || force) 149 159 tz->tzp->k_po = int_to_frac(sustainable_power) /
+2 -1
drivers/watchdog/Kconfig
··· 817 817 tristate "Intel TCO Timer/Watchdog" 818 818 depends on (X86 || IA64) && PCI 819 819 select WATCHDOG_CORE 820 + depends on I2C || I2C=n 820 821 select LPC_ICH if !EXPERT 821 - select I2C_I801 if !EXPERT 822 + select I2C_I801 if !EXPERT && I2C 822 823 ---help--- 823 824 Hardware driver for the intel TCO timer based watchdog devices. 824 825 These drivers are included in the Intel 82801 I/O Controller
+8 -2
drivers/watchdog/bcm2835_wdt.c
··· 36 36 #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 37 37 #define PM_RSTC_RESET 0x00000102 38 38 39 + /* 40 + * The Raspberry Pi firmware uses the RSTS register to know which partition 41 + * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. 42 + * Partition 63 is a special partition used by the firmware to indicate halt. 43 + */ 44 + #define PM_RSTS_RASPBERRYPI_HALT 0x555 45 + 39 46 #define SECS_TO_WDOG_TICKS(x) ((x) << 16) 40 47 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) 41 48 ··· 158 151 * hard reset. 159 152 */ 160 153 val = readl_relaxed(wdt->base + PM_RSTS); 161 - val &= PM_RSTC_WRCFG_CLR; 162 - val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; 154 + val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; 163 155 writel_relaxed(val, wdt->base + PM_RSTS); 164 156 165 157 /* Continue with normal reset mechanism */
+1
drivers/watchdog/gef_wdt.c
··· 303 303 }, 304 304 {}, 305 305 }; 306 + MODULE_DEVICE_TABLE(of, gef_wdt_ids); 306 307 307 308 static struct platform_driver gef_wdt_driver = { 308 309 .driver = {
+1
drivers/watchdog/mena21_wdt.c
··· 253 253 { .compatible = "men,a021-wdt" }, 254 254 { }, 255 255 }; 256 + MODULE_DEVICE_TABLE(of, a21_wdt_ids); 256 257 257 258 static struct platform_driver a21_wdt_driver = { 258 259 .probe = a21_wdt_probe,
+1
drivers/watchdog/moxart_wdt.c
··· 168 168 { .compatible = "moxa,moxart-watchdog" }, 169 169 { }, 170 170 }; 171 + MODULE_DEVICE_TABLE(of, moxart_watchdog_match); 171 172 172 173 static struct platform_driver moxart_wdt_driver = { 173 174 .probe = moxart_wdt_probe,
+12 -1
fs/dax.c
··· 569 569 if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) 570 570 goto fallback; 571 571 572 + sector = bh.b_blocknr << (blkbits - 9); 573 + 572 574 if (buffer_unwritten(&bh) || buffer_new(&bh)) { 573 575 int i; 576 + 577 + length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, 578 + bh.b_size); 579 + if (length < 0) { 580 + result = VM_FAULT_SIGBUS; 581 + goto out; 582 + } 583 + if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) 584 + goto fallback; 585 + 574 586 for (i = 0; i < PTRS_PER_PMD; i++) 575 587 clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); 576 588 wmb_pmem(); ··· 635 623 result = VM_FAULT_NOPAGE; 636 624 spin_unlock(ptl); 637 625 } else { 638 - sector = bh.b_blocknr << (blkbits - 9); 639 626 length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, 640 627 bh.b_size); 641 628 if (length < 0) {
-3
fs/ubifs/xattr.c
··· 652 652 { 653 653 int err; 654 654 655 - mutex_lock(&inode->i_mutex); 656 655 err = security_inode_init_security(inode, dentry, qstr, 657 656 &init_xattrs, 0); 658 - mutex_unlock(&inode->i_mutex); 659 - 660 657 if (err) { 661 658 struct ubifs_info *c = dentry->i_sb->s_fs_info; 662 659 ubifs_err(c, "cannot initialize security for inode %lu, error %d",
+72 -8
include/asm-generic/word-at-a-time.h
··· 1 1 #ifndef _ASM_WORD_AT_A_TIME_H 2 2 #define _ASM_WORD_AT_A_TIME_H 3 3 4 - /* 5 - * This says "generic", but it's actually big-endian only. 6 - * Little-endian can use more efficient versions of these 7 - * interfaces, see for example 8 - * arch/x86/include/asm/word-at-a-time.h 9 - * for those. 10 - */ 11 - 12 4 #include <linux/kernel.h> 5 + #include <asm/byteorder.h> 6 + 7 + #ifdef __BIG_ENDIAN 13 8 14 9 struct word_at_a_time { 15 10 const unsigned long high_bits, low_bits; ··· 47 52 #ifndef zero_bytemask 48 53 #define zero_bytemask(mask) (~1ul << __fls(mask)) 49 54 #endif 55 + 56 + #else 57 + 58 + /* 59 + * The optimal byte mask counting is probably going to be something 60 + * that is architecture-specific. If you have a reliably fast 61 + * bit count instruction, that might be better than the multiply 62 + * and shift, for example. 63 + */ 64 + struct word_at_a_time { 65 + const unsigned long one_bits, high_bits; 66 + }; 67 + 68 + #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } 69 + 70 + #ifdef CONFIG_64BIT 71 + 72 + /* 73 + * Jan Achrenius on G+: microoptimized version of 74 + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" 75 + * that works for the bytemasks without having to 76 + * mask them first. 
77 + */ 78 + static inline long count_masked_bytes(unsigned long mask) 79 + { 80 + return mask*0x0001020304050608ul >> 56; 81 + } 82 + 83 + #else /* 32-bit case */ 84 + 85 + /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ 86 + static inline long count_masked_bytes(long mask) 87 + { 88 + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ 89 + long a = (0x0ff0001+mask) >> 23; 90 + /* Fix the 1 for 00 case */ 91 + return a & mask; 92 + } 93 + 94 + #endif 95 + 96 + /* Return nonzero if it has a zero */ 97 + static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) 98 + { 99 + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; 100 + *bits = mask; 101 + return mask; 102 + } 103 + 104 + static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) 105 + { 106 + return bits; 107 + } 108 + 109 + static inline unsigned long create_zero_mask(unsigned long bits) 110 + { 111 + bits = (bits - 1) & ~bits; 112 + return bits >> 7; 113 + } 114 + 115 + /* The mask we created is directly usable as a bytemask */ 116 + #define zero_bytemask(mask) (mask) 117 + 118 + static inline unsigned long find_zero(unsigned long mask) 119 + { 120 + return count_masked_bytes(mask); 121 + } 122 + 123 + #endif /* __BIG_ENDIAN */ 50 124 51 125 #endif /* _ASM_WORD_AT_A_TIME_H */
+1
include/drm/drm_crtc_helper.h
··· 240 240 241 241 extern void drm_kms_helper_poll_disable(struct drm_device *dev); 242 242 extern void drm_kms_helper_poll_enable(struct drm_device *dev); 243 + extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); 243 244 244 245 #endif
+4
include/drm/drm_dp_helper.h
··· 568 568 #define MODE_I2C_READ 4 569 569 #define MODE_I2C_STOP 8 570 570 571 + /* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ 572 + #define DP_MST_PHYSICAL_PORT_0 0 573 + #define DP_MST_LOGICAL_PORT_0 8 574 + 571 575 #define DP_LINK_STATUS_SIZE 6 572 576 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], 573 577 int lane_count);
+1
include/drm/drm_dp_mst_helper.h
··· 374 374 struct drm_dp_mst_topology_cbs { 375 375 /* create a connector for a port */ 376 376 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); 377 + void (*register_connector)(struct drm_connector *connector); 377 378 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, 378 379 struct drm_connector *connector); 379 380 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
+1
include/linux/acpi.h
··· 217 217 218 218 int acpi_pci_irq_enable (struct pci_dev *dev); 219 219 void acpi_penalize_isa_irq(int irq, int active); 220 + bool acpi_isa_irq_available(int irq); 220 221 void acpi_penalize_sci_irq(int irq, int trigger, int polarity); 221 222 void acpi_pci_irq_disable (struct pci_dev *dev); 222 223
+1 -4
include/linux/blk-mq.h
··· 145 145 BLK_MQ_F_SHOULD_MERGE = 1 << 0, 146 146 BLK_MQ_F_TAG_SHARED = 1 << 1, 147 147 BLK_MQ_F_SG_MERGE = 1 << 2, 148 - BLK_MQ_F_SYSFS_UP = 1 << 3, 149 148 BLK_MQ_F_DEFER_ISSUE = 1 << 4, 150 149 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, 151 150 BLK_MQ_F_ALLOC_POLICY_BITS = 1, ··· 214 215 void blk_mq_cancel_requeue_work(struct request_queue *q); 215 216 void blk_mq_kick_requeue_list(struct request_queue *q); 216 217 void blk_mq_abort_requeue_list(struct request_queue *q); 217 - void blk_mq_complete_request(struct request *rq); 218 + void blk_mq_complete_request(struct request *rq, int error); 218 219 219 220 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 220 221 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); ··· 223 224 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); 224 225 void blk_mq_run_hw_queues(struct request_queue *q, bool async); 225 226 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 226 - void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 227 - void *priv); 228 227 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, 229 228 void *priv); 230 229 void blk_mq_freeze_queue(struct request_queue *q);
+2
include/linux/blkdev.h
··· 456 456 struct blk_mq_tag_set *tag_set; 457 457 struct list_head tag_set_list; 458 458 struct bio_set *bio_split; 459 + 460 + bool mq_sysfs_init_done; 459 461 }; 460 462 461 463 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
+2 -2
include/linux/iova.h
··· 68 68 return iova >> iova_shift(iovad); 69 69 } 70 70 71 - int iommu_iova_cache_init(void); 72 - void iommu_iova_cache_destroy(void); 71 + int iova_cache_get(void); 72 + void iova_cache_put(void); 73 73 74 74 struct iova *alloc_iova_mem(void); 75 75 void free_iova_mem(struct iova *iova);
-1
include/linux/memcontrol.h
··· 242 242 * percpu counter. 243 243 */ 244 244 struct mem_cgroup_stat_cpu __percpu *stat; 245 - spinlock_t pcp_counter_lock; 246 245 247 246 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) 248 247 struct cg_proto tcp_mem;
-11
include/linux/mlx5/device.h
··· 402 402 u8 rsvd[8]; 403 403 }; 404 404 405 - struct mlx5_cmd_query_special_contexts_mbox_in { 406 - struct mlx5_inbox_hdr hdr; 407 - u8 rsvd[8]; 408 - }; 409 - 410 - struct mlx5_cmd_query_special_contexts_mbox_out { 411 - struct mlx5_outbox_hdr hdr; 412 - __be32 dump_fill_mkey; 413 - __be32 resd_lkey; 414 - }; 415 - 416 405 struct mlx5_cmd_layout { 417 406 u8 type; 418 407 u8 rsvd0[3];
-1
include/linux/mlx5/driver.h
··· 845 845 int mlx5_register_interface(struct mlx5_interface *intf); 846 846 void mlx5_unregister_interface(struct mlx5_interface *intf); 847 847 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 848 - int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); 849 848 850 849 struct mlx5_profile { 851 850 u64 mask;
+21
include/linux/mm.h
··· 905 905 #endif 906 906 } 907 907 908 + #ifdef CONFIG_MEMCG 909 + static inline struct mem_cgroup *page_memcg(struct page *page) 910 + { 911 + return page->mem_cgroup; 912 + } 913 + 914 + static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) 915 + { 916 + page->mem_cgroup = memcg; 917 + } 918 + #else 919 + static inline struct mem_cgroup *page_memcg(struct page *page) 920 + { 921 + return NULL; 922 + } 923 + 924 + static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) 925 + { 926 + } 927 + #endif 928 + 908 929 /* 909 930 * Some inline functions in vmstat.h depend on page_zone() 910 931 */
+5 -6
include/linux/rcupdate.h
··· 230 230 struct rcu_synchronize *rs_array); 231 231 232 232 #define _wait_rcu_gp(checktiny, ...) \ 233 - do { \ 234 - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ 235 - const int __n = ARRAY_SIZE(__crcu_array); \ 236 - struct rcu_synchronize __rs_array[__n]; \ 237 - \ 238 - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ 233 + do { \ 234 + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ 235 + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ 236 + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ 237 + __crcu_array, __rs_array); \ 239 238 } while (0) 240 239 241 240 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+1 -1
include/linux/skbuff.h
··· 2708 2708 if (skb->ip_summed == CHECKSUM_COMPLETE) 2709 2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2710 2710 else if (skb->ip_summed == CHECKSUM_PARTIAL && 2711 - skb_checksum_start_offset(skb) <= len) 2711 + skb_checksum_start_offset(skb) < 0) 2712 2712 skb->ip_summed = CHECKSUM_NONE; 2713 2713 } 2714 2714
+3
include/linux/string.h
··· 25 25 #ifndef __HAVE_ARCH_STRLCPY 26 26 size_t strlcpy(char *, const char *, size_t); 27 27 #endif 28 + #ifndef __HAVE_ARCH_STRSCPY 29 + ssize_t __must_check strscpy(char *, const char *, size_t); 30 + #endif 28 31 #ifndef __HAVE_ARCH_STRCAT 29 32 extern char * strcat(char *, const char *); 30 33 #endif
+5 -1
include/net/af_unix.h
··· 63 63 #define UNIX_GC_MAYBE_CYCLE 1 64 64 struct socket_wq peer_wq; 65 65 }; 66 - #define unix_sk(__sk) ((struct unix_sock *)__sk) 66 + 67 + static inline struct unix_sock *unix_sk(struct sock *sk) 68 + { 69 + return (struct unix_sock *)sk; 70 + } 67 71 68 72 #define peer_wait peer_wq.wait 69 73
-2
include/uapi/linux/userfaultfd.h
··· 11 11 12 12 #include <linux/types.h> 13 13 14 - #include <linux/compiler.h> 15 - 16 14 #define UFFD_API ((__u64)0xAA) 17 15 /* 18 16 * After implementing the respective features it will become:
+7 -7
ipc/msg.c
··· 137 137 return retval; 138 138 } 139 139 140 - /* ipc_addid() locks msq upon success. */ 141 - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 142 - if (id < 0) { 143 - ipc_rcu_putref(msq, msg_rcu_free); 144 - return id; 145 - } 146 - 147 140 msq->q_stime = msq->q_rtime = 0; 148 141 msq->q_ctime = get_seconds(); 149 142 msq->q_cbytes = msq->q_qnum = 0; ··· 145 152 INIT_LIST_HEAD(&msq->q_messages); 146 153 INIT_LIST_HEAD(&msq->q_receivers); 147 154 INIT_LIST_HEAD(&msq->q_senders); 155 + 156 + /* ipc_addid() locks msq upon success. */ 157 + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); 158 + if (id < 0) { 159 + ipc_rcu_putref(msq, msg_rcu_free); 160 + return id; 161 + } 148 162 149 163 ipc_unlock_object(&msq->q_perm); 150 164 rcu_read_unlock();
+7 -6
ipc/shm.c
··· 551 551 if (IS_ERR(file)) 552 552 goto no_file; 553 553 554 - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); 555 - if (id < 0) { 556 - error = id; 557 - goto no_id; 558 - } 559 - 560 554 shp->shm_cprid = task_tgid_vnr(current); 561 555 shp->shm_lprid = 0; 562 556 shp->shm_atim = shp->shm_dtim = 0; ··· 559 565 shp->shm_nattch = 0; 560 566 shp->shm_file = file; 561 567 shp->shm_creator = current; 568 + 569 + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); 570 + if (id < 0) { 571 + error = id; 572 + goto no_id; 573 + } 574 + 562 575 list_add(&shp->shm_clist, &current->sysvshm.shm_clist); 563 576 564 577 /*
+4 -4
ipc/util.c
··· 237 237 rcu_read_lock(); 238 238 spin_lock(&new->lock); 239 239 240 + current_euid_egid(&euid, &egid); 241 + new->cuid = new->uid = euid; 242 + new->gid = new->cgid = egid; 243 + 240 244 id = idr_alloc(&ids->ipcs_idr, new, 241 245 (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, 242 246 GFP_NOWAIT); ··· 252 248 } 253 249 254 250 ids->in_use++; 255 - 256 - current_euid_egid(&euid, &egid); 257 - new->cuid = new->uid = euid; 258 - new->gid = new->cgid = egid; 259 251 260 252 if (next_id < 0) { 261 253 new->seq = ids->seq++;
+81 -33
kernel/events/core.c
··· 1243 1243 PERF_EVENT_STATE_INACTIVE; 1244 1244 } 1245 1245 1246 - /* 1247 - * Called at perf_event creation and when events are attached/detached from a 1248 - * group. 1249 - */ 1250 - static void perf_event__read_size(struct perf_event *event) 1246 + static void __perf_event_read_size(struct perf_event *event, int nr_siblings) 1251 1247 { 1252 1248 int entry = sizeof(u64); /* value */ 1253 1249 int size = 0; ··· 1259 1263 entry += sizeof(u64); 1260 1264 1261 1265 if (event->attr.read_format & PERF_FORMAT_GROUP) { 1262 - nr += event->group_leader->nr_siblings; 1266 + nr += nr_siblings; 1263 1267 size += sizeof(u64); 1264 1268 } 1265 1269 ··· 1267 1271 event->read_size = size; 1268 1272 } 1269 1273 1270 - static void perf_event__header_size(struct perf_event *event) 1274 + static void __perf_event_header_size(struct perf_event *event, u64 sample_type) 1271 1275 { 1272 1276 struct perf_sample_data *data; 1273 - u64 sample_type = event->attr.sample_type; 1274 1277 u16 size = 0; 1275 - 1276 - perf_event__read_size(event); 1277 1278 1278 1279 if (sample_type & PERF_SAMPLE_IP) 1279 1280 size += sizeof(data->ip); ··· 1294 1301 size += sizeof(data->txn); 1295 1302 1296 1303 event->header_size = size; 1304 + } 1305 + 1306 + /* 1307 + * Called at perf_event creation and when events are attached/detached from a 1308 + * group. 1309 + */ 1310 + static void perf_event__header_size(struct perf_event *event) 1311 + { 1312 + __perf_event_read_size(event, 1313 + event->group_leader->nr_siblings); 1314 + __perf_event_header_size(event, event->attr.sample_type); 1297 1315 } 1298 1316 1299 1317 static void perf_event__id_header_size(struct perf_event *event) ··· 1332 1328 size += sizeof(data->cpu_entry); 1333 1329 1334 1330 event->id_header_size = size; 1331 + } 1332 + 1333 + static bool perf_event_validate_size(struct perf_event *event) 1334 + { 1335 + /* 1336 + * The values computed here will be over-written when we actually 1337 + * attach the event. 
1338 + */ 1339 + __perf_event_read_size(event, event->group_leader->nr_siblings + 1); 1340 + __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); 1341 + perf_event__id_header_size(event); 1342 + 1343 + /* 1344 + * Sum the lot; should not exceed the 64k limit we have on records. 1345 + * Conservative limit to allow for callchains and other variable fields. 1346 + */ 1347 + if (event->read_size + event->header_size + 1348 + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) 1349 + return false; 1350 + 1351 + return true; 1335 1352 } 1336 1353 1337 1354 static void perf_group_attach(struct perf_event *event) ··· 8322 8297 8323 8298 if (move_group) { 8324 8299 gctx = group_leader->ctx; 8300 + mutex_lock_double(&gctx->mutex, &ctx->mutex); 8301 + } else { 8302 + mutex_lock(&ctx->mutex); 8303 + } 8325 8304 8305 + if (!perf_event_validate_size(event)) { 8306 + err = -E2BIG; 8307 + goto err_locked; 8308 + } 8309 + 8310 + /* 8311 + * Must be under the same ctx::mutex as perf_install_in_context(), 8312 + * because we need to serialize with concurrent event creation. 8313 + */ 8314 + if (!exclusive_event_installable(event, ctx)) { 8315 + /* exclusive and group stuff are assumed mutually exclusive */ 8316 + WARN_ON_ONCE(move_group); 8317 + 8318 + err = -EBUSY; 8319 + goto err_locked; 8320 + } 8321 + 8322 + WARN_ON_ONCE(ctx->parent_ctx); 8323 + 8324 + if (move_group) { 8326 8325 /* 8327 8326 * See perf_event_ctx_lock() for comments on the details 8328 8327 * of swizzling perf_event::ctx. 
8329 8328 */ 8330 - mutex_lock_double(&gctx->mutex, &ctx->mutex); 8331 - 8332 8329 perf_remove_from_context(group_leader, false); 8333 8330 8334 8331 list_for_each_entry(sibling, &group_leader->sibling_list, ··· 8358 8311 perf_remove_from_context(sibling, false); 8359 8312 put_ctx(gctx); 8360 8313 } 8361 - } else { 8362 - mutex_lock(&ctx->mutex); 8363 - } 8364 8314 8365 - WARN_ON_ONCE(ctx->parent_ctx); 8366 - 8367 - if (move_group) { 8368 8315 /* 8369 8316 * Wait for everybody to stop referencing the events through 8370 8317 * the old lists, before installing it on new lists. ··· 8390 8349 perf_event__state_init(group_leader); 8391 8350 perf_install_in_context(ctx, group_leader, group_leader->cpu); 8392 8351 get_ctx(ctx); 8352 + 8353 + /* 8354 + * Now that all events are installed in @ctx, nothing 8355 + * references @gctx anymore, so drop the last reference we have 8356 + * on it. 8357 + */ 8358 + put_ctx(gctx); 8393 8359 } 8394 8360 8395 - if (!exclusive_event_installable(event, ctx)) { 8396 - err = -EBUSY; 8397 - mutex_unlock(&ctx->mutex); 8398 - fput(event_file); 8399 - goto err_context; 8400 - } 8361 + /* 8362 + * Precalculate sample_data sizes; do while holding ctx::mutex such 8363 + * that we're serialized against further additions and before 8364 + * perf_install_in_context() which is the point the event is active and 8365 + * can use these values. 
8366 + */ 8367 + perf_event__header_size(event); 8368 + perf_event__id_header_size(event); 8401 8369 8402 8370 perf_install_in_context(ctx, event, event->cpu); 8403 8371 perf_unpin_context(ctx); 8404 8372 8405 - if (move_group) { 8373 + if (move_group) 8406 8374 mutex_unlock(&gctx->mutex); 8407 - put_ctx(gctx); 8408 - } 8409 8375 mutex_unlock(&ctx->mutex); 8410 8376 8411 8377 put_online_cpus(); ··· 8424 8376 mutex_unlock(&current->perf_event_mutex); 8425 8377 8426 8378 /* 8427 - * Precalculate sample_data sizes 8428 - */ 8429 - perf_event__header_size(event); 8430 - perf_event__id_header_size(event); 8431 - 8432 - /* 8433 8379 * Drop the reference on the group_event after placing the 8434 8380 * new event on the sibling_list. This ensures destruction 8435 8381 * of the group leader will find the pointer to itself in ··· 8433 8391 fd_install(event_fd, event_file); 8434 8392 return event_fd; 8435 8393 8394 + err_locked: 8395 + if (move_group) 8396 + mutex_unlock(&gctx->mutex); 8397 + mutex_unlock(&ctx->mutex); 8398 + /* err_file: */ 8399 + fput(event_file); 8436 8400 err_context: 8437 8401 perf_unpin_context(ctx); 8438 8402 put_ctx(ctx);
+17 -2
kernel/irq/proc.c
··· 12 12 #include <linux/seq_file.h> 13 13 #include <linux/interrupt.h> 14 14 #include <linux/kernel_stat.h> 15 + #include <linux/mutex.h> 15 16 16 17 #include "internals.h" 17 18 ··· 324 323 325 324 void register_irq_proc(unsigned int irq, struct irq_desc *desc) 326 325 { 326 + static DEFINE_MUTEX(register_lock); 327 327 char name [MAX_NAMELEN]; 328 328 329 - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) 329 + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) 330 330 return; 331 + 332 + /* 333 + * irq directories are registered only when a handler is 334 + * added, not when the descriptor is created, so multiple 335 + * tasks might try to register at the same time. 336 + */ 337 + mutex_lock(&register_lock); 338 + 339 + if (desc->dir) 340 + goto out_unlock; 331 341 332 342 memset(name, 0, MAX_NAMELEN); 333 343 sprintf(name, "%d", irq); ··· 346 334 /* create /proc/irq/1234 */ 347 335 desc->dir = proc_mkdir(name, root_irq_dir); 348 336 if (!desc->dir) 349 - return; 337 + goto out_unlock; 350 338 351 339 #ifdef CONFIG_SMP 352 340 /* create /proc/irq/<irq>/smp_affinity */ ··· 367 355 368 356 proc_create_data("spurious", 0444, desc->dir, 369 357 &irq_spurious_proc_fops, (void *)(long)irq); 358 + 359 + out_unlock: 360 + mutex_unlock(&register_lock); 370 361 } 371 362 372 363 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+5 -5
kernel/locking/lockdep.c
··· 3068 3068 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3069 3069 int trylock, int read, int check, int hardirqs_off, 3070 3070 struct lockdep_map *nest_lock, unsigned long ip, 3071 - int references) 3071 + int references, int pin_count) 3072 3072 { 3073 3073 struct task_struct *curr = current; 3074 3074 struct lock_class *class = NULL; ··· 3157 3157 hlock->waittime_stamp = 0; 3158 3158 hlock->holdtime_stamp = lockstat_clock(); 3159 3159 #endif 3160 - hlock->pin_count = 0; 3160 + hlock->pin_count = pin_count; 3161 3161 3162 3162 if (check && !mark_irqflags(curr, hlock)) 3163 3163 return 0; ··· 3343 3343 hlock_class(hlock)->subclass, hlock->trylock, 3344 3344 hlock->read, hlock->check, hlock->hardirqs_off, 3345 3345 hlock->nest_lock, hlock->acquire_ip, 3346 - hlock->references)) 3346 + hlock->references, hlock->pin_count)) 3347 3347 return 0; 3348 3348 } 3349 3349 ··· 3433 3433 hlock_class(hlock)->subclass, hlock->trylock, 3434 3434 hlock->read, hlock->check, hlock->hardirqs_off, 3435 3435 hlock->nest_lock, hlock->acquire_ip, 3436 - hlock->references)) 3436 + hlock->references, hlock->pin_count)) 3437 3437 return 0; 3438 3438 } 3439 3439 ··· 3583 3583 current->lockdep_recursion = 1; 3584 3584 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 3585 3585 __lock_acquire(lock, subclass, trylock, read, check, 3586 - irqs_disabled_flags(flags), nest_lock, ip, 0); 3586 + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 3587 3587 current->lockdep_recursion = 0; 3588 3588 raw_local_irq_restore(flags); 3589 3589 }
+5
kernel/rcu/tree.c
··· 3868 3868 static void __init 3869 3869 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) 3870 3870 { 3871 + static struct lock_class_key rcu_exp_sched_rdp_class; 3871 3872 unsigned long flags; 3872 3873 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 3873 3874 struct rcu_node *rnp = rcu_get_root(rsp); ··· 3884 3883 mutex_init(&rdp->exp_funnel_mutex); 3885 3884 rcu_boot_init_nocb_percpu_data(rdp); 3886 3885 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3886 + if (rsp == &rcu_sched_state) 3887 + lockdep_set_class_and_name(&rdp->exp_funnel_mutex, 3888 + &rcu_exp_sched_rdp_class, 3889 + "rcu_data_exp_sched"); 3887 3890 } 3888 3891 3889 3892 /*
+11 -3
kernel/sched/core.c
··· 4934 4934 idle->state = TASK_RUNNING; 4935 4935 idle->se.exec_start = sched_clock(); 4936 4936 4937 - do_set_cpus_allowed(idle, cpumask_of(cpu)); 4937 + #ifdef CONFIG_SMP 4938 + /* 4939 + * Its possible that init_idle() gets called multiple times on a task, 4940 + * in that case do_set_cpus_allowed() will not do the right thing. 4941 + * 4942 + * And since this is boot we can forgo the serialization. 4943 + */ 4944 + set_cpus_allowed_common(idle, cpumask_of(cpu)); 4945 + #endif 4938 4946 /* 4939 4947 * We're having a chicken and egg problem, even though we are 4940 4948 * holding rq->lock, the cpu isn't yet set to this cpu so the ··· 4959 4951 4960 4952 rq->curr = rq->idle = idle; 4961 4953 idle->on_rq = TASK_ON_RQ_QUEUED; 4962 - #if defined(CONFIG_SMP) 4954 + #ifdef CONFIG_SMP 4963 4955 idle->on_cpu = 1; 4964 4956 #endif 4965 4957 raw_spin_unlock(&rq->lock); ··· 4974 4966 idle->sched_class = &idle_sched_class; 4975 4967 ftrace_graph_init_idle_task(idle, cpu); 4976 4968 vtime_init_idle(idle, cpu); 4977 - #if defined(CONFIG_SMP) 4969 + #ifdef CONFIG_SMP 4978 4970 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 4979 4971 #endif 4980 4972 }
+1 -1
kernel/time/clocksource.c
··· 217 217 continue; 218 218 219 219 /* Check the deviation from the watchdog clocksource. */ 220 - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { 220 + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { 221 221 pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", 222 222 cs->name); 223 223 pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
+88
lib/string.c
··· 27 27 #include <linux/bug.h> 28 28 #include <linux/errno.h> 29 29 30 + #include <asm/byteorder.h> 31 + #include <asm/word-at-a-time.h> 32 + #include <asm/page.h> 33 + 30 34 #ifndef __HAVE_ARCH_STRNCASECMP 31 35 /** 32 36 * strncasecmp - Case insensitive, length-limited string comparison ··· 148 144 return ret; 149 145 } 150 146 EXPORT_SYMBOL(strlcpy); 147 + #endif 148 + 149 + #ifndef __HAVE_ARCH_STRSCPY 150 + /** 151 + * strscpy - Copy a C-string into a sized buffer 152 + * @dest: Where to copy the string to 153 + * @src: Where to copy the string from 154 + * @count: Size of destination buffer 155 + * 156 + * Copy the string, or as much of it as fits, into the dest buffer. 157 + * The routine returns the number of characters copied (not including 158 + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. 159 + * The behavior is undefined if the string buffers overlap. 160 + * The destination buffer is always NUL terminated, unless it's zero-sized. 161 + * 162 + * Preferred to strlcpy() since the API doesn't require reading memory 163 + * from the src string beyond the specified "count" bytes, and since 164 + * the return value is easier to error-check than strlcpy()'s. 165 + * In addition, the implementation is robust to the string changing out 166 + * from underneath it, unlike the current strlcpy() implementation. 167 + * 168 + * Preferred to strncpy() since it always returns a valid string, and 169 + * doesn't unnecessarily force the tail of the destination buffer to be 170 + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() 171 + * with an overflow test, then just memset() the tail of the dest buffer. 
172 + */ 173 + ssize_t strscpy(char *dest, const char *src, size_t count) 174 + { 175 + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; 176 + size_t max = count; 177 + long res = 0; 178 + 179 + if (count == 0) 180 + return -E2BIG; 181 + 182 + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 183 + /* 184 + * If src is unaligned, don't cross a page boundary, 185 + * since we don't know if the next page is mapped. 186 + */ 187 + if ((long)src & (sizeof(long) - 1)) { 188 + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); 189 + if (limit < max) 190 + max = limit; 191 + } 192 + #else 193 + /* If src or dest is unaligned, don't do word-at-a-time. */ 194 + if (((long) dest | (long) src) & (sizeof(long) - 1)) 195 + max = 0; 196 + #endif 197 + 198 + while (max >= sizeof(unsigned long)) { 199 + unsigned long c, data; 200 + 201 + c = *(unsigned long *)(src+res); 202 + *(unsigned long *)(dest+res) = c; 203 + if (has_zero(c, &data, &constants)) { 204 + data = prep_zero_mask(c, data, &constants); 205 + data = create_zero_mask(data); 206 + return res + find_zero(data); 207 + } 208 + res += sizeof(unsigned long); 209 + count -= sizeof(unsigned long); 210 + max -= sizeof(unsigned long); 211 + } 212 + 213 + while (count) { 214 + char c; 215 + 216 + c = src[res]; 217 + dest[res] = c; 218 + if (!c) 219 + return res; 220 + res++; 221 + count--; 222 + } 223 + 224 + /* Hit buffer length without finding a NUL; force NUL-termination. */ 225 + if (res) 226 + dest[res-1] = '\0'; 227 + 228 + return -E2BIG; 229 + } 230 + EXPORT_SYMBOL(strscpy); 151 231 #endif 152 232 153 233 #ifndef __HAVE_ARCH_STRCAT
+1 -1
mm/dmapool.c
··· 394 394 list_for_each_entry(page, &pool->page_list, page_list) { 395 395 if (dma < page->dma) 396 396 continue; 397 - if (dma < (page->dma + pool->allocation)) 397 + if ((dma - page->dma) < pool->allocation) 398 398 return page; 399 399 } 400 400 return NULL;
+8
mm/hugetlb.c
··· 3202 3202 continue; 3203 3203 3204 3204 /* 3205 + * Shared VMAs have their own reserves and do not affect 3206 + * MAP_PRIVATE accounting but it is possible that a shared 3207 + * VMA is using the same page so check and skip such VMAs. 3208 + */ 3209 + if (iter_vma->vm_flags & VM_MAYSHARE) 3210 + continue; 3211 + 3212 + /* 3205 3213 * Unmap the page from other VMAs without their own reserves. 3206 3214 * They get marked to be SIGKILLed if they fault in these 3207 3215 * areas. This is because a future no-page fault on this VMA
+18 -13
mm/memcontrol.c
··· 644 644 } 645 645 646 646 /* 647 + * Return page count for single (non recursive) @memcg. 648 + * 647 649 * Implementation Note: reading percpu statistics for memcg. 648 650 * 649 651 * Both of vmstat[] and percpu_counter has threshold and do periodic 650 652 * synchronization to implement "quick" read. There are trade-off between 651 653 * reading cost and precision of value. Then, we may have a chance to implement 652 - * a periodic synchronizion of counter in memcg's counter. 654 + * a periodic synchronization of counter in memcg's counter. 653 655 * 654 656 * But this _read() function is used for user interface now. The user accounts 655 657 * memory usage by memory cgroup and he _always_ requires exact value because ··· 661 659 * 662 660 * If there are kernel internal actions which can make use of some not-exact 663 661 * value, and reading all cpu value can be performance bottleneck in some 664 - * common workload, threashold and synchonization as vmstat[] should be 662 + * common workload, threshold and synchronization as vmstat[] should be 665 663 * implemented. 666 664 */ 667 - static long mem_cgroup_read_stat(struct mem_cgroup *memcg, 668 - enum mem_cgroup_stat_index idx) 665 + static unsigned long 666 + mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) 669 667 { 670 668 long val = 0; 671 669 int cpu; 672 670 671 + /* Per-cpu values can be negative, use a signed accumulator */ 673 672 for_each_possible_cpu(cpu) 674 673 val += per_cpu(memcg->stat->count[idx], cpu); 674 + /* 675 + * Summing races with updates, so val may be negative. Avoid exposing 676 + * transient negative values. 
677 + */ 678 + if (val < 0) 679 + val = 0; 675 680 return val; 676 681 } 677 682 ··· 1263 1254 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1264 1255 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 1265 1256 continue; 1266 - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], 1257 + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], 1267 1258 K(mem_cgroup_read_stat(iter, i))); 1268 1259 } 1269 1260 ··· 2828 2819 enum mem_cgroup_stat_index idx) 2829 2820 { 2830 2821 struct mem_cgroup *iter; 2831 - long val = 0; 2822 + unsigned long val = 0; 2832 2823 2833 - /* Per-cpu values can be negative, use a signed accumulator */ 2834 2824 for_each_mem_cgroup_tree(iter, memcg) 2835 2825 val += mem_cgroup_read_stat(iter, idx); 2836 2826 2837 - if (val < 0) /* race ? */ 2838 - val = 0; 2839 2827 return val; 2840 2828 } 2841 2829 ··· 3175 3169 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3176 3170 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3177 3171 continue; 3178 - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], 3172 + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], 3179 3173 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); 3180 3174 } 3181 3175 ··· 3200 3194 (u64)memsw * PAGE_SIZE); 3201 3195 3202 3196 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 3203 - long long val = 0; 3197 + unsigned long long val = 0; 3204 3198 3205 3199 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) 3206 3200 continue; 3207 3201 for_each_mem_cgroup_tree(mi, memcg) 3208 3202 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; 3209 - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); 3203 + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); 3210 3204 } 3211 3205 3212 3206 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { ··· 4185 4179 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4186 4180 goto out_free_stat; 4187 4181 4188 - spin_lock_init(&memcg->pcp_counter_lock); 4189 4182 return memcg; 4190 4183 4191 4184 out_free_stat:
+11 -1
mm/migrate.c
··· 740 740 if (PageSwapBacked(page)) 741 741 SetPageSwapBacked(newpage); 742 742 743 + /* 744 + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus 745 + * needs newpage's memcg set to transfer memcg dirty page accounting. 746 + * So perform memcg migration in two steps: 747 + * 1. set newpage->mem_cgroup (here) 748 + * 2. clear page->mem_cgroup (below) 749 + */ 750 + set_page_memcg(newpage, page_memcg(page)); 751 + 743 752 mapping = page_mapping(page); 744 753 if (!mapping) 745 754 rc = migrate_page(mapping, newpage, page, mode); ··· 765 756 rc = fallback_migrate_page(mapping, newpage, page, mode); 766 757 767 758 if (rc != MIGRATEPAGE_SUCCESS) { 759 + set_page_memcg(newpage, NULL); 768 760 newpage->mapping = NULL; 769 761 } else { 770 - mem_cgroup_migrate(page, newpage, false); 762 + set_page_memcg(page, NULL); 771 763 if (page_was_mapped) 772 764 remove_migration_ptes(page, newpage); 773 765 page->mapping = NULL;
+10 -3
mm/slab.c
··· 2190 2190 size += BYTES_PER_WORD; 2191 2191 } 2192 2192 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2193 - if (size >= kmalloc_size(INDEX_NODE + 1) 2194 - && cachep->object_size > cache_line_size() 2195 - && ALIGN(size, cachep->align) < PAGE_SIZE) { 2193 + /* 2194 + * To activate debug pagealloc, off-slab management is necessary 2195 + * requirement. In early phase of initialization, small sized slab 2196 + * doesn't get initialized so it would not be possible. So, we need 2197 + * to check size >= 256. It guarantees that all necessary small 2198 + * sized slab is initialized in current slab initialization sequence. 2199 + */ 2200 + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && 2201 + size >= 256 && cachep->object_size > cache_line_size() && 2202 + ALIGN(size, cachep->align) < PAGE_SIZE) { 2196 2203 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); 2197 2204 size = PAGE_SIZE; 2198 2205 }
+1 -2
net/core/net-sysfs.c
··· 31 31 static const char fmt_hex[] = "%#x\n"; 32 32 static const char fmt_long_hex[] = "%#lx\n"; 33 33 static const char fmt_dec[] = "%d\n"; 34 - static const char fmt_udec[] = "%u\n"; 35 34 static const char fmt_ulong[] = "%lu\n"; 36 35 static const char fmt_u64[] = "%llu\n"; 37 36 ··· 201 202 if (netif_running(netdev)) { 202 203 struct ethtool_cmd cmd; 203 204 if (!__ethtool_get_settings(netdev, &cmd)) 204 - ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); 205 + ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); 205 206 } 206 207 rtnl_unlock(); 207 208 return ret;
+5 -4
net/core/skbuff.c
··· 2958 2958 */ 2959 2959 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2960 2960 { 2961 + unsigned char *data = skb->data; 2962 + 2961 2963 BUG_ON(len > skb->len); 2962 - skb->len -= len; 2963 - BUG_ON(skb->len < skb->data_len); 2964 - skb_postpull_rcsum(skb, skb->data, len); 2965 - return skb->data += len; 2964 + __skb_pull(skb, len); 2965 + skb_postpull_rcsum(skb, data, len); 2966 + return skb->data; 2966 2967 } 2967 2968 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2968 2969
+8 -3
net/dsa/slave.c
··· 458 458 static int dsa_slave_port_attr_set(struct net_device *dev, 459 459 struct switchdev_attr *attr) 460 460 { 461 - int ret = 0; 461 + struct dsa_slave_priv *p = netdev_priv(dev); 462 + struct dsa_switch *ds = p->parent; 463 + int ret; 462 464 463 465 switch (attr->id) { 464 466 case SWITCHDEV_ATTR_PORT_STP_STATE: 465 - if (attr->trans == SWITCHDEV_TRANS_COMMIT) 466 - ret = dsa_slave_stp_update(dev, attr->u.stp_state); 467 + if (attr->trans == SWITCHDEV_TRANS_PREPARE) 468 + ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP; 469 + else 470 + ret = ds->drv->port_stp_update(ds, p->port, 471 + attr->u.stp_state); 467 472 break; 468 473 default: 469 474 ret = -EOPNOTSUPP;
+1
net/ipv4/fib_frontend.c
··· 340 340 fl4.flowi4_tos = tos; 341 341 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 342 342 fl4.flowi4_tun_key.tun_id = 0; 343 + fl4.flowi4_flags = 0; 343 344 344 345 no_addr = idev->ifa_list == NULL; 345 346
+1
net/ipv4/route.c
··· 1737 1737 fl4.flowi4_mark = skb->mark; 1738 1738 fl4.flowi4_tos = tos; 1739 1739 fl4.flowi4_scope = RT_SCOPE_UNIVERSE; 1740 + fl4.flowi4_flags = 0; 1740 1741 fl4.daddr = daddr; 1741 1742 fl4.saddr = saddr; 1742 1743 err = fib_lookup(net, &fl4, &res, 0);
+2 -1
net/ipv6/route.c
··· 1193 1193 1194 1194 fl6->flowi6_iif = LOOPBACK_IFINDEX; 1195 1195 1196 - if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) 1196 + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || 1197 + fl6->flowi6_oif) 1197 1198 flags |= RT6_LOOKUP_F_IFACE; 1198 1199 1199 1200 if (!ipv6_addr_any(&fl6->saddr))
+9 -2
net/l2tp/l2tp_core.c
··· 1319 1319 tunnel = container_of(work, struct l2tp_tunnel, del_work); 1320 1320 sk = l2tp_tunnel_sock_lookup(tunnel); 1321 1321 if (!sk) 1322 - return; 1322 + goto out; 1323 1323 1324 1324 sock = sk->sk_socket; 1325 1325 ··· 1341 1341 } 1342 1342 1343 1343 l2tp_tunnel_sock_put(sk); 1344 + out: 1345 + l2tp_tunnel_dec_refcount(tunnel); 1344 1346 } 1345 1347 1346 1348 /* Create a socket for the tunnel, if one isn't set up by ··· 1638 1636 */ 1639 1637 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1640 1638 { 1639 + l2tp_tunnel_inc_refcount(tunnel); 1641 1640 l2tp_tunnel_closeall(tunnel); 1642 - return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1641 + if (false == queue_work(l2tp_wq, &tunnel->del_work)) { 1642 + l2tp_tunnel_dec_refcount(tunnel); 1643 + return 1; 1644 + } 1645 + return 0; 1643 1646 } 1644 1647 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1645 1648
+11 -9
net/sctp/associola.c
··· 1208 1208 * within this document. 1209 1209 * 1210 1210 * Our basic strategy is to round-robin transports in priorities 1211 - * according to sctp_state_prio_map[] e.g., if no such 1211 + * according to sctp_trans_score() e.g., if no such 1212 1212 * transport with state SCTP_ACTIVE exists, round-robin through 1213 1213 * SCTP_UNKNOWN, etc. You get the picture. 1214 1214 */ 1215 - static const u8 sctp_trans_state_to_prio_map[] = { 1216 - [SCTP_ACTIVE] = 3, /* best case */ 1217 - [SCTP_UNKNOWN] = 2, 1218 - [SCTP_PF] = 1, 1219 - [SCTP_INACTIVE] = 0, /* worst case */ 1220 - }; 1221 - 1222 1215 static u8 sctp_trans_score(const struct sctp_transport *trans) 1223 1216 { 1224 - return sctp_trans_state_to_prio_map[trans->state]; 1217 + switch (trans->state) { 1218 + case SCTP_ACTIVE: 1219 + return 3; /* best case */ 1220 + case SCTP_UNKNOWN: 1221 + return 2; 1222 + case SCTP_PF: 1223 + return 1; 1224 + default: /* case SCTP_INACTIVE */ 1225 + return 0; /* worst case */ 1226 + } 1225 1227 } 1226 1228 1227 1229 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
+24 -20
net/sctp/sm_sideeffect.c
··· 244 244 int error; 245 245 struct sctp_transport *transport = (struct sctp_transport *) peer; 246 246 struct sctp_association *asoc = transport->asoc; 247 - struct net *net = sock_net(asoc->base.sk); 247 + struct sock *sk = asoc->base.sk; 248 + struct net *net = sock_net(sk); 248 249 249 250 /* Check whether a task is in the sock. */ 250 251 251 - bh_lock_sock(asoc->base.sk); 252 - if (sock_owned_by_user(asoc->base.sk)) { 252 + bh_lock_sock(sk); 253 + if (sock_owned_by_user(sk)) { 253 254 pr_debug("%s: sock is busy\n", __func__); 254 255 255 256 /* Try again later. */ ··· 273 272 transport, GFP_ATOMIC); 274 273 275 274 if (error) 276 - asoc->base.sk->sk_err = -error; 275 + sk->sk_err = -error; 277 276 278 277 out_unlock: 279 - bh_unlock_sock(asoc->base.sk); 278 + bh_unlock_sock(sk); 280 279 sctp_transport_put(transport); 281 280 } 282 281 ··· 286 285 static void sctp_generate_timeout_event(struct sctp_association *asoc, 287 286 sctp_event_timeout_t timeout_type) 288 287 { 289 - struct net *net = sock_net(asoc->base.sk); 288 + struct sock *sk = asoc->base.sk; 289 + struct net *net = sock_net(sk); 290 290 int error = 0; 291 291 292 - bh_lock_sock(asoc->base.sk); 293 - if (sock_owned_by_user(asoc->base.sk)) { 292 + bh_lock_sock(sk); 293 + if (sock_owned_by_user(sk)) { 294 294 pr_debug("%s: sock is busy: timer %d\n", __func__, 295 295 timeout_type); 296 296 ··· 314 312 (void *)timeout_type, GFP_ATOMIC); 315 313 316 314 if (error) 317 - asoc->base.sk->sk_err = -error; 315 + sk->sk_err = -error; 318 316 319 317 out_unlock: 320 - bh_unlock_sock(asoc->base.sk); 318 + bh_unlock_sock(sk); 321 319 sctp_association_put(asoc); 322 320 } 323 321 ··· 367 365 int error = 0; 368 366 struct sctp_transport *transport = (struct sctp_transport *) data; 369 367 struct sctp_association *asoc = transport->asoc; 370 - struct net *net = sock_net(asoc->base.sk); 368 + struct sock *sk = asoc->base.sk; 369 + struct net *net = sock_net(sk); 371 370 372 - bh_lock_sock(asoc->base.sk); 373 - 
if (sock_owned_by_user(asoc->base.sk)) { 371 + bh_lock_sock(sk); 372 + if (sock_owned_by_user(sk)) { 374 373 pr_debug("%s: sock is busy\n", __func__); 375 374 376 375 /* Try again later. */ ··· 391 388 asoc->state, asoc->ep, asoc, 392 389 transport, GFP_ATOMIC); 393 390 394 - if (error) 395 - asoc->base.sk->sk_err = -error; 391 + if (error) 392 + sk->sk_err = -error; 396 393 397 394 out_unlock: 398 - bh_unlock_sock(asoc->base.sk); 395 + bh_unlock_sock(sk); 399 396 sctp_transport_put(transport); 400 397 } 401 398 ··· 406 403 { 407 404 struct sctp_transport *transport = (struct sctp_transport *) data; 408 405 struct sctp_association *asoc = transport->asoc; 409 - struct net *net = sock_net(asoc->base.sk); 406 + struct sock *sk = asoc->base.sk; 407 + struct net *net = sock_net(sk); 410 408 411 - bh_lock_sock(asoc->base.sk); 412 - if (sock_owned_by_user(asoc->base.sk)) { 409 + bh_lock_sock(sk); 410 + if (sock_owned_by_user(sk)) { 413 411 pr_debug("%s: sock is busy\n", __func__); 414 412 415 413 /* Try again later. */ ··· 431 427 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); 432 428 433 429 out_unlock: 434 - bh_unlock_sock(asoc->base.sk); 430 + bh_unlock_sock(sk); 435 431 sctp_association_put(asoc); 436 432 } 437 433
-19
net/sunrpc/xprtrdma/fmr_ops.c
··· 39 39 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 40 40 struct rpcrdma_create_data_internal *cdata) 41 41 { 42 - struct ib_device_attr *devattr = &ia->ri_devattr; 43 - struct ib_mr *mr; 44 - 45 - /* Obtain an lkey to use for the regbufs, which are 46 - * protected from remote access. 47 - */ 48 - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { 49 - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; 50 - } else { 51 - mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); 52 - if (IS_ERR(mr)) { 53 - pr_err("%s: ib_get_dma_mr for failed with %lX\n", 54 - __func__, PTR_ERR(mr)); 55 - return -ENOMEM; 56 - } 57 - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; 58 - ia->ri_dma_mr = mr; 59 - } 60 - 61 42 return 0; 62 43 } 63 44
-5
net/sunrpc/xprtrdma/frwr_ops.c
··· 189 189 struct ib_device_attr *devattr = &ia->ri_devattr; 190 190 int depth, delta; 191 191 192 - /* Obtain an lkey to use for the regbufs, which are 193 - * protected from remote access. 194 - */ 195 - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; 196 - 197 192 ia->ri_max_frmr_depth = 198 193 min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, 199 194 devattr->max_fast_reg_page_list_len);
+1 -9
net/sunrpc/xprtrdma/physical_ops.c
··· 23 23 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, 24 24 struct rpcrdma_create_data_internal *cdata) 25 25 { 26 - struct ib_device_attr *devattr = &ia->ri_devattr; 27 26 struct ib_mr *mr; 28 27 29 28 /* Obtain an rkey to use for RPC data payloads. ··· 36 37 __func__, PTR_ERR(mr)); 37 38 return -ENOMEM; 38 39 } 40 + 39 41 ia->ri_dma_mr = mr; 40 - 41 - /* Obtain an lkey to use for regbufs. 42 - */ 43 - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 44 - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; 45 - else 46 - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; 47 - 48 42 return 0; 49 43 } 50 44
+1 -1
net/sunrpc/xprtrdma/verbs.c
··· 1252 1252 goto out_free; 1253 1253 1254 1254 iov->length = size; 1255 - iov->lkey = ia->ri_dma_lkey; 1255 + iov->lkey = ia->ri_pd->local_dma_lkey; 1256 1256 rb->rg_size = size; 1257 1257 rb->rg_owner = NULL; 1258 1258 return rb;
-1
net/sunrpc/xprtrdma/xprt_rdma.h
··· 65 65 struct rdma_cm_id *ri_id; 66 66 struct ib_pd *ri_pd; 67 67 struct ib_mr *ri_dma_mr; 68 - u32 ri_dma_lkey; 69 68 struct completion ri_done; 70 69 int ri_async_rc; 71 70 unsigned int ri_max_frmr_depth;
+14 -1
net/unix/af_unix.c
··· 2179 2179 if (UNIXCB(skb).fp) 2180 2180 scm.fp = scm_fp_dup(UNIXCB(skb).fp); 2181 2181 2182 - sk_peek_offset_fwd(sk, chunk); 2182 + if (skip) { 2183 + sk_peek_offset_fwd(sk, chunk); 2184 + skip -= chunk; 2185 + } 2183 2186 2187 + if (UNIXCB(skb).fp) 2188 + break; 2189 + 2190 + last = skb; 2191 + last_len = skb->len; 2192 + unix_state_lock(sk); 2193 + skb = skb_peek_next(skb, &sk->sk_receive_queue); 2194 + if (skb) 2195 + goto again; 2196 + unix_state_unlock(sk); 2184 2197 break; 2185 2198 } 2186 2199 } while (size);
+7 -7
samples/kprobes/jprobe_example.c
··· 1 1 /* 2 2 * Here's a sample kernel module showing the use of jprobes to dump 3 - * the arguments of do_fork(). 3 + * the arguments of _do_fork(). 4 4 * 5 5 * For more information on theory of operation of jprobes, see 6 6 * Documentation/kprobes.txt 7 7 * 8 8 * Build and insert the kernel module as done in the kprobe example. 9 9 * You will see the trace data in /var/log/messages and on the 10 - * console whenever do_fork() is invoked to create a new process. 10 + * console whenever _do_fork() is invoked to create a new process. 11 11 * (Some messages may be suppressed if syslogd is configured to 12 12 * eliminate duplicate messages.) 13 13 */ ··· 17 17 #include <linux/kprobes.h> 18 18 19 19 /* 20 - * Jumper probe for do_fork. 20 + * Jumper probe for _do_fork. 21 21 * Mirror principle enables access to arguments of the probed routine 22 22 * from the probe handler. 23 23 */ 24 24 25 - /* Proxy routine having the same arguments as actual do_fork() routine */ 26 - static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, 25 + /* Proxy routine having the same arguments as actual _do_fork() routine */ 26 + static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, 27 27 unsigned long stack_size, int __user *parent_tidptr, 28 28 int __user *child_tidptr) 29 29 { ··· 36 36 } 37 37 38 38 static struct jprobe my_jprobe = { 39 - .entry = jdo_fork, 39 + .entry = j_do_fork, 40 40 .kp = { 41 - .symbol_name = "do_fork", 41 + .symbol_name = "_do_fork", 42 42 }, 43 43 }; 44 44
+3 -3
samples/kprobes/kprobe_example.c
··· 1 1 /* 2 2 * NOTE: This example is works on x86 and powerpc. 3 3 * Here's a sample kernel module showing the use of kprobes to dump a 4 - * stack trace and selected registers when do_fork() is called. 4 + * stack trace and selected registers when _do_fork() is called. 5 5 * 6 6 * For more information on theory of operation of kprobes, see 7 7 * Documentation/kprobes.txt 8 8 * 9 9 * You will see the trace data in /var/log/messages and on the console 10 - * whenever do_fork() is invoked to create a new process. 10 + * whenever _do_fork() is invoked to create a new process. 11 11 */ 12 12 13 13 #include <linux/kernel.h> ··· 16 16 17 17 /* For each probe you need to allocate a kprobe structure */ 18 18 static struct kprobe kp = { 19 - .symbol_name = "do_fork", 19 + .symbol_name = "_do_fork", 20 20 }; 21 21 22 22 /* kprobe pre_handler: called just before the probed instruction is executed */
+2 -2
samples/kprobes/kretprobe_example.c
··· 7 7 * 8 8 * usage: insmod kretprobe_example.ko func=<func_name> 9 9 * 10 - * If no func_name is specified, do_fork is instrumented 10 + * If no func_name is specified, _do_fork is instrumented 11 11 * 12 12 * For more information on theory of operation of kretprobes, see 13 13 * Documentation/kprobes.txt ··· 25 25 #include <linux/limits.h> 26 26 #include <linux/sched.h> 27 27 28 - static char func_name[NAME_MAX] = "do_fork"; 28 + static char func_name[NAME_MAX] = "_do_fork"; 29 29 module_param_string(func, func_name, NAME_MAX, S_IRUGO); 30 30 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" 31 31 " function's execution time");
-4
scripts/extract-cert.c
··· 17 17 #include <stdint.h> 18 18 #include <stdbool.h> 19 19 #include <string.h> 20 - #include <getopt.h> 21 20 #include <err.h> 22 - #include <arpa/inet.h> 23 21 #include <openssl/bio.h> 24 - #include <openssl/evp.h> 25 22 #include <openssl/pem.h> 26 - #include <openssl/pkcs7.h> 27 23 #include <openssl/err.h> 28 24 #include <openssl/engine.h> 29 25
+77 -17
scripts/sign-file.c
··· 20 20 #include <getopt.h> 21 21 #include <err.h> 22 22 #include <arpa/inet.h> 23 + #include <openssl/opensslv.h> 23 24 #include <openssl/bio.h> 24 25 #include <openssl/evp.h> 25 26 #include <openssl/pem.h> 26 - #include <openssl/cms.h> 27 27 #include <openssl/err.h> 28 28 #include <openssl/engine.h> 29 + 30 + /* 31 + * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to 32 + * assume that it's not available and its header file is missing and that we 33 + * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts 34 + * the options we have on specifying the X.509 certificate we want. 35 + * 36 + * Further, older versions of OpenSSL don't support manually adding signers to 37 + * the PKCS#7 message so have to accept that we get a certificate included in 38 + * the signature message. Nor do such older versions of OpenSSL support 39 + * signing with anything other than SHA1 - so we're stuck with that if such is 40 + * the case. 41 + */ 42 + #if OPENSSL_VERSION_NUMBER < 0x10000000L 43 + #define USE_PKCS7 44 + #endif 45 + #ifndef USE_PKCS7 46 + #include <openssl/cms.h> 47 + #else 48 + #include <openssl/pkcs7.h> 49 + #endif 29 50 30 51 struct module_signature { 31 52 uint8_t algo; /* Public-key crypto algorithm [0] */ ··· 131 110 struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; 132 111 char *hash_algo = NULL; 133 112 char *private_key_name, *x509_name, *module_name, *dest_name; 134 - bool save_cms = false, replace_orig; 113 + bool save_sig = false, replace_orig; 135 114 bool sign_only = false; 136 115 unsigned char buf[4096]; 137 - unsigned long module_size, cms_size; 138 - unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; 116 + unsigned long module_size, sig_size; 117 + unsigned int use_signed_attrs; 139 118 const EVP_MD *digest_algo; 140 119 EVP_PKEY *private_key; 120 + #ifndef USE_PKCS7 141 121 CMS_ContentInfo *cms; 122 + unsigned int use_keyid = 0; 123 + #else 124 + PKCS7 *pkcs7; 125 + #endif 142 126 
X509 *x509; 143 127 BIO *b, *bd = NULL, *bm; 144 128 int opt, n; 145 - 146 129 OpenSSL_add_all_algorithms(); 147 130 ERR_load_crypto_strings(); 148 131 ERR_clear_error(); 149 132 150 133 key_pass = getenv("KBUILD_SIGN_PIN"); 151 134 135 + #ifndef USE_PKCS7 136 + use_signed_attrs = CMS_NOATTR; 137 + #else 138 + use_signed_attrs = PKCS7_NOATTR; 139 + #endif 140 + 152 141 do { 153 142 opt = getopt(argc, argv, "dpk"); 154 143 switch (opt) { 155 - case 'p': save_cms = true; break; 156 - case 'd': sign_only = true; save_cms = true; break; 144 + case 'p': save_sig = true; break; 145 + case 'd': sign_only = true; save_sig = true; break; 146 + #ifndef USE_PKCS7 157 147 case 'k': use_keyid = CMS_USE_KEYID; break; 148 + #endif 158 149 case -1: break; 159 150 default: format(); 160 151 } ··· 189 156 "asprintf"); 190 157 replace_orig = true; 191 158 } 159 + 160 + #ifdef USE_PKCS7 161 + if (strcmp(hash_algo, "sha1") != 0) { 162 + fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", 163 + OPENSSL_VERSION_TEXT); 164 + exit(3); 165 + } 166 + #endif 192 167 193 168 /* Read the private key and the X.509 cert the PKCS#7 message 194 169 * will point to. ··· 254 213 bm = BIO_new_file(module_name, "rb"); 255 214 ERR(!bm, "%s", module_name); 256 215 257 - /* Load the CMS message from the digest buffer. */ 216 + #ifndef USE_PKCS7 217 + /* Load the signature message from the digest buffer. 
*/ 258 218 cms = CMS_sign(NULL, NULL, NULL, NULL, 259 219 CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); 260 220 ERR(!cms, "CMS_sign"); ··· 263 221 ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, 264 222 CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | 265 223 use_keyid | use_signed_attrs), 266 - "CMS_sign_add_signer"); 224 + "CMS_add1_signer"); 267 225 ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, 268 226 "CMS_final"); 269 227 270 - if (save_cms) { 271 - char *cms_name; 228 + #else 229 + pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, 230 + PKCS7_NOCERTS | PKCS7_BINARY | 231 + PKCS7_DETACHED | use_signed_attrs); 232 + ERR(!pkcs7, "PKCS7_sign"); 233 + #endif 272 234 273 - ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); 274 - b = BIO_new_file(cms_name, "wb"); 275 - ERR(!b, "%s", cms_name); 276 - ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); 235 + if (save_sig) { 236 + char *sig_file_name; 237 + 238 + ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, 239 + "asprintf"); 240 + b = BIO_new_file(sig_file_name, "wb"); 241 + ERR(!b, "%s", sig_file_name); 242 + #ifndef USE_PKCS7 243 + ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, 244 + "%s", sig_file_name); 245 + #else 246 + ERR(i2d_PKCS7_bio(b, pkcs7) < 0, 247 + "%s", sig_file_name); 248 + #endif 277 249 BIO_free(b); 278 250 } 279 251 ··· 303 247 ERR(n < 0, "%s", module_name); 304 248 module_size = BIO_number_written(bd); 305 249 250 + #ifndef USE_PKCS7 306 251 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); 307 - cms_size = BIO_number_written(bd) - module_size; 308 - sig_info.sig_len = htonl(cms_size); 252 + #else 253 + ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); 254 + #endif 255 + sig_size = BIO_number_written(bd) - module_size; 256 + sig_info.sig_len = htonl(sig_size); 309 257 ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); 310 258 ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, 
"%s", dest_name); 311 259
+4 -4
security/keys/gc.c
··· 134 134 kdebug("- %u", key->serial); 135 135 key_check(key); 136 136 137 + /* Throw away the key data */ 138 + if (key->type->destroy) 139 + key->type->destroy(key); 140 + 137 141 security_key_free(key); 138 142 139 143 /* deal with the user's key tracking and quota */ ··· 151 147 atomic_dec(&key->user->nkeys); 152 148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 153 149 atomic_dec(&key->user->nikeys); 154 - 155 - /* now throw away the key memory */ 156 - if (key->type->destroy) 157 - key->type->destroy(key); 158 150 159 151 key_user_put(key->user); 160 152
+6 -2
tools/build/Makefile.feature
··· 41 41 libelf-getphdrnum \ 42 42 libelf-mmap \ 43 43 libnuma \ 44 + numa_num_possible_cpus \ 44 45 libperl \ 45 46 libpython \ 46 47 libpython-version \ ··· 52 51 timerfd \ 53 52 libdw-dwarf-unwind \ 54 53 zlib \ 55 - lzma 54 + lzma \ 55 + get_cpuid 56 56 57 57 FEATURE_DISPLAY ?= \ 58 58 dwarf \ ··· 63 61 libbfd \ 64 62 libelf \ 65 63 libnuma \ 64 + numa_num_possible_cpus \ 66 65 libperl \ 67 66 libpython \ 68 67 libslang \ 69 68 libunwind \ 70 69 libdw-dwarf-unwind \ 71 70 zlib \ 72 - lzma 71 + lzma \ 72 + get_cpuid 73 73 74 74 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 75 75 # If in the future we need per-feature checks/flags for features not
+9 -1
tools/build/feature/Makefile
··· 19 19 test-libelf-getphdrnum.bin \ 20 20 test-libelf-mmap.bin \ 21 21 test-libnuma.bin \ 22 + test-numa_num_possible_cpus.bin \ 22 23 test-libperl.bin \ 23 24 test-libpython.bin \ 24 25 test-libpython-version.bin \ ··· 35 34 test-compile-x32.bin \ 36 35 test-zlib.bin \ 37 36 test-lzma.bin \ 38 - test-bpf.bin 37 + test-bpf.bin \ 38 + test-get_cpuid.bin 39 39 40 40 CC := $(CROSS_COMPILE)gcc -MD 41 41 PKG_CONFIG := $(CROSS_COMPILE)pkg-config ··· 87 85 $(BUILD) -lelf 88 86 89 87 test-libnuma.bin: 88 + $(BUILD) -lnuma 89 + 90 + test-numa_num_possible_cpus.bin: 90 91 $(BUILD) -lnuma 91 92 92 93 test-libunwind.bin: ··· 166 161 167 162 test-lzma.bin: 168 163 $(BUILD) -llzma 164 + 165 + test-get_cpuid.bin: 166 + $(BUILD) 169 167 170 168 test-bpf.bin: 171 169 $(BUILD)
+10
tools/build/feature/test-all.c
··· 77 77 # include "test-libnuma.c" 78 78 #undef main 79 79 80 + #define main main_test_numa_num_possible_cpus 81 + # include "test-numa_num_possible_cpus.c" 82 + #undef main 83 + 80 84 #define main main_test_timerfd 81 85 # include "test-timerfd.c" 82 86 #undef main ··· 121 117 # include "test-lzma.c" 122 118 #undef main 123 119 120 + #define main main_test_get_cpuid 121 + # include "test-get_cpuid.c" 122 + #undef main 123 + 124 124 int main(int argc, char *argv[]) 125 125 { 126 126 main_test_libpython(); ··· 144 136 main_test_libbfd(); 145 137 main_test_backtrace(); 146 138 main_test_libnuma(); 139 + main_test_numa_num_possible_cpus(); 147 140 main_test_timerfd(); 148 141 main_test_stackprotector_all(); 149 142 main_test_libdw_dwarf_unwind(); ··· 152 143 main_test_zlib(); 153 144 main_test_pthread_attr_setaffinity_np(); 154 145 main_test_lzma(); 146 + main_test_get_cpuid(); 155 147 156 148 return 0; 157 149 }
+7
tools/build/feature/test-get_cpuid.c
··· 1 + #include <cpuid.h> 2 + 3 + int main(void) 4 + { 5 + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; 6 + return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); 7 + }
+6
tools/build/feature/test-numa_num_possible_cpus.c
··· 1 + #include <numa.h> 2 + 3 + int main(void) 4 + { 5 + return numa_num_possible_cpus(); 6 + }
+20 -3
tools/lib/traceevent/event-parse.c
··· 3795 3795 struct format_field *field; 3796 3796 struct printk_map *printk; 3797 3797 long long val, fval; 3798 - unsigned long addr; 3798 + unsigned long long addr; 3799 3799 char *str; 3800 3800 unsigned char *hex; 3801 3801 int print; ··· 3828 3828 */ 3829 3829 if (!(field->flags & FIELD_IS_ARRAY) && 3830 3830 field->size == pevent->long_size) { 3831 - addr = *(unsigned long *)(data + field->offset); 3831 + 3832 + /* Handle heterogeneous recording and processing 3833 + * architectures 3834 + * 3835 + * CASE I: 3836 + * Traces recorded on 32-bit devices (32-bit 3837 + * addressing) and processed on 64-bit devices: 3838 + * In this case, only 32 bits should be read. 3839 + * 3840 + * CASE II: 3841 + * Traces recorded on 64 bit devices and processed 3842 + * on 32-bit devices: 3843 + * In this case, 64 bits must be read. 3844 + */ 3845 + addr = (pevent->long_size == 8) ? 3846 + *(unsigned long long *)(data + field->offset) : 3847 + (unsigned long long)*(unsigned int *)(data + field->offset); 3848 + 3832 3849 /* Check if it matches a print format */ 3833 3850 printk = find_printk(pevent, addr); 3834 3851 if (printk) 3835 3852 trace_seq_puts(s, printk->printk); 3836 3853 else 3837 - trace_seq_printf(s, "%lx", addr); 3854 + trace_seq_printf(s, "%llx", addr); 3838 3855 break; 3839 3856 } 3840 3857 str = malloc(len + 1);
-15
tools/perf/Documentation/intel-pt.txt
··· 364 364 365 365 CYC packets are not requested by default. 366 366 367 - no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. 368 - 369 - It stops the driver resetting the byte count to zero whenever 370 - enabling the trace (for example on context switches) which in 371 - turn results in no PSB being forced. However some processors 372 - will produce a PSB anyway. 373 - 374 - In any case, there is still a PSB when the trace is enabled for 375 - the first time. 376 - 377 - no_force_psb can be used to slightly decrease the trace size but 378 - may make it harder for the decoder to recover from errors. 379 - 380 - no_force_psb is not selected by default. 381 - 382 367 383 368 new snapshot option 384 369 -------------------
+15 -5
tools/perf/config/Makefile
··· 573 573 msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); 574 574 NO_LIBNUMA := 1 575 575 else 576 - CFLAGS += -DHAVE_LIBNUMA_SUPPORT 577 - EXTLIBS += -lnuma 578 - $(call detected,CONFIG_NUMA) 576 + ifeq ($(feature-numa_num_possible_cpus), 0) 577 + msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); 578 + NO_LIBNUMA := 1 579 + else 580 + CFLAGS += -DHAVE_LIBNUMA_SUPPORT 581 + EXTLIBS += -lnuma 582 + $(call detected,CONFIG_NUMA) 583 + endif 579 584 endif 580 585 endif 581 586 ··· 626 621 endif 627 622 628 623 ifndef NO_AUXTRACE 629 - $(call detected,CONFIG_AUXTRACE) 630 - CFLAGS += -DHAVE_AUXTRACE_SUPPORT 624 + ifeq ($(feature-get_cpuid), 0) 625 + msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); 626 + NO_AUXTRACE := 1 627 + else 628 + $(call detected,CONFIG_AUXTRACE) 629 + CFLAGS += -DHAVE_AUXTRACE_SUPPORT 630 + endif 631 631 endif 632 632 633 633 # Among the variables below, these:
+7 -6
tools/perf/util/probe-event.c
··· 270 270 int ret = 0; 271 271 272 272 if (module) { 273 - list_for_each_entry(dso, &host_machine->dsos.head, node) { 274 - if (!dso->kernel) 275 - continue; 276 - if (strncmp(dso->short_name + 1, module, 277 - dso->short_name_len - 2) == 0) 278 - goto found; 273 + char module_name[128]; 274 + 275 + snprintf(module_name, sizeof(module_name), "[%s]", module); 276 + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); 277 + if (map) { 278 + dso = map->dso; 279 + goto found; 279 280 } 280 281 pr_debug("Failed to find module %s.\n", module); 281 282 return -ENOENT;
+4 -1
tools/perf/util/session.c
··· 1580 1580 file_offset = page_offset; 1581 1581 head = data_offset - page_offset; 1582 1582 1583 - if (data_size && (data_offset + data_size < file_size)) 1583 + if (data_size == 0) 1584 + goto out; 1585 + 1586 + if (data_offset + data_size < file_size) 1584 1587 file_size = data_offset + data_size; 1585 1588 1586 1589 ui_progress__init(&prog, file_size, "Processing events...");
+14 -2
tools/perf/util/stat.c
··· 196 196 memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); 197 197 } 198 198 199 - static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) 199 + static int check_per_pkg(struct perf_evsel *counter, 200 + struct perf_counts_values *vals, int cpu, bool *skip) 200 201 { 201 202 unsigned long *mask = counter->per_pkg_mask; 202 203 struct cpu_map *cpus = perf_evsel__cpus(counter); ··· 219 218 counter->per_pkg_mask = mask; 220 219 } 221 220 221 + /* 222 + * we do not consider an event that has not run as a good 223 + * instance to mark a package as used (skip=1). Otherwise 224 + * we may run into a situation where the first CPU in a package 225 + * is not running anything, yet the second is, and this function 226 + * would mark the package as used after the first CPU and would 227 + * not read the values from the second CPU. 228 + */ 229 + if (!(vals->run && vals->ena)) 230 + return 0; 231 + 222 232 s = cpu_map__get_socket(cpus, cpu); 223 233 if (s < 0) 224 234 return -1; ··· 247 235 static struct perf_counts_values zero; 248 236 bool skip = false; 249 237 250 - if (check_per_pkg(evsel, cpu, &skip)) { 238 + if (check_per_pkg(evsel, count, cpu, &skip)) { 251 239 pr_err("failed to read per-pkg counter\n"); 252 240 return -1; 253 241 }
+13 -22
tools/perf/util/symbol-elf.c
··· 38 38 #endif 39 39 40 40 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT 41 - int elf_getphdrnum(Elf *elf, size_t *dst) 41 + static int elf_getphdrnum(Elf *elf, size_t *dst) 42 42 { 43 43 GElf_Ehdr gehdr; 44 44 GElf_Ehdr *ehdr; ··· 1271 1271 static int kcore__init(struct kcore *kcore, char *filename, int elfclass, 1272 1272 bool temp) 1273 1273 { 1274 - GElf_Ehdr *ehdr; 1275 - 1276 1274 kcore->elfclass = elfclass; 1277 1275 1278 1276 if (temp) ··· 1287 1289 if (!gelf_newehdr(kcore->elf, elfclass)) 1288 1290 goto out_end; 1289 1291 1290 - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); 1291 - if (!ehdr) 1292 - goto out_end; 1292 + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); 1293 1293 1294 1294 return 0; 1295 1295 ··· 1344 1348 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, 1345 1349 u64 addr, u64 len) 1346 1350 { 1347 - GElf_Phdr gphdr; 1348 - GElf_Phdr *phdr; 1351 + GElf_Phdr phdr = { 1352 + .p_type = PT_LOAD, 1353 + .p_flags = PF_R | PF_W | PF_X, 1354 + .p_offset = offset, 1355 + .p_vaddr = addr, 1356 + .p_paddr = 0, 1357 + .p_filesz = len, 1358 + .p_memsz = len, 1359 + .p_align = page_size, 1360 + }; 1349 1361 1350 - phdr = gelf_getphdr(kcore->elf, idx, &gphdr); 1351 - if (!phdr) 1352 - return -1; 1353 - 1354 - phdr->p_type = PT_LOAD; 1355 - phdr->p_flags = PF_R | PF_W | PF_X; 1356 - phdr->p_offset = offset; 1357 - phdr->p_vaddr = addr; 1358 - phdr->p_paddr = 0; 1359 - phdr->p_filesz = len; 1360 - phdr->p_memsz = len; 1361 - phdr->p_align = page_size; 1362 - 1363 - if (!gelf_update_phdr(kcore->elf, idx, phdr)) 1362 + if (!gelf_update_phdr(kcore->elf, idx, &phdr)) 1364 1363 return -1; 1365 1364 1366 1365 return 0;
+1 -1
tools/perf/util/util.c
··· 709 709 710 710 dir = opendir(procfs__mountpoint()); 711 711 if (!dir) 712 - return -1; 712 + return false; 713 713 714 714 /* Walk through the directory. */ 715 715 while (ret && (d = readdir(dir)) != NULL) {
+34 -5
tools/power/x86/turbostat/turbostat.c
··· 71 71 unsigned int extra_msr_offset64; 72 72 unsigned int extra_delta_offset32; 73 73 unsigned int extra_delta_offset64; 74 + unsigned int aperf_mperf_multiplier = 1; 74 75 int do_smi; 75 76 double bclk; 77 + double base_hz; 78 + double tsc_tweak = 1.0; 76 79 unsigned int show_pkg; 77 80 unsigned int show_core; 78 81 unsigned int show_cpu; ··· 505 502 /* %Busy */ 506 503 if (has_aperf) { 507 504 if (!skip_c0) 508 - outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); 505 + outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); 509 506 else 510 507 outp += sprintf(outp, "********"); 511 508 } ··· 513 510 /* Bzy_MHz */ 514 511 if (has_aperf) 515 512 outp += sprintf(outp, "%8.0f", 516 - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); 513 + 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); 517 514 518 515 /* TSC_MHz */ 519 516 outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); ··· 987 984 return -3; 988 985 if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) 989 986 return -4; 987 + t->aperf = t->aperf * aperf_mperf_multiplier; 988 + t->mperf = t->mperf * aperf_mperf_multiplier; 990 989 } 991 990 992 991 if (do_smi) { ··· 1153 1148 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1154 1149 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1155 1150 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; 1151 + 1152 + 1153 + static void 1154 + calculate_tsc_tweak() 1155 + { 1156 + unsigned long long msr; 1157 + unsigned int base_ratio; 1158 + 1159 + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); 1160 + base_ratio = (msr >> 8) & 0xFF; 1161 + base_hz = base_ratio * bclk * 1000000; 
1162 + tsc_tweak = base_hz / tsc_hz; 1163 + } 1156 1164 1157 1165 static void 1158 1166 dump_nhm_platform_info(void) ··· 1944 1926 1945 1927 switch (model) { 1946 1928 case 0x3A: /* IVB */ 1947 - case 0x3E: /* IVB Xeon */ 1948 - 1949 1929 case 0x3C: /* HSW */ 1950 1930 case 0x3F: /* HSX */ 1951 1931 case 0x45: /* HSW */ ··· 2559 2543 return 0; 2560 2544 } 2561 2545 2546 + unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) 2547 + { 2548 + if (is_knl(family, model)) 2549 + return 1024; 2550 + return 1; 2551 + } 2552 + 2562 2553 #define SLM_BCLK_FREQS 5 2563 2554 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; 2564 2555 ··· 2767 2744 } 2768 2745 } 2769 2746 2747 + if (has_aperf) 2748 + aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); 2749 + 2770 2750 do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); 2771 2751 do_snb_cstates = has_snb_msrs(family, model); 2772 2752 do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); ··· 2787 2761 2788 2762 if (debug) 2789 2763 dump_cstate_pstate_config_info(); 2764 + 2765 + if (has_skl_msrs(family, model)) 2766 + calculate_tsc_tweak(); 2790 2767 2791 2768 return; 2792 2769 } ··· 3119 3090 } 3120 3091 3121 3092 void print_version() { 3122 - fprintf(stderr, "turbostat version 4.7 17-June, 2015" 3093 + fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" 3123 3094 " - Len Brown <lenb@kernel.org>\n"); 3124 3095 } 3125 3096