Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

udplite conflict is resolved by taking what 'net-next' did
which removed the backlog receive method assignment, since
it is no longer necessary.

Two entries were added to the non-priv ethtool operations
switch statement, one in 'net' and one in 'net-next', so
simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1245 -680
+10
MAINTAINERS
··· 77 77 Q: Patchwork web based patch tracking system site 78 78 T: SCM tree type and location. 79 79 Type is one of: git, hg, quilt, stgit, topgit 80 + B: Bug tracking system location. 80 81 S: Status, one of the following: 81 82 Supported: Someone is actually paid to look after this. 82 83 Maintained: Someone actually looks after it. ··· 282 281 W: https://01.org/linux-acpi 283 282 Q: https://patchwork.kernel.org/project/linux-acpi/list/ 284 283 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 284 + B: https://bugzilla.kernel.org 285 285 S: Supported 286 286 F: drivers/acpi/ 287 287 F: drivers/pnp/pnpacpi/ ··· 306 304 W: https://github.com/acpica/acpica/ 307 305 Q: https://patchwork.kernel.org/project/linux-acpi/list/ 308 306 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 307 + B: https://bugzilla.kernel.org 308 + B: https://bugs.acpica.org 309 309 S: Supported 310 310 F: drivers/acpi/acpica/ 311 311 F: include/acpi/ ··· 317 313 M: Zhang Rui <rui.zhang@intel.com> 318 314 L: linux-acpi@vger.kernel.org 319 315 W: https://01.org/linux-acpi 316 + B: https://bugzilla.kernel.org 320 317 S: Supported 321 318 F: drivers/acpi/fan.c 322 319 ··· 333 328 M: Zhang Rui <rui.zhang@intel.com> 334 329 L: linux-acpi@vger.kernel.org 335 330 W: https://01.org/linux-acpi 331 + B: https://bugzilla.kernel.org 336 332 S: Supported 337 333 F: drivers/acpi/*thermal* 338 334 ··· 341 335 M: Zhang Rui <rui.zhang@intel.com> 342 336 L: linux-acpi@vger.kernel.org 343 337 W: https://01.org/linux-acpi 338 + B: https://bugzilla.kernel.org 344 339 S: Supported 345 340 F: drivers/acpi/acpi_video.c 346 341 ··· 5672 5665 M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 5673 5666 M: Pavel Machek <pavel@ucw.cz> 5674 5667 L: linux-pm@vger.kernel.org 5668 + B: https://bugzilla.kernel.org 5675 5669 S: Supported 5676 5670 F: arch/x86/power/ 5677 5671 F: drivers/base/power/ ··· 9633 9625 M: "Rafael J. 
Wysocki" <rjw@rjwysocki.net> 9634 9626 L: linux-pm@vger.kernel.org 9635 9627 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 9628 + B: https://bugzilla.kernel.org 9636 9629 S: Supported 9637 9630 F: drivers/base/power/ 9638 9631 F: include/linux/pm.h ··· 11623 11614 M: Len Brown <len.brown@intel.com> 11624 11615 M: Pavel Machek <pavel@ucw.cz> 11625 11616 L: linux-pm@vger.kernel.org 11617 + B: https://bugzilla.kernel.org 11626 11618 S: Supported 11627 11619 F: Documentation/power/ 11628 11620 F: arch/x86/kernel/acpi/
-1
arch/arm/include/asm/Kbuild
··· 8 8 generic-y += emergency-restart.h 9 9 generic-y += errno.h 10 10 generic-y += exec.h 11 - generic-y += export.h 12 11 generic-y += ioctl.h 13 12 generic-y += ipcbuf.h 14 13 generic-y += irq_regs.h
+1 -1
arch/arm/kernel/Makefile
··· 33 33 obj-$(CONFIG_CPU_IDLE) += cpuidle.o 34 34 obj-$(CONFIG_ISA_DMA_API) += dma.o 35 35 obj-$(CONFIG_FIQ) += fiq.o fiqasm.o 36 - obj-$(CONFIG_MODULES) += module.o 36 + obj-$(CONFIG_MODULES) += armksyms.o module.o 37 37 obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o 38 38 obj-$(CONFIG_ISA_DMA) += dma-isa.o 39 39 obj-$(CONFIG_PCI) += bios32.o isa.o
+183
arch/arm/kernel/armksyms.c
··· 1 + /* 2 + * linux/arch/arm/kernel/armksyms.c 3 + * 4 + * Copyright (C) 2000 Russell King 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #include <linux/export.h> 11 + #include <linux/sched.h> 12 + #include <linux/string.h> 13 + #include <linux/cryptohash.h> 14 + #include <linux/delay.h> 15 + #include <linux/in6.h> 16 + #include <linux/syscalls.h> 17 + #include <linux/uaccess.h> 18 + #include <linux/io.h> 19 + #include <linux/arm-smccc.h> 20 + 21 + #include <asm/checksum.h> 22 + #include <asm/ftrace.h> 23 + 24 + /* 25 + * libgcc functions - functions that are used internally by the 26 + * compiler... (prototypes are not correct though, but that 27 + * doesn't really matter since they're not versioned). 28 + */ 29 + extern void __ashldi3(void); 30 + extern void __ashrdi3(void); 31 + extern void __divsi3(void); 32 + extern void __lshrdi3(void); 33 + extern void __modsi3(void); 34 + extern void __muldi3(void); 35 + extern void __ucmpdi2(void); 36 + extern void __udivsi3(void); 37 + extern void __umodsi3(void); 38 + extern void __do_div64(void); 39 + extern void __bswapsi2(void); 40 + extern void __bswapdi2(void); 41 + 42 + extern void __aeabi_idiv(void); 43 + extern void __aeabi_idivmod(void); 44 + extern void __aeabi_lasr(void); 45 + extern void __aeabi_llsl(void); 46 + extern void __aeabi_llsr(void); 47 + extern void __aeabi_lmul(void); 48 + extern void __aeabi_uidiv(void); 49 + extern void __aeabi_uidivmod(void); 50 + extern void __aeabi_ulcmp(void); 51 + 52 + extern void fpundefinstr(void); 53 + 54 + void mmioset(void *, unsigned int, size_t); 55 + void mmiocpy(void *, const void *, size_t); 56 + 57 + /* platform dependent support */ 58 + EXPORT_SYMBOL(arm_delay_ops); 59 + 60 + /* networking */ 61 + EXPORT_SYMBOL(csum_partial); 62 + EXPORT_SYMBOL(csum_partial_copy_from_user); 63 + 
EXPORT_SYMBOL(csum_partial_copy_nocheck); 64 + EXPORT_SYMBOL(__csum_ipv6_magic); 65 + 66 + /* io */ 67 + #ifndef __raw_readsb 68 + EXPORT_SYMBOL(__raw_readsb); 69 + #endif 70 + #ifndef __raw_readsw 71 + EXPORT_SYMBOL(__raw_readsw); 72 + #endif 73 + #ifndef __raw_readsl 74 + EXPORT_SYMBOL(__raw_readsl); 75 + #endif 76 + #ifndef __raw_writesb 77 + EXPORT_SYMBOL(__raw_writesb); 78 + #endif 79 + #ifndef __raw_writesw 80 + EXPORT_SYMBOL(__raw_writesw); 81 + #endif 82 + #ifndef __raw_writesl 83 + EXPORT_SYMBOL(__raw_writesl); 84 + #endif 85 + 86 + /* string / mem functions */ 87 + EXPORT_SYMBOL(strchr); 88 + EXPORT_SYMBOL(strrchr); 89 + EXPORT_SYMBOL(memset); 90 + EXPORT_SYMBOL(memcpy); 91 + EXPORT_SYMBOL(memmove); 92 + EXPORT_SYMBOL(memchr); 93 + EXPORT_SYMBOL(__memzero); 94 + 95 + EXPORT_SYMBOL(mmioset); 96 + EXPORT_SYMBOL(mmiocpy); 97 + 98 + #ifdef CONFIG_MMU 99 + EXPORT_SYMBOL(copy_page); 100 + 101 + EXPORT_SYMBOL(arm_copy_from_user); 102 + EXPORT_SYMBOL(arm_copy_to_user); 103 + EXPORT_SYMBOL(arm_clear_user); 104 + 105 + EXPORT_SYMBOL(__get_user_1); 106 + EXPORT_SYMBOL(__get_user_2); 107 + EXPORT_SYMBOL(__get_user_4); 108 + EXPORT_SYMBOL(__get_user_8); 109 + 110 + #ifdef __ARMEB__ 111 + EXPORT_SYMBOL(__get_user_64t_1); 112 + EXPORT_SYMBOL(__get_user_64t_2); 113 + EXPORT_SYMBOL(__get_user_64t_4); 114 + EXPORT_SYMBOL(__get_user_32t_8); 115 + #endif 116 + 117 + EXPORT_SYMBOL(__put_user_1); 118 + EXPORT_SYMBOL(__put_user_2); 119 + EXPORT_SYMBOL(__put_user_4); 120 + EXPORT_SYMBOL(__put_user_8); 121 + #endif 122 + 123 + /* gcc lib functions */ 124 + EXPORT_SYMBOL(__ashldi3); 125 + EXPORT_SYMBOL(__ashrdi3); 126 + EXPORT_SYMBOL(__divsi3); 127 + EXPORT_SYMBOL(__lshrdi3); 128 + EXPORT_SYMBOL(__modsi3); 129 + EXPORT_SYMBOL(__muldi3); 130 + EXPORT_SYMBOL(__ucmpdi2); 131 + EXPORT_SYMBOL(__udivsi3); 132 + EXPORT_SYMBOL(__umodsi3); 133 + EXPORT_SYMBOL(__do_div64); 134 + EXPORT_SYMBOL(__bswapsi2); 135 + EXPORT_SYMBOL(__bswapdi2); 136 + 137 + #ifdef CONFIG_AEABI 138 + 
EXPORT_SYMBOL(__aeabi_idiv); 139 + EXPORT_SYMBOL(__aeabi_idivmod); 140 + EXPORT_SYMBOL(__aeabi_lasr); 141 + EXPORT_SYMBOL(__aeabi_llsl); 142 + EXPORT_SYMBOL(__aeabi_llsr); 143 + EXPORT_SYMBOL(__aeabi_lmul); 144 + EXPORT_SYMBOL(__aeabi_uidiv); 145 + EXPORT_SYMBOL(__aeabi_uidivmod); 146 + EXPORT_SYMBOL(__aeabi_ulcmp); 147 + #endif 148 + 149 + /* bitops */ 150 + EXPORT_SYMBOL(_set_bit); 151 + EXPORT_SYMBOL(_test_and_set_bit); 152 + EXPORT_SYMBOL(_clear_bit); 153 + EXPORT_SYMBOL(_test_and_clear_bit); 154 + EXPORT_SYMBOL(_change_bit); 155 + EXPORT_SYMBOL(_test_and_change_bit); 156 + EXPORT_SYMBOL(_find_first_zero_bit_le); 157 + EXPORT_SYMBOL(_find_next_zero_bit_le); 158 + EXPORT_SYMBOL(_find_first_bit_le); 159 + EXPORT_SYMBOL(_find_next_bit_le); 160 + 161 + #ifdef __ARMEB__ 162 + EXPORT_SYMBOL(_find_first_zero_bit_be); 163 + EXPORT_SYMBOL(_find_next_zero_bit_be); 164 + EXPORT_SYMBOL(_find_first_bit_be); 165 + EXPORT_SYMBOL(_find_next_bit_be); 166 + #endif 167 + 168 + #ifdef CONFIG_FUNCTION_TRACER 169 + #ifdef CONFIG_OLD_MCOUNT 170 + EXPORT_SYMBOL(mcount); 171 + #endif 172 + EXPORT_SYMBOL(__gnu_mcount_nc); 173 + #endif 174 + 175 + #ifdef CONFIG_ARM_PATCH_PHYS_VIRT 176 + EXPORT_SYMBOL(__pv_phys_pfn_offset); 177 + EXPORT_SYMBOL(__pv_offset); 178 + #endif 179 + 180 + #ifdef CONFIG_HAVE_ARM_SMCCC 181 + EXPORT_SYMBOL(arm_smccc_smc); 182 + EXPORT_SYMBOL(arm_smccc_hvc); 183 + #endif
-3
arch/arm/kernel/entry-ftrace.S
··· 7 7 #include <asm/assembler.h> 8 8 #include <asm/ftrace.h> 9 9 #include <asm/unwind.h> 10 - #include <asm/export.h> 11 10 12 11 #include "entry-header.S" 13 12 ··· 153 154 __mcount _old 154 155 #endif 155 156 ENDPROC(mcount) 156 - EXPORT_SYMBOL(mcount) 157 157 158 158 #ifdef CONFIG_DYNAMIC_FTRACE 159 159 ENTRY(ftrace_caller_old) ··· 205 207 #endif 206 208 UNWIND(.fnend) 207 209 ENDPROC(__gnu_mcount_nc) 208 - EXPORT_SYMBOL(__gnu_mcount_nc) 209 210 210 211 #ifdef CONFIG_DYNAMIC_FTRACE 211 212 ENTRY(ftrace_caller)
-3
arch/arm/kernel/head.S
··· 22 22 #include <asm/memory.h> 23 23 #include <asm/thread_info.h> 24 24 #include <asm/pgtable.h> 25 - #include <asm/export.h> 26 25 27 26 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) 28 27 #include CONFIG_DEBUG_LL_INCLUDE ··· 727 728 __pv_offset: 728 729 .quad 0 729 730 .size __pv_offset, . -__pv_offset 730 - EXPORT_SYMBOL(__pv_phys_pfn_offset) 731 - EXPORT_SYMBOL(__pv_offset) 732 731 #endif 733 732 734 733 #include "head-common.S"
-3
arch/arm/kernel/smccc-call.S
··· 16 16 #include <asm/opcodes-sec.h> 17 17 #include <asm/opcodes-virt.h> 18 18 #include <asm/unwind.h> 19 - #include <asm/export.h> 20 19 21 20 /* 22 21 * Wrap c macros in asm macros to delay expansion until after the ··· 51 52 ENTRY(arm_smccc_smc) 52 53 SMCCC SMCCC_SMC 53 54 ENDPROC(arm_smccc_smc) 54 - EXPORT_SYMBOL(arm_smccc_smc) 55 55 56 56 /* 57 57 * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, ··· 60 62 ENTRY(arm_smccc_hvc) 61 63 SMCCC SMCCC_HVC 62 64 ENDPROC(arm_smccc_hvc) 63 - EXPORT_SYMBOL(arm_smccc_hvc)
-3
arch/arm/lib/ashldi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 - #include <asm/export.h> 32 31 33 32 #ifdef __ARMEB__ 34 33 #define al r1 ··· 52 53 53 54 ENDPROC(__ashldi3) 54 55 ENDPROC(__aeabi_llsl) 55 - EXPORT_SYMBOL(__ashldi3) 56 - EXPORT_SYMBOL(__aeabi_llsl)
-3
arch/arm/lib/ashrdi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 - #include <asm/export.h> 32 31 33 32 #ifdef __ARMEB__ 34 33 #define al r1 ··· 52 53 53 54 ENDPROC(__ashrdi3) 54 55 ENDPROC(__aeabi_lasr) 55 - EXPORT_SYMBOL(__ashrdi3) 56 - EXPORT_SYMBOL(__aeabi_lasr)
-5
arch/arm/lib/bitops.h
··· 1 1 #include <asm/assembler.h> 2 2 #include <asm/unwind.h> 3 - #include <asm/export.h> 4 3 5 4 #if __LINUX_ARM_ARCH__ >= 6 6 5 .macro bitop, name, instr ··· 25 26 bx lr 26 27 UNWIND( .fnend ) 27 28 ENDPROC(\name ) 28 - EXPORT_SYMBOL(\name ) 29 29 .endm 30 30 31 31 .macro testop, name, instr, store ··· 55 57 2: bx lr 56 58 UNWIND( .fnend ) 57 59 ENDPROC(\name ) 58 - EXPORT_SYMBOL(\name ) 59 60 .endm 60 61 #else 61 62 .macro bitop, name, instr ··· 74 77 ret lr 75 78 UNWIND( .fnend ) 76 79 ENDPROC(\name ) 77 - EXPORT_SYMBOL(\name ) 78 80 .endm 79 81 80 82 /** ··· 102 106 ret lr 103 107 UNWIND( .fnend ) 104 108 ENDPROC(\name ) 105 - EXPORT_SYMBOL(\name ) 106 109 .endm 107 110 #endif
-3
arch/arm/lib/bswapsdi2.S
··· 1 1 #include <linux/linkage.h> 2 2 #include <asm/assembler.h> 3 - #include <asm/export.h> 4 3 5 4 #if __LINUX_ARM_ARCH__ >= 6 6 5 ENTRY(__bswapsi2) ··· 35 36 ret lr 36 37 ENDPROC(__bswapdi2) 37 38 #endif 38 - EXPORT_SYMBOL(__bswapsi2) 39 - EXPORT_SYMBOL(__bswapdi2)
-4
arch/arm/lib/clear_user.S
··· 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 12 #include <asm/unwind.h> 13 - #include <asm/export.h> 14 13 15 14 .text 16 15 ··· 50 51 UNWIND(.fnend) 51 52 ENDPROC(arm_clear_user) 52 53 ENDPROC(__clear_user_std) 53 - #ifndef CONFIG_UACCESS_WITH_MEMCPY 54 - EXPORT_SYMBOL(arm_clear_user) 55 - #endif 56 54 57 55 .pushsection .text.fixup,"ax" 58 56 .align 0
-2
arch/arm/lib/copy_from_user.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 - #include <asm/export.h> 17 16 18 17 /* 19 18 * Prototype: ··· 94 95 #include "copy_template.S" 95 96 96 97 ENDPROC(arm_copy_from_user) 97 - EXPORT_SYMBOL(arm_copy_from_user) 98 98 99 99 .pushsection .fixup,"ax" 100 100 .align 0
-2
arch/arm/lib/copy_page.S
··· 13 13 #include <asm/assembler.h> 14 14 #include <asm/asm-offsets.h> 15 15 #include <asm/cache.h> 16 - #include <asm/export.h> 17 16 18 17 #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) 19 18 ··· 45 46 PLD( beq 2b ) 46 47 ldmfd sp!, {r4, pc} @ 3 47 48 ENDPROC(copy_page) 48 - EXPORT_SYMBOL(copy_page)
-4
arch/arm/lib/copy_to_user.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 - #include <asm/export.h> 17 16 18 17 /* 19 18 * Prototype: ··· 99 100 100 101 ENDPROC(arm_copy_to_user) 101 102 ENDPROC(__copy_to_user_std) 102 - #ifndef CONFIG_UACCESS_WITH_MEMCPY 103 - EXPORT_SYMBOL(arm_copy_to_user) 104 - #endif 105 103 106 104 .pushsection .text.fixup,"ax" 107 105 .align 0
+1 -2
arch/arm/lib/csumipv6.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .text 15 14 ··· 30 31 adcs r0, r0, #0 31 32 ldmfd sp!, {pc} 32 33 ENDPROC(__csum_ipv6_magic) 33 - EXPORT_SYMBOL(__csum_ipv6_magic) 34 +
-2
arch/arm/lib/csumpartial.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .text 15 14 ··· 140 141 bne 4b 141 142 b .Lless4 142 143 ENDPROC(csum_partial) 143 - EXPORT_SYMBOL(csum_partial)
-1
arch/arm/lib/csumpartialcopy.S
··· 49 49 50 50 #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) 51 51 #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) 52 - #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck) 53 52 54 53 #include "csumpartialcopygeneric.S"
-2
arch/arm/lib/csumpartialcopygeneric.S
··· 8 8 * published by the Free Software Foundation. 9 9 */ 10 10 #include <asm/assembler.h> 11 - #include <asm/export.h> 12 11 13 12 /* 14 13 * unsigned int ··· 331 332 mov r5, r4, get_byte_1 332 333 b .Lexit 333 334 FN_EXIT 334 - FN_EXPORT
-1
arch/arm/lib/csumpartialcopyuser.S
··· 73 73 74 74 #define FN_ENTRY ENTRY(csum_partial_copy_from_user) 75 75 #define FN_EXIT ENDPROC(csum_partial_copy_from_user) 76 - #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user) 77 76 78 77 #include "csumpartialcopygeneric.S" 79 78
-2
arch/arm/lib/delay.c
··· 24 24 #include <linux/init.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/module.h> 27 - #include <linux/export.h> 28 27 #include <linux/timex.h> 29 28 30 29 /* ··· 34 35 .const_udelay = __loop_const_udelay, 35 36 .udelay = __loop_udelay, 36 37 }; 37 - EXPORT_SYMBOL(arm_delay_ops); 38 38 39 39 static const struct delay_timer *delay_timer; 40 40 static bool delay_calibrated;
-2
arch/arm/lib/div64.S
··· 15 15 #include <linux/linkage.h> 16 16 #include <asm/assembler.h> 17 17 #include <asm/unwind.h> 18 - #include <asm/export.h> 19 18 20 19 #ifdef __ARMEB__ 21 20 #define xh r0 ··· 210 211 211 212 UNWIND(.fnend) 212 213 ENDPROC(__do_div64) 213 - EXPORT_SYMBOL(__do_div64)
-9
arch/arm/lib/findbit.S
··· 15 15 */ 16 16 #include <linux/linkage.h> 17 17 #include <asm/assembler.h> 18 - #include <asm/export.h> 19 18 .text 20 19 21 20 /* ··· 37 38 3: mov r0, r1 @ no free bits 38 39 ret lr 39 40 ENDPROC(_find_first_zero_bit_le) 40 - EXPORT_SYMBOL(_find_first_zero_bit_le) 41 41 42 42 /* 43 43 * Purpose : Find next 'zero' bit ··· 57 59 add r2, r2, #1 @ align bit pointer 58 60 b 2b @ loop for next bit 59 61 ENDPROC(_find_next_zero_bit_le) 60 - EXPORT_SYMBOL(_find_next_zero_bit_le) 61 62 62 63 /* 63 64 * Purpose : Find a 'one' bit ··· 78 81 3: mov r0, r1 @ no free bits 79 82 ret lr 80 83 ENDPROC(_find_first_bit_le) 81 - EXPORT_SYMBOL(_find_first_bit_le) 82 84 83 85 /* 84 86 * Purpose : Find next 'one' bit ··· 97 101 add r2, r2, #1 @ align bit pointer 98 102 b 2b @ loop for next bit 99 103 ENDPROC(_find_next_bit_le) 100 - EXPORT_SYMBOL(_find_next_bit_le) 101 104 102 105 #ifdef __ARMEB__ 103 106 ··· 116 121 3: mov r0, r1 @ no free bits 117 122 ret lr 118 123 ENDPROC(_find_first_zero_bit_be) 119 - EXPORT_SYMBOL(_find_first_zero_bit_be) 120 124 121 125 ENTRY(_find_next_zero_bit_be) 122 126 teq r1, #0 ··· 133 139 add r2, r2, #1 @ align bit pointer 134 140 b 2b @ loop for next bit 135 141 ENDPROC(_find_next_zero_bit_be) 136 - EXPORT_SYMBOL(_find_next_zero_bit_be) 137 142 138 143 ENTRY(_find_first_bit_be) 139 144 teq r1, #0 ··· 150 157 3: mov r0, r1 @ no free bits 151 158 ret lr 152 159 ENDPROC(_find_first_bit_be) 153 - EXPORT_SYMBOL(_find_first_bit_be) 154 160 155 161 ENTRY(_find_next_bit_be) 156 162 teq r1, #0 ··· 166 174 add r2, r2, #1 @ align bit pointer 167 175 b 2b @ loop for next bit 168 176 ENDPROC(_find_next_bit_be) 169 - EXPORT_SYMBOL(_find_next_bit_be) 170 177 171 178 #endif 172 179
-9
arch/arm/lib/getuser.S
··· 31 31 #include <asm/assembler.h> 32 32 #include <asm/errno.h> 33 33 #include <asm/domain.h> 34 - #include <asm/export.h> 35 34 36 35 ENTRY(__get_user_1) 37 36 check_uaccess r0, 1, r1, r2, __get_user_bad ··· 38 39 mov r0, #0 39 40 ret lr 40 41 ENDPROC(__get_user_1) 41 - EXPORT_SYMBOL(__get_user_1) 42 42 43 43 ENTRY(__get_user_2) 44 44 check_uaccess r0, 2, r1, r2, __get_user_bad ··· 58 60 mov r0, #0 59 61 ret lr 60 62 ENDPROC(__get_user_2) 61 - EXPORT_SYMBOL(__get_user_2) 62 63 63 64 ENTRY(__get_user_4) 64 65 check_uaccess r0, 4, r1, r2, __get_user_bad ··· 65 68 mov r0, #0 66 69 ret lr 67 70 ENDPROC(__get_user_4) 68 - EXPORT_SYMBOL(__get_user_4) 69 71 70 72 ENTRY(__get_user_8) 71 73 check_uaccess r0, 8, r1, r2, __get_user_bad ··· 78 82 mov r0, #0 79 83 ret lr 80 84 ENDPROC(__get_user_8) 81 - EXPORT_SYMBOL(__get_user_8) 82 85 83 86 #ifdef __ARMEB__ 84 87 ENTRY(__get_user_32t_8) ··· 91 96 mov r0, #0 92 97 ret lr 93 98 ENDPROC(__get_user_32t_8) 94 - EXPORT_SYMBOL(__get_user_32t_8) 95 99 96 100 ENTRY(__get_user_64t_1) 97 101 check_uaccess r0, 1, r1, r2, __get_user_bad8 ··· 98 104 mov r0, #0 99 105 ret lr 100 106 ENDPROC(__get_user_64t_1) 101 - EXPORT_SYMBOL(__get_user_64t_1) 102 107 103 108 ENTRY(__get_user_64t_2) 104 109 check_uaccess r0, 2, r1, r2, __get_user_bad8 ··· 114 121 mov r0, #0 115 122 ret lr 116 123 ENDPROC(__get_user_64t_2) 117 - EXPORT_SYMBOL(__get_user_64t_2) 118 124 119 125 ENTRY(__get_user_64t_4) 120 126 check_uaccess r0, 4, r1, r2, __get_user_bad8 ··· 121 129 mov r0, #0 122 130 ret lr 123 131 ENDPROC(__get_user_64t_4) 124 - EXPORT_SYMBOL(__get_user_64t_4) 125 132 #endif 126 133 127 134 __get_user_bad8:
-2
arch/arm/lib/io-readsb.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .Linsb_align: rsb ip, ip, #4 15 14 cmp ip, r2 ··· 121 122 122 123 ldmfd sp!, {r4 - r6, pc} 123 124 ENDPROC(__raw_readsb) 124 - EXPORT_SYMBOL(__raw_readsb)
-2
arch/arm/lib/io-readsl.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 ENTRY(__raw_readsl) 15 14 teq r2, #0 @ do we have to check for the zero len? ··· 77 78 strb r3, [r1, #0] 78 79 ret lr 79 80 ENDPROC(__raw_readsl) 80 - EXPORT_SYMBOL(__raw_readsl)
+1 -2
arch/arm/lib/io-readsw-armv3.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .Linsw_bad_alignment: 15 14 adr r0, .Linsw_bad_align_msg ··· 103 104 104 105 ldmfd sp!, {r4, r5, r6, pc} 105 106 106 - EXPORT_SYMBOL(__raw_readsw) 107 +
-2
arch/arm/lib/io-readsw-armv4.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .macro pack, rd, hw1, hw2 15 14 #ifndef __ARMEB__ ··· 129 130 strneb ip, [r1] 130 131 ldmfd sp!, {r4, pc} 131 132 ENDPROC(__raw_readsw) 132 - EXPORT_SYMBOL(__raw_readsw)
-2
arch/arm/lib/io-writesb.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .macro outword, rd 15 14 #ifndef __ARMEB__ ··· 92 93 93 94 ldmfd sp!, {r4, r5, pc} 94 95 ENDPROC(__raw_writesb) 95 - EXPORT_SYMBOL(__raw_writesb)
-2
arch/arm/lib/io-writesl.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 ENTRY(__raw_writesl) 15 14 teq r2, #0 @ do we have to check for the zero len? ··· 65 66 bne 6b 66 67 ret lr 67 68 ENDPROC(__raw_writesl) 68 - EXPORT_SYMBOL(__raw_writesl)
-2
arch/arm/lib/io-writesw-armv3.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .Loutsw_bad_alignment: 15 14 adr r0, .Loutsw_bad_align_msg ··· 124 125 strne ip, [r0] 125 126 126 127 ldmfd sp!, {r4, r5, r6, pc} 127 - EXPORT_SYMBOL(__raw_writesw)
-2
arch/arm/lib/io-writesw-armv4.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/export.h> 13 12 14 13 .macro outword, rd 15 14 #ifndef __ARMEB__ ··· 98 99 strneh ip, [r0] 99 100 ret lr 100 101 ENDPROC(__raw_writesw) 101 - EXPORT_SYMBOL(__raw_writesw)
-9
arch/arm/lib/lib1funcs.S
··· 36 36 #include <linux/linkage.h> 37 37 #include <asm/assembler.h> 38 38 #include <asm/unwind.h> 39 - #include <asm/export.h> 40 39 41 40 .macro ARM_DIV_BODY dividend, divisor, result, curbit 42 41 ··· 238 239 UNWIND(.fnend) 239 240 ENDPROC(__udivsi3) 240 241 ENDPROC(__aeabi_uidiv) 241 - EXPORT_SYMBOL(__udivsi3) 242 - EXPORT_SYMBOL(__aeabi_uidiv) 243 242 244 243 ENTRY(__umodsi3) 245 244 UNWIND(.fnstart) ··· 256 259 257 260 UNWIND(.fnend) 258 261 ENDPROC(__umodsi3) 259 - EXPORT_SYMBOL(__umodsi3) 260 262 261 263 #ifdef CONFIG_ARM_PATCH_IDIV 262 264 .align 3 ··· 303 307 UNWIND(.fnend) 304 308 ENDPROC(__divsi3) 305 309 ENDPROC(__aeabi_idiv) 306 - EXPORT_SYMBOL(__divsi3) 307 - EXPORT_SYMBOL(__aeabi_idiv) 308 310 309 311 ENTRY(__modsi3) 310 312 UNWIND(.fnstart) ··· 327 333 328 334 UNWIND(.fnend) 329 335 ENDPROC(__modsi3) 330 - EXPORT_SYMBOL(__modsi3) 331 336 332 337 #ifdef CONFIG_AEABI 333 338 ··· 343 350 344 351 UNWIND(.fnend) 345 352 ENDPROC(__aeabi_uidivmod) 346 - EXPORT_SYMBOL(__aeabi_uidivmod) 347 353 348 354 ENTRY(__aeabi_idivmod) 349 355 UNWIND(.fnstart) ··· 356 364 357 365 UNWIND(.fnend) 358 366 ENDPROC(__aeabi_idivmod) 359 - EXPORT_SYMBOL(__aeabi_idivmod) 360 367 361 368 #endif 362 369
-3
arch/arm/lib/lshrdi3.S
··· 28 28 29 29 #include <linux/linkage.h> 30 30 #include <asm/assembler.h> 31 - #include <asm/export.h> 32 31 33 32 #ifdef __ARMEB__ 34 33 #define al r1 ··· 52 53 53 54 ENDPROC(__lshrdi3) 54 55 ENDPROC(__aeabi_llsr) 55 - EXPORT_SYMBOL(__lshrdi3) 56 - EXPORT_SYMBOL(__aeabi_llsr)
-2
arch/arm/lib/memchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 - #include <asm/export.h> 15 14 16 15 .text 17 16 .align 5 ··· 24 25 2: movne r0, #0 25 26 ret lr 26 27 ENDPROC(memchr) 27 - EXPORT_SYMBOL(memchr)
-3
arch/arm/lib/memcpy.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 - #include <asm/export.h> 17 16 18 17 #define LDR1W_SHIFT 0 19 18 #define STR1W_SHIFT 0 ··· 68 69 69 70 ENDPROC(memcpy) 70 71 ENDPROC(mmiocpy) 71 - EXPORT_SYMBOL(memcpy) 72 - EXPORT_SYMBOL(mmiocpy)
-2
arch/arm/lib/memmove.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 15 #include <asm/unwind.h> 16 - #include <asm/export.h> 17 16 18 17 .text 19 18 ··· 225 226 18: backward_copy_shift push=24 pull=8 226 227 227 228 ENDPROC(memmove) 228 - EXPORT_SYMBOL(memmove)
-3
arch/arm/lib/memset.S
··· 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 14 #include <asm/unwind.h> 15 - #include <asm/export.h> 16 15 17 16 .text 18 17 .align 5 ··· 135 136 UNWIND( .fnend ) 136 137 ENDPROC(memset) 137 138 ENDPROC(mmioset) 138 - EXPORT_SYMBOL(memset) 139 - EXPORT_SYMBOL(mmioset)
-2
arch/arm/lib/memzero.S
··· 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 12 #include <asm/unwind.h> 13 - #include <asm/export.h> 14 13 15 14 .text 16 15 .align 5 ··· 135 136 ret lr @ 1 136 137 UNWIND( .fnend ) 137 138 ENDPROC(__memzero) 138 - EXPORT_SYMBOL(__memzero)
-3
arch/arm/lib/muldi3.S
··· 12 12 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 - #include <asm/export.h> 16 15 17 16 #ifdef __ARMEB__ 18 17 #define xh r0 ··· 46 47 47 48 ENDPROC(__muldi3) 48 49 ENDPROC(__aeabi_lmul) 49 - EXPORT_SYMBOL(__muldi3) 50 - EXPORT_SYMBOL(__aeabi_lmul)
-5
arch/arm/lib/putuser.S
··· 31 31 #include <asm/assembler.h> 32 32 #include <asm/errno.h> 33 33 #include <asm/domain.h> 34 - #include <asm/export.h> 35 34 36 35 ENTRY(__put_user_1) 37 36 check_uaccess r0, 1, r1, ip, __put_user_bad ··· 38 39 mov r0, #0 39 40 ret lr 40 41 ENDPROC(__put_user_1) 41 - EXPORT_SYMBOL(__put_user_1) 42 42 43 43 ENTRY(__put_user_2) 44 44 check_uaccess r0, 2, r1, ip, __put_user_bad ··· 62 64 mov r0, #0 63 65 ret lr 64 66 ENDPROC(__put_user_2) 65 - EXPORT_SYMBOL(__put_user_2) 66 67 67 68 ENTRY(__put_user_4) 68 69 check_uaccess r0, 4, r1, ip, __put_user_bad ··· 69 72 mov r0, #0 70 73 ret lr 71 74 ENDPROC(__put_user_4) 72 - EXPORT_SYMBOL(__put_user_4) 73 75 74 76 ENTRY(__put_user_8) 75 77 check_uaccess r0, 8, r1, ip, __put_user_bad ··· 82 86 mov r0, #0 83 87 ret lr 84 88 ENDPROC(__put_user_8) 85 - EXPORT_SYMBOL(__put_user_8) 86 89 87 90 __put_user_bad: 88 91 mov r0, #-EFAULT
-2
arch/arm/lib/strchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 - #include <asm/export.h> 15 14 16 15 .text 17 16 .align 5 ··· 25 26 subeq r0, r0, #1 26 27 ret lr 27 28 ENDPROC(strchr) 28 - EXPORT_SYMBOL(strchr)
-2
arch/arm/lib/strrchr.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <asm/assembler.h> 14 - #include <asm/export.h> 15 14 16 15 .text 17 16 .align 5 ··· 24 25 mov r0, r3 25 26 ret lr 26 27 ENDPROC(strrchr) 27 - EXPORT_SYMBOL(strrchr)
-3
arch/arm/lib/uaccess_with_memcpy.c
··· 19 19 #include <linux/gfp.h> 20 20 #include <linux/highmem.h> 21 21 #include <linux/hugetlb.h> 22 - #include <linux/export.h> 23 22 #include <asm/current.h> 24 23 #include <asm/page.h> 25 24 ··· 156 157 } 157 158 return n; 158 159 } 159 - EXPORT_SYMBOL(arm_copy_to_user); 160 160 161 161 static unsigned long noinline 162 162 __clear_user_memset(void __user *addr, unsigned long n) ··· 213 215 } 214 216 return n; 215 217 } 216 - EXPORT_SYMBOL(arm_clear_user); 217 218 218 219 #if 0 219 220
-3
arch/arm/lib/ucmpdi2.S
··· 12 12 13 13 #include <linux/linkage.h> 14 14 #include <asm/assembler.h> 15 - #include <asm/export.h> 16 15 17 16 #ifdef __ARMEB__ 18 17 #define xh r0 ··· 35 36 ret lr 36 37 37 38 ENDPROC(__ucmpdi2) 38 - EXPORT_SYMBOL(__ucmpdi2) 39 39 40 40 #ifdef CONFIG_AEABI 41 41 ··· 48 50 ret lr 49 51 50 52 ENDPROC(__aeabi_ulcmp) 51 - EXPORT_SYMBOL(__aeabi_ulcmp) 52 53 53 54 #endif 54 55
+1
arch/arm/mach-imx/Makefile
··· 32 32 33 33 ifdef CONFIG_SND_IMX_SOC 34 34 obj-y += ssi-fiq.o 35 + obj-y += ssi-fiq-ksym.o 35 36 endif 36 37 37 38 # i.MX21 based machines
+20
arch/arm/mach-imx/ssi-fiq-ksym.c
··· 1 + /* 2 + * Exported ksyms for the SSI FIQ handler 3 + * 4 + * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/module.h> 12 + 13 + #include <linux/platform_data/asoc-imx-ssi.h> 14 + 15 + EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); 16 + EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); 17 + EXPORT_SYMBOL(imx_ssi_fiq_start); 18 + EXPORT_SYMBOL(imx_ssi_fiq_end); 19 + EXPORT_SYMBOL(imx_ssi_fiq_base); 20 +
+1 -6
arch/arm/mach-imx/ssi-fiq.S
··· 8 8 9 9 #include <linux/linkage.h> 10 10 #include <asm/assembler.h> 11 - #include <asm/export.h> 12 11 13 12 /* 14 13 * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size ··· 144 145 .word 0x0 145 146 .L_imx_ssi_fiq_end: 146 147 imx_ssi_fiq_end: 147 - EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) 148 - EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer) 149 - EXPORT_SYMBOL(imx_ssi_fiq_start) 150 - EXPORT_SYMBOL(imx_ssi_fiq_end) 151 - EXPORT_SYMBOL(imx_ssi_fiq_base) 148 +
+3 -1
arch/parisc/Kconfig
··· 34 34 select HAVE_ARCH_HASH 35 35 select HAVE_ARCH_SECCOMP_FILTER 36 36 select HAVE_ARCH_TRACEHOOK 37 - select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT) 37 + select GENERIC_SCHED_CLOCK 38 + select HAVE_UNSTABLE_SCHED_CLOCK if SMP 39 + select GENERIC_CLOCKEVENTS 38 40 select ARCH_NO_COHERENT_DMA_MMAP 39 41 select CPU_NO_EFFICIENT_FFS 40 42
+12 -19
arch/parisc/kernel/cache.c
··· 369 369 { 370 370 unsigned long rangetime, alltime; 371 371 unsigned long size, start; 372 + unsigned long threshold; 372 373 373 374 alltime = mfctl(16); 374 375 flush_data_cache(); ··· 383 382 printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", 384 383 alltime, size, rangetime); 385 384 386 - /* Racy, but if we see an intermediate value, it's ok too... */ 387 - parisc_cache_flush_threshold = size * alltime / rangetime; 388 - 389 - parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); 390 - if (!parisc_cache_flush_threshold) 391 - parisc_cache_flush_threshold = FLUSH_THRESHOLD; 392 - 393 - if (parisc_cache_flush_threshold > cache_info.dc_size) 394 - parisc_cache_flush_threshold = cache_info.dc_size; 395 - 396 - printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", 385 + threshold = L1_CACHE_ALIGN(size * alltime / rangetime); 386 + if (threshold > cache_info.dc_size) 387 + threshold = cache_info.dc_size; 388 + if (threshold) 389 + parisc_cache_flush_threshold = threshold; 390 + printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", 397 391 parisc_cache_flush_threshold/1024); 398 392 399 393 /* calculate TLB flush threshold */ ··· 397 401 flush_tlb_all(); 398 402 alltime = mfctl(16) - alltime; 399 403 400 - size = PAGE_SIZE; 404 + size = 0; 401 405 start = (unsigned long) _text; 402 406 rangetime = mfctl(16); 403 407 while (start < (unsigned long) _end) { ··· 410 414 printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", 411 415 alltime, size, rangetime); 412 416 413 - parisc_tlb_flush_threshold = size * alltime / rangetime; 414 - parisc_tlb_flush_threshold *= num_online_cpus(); 415 - parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); 416 - if (!parisc_tlb_flush_threshold) 417 - parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; 418 - 419 - printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", 417 + threshold = PAGE_ALIGN(num_online_cpus() * size * 
alltime / rangetime); 418 + if (threshold) 419 + parisc_tlb_flush_threshold = threshold; 420 + printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", 420 421 parisc_tlb_flush_threshold/1024); 421 422 } 422 423
+4 -4
arch/parisc/kernel/inventory.c
··· 58 58 status = pdc_system_map_find_mods(&module_result, &module_path, 0); 59 59 if (status == PDC_OK) { 60 60 pdc_type = PDC_TYPE_SYSTEM_MAP; 61 - printk("System Map.\n"); 61 + pr_cont("System Map.\n"); 62 62 return; 63 63 } 64 64 ··· 77 77 status = pdc_pat_cell_get_number(&cell_info); 78 78 if (status == PDC_OK) { 79 79 pdc_type = PDC_TYPE_PAT; 80 - printk("64 bit PAT.\n"); 80 + pr_cont("64 bit PAT.\n"); 81 81 return; 82 82 } 83 83 #endif ··· 97 97 case 0xC: /* 715/64, at least */ 98 98 99 99 pdc_type = PDC_TYPE_SNAKE; 100 - printk("Snake.\n"); 100 + pr_cont("Snake.\n"); 101 101 return; 102 102 103 103 default: /* Everything else */ 104 104 105 - printk("Unsupported.\n"); 105 + pr_cont("Unsupported.\n"); 106 106 panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); 107 107 } 108 108 }
+22 -15
arch/parisc/kernel/pacache.S
··· 96 96 97 97 fitmanymiddle: /* Loop if LOOP >= 2 */ 98 98 addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ 99 - pitlbe 0(%sr1, %r28) 99 + pitlbe %r0(%sr1, %r28) 100 100 pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ 101 101 addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ 102 102 copy %arg3, %r31 /* Re-init inner loop count */ ··· 139 139 140 140 fdtmanymiddle: /* Loop if LOOP >= 2 */ 141 141 addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ 142 - pdtlbe 0(%sr1, %r28) 142 + pdtlbe %r0(%sr1, %r28) 143 143 pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ 144 144 addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ 145 145 copy %arg3, %r31 /* Re-init inner loop count */ ··· 626 626 /* Purge any old translations */ 627 627 628 628 #ifdef CONFIG_PA20 629 - pdtlb,l 0(%r28) 630 - pdtlb,l 0(%r29) 629 + pdtlb,l %r0(%r28) 630 + pdtlb,l %r0(%r29) 631 631 #else 632 632 tlb_lock %r20,%r21,%r22 633 - pdtlb 0(%r28) 634 - pdtlb 0(%r29) 633 + pdtlb %r0(%r28) 634 + pdtlb %r0(%r29) 635 635 tlb_unlock %r20,%r21,%r22 636 636 #endif 637 637 ··· 774 774 /* Purge any old translation */ 775 775 776 776 #ifdef CONFIG_PA20 777 - pdtlb,l 0(%r28) 777 + pdtlb,l %r0(%r28) 778 778 #else 779 779 tlb_lock %r20,%r21,%r22 780 - pdtlb 0(%r28) 780 + pdtlb %r0(%r28) 781 781 tlb_unlock %r20,%r21,%r22 782 782 #endif 783 783 ··· 858 858 /* Purge any old translation */ 859 859 860 860 #ifdef CONFIG_PA20 861 - pdtlb,l 0(%r28) 861 + pdtlb,l %r0(%r28) 862 862 #else 863 863 tlb_lock %r20,%r21,%r22 864 - pdtlb 0(%r28) 864 + pdtlb %r0(%r28) 865 865 tlb_unlock %r20,%r21,%r22 866 866 #endif 867 867 ··· 898 898 sync 899 899 900 900 #ifdef CONFIG_PA20 901 - pdtlb,l 0(%r25) 901 + pdtlb,l %r0(%r25) 902 902 #else 903 903 tlb_lock %r20,%r21,%r22 904 - pdtlb 0(%r25) 904 + pdtlb %r0(%r25) 905 905 tlb_unlock %r20,%r21,%r22 906 906 #endif 907 907 ··· 931 931 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ 932 932 #endif 933 933 
934 - /* Purge any old translation */ 934 + /* Purge any old translation. Note that the FIC instruction 935 + * may use either the instruction or data TLB. Given that we 936 + * have a flat address space, it's not clear which TLB will be 937 + * used. So, we purge both entries. */ 935 938 936 939 #ifdef CONFIG_PA20 940 + pdtlb,l %r0(%r28) 937 941 pitlb,l %r0(%sr4,%r28) 938 942 #else 939 943 tlb_lock %r20,%r21,%r22 940 - pitlb (%sr4,%r28) 944 + pdtlb %r0(%r28) 945 + pitlb %r0(%sr4,%r28) 941 946 tlb_unlock %r20,%r21,%r22 942 947 #endif 943 948 ··· 981 976 sync 982 977 983 978 #ifdef CONFIG_PA20 979 + pdtlb,l %r0(%r28) 984 980 pitlb,l %r0(%sr4,%r25) 985 981 #else 986 982 tlb_lock %r20,%r21,%r22 987 - pitlb (%sr4,%r25) 983 + pdtlb %r0(%r28) 984 + pitlb %r0(%sr4,%r25) 988 985 tlb_unlock %r20,%r21,%r22 989 986 #endif 990 987
+1 -1
arch/parisc/kernel/pci-dma.c
··· 95 95 96 96 if (!pte_none(*pte)) 97 97 printk(KERN_ERR "map_pte_uncached: page already exists\n"); 98 - set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); 99 98 purge_tlb_start(flags); 99 + set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); 100 100 pdtlb_kernel(orig_vaddr); 101 101 purge_tlb_end(flags); 102 102 vaddr += PAGE_SIZE;
+4
arch/parisc/kernel/setup.c
··· 334 334 /* tell PDC we're Linux. Nevermind failure. */ 335 335 pdc_stable_write(0x40, &osid, sizeof(osid)); 336 336 337 + /* start with known state */ 338 + flush_cache_all_local(); 339 + flush_tlb_all_local(NULL); 340 + 337 341 processor_init(); 338 342 #ifdef CONFIG_SMP 339 343 pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
+11 -46
arch/parisc/kernel/time.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/rtc.h> 16 16 #include <linux/sched.h> 17 + #include <linux/sched_clock.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/param.h> 19 20 #include <linux/string.h> ··· 39 38 #include <linux/timex.h> 40 39 41 40 static unsigned long clocktick __read_mostly; /* timer cycles per tick */ 42 - 43 - #ifndef CONFIG_64BIT 44 - /* 45 - * The processor-internal cycle counter (Control Register 16) is used as time 46 - * source for the sched_clock() function. This register is 64bit wide on a 47 - * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always 48 - * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits 49 - * with a per-cpu variable which we increase every time the counter 50 - * wraps-around (which happens every ~4 secounds). 51 - */ 52 - static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits); 53 - #endif 54 41 55 42 /* 56 43 * We keep time on PA-RISC Linux by using the Interval Timer which is ··· 109 120 * Only bottom 32-bits of next_tick are writable in CR16! 110 121 */ 111 122 mtctl(next_tick, 16); 112 - 113 - #if !defined(CONFIG_64BIT) 114 - /* check for overflow on a 32bit kernel (every ~4 seconds). */ 115 - if (unlikely(next_tick < now)) 116 - this_cpu_inc(cr16_high_32_bits); 117 - #endif 118 123 119 124 /* Skip one clocktick on purpose if we missed next_tick. 
120 125 * The new CR16 must be "later" than current CR16 otherwise ··· 191 208 192 209 /* clock source code */ 193 210 194 - static cycle_t read_cr16(struct clocksource *cs) 211 + static cycle_t notrace read_cr16(struct clocksource *cs) 195 212 { 196 213 return get_cycles(); 197 214 } ··· 270 287 } 271 288 272 289 273 - /* 274 - * sched_clock() framework 275 - */ 276 - 277 - static u32 cyc2ns_mul __read_mostly; 278 - static u32 cyc2ns_shift __read_mostly; 279 - 280 - u64 sched_clock(void) 290 + static u64 notrace read_cr16_sched_clock(void) 281 291 { 282 - u64 now; 283 - 284 - /* Get current cycle counter (Control Register 16). */ 285 - #ifdef CONFIG_64BIT 286 - now = mfctl(16); 287 - #else 288 - now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32); 289 - #endif 290 - 291 - /* return the value in ns (cycles_2_ns) */ 292 - return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift); 292 + return get_cycles(); 293 293 } 294 294 295 295 ··· 282 316 283 317 void __init time_init(void) 284 318 { 285 - unsigned long current_cr16_khz; 319 + unsigned long cr16_hz; 286 320 287 - current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ 288 321 clocktick = (100 * PAGE0->mem_10msec) / HZ; 289 - 290 - /* calculate mult/shift values for cr16 */ 291 - clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, 292 - NSEC_PER_MSEC, 0); 293 - 294 322 start_cpu_itimer(); /* get CPU 0 started */ 295 323 324 + cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ 325 + 296 326 /* register at clocksource framework */ 297 - clocksource_register_khz(&clocksource_cr16, current_cr16_khz); 327 + clocksource_register_hz(&clocksource_cr16, cr16_hz); 328 + 329 + /* register as sched_clock source */ 330 + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); 298 331 }
+6 -2
arch/powerpc/boot/main.c
··· 232 232 console_ops.close(); 233 233 234 234 kentry = (kernel_entry_t) vmlinux.addr; 235 - if (ft_addr) 236 - kentry(ft_addr, 0, NULL); 235 + if (ft_addr) { 236 + if(platform_ops.kentry) 237 + platform_ops.kentry(ft_addr, vmlinux.addr); 238 + else 239 + kentry(ft_addr, 0, NULL); 240 + } 237 241 else 238 242 kentry((unsigned long)initrd.addr, initrd.size, 239 243 loader_info.promptr);
+13
arch/powerpc/boot/opal-calls.S
··· 12 12 13 13 .text 14 14 15 + .globl opal_kentry 16 + opal_kentry: 17 + /* r3 is the fdt ptr */ 18 + mtctr r4 19 + li r4, 0 20 + li r5, 0 21 + li r6, 0 22 + li r7, 0 23 + ld r11,opal@got(r2) 24 + ld r8,0(r11) 25 + ld r9,8(r11) 26 + bctr 27 + 15 28 #define OPAL_CALL(name, token) \ 16 29 .globl name; \ 17 30 name: \
+11
arch/powerpc/boot/opal.c
··· 23 23 24 24 static u32 opal_con_id; 25 25 26 + /* see opal-wrappers.S */ 26 27 int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); 27 28 int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); 28 29 int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); 29 30 int64_t opal_console_flush(uint64_t term_number); 30 31 int64_t opal_poll_events(uint64_t *outstanding_event_mask); 31 32 33 + void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr); 34 + 32 35 static int opal_con_open(void) 33 36 { 37 + /* 38 + * When OPAL loads the boot kernel it stashes the OPAL base and entry 39 + * address in r8 and r9 so the kernel can use the OPAL console 40 + * before unflattening the devicetree. While executing the wrapper will 41 + * probably trash r8 and r9 so this kentry hook restores them before 42 + * entering the decompressed kernel. 43 + */ 44 + platform_ops.kentry = opal_kentry; 34 45 return 0; 35 46 } 36 47
+1
arch/powerpc/boot/ops.h
··· 30 30 void * (*realloc)(void *ptr, unsigned long size); 31 31 void (*exit)(void); 32 32 void * (*vmlinux_alloc)(unsigned long size); 33 + void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr); 33 34 }; 34 35 extern struct platform_ops platform_ops; 35 36
+12
arch/powerpc/include/asm/asm-prototypes.h
··· 14 14 15 15 #include <linux/threads.h> 16 16 #include <linux/kprobes.h> 17 + #include <asm/cacheflush.h> 18 + #include <asm/checksum.h> 19 + #include <asm/uaccess.h> 20 + #include <asm/epapr_hcalls.h> 17 21 18 22 #include <uapi/asm/ucontext.h> 19 23 ··· 112 108 113 109 /* time */ 114 110 void accumulate_stolen_time(void); 111 + 112 + /* misc runtime */ 113 + extern u64 __bswapdi2(u64); 114 + extern s64 __lshrdi3(s64, int); 115 + extern s64 __ashldi3(s64, int); 116 + extern s64 __ashrdi3(s64, int); 117 + extern int __cmpdi2(s64, s64); 118 + extern int __ucmpdi2(u64, u64); 115 119 116 120 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
+10 -4
arch/powerpc/include/asm/mmu.h
··· 29 29 */ 30 30 31 31 /* 32 + * Kernel read only support. 33 + * We added the ppp value 0b110 in ISA 2.04. 34 + */ 35 + #define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000) 36 + 37 + /* 32 38 * We need to clear top 16bits of va (from the remaining 64 bits )in 33 39 * tlbie* instructions 34 40 */ ··· 109 103 #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 110 104 #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA 111 105 #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 112 - #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 113 - #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 114 - #define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 115 - #define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 106 + #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO 107 + #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO 108 + #define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO 109 + #define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO 116 110 #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ 117 111 MMU_FTR_CI_LARGE_PAGE 118 112 #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+1
arch/powerpc/include/asm/reg.h
··· 355 355 #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */ 356 356 #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ 357 357 #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ 358 + #define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */ 358 359 #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ 359 360 #define LPCR_MER_SH 11 360 361 #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
+4 -4
arch/powerpc/kernel/cpu_setup_power.S
··· 98 98 li r0,0 99 99 mtspr SPRN_LPID,r0 100 100 mfspr r3,SPRN_LPCR 101 - ori r3, r3, LPCR_PECEDH 102 - ori r3, r3, LPCR_HVICE 101 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) 102 + or r3, r3, r4 103 103 bl __init_LPCR 104 104 bl __init_HFSCR 105 105 bl __init_tlb_power9 ··· 118 118 li r0,0 119 119 mtspr SPRN_LPID,r0 120 120 mfspr r3,SPRN_LPCR 121 - ori r3, r3, LPCR_PECEDH 122 - ori r3, r3, LPCR_HVICE 121 + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) 122 + or r3, r3, r4 123 123 bl __init_LPCR 124 124 bl __init_HFSCR 125 125 bl __init_tlb_power9
+6 -2
arch/powerpc/mm/hash_utils_64.c
··· 193 193 /* 194 194 * Kernel read only mapped with ppp bits 0b110 195 195 */ 196 - if (!(pteflags & _PAGE_WRITE)) 197 - rflags |= (HPTE_R_PP0 | 0x2); 196 + if (!(pteflags & _PAGE_WRITE)) { 197 + if (mmu_has_feature(MMU_FTR_KERNEL_RO)) 198 + rflags |= (HPTE_R_PP0 | 0x2); 199 + else 200 + rflags |= 0x3; 201 + } 198 202 } else { 199 203 if (pteflags & _PAGE_RWX) 200 204 rflags |= 0x2;
+2 -2
arch/tile/kernel/time.c
··· 218 218 */ 219 219 unsigned long long sched_clock(void) 220 220 { 221 - return clocksource_cyc2ns(get_cycles(), 222 - sched_clock_mult, SCHED_CLOCK_SHIFT); 221 + return mult_frac(get_cycles(), 222 + sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); 223 223 } 224 224 225 225 int setup_profiling_timer(unsigned int multiplier)
+2 -3
arch/x86/boot/compressed/Makefile
··· 40 40 UBSAN_SANITIZE :=n 41 41 42 42 LDFLAGS := -m elf_$(UTS_MACHINE) 43 - ifeq ($(CONFIG_RELOCATABLE),y) 44 - # If kernel is relocatable, build compressed kernel as PIE. 43 + # Compressed kernel should be built as PIE since it may be loaded at any 44 + # address by the bootloader. 45 45 ifeq ($(CONFIG_X86_32),y) 46 46 LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) 47 47 else ··· 50 50 # command-line option, -z noreloc-overflow. 51 51 LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ 52 52 && echo "-z noreloc-overflow -pie --no-dynamic-linker") 53 - endif 54 53 endif 55 54 LDFLAGS_vmlinux := -T 56 55
+6
arch/x86/boot/cpu.c
··· 87 87 return -1; 88 88 } 89 89 90 + if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && 91 + !has_eflag(X86_EFLAGS_ID)) { 92 + printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n"); 93 + return -1; 94 + } 95 + 90 96 if (err_flags) { 91 97 puts("This kernel requires the following features " 92 98 "not present on the CPU:\n");
+7 -1
arch/x86/events/amd/core.c
··· 662 662 pr_cont("Fam15h "); 663 663 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; 664 664 break; 665 - 665 + case 0x17: 666 + pr_cont("Fam17h "); 667 + /* 668 + * In family 17h, there are no event constraints in the PMC hardware. 669 + * We fallback to using default amd_get_event_constraints. 670 + */ 671 + break; 666 672 default: 667 673 pr_err("core perfctr but no constraints; unknown hardware!\n"); 668 674 return -ENODEV;
+2 -8
arch/x86/events/core.c
··· 2352 2352 frame.next_frame = 0; 2353 2353 frame.return_address = 0; 2354 2354 2355 - if (!access_ok(VERIFY_READ, fp, 8)) 2355 + if (!valid_user_frame(fp, sizeof(frame))) 2356 2356 break; 2357 2357 2358 2358 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); ··· 2360 2360 break; 2361 2361 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4); 2362 2362 if (bytes != 0) 2363 - break; 2364 - 2365 - if (!valid_user_frame(fp, sizeof(frame))) 2366 2363 break; 2367 2364 2368 2365 perf_callchain_store(entry, cs_base + frame.return_address); ··· 2410 2413 frame.next_frame = NULL; 2411 2414 frame.return_address = 0; 2412 2415 2413 - if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) 2416 + if (!valid_user_frame(fp, sizeof(frame))) 2414 2417 break; 2415 2418 2416 2419 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); ··· 2418 2421 break; 2419 2422 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp)); 2420 2423 if (bytes != 0) 2421 - break; 2422 - 2423 - if (!valid_user_frame(fp, sizeof(frame))) 2424 2424 break; 2425 2425 2426 2426 perf_callchain_store(entry, frame.return_address);
+23 -12
arch/x86/events/intel/ds.c
··· 1108 1108 } 1109 1109 1110 1110 /* 1111 - * We use the interrupt regs as a base because the PEBS record 1112 - * does not contain a full regs set, specifically it seems to 1113 - * lack segment descriptors, which get used by things like 1114 - * user_mode(). 1111 + * We use the interrupt regs as a base because the PEBS record does not 1112 + * contain a full regs set, specifically it seems to lack segment 1113 + * descriptors, which get used by things like user_mode(). 1115 1114 * 1116 - * In the simple case fix up only the IP and BP,SP regs, for 1117 - * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. 1118 - * A possible PERF_SAMPLE_REGS will have to transfer all regs. 1115 + * In the simple case fix up only the IP for PERF_SAMPLE_IP. 1116 + * 1117 + * We must however always use BP,SP from iregs for the unwinder to stay 1118 + * sane; the record BP,SP can point into thin air when the record is 1119 + * from a previous PMI context or an (I)RET happend between the record 1120 + * and PMI. 1119 1121 */ 1120 1122 *regs = *iregs; 1121 1123 regs->flags = pebs->flags; 1122 1124 set_linear_ip(regs, pebs->ip); 1123 - regs->bp = pebs->bp; 1124 - regs->sp = pebs->sp; 1125 1125 1126 1126 if (sample_type & PERF_SAMPLE_REGS_INTR) { 1127 1127 regs->ax = pebs->ax; ··· 1130 1130 regs->dx = pebs->dx; 1131 1131 regs->si = pebs->si; 1132 1132 regs->di = pebs->di; 1133 - regs->bp = pebs->bp; 1134 - regs->sp = pebs->sp; 1135 1133 1136 - regs->flags = pebs->flags; 1134 + /* 1135 + * Per the above; only set BP,SP if we don't need callchains. 1136 + * 1137 + * XXX: does this make sense? 1138 + */ 1139 + if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 1140 + regs->bp = pebs->bp; 1141 + regs->sp = pebs->sp; 1142 + } 1143 + 1144 + /* 1145 + * Preserve PERF_EFLAGS_VM from set_linear_ip(). 1146 + */ 1147 + regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); 1137 1148 #ifndef CONFIG_X86_32 1138 1149 regs->r8 = pebs->r8; 1139 1150 regs->r9 = pebs->r9;
+4 -4
arch/x86/events/intel/uncore.c
··· 319 319 */ 320 320 static int uncore_pmu_event_init(struct perf_event *event); 321 321 322 - static bool is_uncore_event(struct perf_event *event) 322 + static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) 323 323 { 324 - return event->pmu->event_init == uncore_pmu_event_init; 324 + return &box->pmu->pmu == event->pmu; 325 325 } 326 326 327 327 static int ··· 340 340 341 341 n = box->n_events; 342 342 343 - if (is_uncore_event(leader)) { 343 + if (is_box_event(box, leader)) { 344 344 box->event_list[n] = leader; 345 345 n++; 346 346 } ··· 349 349 return n; 350 350 351 351 list_for_each_entry(event, &leader->sibling_list, group_entry) { 352 - if (!is_uncore_event(event) || 352 + if (!is_box_event(box, event) || 353 353 event->state <= PERF_EVENT_STATE_OFF) 354 354 continue; 355 355
-12
arch/x86/events/intel/uncore_snb.c
··· 490 490 491 491 snb_uncore_imc_event_start(event, 0); 492 492 493 - box->n_events++; 494 - 495 493 return 0; 496 494 } 497 495 498 496 static void snb_uncore_imc_event_del(struct perf_event *event, int flags) 499 497 { 500 - struct intel_uncore_box *box = uncore_event_to_box(event); 501 - int i; 502 - 503 498 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); 504 - 505 - for (i = 0; i < box->n_events; i++) { 506 - if (event == box->event_list[i]) { 507 - --box->n_events; 508 - break; 509 - } 510 - } 511 499 } 512 500 513 501 int snb_pci2phy_map_init(int devid)
+1 -1
arch/x86/events/perf_event.h
··· 113 113 * Per register state. 114 114 */ 115 115 struct er_account { 116 - raw_spinlock_t lock; /* per-core: protect structure */ 116 + raw_spinlock_t lock; /* per-core: protect structure */ 117 117 u64 config; /* extra MSR config */ 118 118 u64 reg; /* extra MSR number */ 119 119 atomic_t ref; /* reference count */
+1 -1
arch/x86/kernel/dumpstack.c
··· 112 112 for (; stack < stack_info.end; stack++) { 113 113 unsigned long real_addr; 114 114 int reliable = 0; 115 - unsigned long addr = *stack; 115 + unsigned long addr = READ_ONCE_NOCHECK(*stack); 116 116 unsigned long *ret_addr_p = 117 117 unwind_get_return_address_ptr(&state); 118 118
+8 -8
arch/x86/kernel/fpu/core.c
··· 521 521 { 522 522 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ 523 523 524 - if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { 525 - /* FPU state will be reallocated lazily at the first use. */ 526 - fpu__drop(fpu); 527 - } else { 528 - if (!fpu->fpstate_active) { 529 - fpu__activate_curr(fpu); 530 - user_fpu_begin(); 531 - } 524 + fpu__drop(fpu); 525 + 526 + /* 527 + * Make sure fpstate is cleared and initialized. 528 + */ 529 + if (static_cpu_has(X86_FEATURE_FPU)) { 530 + fpu__activate_curr(fpu); 531 + user_fpu_begin(); 532 532 copy_init_fpstate_to_fpregs(); 533 533 } 534 534 }
+6 -3
arch/x86/kernel/head_32.S
··· 665 665 initial_pg_pmd: 666 666 .fill 1024*KPMDS,4,0 667 667 #else 668 - ENTRY(initial_page_table) 668 + .globl initial_page_table 669 + initial_page_table: 669 670 .fill 1024,4,0 670 671 #endif 671 672 initial_pg_fixmap: 672 673 .fill 1024,4,0 673 - ENTRY(empty_zero_page) 674 + .globl empty_zero_page 675 + empty_zero_page: 674 676 .fill 4096,1,0 675 - ENTRY(swapper_pg_dir) 677 + .globl swapper_pg_dir 678 + swapper_pg_dir: 676 679 .fill 1024,4,0 677 680 EXPORT_SYMBOL(empty_zero_page) 678 681
+31 -8
arch/x86/kernel/sysfb_simplefb.c
··· 66 66 { 67 67 struct platform_device *pd; 68 68 struct resource res; 69 - unsigned long len; 69 + u64 base, size; 70 + u32 length; 70 71 71 - /* don't use lfb_size as it may contain the whole VMEM instead of only 72 - * the part that is occupied by the framebuffer */ 73 - len = mode->height * mode->stride; 74 - len = PAGE_ALIGN(len); 75 - if (len > (u64)si->lfb_size << 16) { 72 + /* 73 + * If the 64BIT_BASE capability is set, ext_lfb_base will contain the 74 + * upper half of the base address. Assemble the address, then make sure 75 + * it is valid and we can actually access it. 76 + */ 77 + base = si->lfb_base; 78 + if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) 79 + base |= (u64)si->ext_lfb_base << 32; 80 + if (!base || (u64)(resource_size_t)base != base) { 81 + printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); 82 + return -EINVAL; 83 + } 84 + 85 + /* 86 + * Don't use lfb_size as IORESOURCE size, since it may contain the 87 + * entire VMEM, and thus require huge mappings. Use just the part we 88 + * need, that is, the part where the framebuffer is located. But verify 89 + * that it does not exceed the advertised VMEM. 90 + * Note that in case of VBE, the lfb_size is shifted by 16 bits for 91 + * historical reasons. 92 + */ 93 + size = si->lfb_size; 94 + if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) 95 + size <<= 16; 96 + length = mode->height * mode->stride; 97 + length = PAGE_ALIGN(length); 98 + if (length > size) { 76 99 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 77 100 return -EINVAL; 78 101 } ··· 104 81 memset(&res, 0, sizeof(res)); 105 82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 106 83 res.name = simplefb_resname; 107 - res.start = si->lfb_base; 108 - res.end = si->lfb_base + len - 1; 84 + res.start = base; 85 + res.end = res.start + length - 1; 109 86 if (res.end <= res.start) 110 87 return -EINVAL; 111 88
+6 -2
arch/x86/kernel/unwind_guess.c
··· 7 7 8 8 unsigned long unwind_get_return_address(struct unwind_state *state) 9 9 { 10 + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); 11 + 10 12 if (unwind_done(state)) 11 13 return 0; 12 14 13 15 return ftrace_graph_ret_addr(state->task, &state->graph_idx, 14 - *state->sp, state->sp); 16 + addr, state->sp); 15 17 } 16 18 EXPORT_SYMBOL_GPL(unwind_get_return_address); 17 19 ··· 25 23 return false; 26 24 27 25 do { 26 + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); 27 + 28 28 for (state->sp++; state->sp < info->end; state->sp++) 29 - if (__kernel_text_address(*state->sp)) 29 + if (__kernel_text_address(addr)) 30 30 return true; 31 31 32 32 state->sp = info->next_sp;
+11 -25
arch/x86/kvm/emulate.c
··· 2105 2105 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2106 2106 { 2107 2107 int rc; 2108 - unsigned short sel, old_sel; 2109 - struct desc_struct old_desc, new_desc; 2110 - const struct x86_emulate_ops *ops = ctxt->ops; 2108 + unsigned short sel; 2109 + struct desc_struct new_desc; 2111 2110 u8 cpl = ctxt->ops->cpl(ctxt); 2112 - 2113 - /* Assignment of RIP may only fail in 64-bit mode */ 2114 - if (ctxt->mode == X86EMUL_MODE_PROT64) 2115 - ops->get_segment(ctxt, &old_sel, &old_desc, NULL, 2116 - VCPU_SREG_CS); 2117 2111 2118 2112 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2119 2113 ··· 2118 2124 return rc; 2119 2125 2120 2126 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 2121 - if (rc != X86EMUL_CONTINUE) { 2122 - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); 2123 - /* assigning eip failed; restore the old cs */ 2124 - ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); 2125 - return rc; 2126 - } 2127 + /* Error handling is not implemented. */ 2128 + if (rc != X86EMUL_CONTINUE) 2129 + return X86EMUL_UNHANDLEABLE; 2130 + 2127 2131 return rc; 2128 2132 } 2129 2133 ··· 2181 2189 { 2182 2190 int rc; 2183 2191 unsigned long eip, cs; 2184 - u16 old_cs; 2185 2192 int cpl = ctxt->ops->cpl(ctxt); 2186 - struct desc_struct old_desc, new_desc; 2187 - const struct x86_emulate_ops *ops = ctxt->ops; 2188 - 2189 - if (ctxt->mode == X86EMUL_MODE_PROT64) 2190 - ops->get_segment(ctxt, &old_cs, &old_desc, NULL, 2191 - VCPU_SREG_CS); 2193 + struct desc_struct new_desc; 2192 2194 2193 2195 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2194 2196 if (rc != X86EMUL_CONTINUE) ··· 2199 2213 if (rc != X86EMUL_CONTINUE) 2200 2214 return rc; 2201 2215 rc = assign_eip_far(ctxt, eip, &new_desc); 2202 - if (rc != X86EMUL_CONTINUE) { 2203 - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); 2204 - ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); 2205 - } 2216 + /* Error handling is not implemented. 
*/ 2217 + if (rc != X86EMUL_CONTINUE) 2218 + return X86EMUL_UNHANDLEABLE; 2219 + 2206 2220 return rc; 2207 2221 } 2208 2222
+1 -1
arch/x86/kvm/ioapic.c
··· 94 94 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) 95 95 { 96 96 ioapic->rtc_status.pending_eoi = 0; 97 - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); 97 + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); 98 98 } 99 99 100 100 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
+2 -2
arch/x86/kvm/ioapic.h
··· 42 42 43 43 struct dest_map { 44 44 /* vcpu bitmap where IRQ has been sent */ 45 - DECLARE_BITMAP(map, KVM_MAX_VCPUS); 45 + DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); 46 46 47 47 /* 48 48 * Vector sent to a given vcpu, only valid when 49 49 * the vcpu's bit in map is set 50 50 */ 51 - u8 vectors[KVM_MAX_VCPUS]; 51 + u8 vectors[KVM_MAX_VCPU_ID]; 52 52 }; 53 53 54 54
+13
arch/x86/kvm/irq_comm.c
··· 41 41 bool line_status) 42 42 { 43 43 struct kvm_pic *pic = pic_irqchip(kvm); 44 + 45 + /* 46 + * XXX: rejecting pic routes when pic isn't in use would be better, 47 + * but the default routing table is installed while kvm->arch.vpic is 48 + * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. 49 + */ 50 + if (!pic) 51 + return -1; 52 + 44 53 return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); 45 54 } 46 55 ··· 58 49 bool line_status) 59 50 { 60 51 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 52 + 53 + if (!ioapic) 54 + return -1; 55 + 61 56 return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, 62 57 line_status); 63 58 }
+1 -1
arch/x86/kvm/lapic.c
··· 138 138 *mask = dest_id & 0xff; 139 139 return true; 140 140 case KVM_APIC_MODE_XAPIC_CLUSTER: 141 - *cluster = map->xapic_cluster_map[dest_id >> 4]; 141 + *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; 142 142 *mask = dest_id & 0xf; 143 143 return true; 144 144 default:
+6 -1
arch/x86/mm/extable.c
··· 135 135 if (early_recursion_flag > 2) 136 136 goto halt_loop; 137 137 138 - if (regs->cs != __KERNEL_CS) 138 + /* 139 + * Old CPUs leave the high bits of CS on the stack 140 + * undefined. I'm not sure which CPUs do this, but at least 141 + * the 486 DX works this way. 142 + */ 143 + if ((regs->cs & 0xFFFF) != __KERNEL_CS) 139 144 goto fail; 140 145 141 146 /*
+1 -1
arch/x86/platform/intel-mid/device_libs/Makefile
··· 28 28 obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o 29 29 # MISC Devices 30 30 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o 31 - obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o 31 + obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
+28 -6
arch/x86/platform/intel-mid/device_libs/platform_wdt.c arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
··· 1 1 /* 2 - * platform_wdt.c: Watchdog platform library file 2 + * Intel Merrifield watchdog platform device library file 3 3 * 4 4 * (C) Copyright 2014 Intel Corporation 5 5 * Author: David Cohen <david.a.cohen@linux.intel.com> ··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/platform_data/intel-mid_wdt.h> 17 + 17 18 #include <asm/intel-mid.h> 19 + #include <asm/intel_scu_ipc.h> 18 20 #include <asm/io_apic.h> 19 21 20 22 #define TANGIER_EXT_TIMER0_MSI 15 ··· 52 50 .probe = tangier_probe, 53 51 }; 54 52 55 - static int __init register_mid_wdt(void) 53 + static int wdt_scu_status_change(struct notifier_block *nb, 54 + unsigned long code, void *data) 56 55 { 57 - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { 58 - wdt_dev.dev.platform_data = &tangier_pdata; 59 - return platform_device_register(&wdt_dev); 56 + if (code == SCU_DOWN) { 57 + platform_device_unregister(&wdt_dev); 58 + return 0; 60 59 } 61 60 62 - return -ENODEV; 61 + return platform_device_register(&wdt_dev); 63 62 } 64 63 64 + static struct notifier_block wdt_scu_notifier = { 65 + .notifier_call = wdt_scu_status_change, 66 + }; 67 + 68 + static int __init register_mid_wdt(void) 69 + { 70 + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) 71 + return -ENODEV; 72 + 73 + wdt_dev.dev.platform_data = &tangier_pdata; 74 + 75 + /* 76 + * We need to be sure that the SCU IPC is ready before watchdog device 77 + * can be registered: 78 + */ 79 + intel_scu_notifier_add(&wdt_scu_notifier); 80 + 81 + return 0; 82 + } 65 83 rootfs_initcall(register_mid_wdt);
+1 -1
crypto/algif_hash.c
··· 214 214 215 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 216 216 217 - if (!result) { 217 + if (!result && !ctx->more) { 218 218 err = af_alg_wait_for_completion( 219 219 crypto_ahash_init(&ctx->req), 220 220 &ctx->completion);
-1
crypto/asymmetric_keys/x509_cert_parser.c
··· 133 133 return cert; 134 134 135 135 error_decode: 136 - kfree(cert->pub->key); 137 136 kfree(ctx); 138 137 error_no_ctx: 139 138 x509_free_certificate(cert);
-4
crypto/scatterwalk.c
··· 68 68 69 69 sg = scatterwalk_ffwd(tmp, sg, start); 70 70 71 - if (sg_page(sg) == virt_to_page(buf) && 72 - sg->offset == offset_in_page(buf)) 73 - return; 74 - 75 71 scatterwalk_start(&walk, sg); 76 72 scatterwalk_copychunks(buf, &walk, nbytes, out); 77 73 scatterwalk_done(&walk, out, 0);
+6 -23
drivers/acpi/sleep.c
··· 47 47 } 48 48 } 49 49 50 - static void acpi_sleep_pts_switch(u32 acpi_state) 51 - { 52 - acpi_status status; 53 - 54 - status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state); 55 - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 56 - /* 57 - * OS can't evaluate the _PTS object correctly. Some warning 58 - * message will be printed. But it won't break anything. 59 - */ 60 - printk(KERN_NOTICE "Failure in evaluating _PTS object\n"); 61 - } 62 - } 63 - 64 - static int sleep_notify_reboot(struct notifier_block *this, 50 + static int tts_notify_reboot(struct notifier_block *this, 65 51 unsigned long code, void *x) 66 52 { 67 53 acpi_sleep_tts_switch(ACPI_STATE_S5); 68 - 69 - acpi_sleep_pts_switch(ACPI_STATE_S5); 70 - 71 54 return NOTIFY_DONE; 72 55 } 73 56 74 - static struct notifier_block sleep_notifier = { 75 - .notifier_call = sleep_notify_reboot, 57 + static struct notifier_block tts_notifier = { 58 + .notifier_call = tts_notify_reboot, 76 59 .next = NULL, 77 60 .priority = 0, 78 61 }; ··· 899 916 pr_info(PREFIX "(supports%s)\n", supported); 900 917 901 918 /* 902 - * Register the sleep_notifier to reboot notifier list so that the _TTS 903 - * and _PTS object can also be evaluated when the system enters S5. 919 + * Register the tts_notifier to reboot notifier list so that the _TTS 920 + * object can also be evaluated when the system enters S5. 904 921 */ 905 - register_reboot_notifier(&sleep_notifier); 922 + register_reboot_notifier(&tts_notifier); 906 923 return 0; 907 924 }
+1 -1
drivers/clk/berlin/bg2.c
··· 685 685 } 686 686 687 687 /* register clk-provider */ 688 - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 688 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); 689 689 690 690 return; 691 691
+1 -1
drivers/clk/berlin/bg2q.c
··· 382 382 } 383 383 384 384 /* register clk-provider */ 385 - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 385 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); 386 386 387 387 return; 388 388
+1 -1
drivers/clk/clk-efm32gg.c
··· 82 82 hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", 83 83 "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); 84 84 85 - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 85 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); 86 86 } 87 87 CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
+12
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
··· 191 191 static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", 192 192 0x050, 0, 3, axi_div_table, 0); 193 193 194 + #define SUN6I_A31_AHB1_REG 0x054 195 + 194 196 static const char * const ahb1_parents[] = { "osc32k", "osc24M", 195 197 "axi", "pll-periph" }; 196 198 ··· 1231 1229 val = readl(reg + SUN6I_A31_PLL_MIPI_REG); 1232 1230 val &= BIT(16); 1233 1231 writel(val, reg + SUN6I_A31_PLL_MIPI_REG); 1232 + 1233 + /* Force AHB1 to PLL6 / 3 */ 1234 + val = readl(reg + SUN6I_A31_AHB1_REG); 1235 + /* set PLL6 pre-div = 3 */ 1236 + val &= ~GENMASK(7, 6); 1237 + val |= 0x2 << 6; 1238 + /* select PLL6 / pre-div */ 1239 + val &= ~GENMASK(13, 12); 1240 + val |= 0x3 << 12; 1241 + writel(val, reg + SUN6I_A31_AHB1_REG); 1234 1242 1235 1243 sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); 1236 1244
+1 -1
drivers/clk/sunxi/clk-sunxi.c
··· 373 373 else 374 374 calcp = 3; 375 375 376 - calcm = (req->parent_rate >> calcp) - 1; 376 + calcm = (div >> calcp) - 1; 377 377 378 378 req->rate = (req->parent_rate >> calcp) / (calcm + 1); 379 379 req->m = calcm;
+2 -2
drivers/dax/dax.c
··· 270 270 if (!dax_dev->alive) 271 271 return -ENXIO; 272 272 273 - /* prevent private / writable mappings from being established */ 274 - if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) { 273 + /* prevent private mappings from being established */ 274 + if ((vma->vm_flags & VM_SHARED) != VM_SHARED) { 275 275 dev_info(dev, "%s: %s: fail, attempted private mapping\n", 276 276 current->comm, func); 277 277 return -EINVAL;
+3 -1
drivers/dax/pmem.c
··· 78 78 nsio = to_nd_namespace_io(&ndns->dev); 79 79 80 80 /* parse the 'pfn' info block via ->rw_bytes */ 81 - devm_nsio_enable(dev, nsio); 81 + rc = devm_nsio_enable(dev, nsio); 82 + if (rc) 83 + return rc; 82 84 altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); 83 85 if (IS_ERR(altmap)) 84 86 return PTR_ERR(altmap);
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
··· 34 34 35 35 static struct amdgpu_atpx_priv { 36 36 bool atpx_detected; 37 + bool bridge_pm_usable; 37 38 /* handle for device - and atpx */ 38 39 acpi_handle dhandle; 39 40 acpi_handle other_handle; ··· 206 205 atpx->is_hybrid = false; 207 206 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 208 207 printk("ATPX Hybrid Graphics\n"); 209 - atpx->functions.power_cntl = false; 208 + /* 209 + * Disable legacy PM methods only when pcie port PM is usable, 210 + * otherwise the device might fail to power off or power on. 211 + */ 212 + atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; 210 213 atpx->is_hybrid = true; 211 214 } 212 215 ··· 485 480 */ 486 481 static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) 487 482 { 483 + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); 488 484 acpi_handle dhandle, atpx_handle; 489 485 acpi_status status; 490 486 ··· 500 494 } 501 495 amdgpu_atpx_priv.dhandle = dhandle; 502 496 amdgpu_atpx_priv.atpx.handle = atpx_handle; 497 + amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3; 503 498 return true; 504 499 } 505 500
+6 -6
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 2984 2984 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 2985 2985 data->highest_mclk = memory_clock; 2986 2986 2987 - performance_level = &(ps->performance_levels 2988 - [ps->performance_level_count++]); 2989 - 2990 2987 PP_ASSERT_WITH_CODE( 2991 2988 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), 2992 2989 "Performance levels exceeds SMC limit!", 2993 2990 return -EINVAL); 2994 2991 2995 2992 PP_ASSERT_WITH_CODE( 2996 - (ps->performance_level_count <= 2993 + (ps->performance_level_count < 2997 2994 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 2998 - "Performance levels exceeds Driver limit!", 2999 - return -EINVAL); 2995 + "Performance levels exceeds Driver limit, Skip!", 2996 + return 0); 2997 + 2998 + performance_level = &(ps->performance_levels 2999 + [ps->performance_level_count++]); 3000 3000 3001 3001 /* Performance levels are arranged from low to high. */ 3002 3002 performance_level->memory_clock = memory_clock;
+2 -3
drivers/gpu/drm/arm/hdlcd_crtc.c
··· 150 150 clk_prepare_enable(hdlcd->clk); 151 151 hdlcd_crtc_mode_set_nofb(crtc); 152 152 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); 153 + drm_crtc_vblank_on(crtc); 153 154 } 154 155 155 156 static void hdlcd_crtc_disable(struct drm_crtc *crtc) 156 157 { 157 158 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); 158 159 159 - if (!crtc->state->active) 160 - return; 161 - 160 + drm_crtc_vblank_off(crtc); 162 161 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); 163 162 clk_disable_unprepare(hdlcd->clk); 164 163 }
+5
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 1907 1907 err_hdmiphy: 1908 1908 if (hdata->hdmiphy_port) 1909 1909 put_device(&hdata->hdmiphy_port->dev); 1910 + if (hdata->regs_hdmiphy) 1911 + iounmap(hdata->regs_hdmiphy); 1910 1912 err_ddc: 1911 1913 put_device(&hdata->ddc_adpt->dev); 1912 1914 ··· 1930 1928 1931 1929 if (hdata->hdmiphy_port) 1932 1930 put_device(&hdata->hdmiphy_port->dev); 1931 + 1932 + if (hdata->regs_hdmiphy) 1933 + iounmap(hdata->regs_hdmiphy); 1933 1934 1934 1935 put_device(&hdata->ddc_adpt->dev); 1935 1936
+7 -7
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 251 251 if (irq < 0) 252 252 return irq; 253 253 254 - ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, 255 - IRQF_TRIGGER_NONE, dev_name(dev), priv); 256 - if (ret < 0) { 257 - dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); 258 - return ret; 259 - } 260 - 261 254 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); 262 255 if (comp_id < 0) { 263 256 dev_err(dev, "Failed to identify by alias: %d\n", comp_id); ··· 265 272 } 266 273 267 274 platform_set_drvdata(pdev, priv); 275 + 276 + ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, 277 + IRQF_TRIGGER_NONE, dev_name(dev), priv); 278 + if (ret < 0) { 279 + dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); 280 + return ret; 281 + } 268 282 269 283 ret = component_add(dev, &mtk_disp_ovl_component_ops); 270 284 if (ret)
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
··· 123 123 unsigned int bpc) 124 124 { 125 125 writel(w << 16 | h, comp->regs + DISP_OD_SIZE); 126 - writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE); 126 + writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); 127 127 mtk_dither_set(comp, bpc, DISP_OD_CFG); 128 128 } 129 129
+50 -18
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 86 86 87 87 #define DSI_PHY_TIMECON0 0x110 88 88 #define LPX (0xff << 0) 89 - #define HS_PRPR (0xff << 8) 89 + #define HS_PREP (0xff << 8) 90 90 #define HS_ZERO (0xff << 16) 91 91 #define HS_TRAIL (0xff << 24) 92 92 ··· 102 102 #define CLK_TRAIL (0xff << 24) 103 103 104 104 #define DSI_PHY_TIMECON3 0x11c 105 - #define CLK_HS_PRPR (0xff << 0) 105 + #define CLK_HS_PREP (0xff << 0) 106 106 #define CLK_HS_POST (0xff << 8) 107 107 #define CLK_HS_EXIT (0xff << 16) 108 + 109 + #define T_LPX 5 110 + #define T_HS_PREP 6 111 + #define T_HS_TRAIL 8 112 + #define T_HS_EXIT 7 113 + #define T_HS_ZERO 10 108 114 109 115 #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) 110 116 ··· 167 161 static void dsi_phy_timconfig(struct mtk_dsi *dsi) 168 162 { 169 163 u32 timcon0, timcon1, timcon2, timcon3; 170 - unsigned int ui, cycle_time; 171 - unsigned int lpx; 164 + u32 ui, cycle_time; 172 165 173 166 ui = 1000 / dsi->data_rate + 0x01; 174 167 cycle_time = 8000 / dsi->data_rate + 0x01; 175 - lpx = 5; 176 168 177 - timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx; 178 - timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 | 179 - (4 * lpx); 169 + timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24; 170 + timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 | 171 + T_HS_EXIT << 24; 180 172 timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | 181 173 (NS_TO_CYCLE(0x150, cycle_time) << 16); 182 - timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 | 183 - NS_TO_CYCLE(0x40, cycle_time); 174 + timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 | 175 + NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8; 184 176 185 177 writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); 186 178 writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); ··· 206 202 { 207 203 struct device *dev = dsi->dev; 208 204 int ret; 205 + u64 pixel_clock, total_bits; 206 + u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; 209 207 210 208 
if (++dsi->refcount != 1) 211 209 return 0; 212 210 213 - /** 214 - * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio; 215 - * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000. 216 - * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi. 217 - * we set mipi_ratio is 1.05. 218 - */ 219 - dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10); 211 + switch (dsi->format) { 212 + case MIPI_DSI_FMT_RGB565: 213 + bit_per_pixel = 16; 214 + break; 215 + case MIPI_DSI_FMT_RGB666_PACKED: 216 + bit_per_pixel = 18; 217 + break; 218 + case MIPI_DSI_FMT_RGB666: 219 + case MIPI_DSI_FMT_RGB888: 220 + default: 221 + bit_per_pixel = 24; 222 + break; 223 + } 220 224 221 - ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000); 225 + /** 226 + * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000 227 + * htotal_time = htotal * byte_per_pixel / num_lanes 228 + * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit 229 + * mipi_ratio = (htotal_time + overhead_time) / htotal_time 230 + * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; 231 + */ 232 + pixel_clock = dsi->vm.pixelclock * 1000; 233 + htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + 234 + dsi->vm.hsync_len; 235 + htotal_bits = htotal * bit_per_pixel; 236 + 237 + overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + 238 + T_HS_EXIT; 239 + overhead_bits = overhead_cycles * dsi->lanes * 8; 240 + total_bits = htotal_bits + overhead_bits; 241 + 242 + dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, 243 + htotal * dsi->lanes); 244 + 245 + ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); 222 246 if (ret < 0) { 223 247 dev_err(dev, "Failed to set data rate: %d\n", ret); 224 248 goto err_refcount;
+8 -1
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 34 34 35 35 static struct radeon_atpx_priv { 36 36 bool atpx_detected; 37 + bool bridge_pm_usable; 37 38 /* handle for device - and atpx */ 38 39 acpi_handle dhandle; 39 40 struct radeon_atpx atpx; ··· 204 203 atpx->is_hybrid = false; 205 204 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 206 205 printk("ATPX Hybrid Graphics\n"); 207 - atpx->functions.power_cntl = false; 206 + /* 207 + * Disable legacy PM methods only when pcie port PM is usable, 208 + * otherwise the device might fail to power off or power on. 209 + */ 210 + atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable; 208 211 atpx->is_hybrid = true; 209 212 } 210 213 ··· 479 474 */ 480 475 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 481 476 { 477 + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); 482 478 acpi_handle dhandle, atpx_handle; 483 479 acpi_status status; 484 480 ··· 493 487 494 488 radeon_atpx_priv.dhandle = dhandle; 495 489 radeon_atpx_priv.atpx.handle = atpx_handle; 490 + radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3; 496 491 return true; 497 492 } 498 493
+79 -36
drivers/hid/hid-cp2112.c
··· 32 32 #include <linux/usb/ch9.h> 33 33 #include "hid-ids.h" 34 34 35 + #define CP2112_REPORT_MAX_LENGTH 64 36 + #define CP2112_GPIO_CONFIG_LENGTH 5 37 + #define CP2112_GPIO_GET_LENGTH 2 38 + #define CP2112_GPIO_SET_LENGTH 3 39 + 35 40 enum { 36 41 CP2112_GPIO_CONFIG = 0x02, 37 42 CP2112_GPIO_GET = 0x03, ··· 166 161 atomic_t read_avail; 167 162 atomic_t xfer_avail; 168 163 struct gpio_chip gc; 164 + u8 *in_out_buffer; 165 + spinlock_t lock; 169 166 }; 170 167 171 168 static int gpio_push_pull = 0xFF; ··· 178 171 { 179 172 struct cp2112_device *dev = gpiochip_get_data(chip); 180 173 struct hid_device *hdev = dev->hdev; 181 - u8 buf[5]; 174 + u8 *buf = dev->in_out_buffer; 175 + unsigned long flags; 182 176 int ret; 183 177 178 + spin_lock_irqsave(&dev->lock, flags); 179 + 184 180 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 185 - sizeof(buf), HID_FEATURE_REPORT, 186 - HID_REQ_GET_REPORT); 187 - if (ret != sizeof(buf)) { 181 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 182 + HID_REQ_GET_REPORT); 183 + if (ret != CP2112_GPIO_CONFIG_LENGTH) { 188 184 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 189 - return ret; 185 + goto exit; 190 186 } 191 187 192 188 buf[1] &= ~(1 << offset); 193 189 buf[2] = gpio_push_pull; 194 190 195 - ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 196 - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 191 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 192 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 193 + HID_REQ_SET_REPORT); 197 194 if (ret < 0) { 198 195 hid_err(hdev, "error setting GPIO config: %d\n", ret); 199 - return ret; 196 + goto exit; 200 197 } 201 198 202 - return 0; 199 + ret = 0; 200 + 201 + exit: 202 + spin_unlock_irqrestore(&dev->lock, flags); 203 + return ret <= 0 ? 
ret : -EIO; 203 204 } 204 205 205 206 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 206 207 { 207 208 struct cp2112_device *dev = gpiochip_get_data(chip); 208 209 struct hid_device *hdev = dev->hdev; 209 - u8 buf[3]; 210 + u8 *buf = dev->in_out_buffer; 211 + unsigned long flags; 210 212 int ret; 213 + 214 + spin_lock_irqsave(&dev->lock, flags); 211 215 212 216 buf[0] = CP2112_GPIO_SET; 213 217 buf[1] = value ? 0xff : 0; 214 218 buf[2] = 1 << offset; 215 219 216 - ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf), 217 - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 220 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, 221 + CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, 222 + HID_REQ_SET_REPORT); 218 223 if (ret < 0) 219 224 hid_err(hdev, "error setting GPIO values: %d\n", ret); 225 + 226 + spin_unlock_irqrestore(&dev->lock, flags); 220 227 } 221 228 222 229 static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) 223 230 { 224 231 struct cp2112_device *dev = gpiochip_get_data(chip); 225 232 struct hid_device *hdev = dev->hdev; 226 - u8 buf[2]; 233 + u8 *buf = dev->in_out_buffer; 234 + unsigned long flags; 227 235 int ret; 228 236 229 - ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf), 230 - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); 231 - if (ret != sizeof(buf)) { 237 + spin_lock_irqsave(&dev->lock, flags); 238 + 239 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, 240 + CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, 241 + HID_REQ_GET_REPORT); 242 + if (ret != CP2112_GPIO_GET_LENGTH) { 232 243 hid_err(hdev, "error requesting GPIO values: %d\n", ret); 233 - return ret; 244 + ret = ret < 0 ? 
ret : -EIO; 245 + goto exit; 234 246 } 235 247 236 - return (buf[1] >> offset) & 1; 248 + ret = (buf[1] >> offset) & 1; 249 + 250 + exit: 251 + spin_unlock_irqrestore(&dev->lock, flags); 252 + 253 + return ret; 237 254 } 238 255 239 256 static int cp2112_gpio_direction_output(struct gpio_chip *chip, ··· 265 234 { 266 235 struct cp2112_device *dev = gpiochip_get_data(chip); 267 236 struct hid_device *hdev = dev->hdev; 268 - u8 buf[5]; 237 + u8 *buf = dev->in_out_buffer; 238 + unsigned long flags; 269 239 int ret; 270 240 241 + spin_lock_irqsave(&dev->lock, flags); 242 + 271 243 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 272 - sizeof(buf), HID_FEATURE_REPORT, 273 - HID_REQ_GET_REPORT); 274 - if (ret != sizeof(buf)) { 244 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 245 + HID_REQ_GET_REPORT); 246 + if (ret != CP2112_GPIO_CONFIG_LENGTH) { 275 247 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 276 - return ret; 248 + goto fail; 277 249 } 278 250 279 251 buf[1] |= 1 << offset; 280 252 buf[2] = gpio_push_pull; 281 253 282 - ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 283 - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 254 + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 255 + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 256 + HID_REQ_SET_REPORT); 284 257 if (ret < 0) { 285 258 hid_err(hdev, "error setting GPIO config: %d\n", ret); 286 - return ret; 259 + goto fail; 287 260 } 261 + 262 + spin_unlock_irqrestore(&dev->lock, flags); 288 263 289 264 /* 290 265 * Set gpio value when output direction is already set, ··· 299 262 cp2112_gpio_set(chip, offset, value); 300 263 301 264 return 0; 265 + 266 + fail: 267 + spin_unlock_irqrestore(&dev->lock, flags); 268 + return ret < 0 ? 
ret : -EIO; 302 269 } 303 270 304 271 static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, ··· 1048 1007 struct cp2112_smbus_config_report config; 1049 1008 int ret; 1050 1009 1010 + dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); 1011 + if (!dev) 1012 + return -ENOMEM; 1013 + 1014 + dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, 1015 + GFP_KERNEL); 1016 + if (!dev->in_out_buffer) 1017 + return -ENOMEM; 1018 + 1019 + spin_lock_init(&dev->lock); 1020 + 1051 1021 ret = hid_parse(hdev); 1052 1022 if (ret) { 1053 1023 hid_err(hdev, "parse failed\n"); ··· 1115 1063 goto err_power_normal; 1116 1064 } 1117 1065 1118 - dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1119 - if (!dev) { 1120 - ret = -ENOMEM; 1121 - goto err_power_normal; 1122 - } 1123 - 1124 1066 hid_set_drvdata(hdev, (void *)dev); 1125 1067 dev->hdev = hdev; 1126 1068 dev->adap.owner = THIS_MODULE; ··· 1133 1087 1134 1088 if (ret) { 1135 1089 hid_err(hdev, "error registering i2c adapter\n"); 1136 - goto err_free_dev; 1090 + goto err_power_normal; 1137 1091 } 1138 1092 1139 1093 hid_dbg(hdev, "adapter registered\n"); ··· 1169 1123 gpiochip_remove(&dev->gc); 1170 1124 err_free_i2c: 1171 1125 i2c_del_adapter(&dev->adap); 1172 - err_free_dev: 1173 - kfree(dev); 1174 1126 err_power_normal: 1175 1127 hid_hw_power(hdev, PM_HINT_NORMAL); 1176 1128 err_hid_close: ··· 1193 1149 */ 1194 1150 hid_hw_close(hdev); 1195 1151 hid_hw_stop(hdev); 1196 - kfree(dev); 1197 1152 } 1198 1153 1199 1154 static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
+10 -4
drivers/hid/hid-lg.c
··· 756 756 757 757 /* Setup wireless link with Logitech Wii wheel */ 758 758 if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { 759 - unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 759 + const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 760 + u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); 760 761 761 - ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 762 + if (!buf) { 763 + ret = -ENOMEM; 764 + goto err_free; 765 + } 766 + 767 + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), 762 768 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 763 - 764 769 if (ret >= 0) { 765 770 /* insert a little delay of 10 jiffies ~ 40ms */ 766 771 wait_queue_head_t wait; ··· 777 772 buf[1] = 0xB2; 778 773 get_random_bytes(&buf[2], 2); 779 774 780 - ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 775 + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), 781 776 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 782 777 } 778 + kfree(buf); 783 779 } 784 780 785 781 if (drv_data->quirks & LG_FF)
+10 -2
drivers/hid/hid-magicmouse.c
··· 493 493 static int magicmouse_probe(struct hid_device *hdev, 494 494 const struct hid_device_id *id) 495 495 { 496 - __u8 feature[] = { 0xd7, 0x01 }; 496 + const u8 feature[] = { 0xd7, 0x01 }; 497 + u8 *buf; 497 498 struct magicmouse_sc *msc; 498 499 struct hid_report *report; 499 500 int ret; ··· 545 544 } 546 545 report->size = 6; 547 546 547 + buf = kmemdup(feature, sizeof(feature), GFP_KERNEL); 548 + if (!buf) { 549 + ret = -ENOMEM; 550 + goto err_stop_hw; 551 + } 552 + 548 553 /* 549 554 * Some devices repond with 'invalid report id' when feature 550 555 * report switching it into multitouch mode is sent to it. ··· 559 552 * but there seems to be no other way of switching the mode. 560 553 * Thus the super-ugly hacky success check below. 561 554 */ 562 - ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), 555 + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature), 563 556 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 557 + kfree(buf); 564 558 if (ret != -EIO && ret != sizeof(feature)) { 565 559 hid_err(hdev, "unable to request touch data (%d)\n", ret); 566 560 goto err_stop_hw;
+8 -2
drivers/hid/hid-rmi.c
··· 188 188 static int rmi_set_mode(struct hid_device *hdev, u8 mode) 189 189 { 190 190 int ret; 191 - u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; 191 + const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; 192 + u8 *buf; 192 193 193 - ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, 194 + buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL); 195 + if (!buf) 196 + return -ENOMEM; 197 + 198 + ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf, 194 199 sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 200 + kfree(buf); 195 201 if (ret < 0) { 196 202 dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, 197 203 ret);
+1
drivers/hid/hid-sensor-hub.c
··· 212 212 __s32 value; 213 213 int ret = 0; 214 214 215 + memset(buffer, 0, buffer_size); 215 216 mutex_lock(&data->mutex); 216 217 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 217 218 if (!report || (field_index >= report->maxfield)) {
+25 -39
drivers/i2c/busses/i2c-designware-core.c
··· 91 91 DW_IC_INTR_TX_ABRT | \ 92 92 DW_IC_INTR_STOP_DET) 93 93 94 - #define DW_IC_STATUS_ACTIVITY 0x1 95 - #define DW_IC_STATUS_TFE BIT(2) 96 - #define DW_IC_STATUS_MST_ACTIVITY BIT(5) 94 + #define DW_IC_STATUS_ACTIVITY 0x1 97 95 98 96 #define DW_IC_SDA_HOLD_RX_SHIFT 16 99 97 #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) ··· 476 478 { 477 479 struct i2c_msg *msgs = dev->msgs; 478 480 u32 ic_tar = 0; 479 - bool enabled; 480 481 481 - enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1; 482 - 483 - if (enabled) { 484 - u32 ic_status; 485 - 486 - /* 487 - * Only disable adapter if ic_tar and ic_con can't be 488 - * dynamically updated 489 - */ 490 - ic_status = dw_readl(dev, DW_IC_STATUS); 491 - if (!dev->dynamic_tar_update_enabled || 492 - (ic_status & DW_IC_STATUS_MST_ACTIVITY) || 493 - !(ic_status & DW_IC_STATUS_TFE)) { 494 - __i2c_dw_enable_and_wait(dev, false); 495 - enabled = false; 496 - } 497 - } 482 + /* Disable the adapter */ 483 + __i2c_dw_enable_and_wait(dev, false); 498 484 499 485 /* if the slave address is ten bit address, enable 10BITADDR */ 500 486 if (dev->dynamic_tar_update_enabled) { ··· 508 526 /* enforce disabled interrupts (due to HW issues) */ 509 527 i2c_dw_disable_int(dev); 510 528 511 - if (!enabled) 512 - __i2c_dw_enable(dev, true); 529 + /* Enable the adapter */ 530 + __i2c_dw_enable(dev, true); 513 531 514 532 /* Clear and enable interrupts */ 515 533 dw_readl(dev, DW_IC_CLR_INTR); ··· 593 611 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { 594 612 595 613 /* avoid rx buffer overrun */ 596 - if (rx_limit - dev->rx_outstanding <= 0) 614 + if (dev->rx_outstanding >= dev->rx_fifo_depth) 597 615 break; 598 616 599 617 dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); ··· 690 708 } 691 709 692 710 /* 693 - * Prepare controller for a transaction and start transfer by calling 694 - * i2c_dw_xfer_init() 711 + * Prepare controller for a transaction and call i2c_dw_xfer_msg 695 712 */ 696 713 static int 697 714 i2c_dw_xfer(struct 
i2c_adapter *adap, struct i2c_msg msgs[], int num) ··· 733 752 goto done; 734 753 } 735 754 755 + /* 756 + * We must disable the adapter before returning and signaling the end 757 + * of the current transfer. Otherwise the hardware might continue 758 + * generating interrupts which in turn causes a race condition with 759 + * the following transfer. Needs some more investigation if the 760 + * additional interrupts are a hardware bug or this driver doesn't 761 + * handle them correctly yet. 762 + */ 763 + __i2c_dw_enable(dev, false); 764 + 736 765 if (dev->msg_err) { 737 766 ret = dev->msg_err; 738 767 goto done; 739 768 } 740 769 741 770 /* no error */ 742 - if (likely(!dev->cmd_err)) { 771 + if (likely(!dev->cmd_err && !dev->status)) { 743 772 ret = num; 744 773 goto done; 745 774 } ··· 759 768 ret = i2c_dw_handle_tx_abort(dev); 760 769 goto done; 761 770 } 771 + 772 + if (dev->status) 773 + dev_err(dev->dev, 774 + "transfer terminated early - interrupt latency too high?\n"); 775 + 762 776 ret = -EIO; 763 777 764 778 done: ··· 884 888 */ 885 889 886 890 tx_aborted: 887 - if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) 888 - || dev->msg_err) { 889 - /* 890 - * We must disable interruts before returning and signaling 891 - * the end of the current transfer. Otherwise the hardware 892 - * might continue generating interrupts for non-existent 893 - * transfers. 894 - */ 895 - i2c_dw_disable_int(dev); 896 - dw_readl(dev, DW_IC_CLR_INTR); 897 - 891 + if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 898 892 complete(&dev->cmd_complete); 899 - } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { 893 + else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { 900 894 /* workaround to trigger pending interrupt */ 901 895 stat = dw_readl(dev, DW_IC_INTR_MASK); 902 896 i2c_dw_disable_int(dev);
+16 -21
drivers/media/tuners/tuner-xc2028.c
··· 281 281 int i; 282 282 tuner_dbg("%s called\n", __func__); 283 283 284 + /* free allocated f/w string */ 285 + if (priv->fname != firmware_name) 286 + kfree(priv->fname); 287 + priv->fname = NULL; 288 + 289 + priv->state = XC2028_NO_FIRMWARE; 290 + memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); 291 + 284 292 if (!priv->firm) 285 293 return; 286 294 ··· 299 291 300 292 priv->firm = NULL; 301 293 priv->firm_size = 0; 302 - priv->state = XC2028_NO_FIRMWARE; 303 - 304 - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); 305 294 } 306 295 307 296 static int load_all_firmwares(struct dvb_frontend *fe, ··· 889 884 return 0; 890 885 891 886 fail: 892 - priv->state = XC2028_NO_FIRMWARE; 887 + free_firmware(priv); 893 888 894 - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); 895 889 if (retry_count < 8) { 896 890 msleep(50); 897 891 retry_count++; ··· 1336 1332 mutex_lock(&xc2028_list_mutex); 1337 1333 1338 1334 /* only perform final cleanup if this is the last instance */ 1339 - if (hybrid_tuner_report_instance_count(priv) == 1) { 1335 + if (hybrid_tuner_report_instance_count(priv) == 1) 1340 1336 free_firmware(priv); 1341 - kfree(priv->ctrl.fname); 1342 - priv->ctrl.fname = NULL; 1343 - } 1344 1337 1345 1338 if (priv) 1346 1339 hybrid_tuner_release_state(priv); ··· 1400 1399 1401 1400 /* 1402 1401 * Copy the config data. 1403 - * For the firmware name, keep a local copy of the string, 1404 - * in order to avoid troubles during device release. 1405 1402 */ 1406 - kfree(priv->ctrl.fname); 1407 - priv->ctrl.fname = NULL; 1408 1403 memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); 1409 - if (p->fname) { 1410 - priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); 1411 - if (priv->ctrl.fname == NULL) { 1412 - rc = -ENOMEM; 1413 - goto unlock; 1414 - } 1415 - } 1416 1404 1417 1405 /* 1418 1406 * If firmware name changed, frees firmware. 
As free_firmware will ··· 1416 1426 1417 1427 if (priv->state == XC2028_NO_FIRMWARE) { 1418 1428 if (!firmware_name[0]) 1419 - priv->fname = priv->ctrl.fname; 1429 + priv->fname = kstrdup(p->fname, GFP_KERNEL); 1420 1430 else 1421 1431 priv->fname = firmware_name; 1432 + 1433 + if (!priv->fname) { 1434 + rc = -ENOMEM; 1435 + goto unlock; 1436 + } 1422 1437 1423 1438 rc = request_firmware_nowait(THIS_MODULE, 1, 1424 1439 priv->fname,
+3 -1
drivers/mfd/syscon.c
··· 73 73 /* Parse the device's DT node for an endianness specification */ 74 74 if (of_property_read_bool(np, "big-endian")) 75 75 syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; 76 - else if (of_property_read_bool(np, "little-endian")) 76 + else if (of_property_read_bool(np, "little-endian")) 77 77 syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; 78 + else if (of_property_read_bool(np, "native-endian")) 79 + syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE; 78 80 79 81 /* 80 82 * search for reg-io-width property in DT. If it is not provided,
+12 -4
drivers/mfd/wm8994-core.c
··· 393 393 BUG(); 394 394 goto err; 395 395 } 396 - 397 - ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies, 396 + 397 + /* 398 + * Can't use devres helper here as some of the supplies are provided by 399 + * wm8994->dev's children (regulators) and those regulators are 400 + * unregistered by the devres core before the supplies are freed. 401 + */ 402 + ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, 398 403 wm8994->supplies); 399 404 if (ret != 0) { 400 405 dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); ··· 410 405 wm8994->supplies); 411 406 if (ret != 0) { 412 407 dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); 413 - goto err; 408 + goto err_regulator_free; 414 409 } 415 410 416 411 ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); ··· 601 596 err_enable: 602 597 regulator_bulk_disable(wm8994->num_supplies, 603 598 wm8994->supplies); 599 + err_regulator_free: 600 + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); 604 601 err: 605 602 mfd_remove_devices(wm8994->dev); 606 603 return ret; ··· 611 604 static void wm8994_device_exit(struct wm8994 *wm8994) 612 605 { 613 606 pm_runtime_disable(wm8994->dev); 614 - mfd_remove_devices(wm8994->dev); 615 607 wm8994_irq_exit(wm8994); 616 608 regulator_bulk_disable(wm8994->num_supplies, 617 609 wm8994->supplies); 610 + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); 611 + mfd_remove_devices(wm8994->dev); 618 612 } 619 613 620 614 static const struct of_device_id wm8994_of_match[] = {
+1
drivers/mmc/host/dw_mmc.c
··· 1058 1058 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1059 1059 1060 1060 if (host->dma_ops->start(host, sg_len)) { 1061 + host->dma_ops->stop(host); 1061 1062 /* We can't do DMA, try PIO for this one */ 1062 1063 dev_dbg(host->dev, 1063 1064 "%s: fall back to PIO mode for current transfer\n",
+14
drivers/mmc/host/sdhci-of-esdhc.c
··· 66 66 return ret; 67 67 } 68 68 } 69 + /* 70 + * The DAT[3:0] line signal levels and the CMD line signal level are 71 + * not compatible with standard SDHC register. The line signal levels 72 + * DAT[7:0] are at bits 31:24 and the command line signal level is at 73 + * bit 23. All other bits are the same as in the standard SDHC 74 + * register. 75 + */ 76 + if (spec_reg == SDHCI_PRESENT_STATE) { 77 + ret = value & 0x000fffff; 78 + ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; 79 + ret |= (value << 1) & SDHCI_CMD_LVL; 80 + return ret; 81 + } 82 + 69 83 ret = value; 70 84 return ret; 71 85 }
+1
drivers/mmc/host/sdhci.h
··· 73 73 #define SDHCI_DATA_LVL_MASK 0x00F00000 74 74 #define SDHCI_DATA_LVL_SHIFT 20 75 75 #define SDHCI_DATA_0_LVL_MASK 0x00100000 76 + #define SDHCI_CMD_LVL 0x01000000 76 77 77 78 #define SDHCI_HOST_CONTROL 0x28 78 79 #define SDHCI_CTRL_LED 0x01
+12 -3
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1812 1812 if (atomic_read(&bp->intr_sem) != 0) 1813 1813 return LL_FLUSH_FAILED; 1814 1814 1815 + if (!bp->link_info.link_up) 1816 + return LL_FLUSH_FAILED; 1817 + 1815 1818 if (!bnxt_lock_poll(bnapi)) 1816 1819 return LL_FLUSH_BUSY; 1817 1820 ··· 3214 3211 goto err_out; 3215 3212 } 3216 3213 3217 - if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) 3214 + switch (tunnel_type) { 3215 + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 3218 3216 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3219 - 3220 - else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) 3217 + break; 3218 + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 3221 3219 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3220 + break; 3221 + default: 3222 + break; 3223 + } 3224 + 3222 3225 err_out: 3223 3226 mutex_unlock(&bp->hwrm_cmd_lock); 3224 3227 return rc;
-3
drivers/net/ethernet/freescale/fman/fman_tgec.c
··· 722 722 { 723 723 free_init_resources(tgec); 724 724 725 - if (tgec->cfg) 726 - tgec->cfg = NULL; 727 - 728 725 kfree(tgec->cfg); 729 726 kfree(tgec); 730 727
-1
drivers/net/ethernet/ibm/ibmvnic.c
··· 74 74 #include <asm/iommu.h> 75 75 #include <linux/uaccess.h> 76 76 #include <asm/firmware.h> 77 - #include <linux/seq_file.h> 78 77 #include <linux/workqueue.h> 79 78 80 79 #include "ibmvnic.h"
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 4131 4131 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 4132 4132 dev->hw_features |= dev->features; 4133 4133 dev->vlan_features |= dev->features; 4134 - dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 4134 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 4135 4135 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 4136 4136 4137 4137 /* MTU range: 68 - 9676 */
+1 -1
drivers/net/ethernet/marvell/mvpp2.c
··· 3293 3293 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 3294 3294 3295 3295 /* Clear classifier flow table */ 3296 - memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 3296 + memset(&fe.data, 0, sizeof(fe.data)); 3297 3297 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 3298 3298 fe.index = index; 3299 3299 mvpp2_cls_flow_write(priv, &fe);
+4 -1
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 129 129 } 130 130 }; 131 131 132 + /* Must not acquire state_lock, as its corresponding work_sync 133 + * is done under it. 134 + */ 132 135 static void mlx4_en_filter_work(struct work_struct *work) 133 136 { 134 137 struct mlx4_en_filter *filter = container_of(work, ··· 2242 2239 mutex_lock(&mdev->state_lock); 2243 2240 mdev->pndev[priv->port] = NULL; 2244 2241 mdev->upper[priv->port] = NULL; 2245 - mutex_unlock(&mdev->state_lock); 2246 2242 2247 2243 #ifdef CONFIG_RFS_ACCEL 2248 2244 mlx4_en_cleanup_filters(priv); 2249 2245 #endif 2250 2246 2251 2247 mlx4_en_free_resources(priv); 2248 + mutex_unlock(&mdev->state_lock); 2252 2249 2253 2250 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) { 2254 2251 kfree(priv->tx_ring[t]);
-1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 46 46 #include <linux/mlx5/srq.h> 47 47 #include <linux/debugfs.h> 48 48 #include <linux/kmod.h> 49 - #include <linux/delay.h> 50 49 #include <linux/mlx5/mlx5_ifc.h> 51 50 #ifdef CONFIG_RFS_ACCEL 52 51 #include <linux/cpu_rmap.h>
-2
drivers/net/ethernet/synopsys/dwc_eth_qos.c
··· 33 33 #include <linux/stat.h> 34 34 #include <linux/types.h> 35 35 36 - #include <linux/types.h> 37 36 #include <linux/slab.h> 38 37 #include <linux/delay.h> 39 38 #include <linux/mm.h> ··· 42 43 43 44 #include <linux/phy.h> 44 45 #include <linux/mii.h> 45 - #include <linux/delay.h> 46 46 #include <linux/dma-mapping.h> 47 47 #include <linux/vmalloc.h> 48 48
-1
drivers/net/ieee802154/adf7242.c
··· 20 20 #include <linux/skbuff.h> 21 21 #include <linux/of.h> 22 22 #include <linux/irq.h> 23 - #include <linux/delay.h> 24 23 #include <linux/debugfs.h> 25 24 #include <linux/bitops.h> 26 25 #include <linux/ieee802154.h>
+2 -1
drivers/net/macvlan.c
··· 622 622 return 0; 623 623 624 624 clear_multi: 625 - dev_set_allmulti(lowerdev, -1); 625 + if (dev->flags & IFF_ALLMULTI) 626 + dev_set_allmulti(lowerdev, -1); 626 627 del_unicast: 627 628 dev_uc_del(lowerdev, dev->dev_addr); 628 629 out:
+4 -4
drivers/net/phy/micrel.c
··· 318 318 /* Limit supported and advertised modes in fiber mode */ 319 319 if (of_property_read_bool(of_node, "micrel,fiber-mode")) { 320 320 phydev->dev_flags |= MICREL_PHY_FXEN; 321 - phydev->supported &= SUPPORTED_FIBRE | 322 - SUPPORTED_100baseT_Full | 321 + phydev->supported &= SUPPORTED_100baseT_Full | 323 322 SUPPORTED_100baseT_Half; 324 - phydev->advertising &= ADVERTISED_FIBRE | 325 - ADVERTISED_100baseT_Full | 323 + phydev->supported |= SUPPORTED_FIBRE; 324 + phydev->advertising &= ADVERTISED_100baseT_Full | 326 325 ADVERTISED_100baseT_Half; 326 + phydev->advertising |= ADVERTISED_FIBRE; 327 327 phydev->autoneg = AUTONEG_DISABLE; 328 328 } 329 329
+2 -2
drivers/phy/phy-twl4030-usb.c
··· 459 459 struct twl4030_usb *twl = phy_get_drvdata(phy); 460 460 461 461 dev_dbg(twl->dev, "%s\n", __func__); 462 - pm_runtime_mark_last_busy(twl->dev); 463 - pm_runtime_put_autosuspend(twl->dev); 464 462 465 463 return 0; 466 464 } ··· 470 472 dev_dbg(twl->dev, "%s\n", __func__); 471 473 pm_runtime_get_sync(twl->dev); 472 474 schedule_delayed_work(&twl->id_workaround_work, HZ); 475 + pm_runtime_mark_last_busy(twl->dev); 476 + pm_runtime_put_autosuspend(twl->dev); 473 477 474 478 return 0; 475 479 }
+14 -1
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 4010 4010 SAM_STAT_CHECK_CONDITION; 4011 4011 } 4012 4012 4013 - 4013 + static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 4014 + { 4015 + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 4016 + } 4014 4017 4015 4018 /** 4016 4019 * scsih_qcmd - main scsi request entry point ··· 4040 4037 4041 4038 if (ioc->logging_level & MPT_DEBUG_SCSI) 4042 4039 scsi_print_command(scmd); 4040 + 4041 + /* 4042 + * Lock the device for any subsequent command until command is 4043 + * done. 4044 + */ 4045 + if (ata_12_16_cmd(scmd)) 4046 + scsi_internal_device_block(scmd->device); 4043 4047 4044 4048 sas_device_priv_data = scmd->device->hostdata; 4045 4049 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { ··· 4622 4612 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4623 4613 if (scmd == NULL) 4624 4614 return 1; 4615 + 4616 + if (ata_12_16_cmd(scmd)) 4617 + scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); 4625 4618 4626 4619 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4627 4620
+13 -8
drivers/scsi/qla2xxx/qla_os.c
··· 1456 1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1457 1457 sp = req->outstanding_cmds[cnt]; 1458 1458 if (sp) { 1459 - /* Get a reference to the sp and drop the lock. 1460 - * The reference ensures this sp->done() call 1461 - * - and not the call in qla2xxx_eh_abort() - 1462 - * ends the SCSI command (with result 'res'). 1459 + /* Don't abort commands in adapter during EEH 1460 + * recovery as it's not accessible/responding. 1463 1461 */ 1464 - sp_get(sp); 1465 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 1466 - qla2xxx_eh_abort(GET_CMD_SP(sp)); 1467 - spin_lock_irqsave(&ha->hardware_lock, flags); 1462 + if (!ha->flags.eeh_busy) { 1463 + /* Get a reference to the sp and drop the lock. 1464 + * The reference ensures this sp->done() call 1465 + * - and not the call in qla2xxx_eh_abort() - 1466 + * ends the SCSI command (with result 'res'). 1467 + */ 1468 + sp_get(sp); 1469 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1470 + qla2xxx_eh_abort(GET_CMD_SP(sp)); 1471 + spin_lock_irqsave(&ha->hardware_lock, flags); 1472 + } 1468 1473 req->outstanding_cmds[cnt] = NULL; 1469 1474 sp->done(vha, sp, res); 1470 1475 }
+8 -1
drivers/thermal/intel_powerclamp.c
··· 669 669 .set_cur_state = powerclamp_set_cur_state, 670 670 }; 671 671 672 + static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { 673 + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, 674 + {} 675 + }; 676 + MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); 677 + 672 678 static int __init powerclamp_probe(void) 673 679 { 674 - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { 680 + 681 + if (!x86_match_cpu(intel_powerclamp_ids)) { 675 682 pr_err("CPU does not support MWAIT"); 676 683 return -ENODEV; 677 684 }
+1
drivers/usb/chipidea/core.c
··· 914 914 if (!ci) 915 915 return -ENOMEM; 916 916 917 + spin_lock_init(&ci->lock); 917 918 ci->dev = dev; 918 919 ci->platdata = dev_get_platdata(dev); 919 920 ci->imx28_write_fix = !!(ci->platdata->flags &
-2
drivers/usb/chipidea/udc.c
··· 1889 1889 struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; 1890 1890 int retval = 0; 1891 1891 1892 - spin_lock_init(&ci->lock); 1893 - 1894 1892 ci->gadget.ops = &usb_gadget_ops; 1895 1893 ci->gadget.speed = USB_SPEED_UNKNOWN; 1896 1894 ci->gadget.max_speed = USB_SPEED_HIGH;
+4 -4
drivers/usb/gadget/function/f_fs.c
··· 3225 3225 3226 3226 switch (creq->bRequestType & USB_RECIP_MASK) { 3227 3227 case USB_RECIP_INTERFACE: 3228 - return ffs_func_revmap_intf(func, 3229 - le16_to_cpu(creq->wIndex) >= 0); 3228 + return (ffs_func_revmap_intf(func, 3229 + le16_to_cpu(creq->wIndex)) >= 0); 3230 3230 case USB_RECIP_ENDPOINT: 3231 - return ffs_func_revmap_ep(func, 3232 - le16_to_cpu(creq->wIndex) >= 0); 3231 + return (ffs_func_revmap_ep(func, 3232 + le16_to_cpu(creq->wIndex)) >= 0); 3233 3233 default: 3234 3234 return (bool) (func->ffs->user_flags & 3235 3235 FUNCTIONFS_ALL_CTRL_RECIP);
+129 -18
drivers/usb/musb/musb_core.c
··· 986 986 } 987 987 #endif 988 988 989 - schedule_work(&musb->irq_work); 989 + schedule_delayed_work(&musb->irq_work, 0); 990 990 991 991 return handled; 992 992 } ··· 1855 1855 MUSB_DEVCTL_HR; 1856 1856 switch (devctl & ~s) { 1857 1857 case MUSB_QUIRK_B_INVALID_VBUS_91: 1858 - if (!musb->session && !musb->quirk_invalid_vbus) { 1859 - musb->quirk_invalid_vbus = true; 1858 + if (musb->quirk_retries--) { 1860 1859 musb_dbg(musb, 1861 - "First invalid vbus, assume no session"); 1860 + "Poll devctl on invalid vbus, assume no session"); 1861 + schedule_delayed_work(&musb->irq_work, 1862 + msecs_to_jiffies(1000)); 1863 + 1862 1864 return; 1863 1865 } 1864 - break; 1865 1866 case MUSB_QUIRK_A_DISCONNECT_19: 1867 + if (musb->quirk_retries--) { 1868 + musb_dbg(musb, 1869 + "Poll devctl on possible host mode disconnect"); 1870 + schedule_delayed_work(&musb->irq_work, 1871 + msecs_to_jiffies(1000)); 1872 + 1873 + return; 1874 + } 1866 1875 if (!musb->session) 1867 1876 break; 1868 1877 musb_dbg(musb, "Allow PM on possible host mode disconnect"); ··· 1895 1886 if (error < 0) 1896 1887 dev_err(musb->controller, "Could not enable: %i\n", 1897 1888 error); 1889 + musb->quirk_retries = 3; 1898 1890 } else { 1899 1891 musb_dbg(musb, "Allow PM with no session: %02x", devctl); 1900 - musb->quirk_invalid_vbus = false; 1901 1892 pm_runtime_mark_last_busy(musb->controller); 1902 1893 pm_runtime_put_autosuspend(musb->controller); 1903 1894 } ··· 1908 1899 /* Only used to provide driver mode change events */ 1909 1900 static void musb_irq_work(struct work_struct *data) 1910 1901 { 1911 - struct musb *musb = container_of(data, struct musb, irq_work); 1902 + struct musb *musb = container_of(data, struct musb, irq_work.work); 1912 1903 1913 1904 musb_pm_runtime_check_session(musb); 1914 1905 ··· 1978 1969 INIT_LIST_HEAD(&musb->control); 1979 1970 INIT_LIST_HEAD(&musb->in_bulk); 1980 1971 INIT_LIST_HEAD(&musb->out_bulk); 1972 + INIT_LIST_HEAD(&musb->pending_list); 1981 1973 1982 1974 
musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1983 1975 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; ··· 2028 2018 musb_host_free(musb); 2029 2019 } 2030 2020 2021 + struct musb_pending_work { 2022 + int (*callback)(struct musb *musb, void *data); 2023 + void *data; 2024 + struct list_head node; 2025 + }; 2026 + 2027 + /* 2028 + * Called from musb_runtime_resume(), musb_resume(), and 2029 + * musb_queue_resume_work(). Callers must take musb->lock. 2030 + */ 2031 + static int musb_run_resume_work(struct musb *musb) 2032 + { 2033 + struct musb_pending_work *w, *_w; 2034 + unsigned long flags; 2035 + int error = 0; 2036 + 2037 + spin_lock_irqsave(&musb->list_lock, flags); 2038 + list_for_each_entry_safe(w, _w, &musb->pending_list, node) { 2039 + if (w->callback) { 2040 + error = w->callback(musb, w->data); 2041 + if (error < 0) { 2042 + dev_err(musb->controller, 2043 + "resume callback %p failed: %i\n", 2044 + w->callback, error); 2045 + } 2046 + } 2047 + list_del(&w->node); 2048 + devm_kfree(musb->controller, w); 2049 + } 2050 + spin_unlock_irqrestore(&musb->list_lock, flags); 2051 + 2052 + return error; 2053 + } 2054 + 2055 + /* 2056 + * Called to run work if device is active or else queue the work to happen 2057 + * on resume. Caller must take musb->lock and must hold an RPM reference. 2058 + * 2059 + * Note that we cowardly refuse queuing work after musb PM runtime 2060 + * resume is done calling musb_run_resume_work() and return -EINPROGRESS 2061 + * instead. 
2062 + */ 2063 + int musb_queue_resume_work(struct musb *musb, 2064 + int (*callback)(struct musb *musb, void *data), 2065 + void *data) 2066 + { 2067 + struct musb_pending_work *w; 2068 + unsigned long flags; 2069 + int error; 2070 + 2071 + if (WARN_ON(!callback)) 2072 + return -EINVAL; 2073 + 2074 + if (pm_runtime_active(musb->controller)) 2075 + return callback(musb, data); 2076 + 2077 + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); 2078 + if (!w) 2079 + return -ENOMEM; 2080 + 2081 + w->callback = callback; 2082 + w->data = data; 2083 + spin_lock_irqsave(&musb->list_lock, flags); 2084 + if (musb->is_runtime_suspended) { 2085 + list_add_tail(&w->node, &musb->pending_list); 2086 + error = 0; 2087 + } else { 2088 + dev_err(musb->controller, "could not add resume work %p\n", 2089 + callback); 2090 + devm_kfree(musb->controller, w); 2091 + error = -EINPROGRESS; 2092 + } 2093 + spin_unlock_irqrestore(&musb->list_lock, flags); 2094 + 2095 + return error; 2096 + } 2097 + EXPORT_SYMBOL_GPL(musb_queue_resume_work); 2098 + 2031 2099 static void musb_deassert_reset(struct work_struct *work) 2032 2100 { 2033 2101 struct musb *musb; ··· 2153 2065 } 2154 2066 2155 2067 spin_lock_init(&musb->lock); 2068 + spin_lock_init(&musb->list_lock); 2156 2069 musb->board_set_power = plat->set_power; 2157 2070 musb->min_power = plat->min_power; 2158 2071 musb->ops = plat->platform_ops; ··· 2297 2208 musb_generic_disable(musb); 2298 2209 2299 2210 /* Init IRQ workqueue before request_irq */ 2300 - INIT_WORK(&musb->irq_work, musb_irq_work); 2211 + INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); 2301 2212 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); 2302 2213 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); 2303 2214 ··· 2380 2291 if (status) 2381 2292 goto fail5; 2382 2293 2294 + musb->is_initialized = 1; 2383 2295 pm_runtime_mark_last_busy(musb->controller); 2384 2296 pm_runtime_put_autosuspend(musb->controller); 2385 2297 ··· 
2394 2304 musb_host_cleanup(musb); 2395 2305 2396 2306 fail3: 2397 - cancel_work_sync(&musb->irq_work); 2307 + cancel_delayed_work_sync(&musb->irq_work); 2398 2308 cancel_delayed_work_sync(&musb->finish_resume_work); 2399 2309 cancel_delayed_work_sync(&musb->deassert_reset_work); 2400 2310 if (musb->dma_controller) ··· 2461 2371 */ 2462 2372 musb_exit_debugfs(musb); 2463 2373 2464 - cancel_work_sync(&musb->irq_work); 2374 + cancel_delayed_work_sync(&musb->irq_work); 2465 2375 cancel_delayed_work_sync(&musb->finish_resume_work); 2466 2376 cancel_delayed_work_sync(&musb->deassert_reset_work); 2467 2377 pm_runtime_get_sync(musb->controller); ··· 2647 2557 2648 2558 musb_platform_disable(musb); 2649 2559 musb_generic_disable(musb); 2560 + WARN_ON(!list_empty(&musb->pending_list)); 2650 2561 2651 2562 spin_lock_irqsave(&musb->lock, flags); 2652 2563 ··· 2669 2578 2670 2579 static int musb_resume(struct device *dev) 2671 2580 { 2672 - struct musb *musb = dev_to_musb(dev); 2673 - u8 devctl; 2674 - u8 mask; 2581 + struct musb *musb = dev_to_musb(dev); 2582 + unsigned long flags; 2583 + int error; 2584 + u8 devctl; 2585 + u8 mask; 2675 2586 2676 2587 /* 2677 2588 * For static cmos like DaVinci, register values were preserved ··· 2707 2614 2708 2615 musb_start(musb); 2709 2616 2617 + spin_lock_irqsave(&musb->lock, flags); 2618 + error = musb_run_resume_work(musb); 2619 + if (error) 2620 + dev_err(musb->controller, "resume work failed with %i\n", 2621 + error); 2622 + spin_unlock_irqrestore(&musb->lock, flags); 2623 + 2710 2624 return 0; 2711 2625 } 2712 2626 ··· 2722 2622 struct musb *musb = dev_to_musb(dev); 2723 2623 2724 2624 musb_save_context(musb); 2625 + musb->is_runtime_suspended = 1; 2725 2626 2726 2627 return 0; 2727 2628 } 2728 2629 2729 2630 static int musb_runtime_resume(struct device *dev) 2730 2631 { 2731 - struct musb *musb = dev_to_musb(dev); 2732 - static int first = 1; 2632 + struct musb *musb = dev_to_musb(dev); 2633 + unsigned long flags; 2634 + int 
error; 2733 2635 2734 2636 /* 2735 2637 * When pm_runtime_get_sync called for the first time in driver ··· 2742 2640 * Also context restore without save does not make 2743 2641 * any sense 2744 2642 */ 2745 - if (!first) 2746 - musb_restore_context(musb); 2747 - first = 0; 2643 + if (!musb->is_initialized) 2644 + return 0; 2645 + 2646 + musb_restore_context(musb); 2748 2647 2749 2648 if (musb->need_finish_resume) { 2750 2649 musb->need_finish_resume = 0; 2751 2650 schedule_delayed_work(&musb->finish_resume_work, 2752 2651 msecs_to_jiffies(USB_RESUME_TIMEOUT)); 2753 2652 } 2653 + 2654 + spin_lock_irqsave(&musb->lock, flags); 2655 + error = musb_run_resume_work(musb); 2656 + if (error) 2657 + dev_err(musb->controller, "resume work failed with %i\n", 2658 + error); 2659 + musb->is_runtime_suspended = 0; 2660 + spin_unlock_irqrestore(&musb->lock, flags); 2754 2661 2755 2662 return 0; 2756 2663 }
+11 -2
drivers/usb/musb/musb_core.h
··· 303 303 struct musb { 304 304 /* device lock */ 305 305 spinlock_t lock; 306 + spinlock_t list_lock; /* resume work list lock */ 306 307 307 308 struct musb_io io; 308 309 const struct musb_platform_ops *ops; 309 310 struct musb_context_registers context; 310 311 311 312 irqreturn_t (*isr)(int, void *); 312 - struct work_struct irq_work; 313 + struct delayed_work irq_work; 313 314 struct delayed_work deassert_reset_work; 314 315 struct delayed_work finish_resume_work; 315 316 struct delayed_work gadget_work; ··· 338 337 struct list_head control; /* of musb_qh */ 339 338 struct list_head in_bulk; /* of musb_qh */ 340 339 struct list_head out_bulk; /* of musb_qh */ 340 + struct list_head pending_list; /* pending work list */ 341 341 342 342 struct timer_list otg_timer; 343 343 struct notifier_block nb; ··· 381 379 382 380 int port_mode; /* MUSB_PORT_MODE_* */ 383 381 bool session; 384 - bool quirk_invalid_vbus; 382 + unsigned long quirk_retries; 385 383 bool is_host; 386 384 387 385 int a_wait_bcon; /* VBUS timeout in msecs */ 388 386 unsigned long idle_timeout; /* Next timeout in jiffies */ 387 + 388 + unsigned is_initialized:1; 389 + unsigned is_runtime_suspended:1; 389 390 390 391 /* active means connected and not suspended */ 391 392 unsigned is_active:1; ··· 544 539 extern irqreturn_t musb_interrupt(struct musb *); 545 540 546 541 extern void musb_hnp_stop(struct musb *musb); 542 + 543 + int musb_queue_resume_work(struct musb *musb, 544 + int (*callback)(struct musb *musb, void *data), 545 + void *data); 547 546 548 547 static inline void musb_platform_set_vbus(struct musb *musb, int is_on) 549 548 {
+28 -30
drivers/usb/musb/musb_dsps.c
··· 185 185 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 186 186 musb_writel(reg_base, wrp->epintr_clear, 187 187 wrp->txep_bitmap | wrp->rxep_bitmap); 188 + del_timer_sync(&glue->timer); 188 189 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 189 190 } 190 191 191 - static void otg_timer(unsigned long _musb) 192 + /* Caller must take musb->lock */ 193 + static int dsps_check_status(struct musb *musb, void *unused) 192 194 { 193 - struct musb *musb = (void *)_musb; 194 195 void __iomem *mregs = musb->mregs; 195 196 struct device *dev = musb->controller; 196 197 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 197 198 const struct dsps_musb_wrapper *wrp = glue->wrp; 198 199 u8 devctl; 199 - unsigned long flags; 200 200 int skip_session = 0; 201 - int err; 202 - 203 - err = pm_runtime_get_sync(dev); 204 - if (err < 0) 205 - dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); 206 201 207 202 /* 208 203 * We poll because DSPS IP's won't expose several OTG-critical ··· 207 212 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 208 213 usb_otg_state_string(musb->xceiv->otg->state)); 209 214 210 - spin_lock_irqsave(&musb->lock, flags); 211 215 switch (musb->xceiv->otg->state) { 212 216 case OTG_STATE_A_WAIT_VRISE: 213 217 mod_timer(&glue->timer, jiffies + ··· 239 245 default: 240 246 break; 241 247 } 242 - spin_unlock_irqrestore(&musb->lock, flags); 243 248 249 + return 0; 250 + } 251 + 252 + static void otg_timer(unsigned long _musb) 253 + { 254 + struct musb *musb = (void *)_musb; 255 + struct device *dev = musb->controller; 256 + unsigned long flags; 257 + int err; 258 + 259 + err = pm_runtime_get(dev); 260 + if ((err != -EINPROGRESS) && err < 0) { 261 + dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); 262 + pm_runtime_put_noidle(dev); 263 + 264 + return; 265 + } 266 + 267 + spin_lock_irqsave(&musb->lock, flags); 268 + err = musb_queue_resume_work(musb, dsps_check_status, NULL); 269 + if (err < 0) 270 + dev_err(dev, "%s resume 
work: %i\n", __func__, err); 271 + spin_unlock_irqrestore(&musb->lock, flags); 244 272 pm_runtime_mark_last_busy(dev); 245 273 pm_runtime_put_autosuspend(dev); 246 274 } ··· 783 767 784 768 platform_set_drvdata(pdev, glue); 785 769 pm_runtime_enable(&pdev->dev); 786 - pm_runtime_use_autosuspend(&pdev->dev); 787 - pm_runtime_set_autosuspend_delay(&pdev->dev, 200); 788 - 789 - ret = pm_runtime_get_sync(&pdev->dev); 790 - if (ret < 0) { 791 - dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); 792 - goto err2; 793 - } 794 - 795 770 ret = dsps_create_musb_pdev(glue, pdev); 796 771 if (ret) 797 - goto err3; 798 - 799 - pm_runtime_mark_last_busy(&pdev->dev); 800 - pm_runtime_put_autosuspend(&pdev->dev); 772 + goto err; 801 773 802 774 return 0; 803 775 804 - err3: 805 - pm_runtime_put_sync(&pdev->dev); 806 - err2: 807 - pm_runtime_dont_use_autosuspend(&pdev->dev); 776 + err: 808 777 pm_runtime_disable(&pdev->dev); 809 778 return ret; 810 779 } ··· 800 799 801 800 platform_device_unregister(glue->musb); 802 801 803 - /* disable usbss clocks */ 804 - pm_runtime_dont_use_autosuspend(&pdev->dev); 805 - pm_runtime_put_sync(&pdev->dev); 806 802 pm_runtime_disable(&pdev->dev); 807 803 808 804 return 0;
+32 -7
drivers/usb/musb/musb_gadget.c
··· 1114 1114 musb_ep->dma ? "dma, " : "", 1115 1115 musb_ep->packet_sz); 1116 1116 1117 - schedule_work(&musb->irq_work); 1117 + schedule_delayed_work(&musb->irq_work, 0); 1118 1118 1119 1119 fail: 1120 1120 spin_unlock_irqrestore(&musb->lock, flags); ··· 1158 1158 musb_ep->desc = NULL; 1159 1159 musb_ep->end_point.desc = NULL; 1160 1160 1161 - schedule_work(&musb->irq_work); 1161 + schedule_delayed_work(&musb->irq_work, 0); 1162 1162 1163 1163 spin_unlock_irqrestore(&(musb->lock), flags); 1164 1164 ··· 1222 1222 rxstate(musb, req); 1223 1223 } 1224 1224 1225 + static int musb_ep_restart_resume_work(struct musb *musb, void *data) 1226 + { 1227 + struct musb_request *req = data; 1228 + 1229 + musb_ep_restart(musb, req); 1230 + 1231 + return 0; 1232 + } 1233 + 1225 1234 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1226 1235 gfp_t gfp_flags) 1227 1236 { 1228 1237 struct musb_ep *musb_ep; 1229 1238 struct musb_request *request; 1230 1239 struct musb *musb; 1231 - int status = 0; 1240 + int status; 1232 1241 unsigned long lockflags; 1233 1242 1234 1243 if (!ep || !req) ··· 1254 1245 if (request->ep != musb_ep) 1255 1246 return -EINVAL; 1256 1247 1248 + status = pm_runtime_get(musb->controller); 1249 + if ((status != -EINPROGRESS) && status < 0) { 1250 + dev_err(musb->controller, 1251 + "pm runtime get failed in %s\n", 1252 + __func__); 1253 + pm_runtime_put_noidle(musb->controller); 1254 + 1255 + return status; 1256 + } 1257 + status = 0; 1258 + 1257 1259 trace_musb_req_enq(request); 1258 1260 1259 1261 /* request is mine now... */ ··· 1275 1255 1276 1256 map_dma_buffer(request, musb, musb_ep); 1277 1257 1278 - pm_runtime_get_sync(musb->controller); 1279 1258 spin_lock_irqsave(&musb->lock, lockflags); 1280 1259 1281 1260 /* don't queue if the ep is down */ ··· 1290 1271 list_add_tail(&request->list, &musb_ep->req_list); 1291 1272 1292 1273 /* it this is the head of the queue, start i/o ... 
*/ 1293 - if (!musb_ep->busy && &request->list == musb_ep->req_list.next) 1294 - musb_ep_restart(musb, request); 1274 + if (!musb_ep->busy && &request->list == musb_ep->req_list.next) { 1275 + status = musb_queue_resume_work(musb, 1276 + musb_ep_restart_resume_work, 1277 + request); 1278 + if (status < 0) 1279 + dev_err(musb->controller, "%s resume work: %i\n", 1280 + __func__, status); 1281 + } 1295 1282 1296 1283 unlock: 1297 1284 spin_unlock_irqrestore(&musb->lock, lockflags); ··· 1994 1969 */ 1995 1970 1996 1971 /* Force check of devctl register for PM runtime */ 1997 - schedule_work(&musb->irq_work); 1972 + schedule_delayed_work(&musb->irq_work, 0); 1998 1973 1999 1974 pm_runtime_mark_last_busy(musb->controller); 2000 1975 pm_runtime_put_autosuspend(musb->controller);
+4 -6
drivers/usb/musb/omap2430.c
··· 513 513 } 514 514 515 515 pm_runtime_enable(glue->dev); 516 - pm_runtime_use_autosuspend(glue->dev); 517 - pm_runtime_set_autosuspend_delay(glue->dev, 100); 518 516 519 517 ret = platform_device_add(musb); 520 518 if (ret) { 521 519 dev_err(&pdev->dev, "failed to register musb device\n"); 522 - goto err2; 520 + goto err3; 523 521 } 524 522 525 523 return 0; 524 + 525 + err3: 526 + pm_runtime_disable(glue->dev); 526 527 527 528 err2: 528 529 platform_device_put(musb); ··· 536 535 { 537 536 struct omap2430_glue *glue = platform_get_drvdata(pdev); 538 537 539 - pm_runtime_get_sync(glue->dev); 540 538 platform_device_unregister(glue->musb); 541 - pm_runtime_put_sync(glue->dev); 542 - pm_runtime_dont_use_autosuspend(glue->dev); 543 539 pm_runtime_disable(glue->dev); 544 540 545 541 return 0;
+3 -3
drivers/usb/musb/tusb6010.c
··· 724 724 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 725 725 usb_otg_state_string(musb->xceiv->otg->state), otg_stat); 726 726 idle_timeout = jiffies + (1 * HZ); 727 - schedule_work(&musb->irq_work); 727 + schedule_delayed_work(&musb->irq_work, 0); 728 728 729 729 } else /* A-dev state machine */ { 730 730 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", ··· 814 814 break; 815 815 } 816 816 } 817 - schedule_work(&musb->irq_work); 817 + schedule_delayed_work(&musb->irq_work, 0); 818 818 819 819 return idle_timeout; 820 820 } ··· 864 864 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); 865 865 if (reg & ~TUSB_PRCM_WNORCS) { 866 866 musb->is_active = 1; 867 - schedule_work(&musb->irq_work); 867 + schedule_delayed_work(&musb->irq_work, 0); 868 868 } 869 869 dev_dbg(musb->controller, "wake %sactive %02x\n", 870 870 musb->is_active ? "" : "in", reg);
+1
drivers/usb/serial/cp210x.c
··· 131 131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 132 132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 133 133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 134 + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ 134 135 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 135 136 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 136 137 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+2
drivers/usb/serial/ftdi_sio.c
··· 1012 1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, 1013 1013 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, 1014 1014 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, 1015 + { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), 1016 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 1015 1017 { } /* Terminating entry */ 1016 1018 }; 1017 1019
+6
drivers/usb/serial/ftdi_sio_ids.h
··· 596 596 #define STK541_PID 0x2109 /* Zigbee Controller */ 597 597 598 598 /* 599 + * Texas Instruments 600 + */ 601 + #define TI_VID 0x0451 602 + #define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ 603 + 604 + /* 599 605 * Blackfin gnICE JTAG 600 606 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice 601 607 */
+6 -1
drivers/usb/storage/transport.c
··· 954 954 955 955 /* COMMAND STAGE */ 956 956 /* let's send the command via the control pipe */ 957 + /* 958 + * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. 959 + * Stack may be vmallocated. So no DMA for us. Make a copy. 960 + */ 961 + memcpy(us->iobuf, srb->cmnd, srb->cmd_len); 957 962 result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 958 963 US_CBI_ADSC, 959 964 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 960 - us->ifnum, srb->cmnd, srb->cmd_len); 965 + us->ifnum, us->iobuf, srb->cmd_len); 961 966 962 967 /* check the return code for the command */ 963 968 usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
+1
drivers/watchdog/Kconfig
··· 155 155 config WDAT_WDT 156 156 tristate "ACPI Watchdog Action Table (WDAT)" 157 157 depends on ACPI 158 + select WATCHDOG_CORE 158 159 select ACPI_WATCHDOG 159 160 help 160 161 This driver adds support for systems with ACPI Watchdog Action
+1 -1
fs/nfs/callback.c
··· 197 197 } 198 198 199 199 ret = -EPROTONOSUPPORT; 200 - if (minorversion == 0) 200 + if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) 201 201 ret = nfs4_callback_up_net(serv, net); 202 202 else if (xprt->ops->bc_up) 203 203 ret = xprt->ops->bc_up(serv, net);
+7
fs/nfs/nfs4_fs.h
··· 542 542 return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; 543 543 } 544 544 545 + static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state, 546 + const nfs4_stateid *stateid) 547 + { 548 + return test_bit(NFS_OPEN_STATE, &state->flags) && 549 + nfs4_stateid_match_other(&state->open_stateid, stateid); 550 + } 551 + 545 552 #else 546 553 547 554 #define nfs4_close_state(a, b) do { } while (0)
+26 -12
fs/nfs/nfs4proc.c
··· 1451 1451 } 1452 1452 1453 1453 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1454 - nfs4_stateid *arg_stateid, 1455 1454 nfs4_stateid *stateid, fmode_t fmode) 1456 1455 { 1457 1456 clear_bit(NFS_O_RDWR_STATE, &state->flags); ··· 1468 1469 } 1469 1470 if (stateid == NULL) 1470 1471 return; 1471 - /* Handle races with OPEN */ 1472 - if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1473 - (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1474 - !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1472 + /* Handle OPEN+OPEN_DOWNGRADE races */ 1473 + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1474 + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { 1475 1475 nfs_resync_open_stateid_locked(state); 1476 1476 return; 1477 1477 } ··· 1484 1486 nfs4_stateid *stateid, fmode_t fmode) 1485 1487 { 1486 1488 write_seqlock(&state->seqlock); 1487 - nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1489 + /* Ignore, if the CLOSE argment doesn't match the current stateid */ 1490 + if (nfs4_state_match_open_stateid_other(state, arg_stateid)) 1491 + nfs_clear_open_stateid_locked(state, stateid, fmode); 1488 1492 write_sequnlock(&state->seqlock); 1489 1493 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1490 1494 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); ··· 2564 2564 static int nfs41_check_expired_locks(struct nfs4_state *state) 2565 2565 { 2566 2566 int status, ret = NFS_OK; 2567 - struct nfs4_lock_state *lsp; 2567 + struct nfs4_lock_state *lsp, *prev = NULL; 2568 2568 struct nfs_server *server = NFS_SERVER(state->inode); 2569 2569 2570 2570 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2571 2571 goto out; 2572 + 2573 + spin_lock(&state->state_lock); 2572 2574 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2573 2575 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2574 2576 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 2577 
+ 2578 + atomic_inc(&lsp->ls_count); 2579 + spin_unlock(&state->state_lock); 2580 + 2581 + nfs4_put_lock_state(prev); 2582 + prev = lsp; 2575 2583 2576 2584 status = nfs41_test_and_free_expired_stateid(server, 2577 2585 &lsp->ls_stateid, ··· 2593 2585 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2594 2586 } else if (status != NFS_OK) { 2595 2587 ret = status; 2596 - break; 2588 + nfs4_put_lock_state(prev); 2589 + goto out; 2597 2590 } 2591 + spin_lock(&state->state_lock); 2598 2592 } 2599 - }; 2593 + } 2594 + spin_unlock(&state->state_lock); 2595 + nfs4_put_lock_state(prev); 2600 2596 out: 2601 2597 return ret; 2602 2598 } ··· 3134 3122 } else if (is_rdwr) 3135 3123 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3136 3124 3137 - if (!nfs4_valid_open_stateid(state)) 3125 + if (!nfs4_valid_open_stateid(state) || 3126 + test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3138 3127 call_close = 0; 3139 3128 spin_unlock(&state->owner->so_lock); 3140 3129 ··· 5582 5569 switch (task->tk_status) { 5583 5570 case 0: 5584 5571 renew_lease(data->res.server, data->timestamp); 5572 + break; 5585 5573 case -NFS4ERR_ADMIN_REVOKED: 5586 5574 case -NFS4ERR_DELEG_REVOKED: 5587 5575 case -NFS4ERR_EXPIRED: ··· 5593 5579 case -NFS4ERR_OLD_STATEID: 5594 5580 case -NFS4ERR_STALE_STATEID: 5595 5581 task->tk_status = 0; 5596 - if (data->roc) 5597 - pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5598 5582 break; 5599 5583 default: 5600 5584 if (nfs4_async_handle_error(task, data->res.server, ··· 5602 5590 } 5603 5591 } 5604 5592 data->rpc_status = task->tk_status; 5593 + if (data->roc && data->rpc_status == 0) 5594 + pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5605 5595 } 5606 5596 5607 5597 static void nfs4_delegreturn_release(void *calldata)
+1
fs/nfs/nfs4state.c
··· 1547 1547 ssleep(1); 1548 1548 case -NFS4ERR_ADMIN_REVOKED: 1549 1549 case -NFS4ERR_STALE_STATEID: 1550 + case -NFS4ERR_OLD_STATEID: 1550 1551 case -NFS4ERR_BAD_STATEID: 1551 1552 case -NFS4ERR_RECLAIM_BAD: 1552 1553 case -NFS4ERR_RECLAIM_CONFLICT:
+2 -1
fs/splice.c
··· 408 408 if (res <= 0) 409 409 return -ENOMEM; 410 410 411 - nr_pages = res / PAGE_SIZE; 411 + BUG_ON(dummy); 412 + nr_pages = DIV_ROUND_UP(res, PAGE_SIZE); 412 413 413 414 vec = __vec; 414 415 if (nr_pages > PIPE_DEF_BUFFERS) {
+1 -1
include/linux/netdevice.h
··· 1619 1619 * @dcbnl_ops: Data Center Bridging netlink ops 1620 1620 * @num_tc: Number of traffic classes in the net device 1621 1621 * @tc_to_txq: XXX: need comments on this one 1622 - * @prio_tc_map XXX: need comments on this one 1622 + * @prio_tc_map: XXX: need comments on this one 1623 1623 * 1624 1624 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1625 1625 *
+2
include/linux/sched.h
··· 2567 2567 extern void sched_autogroup_detach(struct task_struct *p); 2568 2568 extern void sched_autogroup_fork(struct signal_struct *sig); 2569 2569 extern void sched_autogroup_exit(struct signal_struct *sig); 2570 + extern void sched_autogroup_exit_task(struct task_struct *p); 2570 2571 #ifdef CONFIG_PROC_FS 2571 2572 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2572 2573 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); ··· 2577 2576 static inline void sched_autogroup_detach(struct task_struct *p) { } 2578 2577 static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2579 2578 static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2579 + static inline void sched_autogroup_exit_task(struct task_struct *p) { } 2580 2580 #endif 2581 2581 2582 2582 extern int yield_to(struct task_struct *p, bool preempt);
+1 -1
include/net/bluetooth/hci_core.h
··· 1018 1018 } 1019 1019 1020 1020 struct hci_dev *hci_dev_get(int index); 1021 - struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); 1021 + struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); 1022 1022 1023 1023 struct hci_dev *hci_alloc_dev(void); 1024 1024 void hci_free_dev(struct hci_dev *hdev);
+1
init/Kconfig
··· 1957 1957 1958 1958 config MODVERSIONS 1959 1959 bool "Module versioning support" 1960 + depends on BROKEN 1960 1961 help 1961 1962 Usually, you have to use modules compiled with your kernel. 1962 1963 Saying Y here makes it sometimes possible to use modules
+1 -1
init/do_mounts_rd.c
··· 272 272 sys_write(out_fd, buf, BLOCK_SIZE); 273 273 #if !defined(CONFIG_S390) 274 274 if (!(i % 16)) { 275 - printk("%c\b", rotator[rotate & 0x3]); 275 + pr_cont("%c\b", rotator[rotate & 0x3]); 276 276 rotate++; 277 277 } 278 278 #endif
+13
kernel/events/core.c
··· 902 902 * this will always be called from the right CPU. 903 903 */ 904 904 cpuctx = __get_cpu_context(ctx); 905 + 906 + /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */ 907 + if (perf_cgroup_from_task(current, ctx) != event->cgrp) { 908 + /* 909 + * We are removing the last cpu event in this context. 910 + * If that event is not active in this cpu, cpuctx->cgrp 911 + * should've been cleared by perf_cgroup_switch. 912 + */ 913 + WARN_ON_ONCE(!add && cpuctx->cgrp); 914 + return; 915 + } 905 916 cpuctx->cgrp = add ? event->cgrp : NULL; 906 917 } 907 918 ··· 8029 8018 * if <size> is not specified, the range is treated as a single address. 8030 8019 */ 8031 8020 enum { 8021 + IF_ACT_NONE = -1, 8032 8022 IF_ACT_FILTER, 8033 8023 IF_ACT_START, 8034 8024 IF_ACT_STOP, ··· 8053 8041 { IF_SRC_KERNEL, "%u/%u" }, 8054 8042 { IF_SRC_FILEADDR, "%u@%s" }, 8055 8043 { IF_SRC_KERNELADDR, "%u" }, 8044 + { IF_ACT_NONE, NULL }, 8056 8045 }; 8057 8046 8058 8047 /*
+1
kernel/exit.c
··· 836 836 */ 837 837 perf_event_exit_task(tsk); 838 838 839 + sched_autogroup_exit_task(tsk); 839 840 cgroup_exit(tsk); 840 841 841 842 /*
+28 -8
kernel/sched/auto_group.c
··· 111 111 { 112 112 if (tg != &root_task_group) 113 113 return false; 114 - 115 114 /* 116 - * We can only assume the task group can't go away on us if 117 - * autogroup_move_group() can see us on ->thread_group list. 115 + * If we race with autogroup_move_group() the caller can use the old 116 + * value of signal->autogroup but in this case sched_move_task() will 117 + * be called again before autogroup_kref_put(). 118 + * 119 + * However, there is no way sched_autogroup_exit_task() could tell us 120 + * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. 118 121 */ 119 122 if (p->flags & PF_EXITING) 120 123 return false; 121 124 122 125 return true; 126 + } 127 + 128 + void sched_autogroup_exit_task(struct task_struct *p) 129 + { 130 + /* 131 + * We are going to call exit_notify() and autogroup_move_group() can't 132 + * see this thread after that: we can no longer use signal->autogroup. 133 + * See the PF_EXITING check in task_wants_autogroup(). 134 + */ 135 + sched_move_task(p); 123 136 } 124 137 125 138 static void ··· 151 138 } 152 139 153 140 p->signal->autogroup = autogroup_kref_get(ag); 154 - 155 - if (!READ_ONCE(sysctl_sched_autogroup_enabled)) 156 - goto out; 157 - 141 + /* 142 + * We can't avoid sched_move_task() after we changed signal->autogroup, 143 + * this process can already run with task_group() == prev->tg or we can 144 + * race with cgroup code which can read autogroup = prev under rq->lock. 145 + * In the latter case for_each_thread() can not miss a migrating thread, 146 + * cpu_cgroup_attach() must not be possible after cgroup_exit() and it 147 + * can't be removed from thread list, we hold ->siglock. 148 + * 149 + * If an exiting thread was already removed from thread list we rely on 150 + * sched_autogroup_exit_task(). 151 + */ 158 152 for_each_thread(p, t) 159 153 sched_move_task(t); 160 - out: 154 + 161 155 unlock_task_sighand(p, &flags); 162 156 autogroup_kref_put(prev); 163 157 }
+6 -1
lib/mpi/mpi-pow.c
··· 64 64 if (!esize) { 65 65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 66 66 * depending on if MOD equals 1. */ 67 - rp[0] = 1; 68 67 res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; 68 + if (res->nlimbs) { 69 + if (mpi_resize(res, 1) < 0) 70 + goto enomem; 71 + rp = res->d; 72 + rp[0] = 1; 73 + } 69 74 res->sign = 0; 70 75 goto leave; 71 76 }
+2 -2
net/bluetooth/6lowpan.c
··· 1090 1090 { 1091 1091 struct hci_conn *hcon; 1092 1092 struct hci_dev *hdev; 1093 - bdaddr_t *src = BDADDR_ANY; 1094 1093 int n; 1095 1094 1096 1095 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", ··· 1100 1101 if (n < 7) 1101 1102 return -EINVAL; 1102 1103 1103 - hdev = hci_get_route(addr, src); 1104 + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ 1105 + hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); 1104 1106 if (!hdev) 1105 1107 return -ENOENT; 1106 1108
+24 -2
net/bluetooth/hci_conn.c
··· 613 613 return 0; 614 614 } 615 615 616 - struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) 616 + struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) 617 617 { 618 618 int use_src = bacmp(src, BDADDR_ANY); 619 619 struct hci_dev *hdev = NULL, *d; ··· 634 634 */ 635 635 636 636 if (use_src) { 637 - if (!bacmp(&d->bdaddr, src)) { 637 + bdaddr_t id_addr; 638 + u8 id_addr_type; 639 + 640 + if (src_type == BDADDR_BREDR) { 641 + if (!lmp_bredr_capable(d)) 642 + continue; 643 + bacpy(&id_addr, &d->bdaddr); 644 + id_addr_type = BDADDR_BREDR; 645 + } else { 646 + if (!lmp_le_capable(d)) 647 + continue; 648 + 649 + hci_copy_identity_address(d, &id_addr, 650 + &id_addr_type); 651 + 652 + /* Convert from HCI to three-value type */ 653 + if (id_addr_type == ADDR_LE_DEV_PUBLIC) 654 + id_addr_type = BDADDR_LE_PUBLIC; 655 + else 656 + id_addr_type = BDADDR_LE_RANDOM; 657 + } 658 + 659 + if (!bacmp(&id_addr, src) && id_addr_type == src_type) { 638 660 hdev = d; break; 639 661 } 640 662 } else {
+1 -1
net/bluetooth/l2cap_core.c
··· 7060 7060 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, 7061 7061 dst_type, __le16_to_cpu(psm)); 7062 7062 7063 - hdev = hci_get_route(dst, &chan->src); 7063 + hdev = hci_get_route(dst, &chan->src, chan->src_type); 7064 7064 if (!hdev) 7065 7065 return -EHOSTUNREACH; 7066 7066
+1 -1
net/bluetooth/rfcomm/tty.c
··· 178 178 struct hci_dev *hdev; 179 179 struct hci_conn *conn; 180 180 181 - hdev = hci_get_route(&dev->dst, &dev->src); 181 + hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR); 182 182 if (!hdev) 183 183 return; 184 184
+1 -1
net/bluetooth/sco.c
··· 219 219 220 220 BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); 221 221 222 - hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); 222 + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); 223 223 if (!hdev) 224 224 return -EHOSTUNREACH; 225 225
+10 -8
net/can/bcm.c
··· 77 77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 78 78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 79 79 80 - #define CAN_BCM_VERSION "20160617" 80 + #define CAN_BCM_VERSION "20161123" 81 81 82 82 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); 83 83 MODULE_LICENSE("Dual BSD/GPL"); ··· 109 109 u32 count; 110 110 u32 nframes; 111 111 u32 currframe; 112 - struct canfd_frame *frames; 113 - struct canfd_frame *last_frames; 112 + /* void pointers to arrays of struct can[fd]_frame */ 113 + void *frames; 114 + void *last_frames; 114 115 struct canfd_frame sframe; 115 116 struct canfd_frame last_sframe; 116 117 struct sock *sk; ··· 682 681 683 682 if (op->flags & RX_FILTER_ID) { 684 683 /* the easiest case */ 685 - bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); 684 + bcm_rx_update_and_send(op, op->last_frames, rxframe); 686 685 goto rx_starttimer; 687 686 } 688 687 ··· 1069 1068 1070 1069 if (msg_head->nframes) { 1071 1070 /* update CAN frames content */ 1072 - err = memcpy_from_msg((u8 *)op->frames, msg, 1071 + err = memcpy_from_msg(op->frames, msg, 1073 1072 msg_head->nframes * op->cfsiz); 1074 1073 if (err < 0) 1075 1074 return err; ··· 1119 1118 } 1120 1119 1121 1120 if (msg_head->nframes) { 1122 - err = memcpy_from_msg((u8 *)op->frames, msg, 1121 + err = memcpy_from_msg(op->frames, msg, 1123 1122 msg_head->nframes * op->cfsiz); 1124 1123 if (err < 0) { 1125 1124 if (op->frames != &op->sframe) ··· 1164 1163 /* check flags */ 1165 1164 1166 1165 if (op->flags & RX_RTR_FRAME) { 1166 + struct canfd_frame *frame0 = op->frames; 1167 1167 1168 1168 /* no timers in RTR-mode */ 1169 1169 hrtimer_cancel(&op->thrtimer); ··· 1176 1174 * prevent a full-load-loopback-test ... 
;-] 1177 1175 */ 1178 1176 if ((op->flags & TX_CP_CAN_ID) || 1179 - (op->frames[0].can_id == op->can_id)) 1180 - op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; 1177 + (frame0->can_id == op->can_id)) 1178 + frame0->can_id = op->can_id & ~CAN_RTR_FLAG; 1181 1179 1182 1180 } else { 1183 1181 if (op->flags & SETTIMER) {
+1
net/core/ethtool.c
··· 2570 2570 case ETHTOOL_GEEE: 2571 2571 case ETHTOOL_GTUNABLE: 2572 2572 case ETHTOOL_PHY_GTUNABLE: 2573 + case ETHTOOL_GLINKSETTINGS: 2573 2574 break; 2574 2575 default: 2575 2576 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+1 -1
net/core/flow_dissector.c
··· 1013 1013 return 0; 1014 1014 } 1015 1015 1016 - late_initcall_sync(init_default_flow_dissectors); 1016 + core_initcall(init_default_flow_dissectors);
+1 -1
net/core/rtnetlink.c
··· 2737 2737 ext_filter_mask)); 2738 2738 } 2739 2739 2740 - return min_ifinfo_dump_size; 2740 + return nlmsg_total_size(min_ifinfo_dump_size); 2741 2741 } 2742 2742 2743 2743 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
+1 -1
net/ipv4/udp.c
··· 1562 1562 udp_lib_rehash(sk, new_hash); 1563 1563 } 1564 1564 1565 - static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1565 + int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1566 1566 { 1567 1567 int rc; 1568 1568
+1 -1
net/ipv4/udp_impl.h
··· 25 25 int flags, int *addr_len); 26 26 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 27 27 int flags); 28 - int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 28 + int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 29 void udp_destroy_sock(struct sock *sk); 30 30 31 31 #ifdef CONFIG_PROC_FS
+12 -6
net/ipv6/addrconf.c
··· 183 183 184 184 static void addrconf_dad_start(struct inet6_ifaddr *ifp); 185 185 static void addrconf_dad_work(struct work_struct *w); 186 - static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 186 + static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); 187 187 static void addrconf_dad_run(struct inet6_dev *idev); 188 188 static void addrconf_rs_timer(unsigned long data); 189 189 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); ··· 2906 2906 spin_lock_bh(&ifp->lock); 2907 2907 ifp->flags &= ~IFA_F_TENTATIVE; 2908 2908 spin_unlock_bh(&ifp->lock); 2909 + rt_genid_bump_ipv6(dev_net(idev->dev)); 2909 2910 ipv6_ifa_notify(RTM_NEWADDR, ifp); 2910 2911 in6_ifa_put(ifp); 2911 2912 } ··· 3749 3748 { 3750 3749 struct inet6_dev *idev = ifp->idev; 3751 3750 struct net_device *dev = idev->dev; 3752 - bool notify = false; 3751 + bool bump_id, notify = false; 3753 3752 3754 3753 addrconf_join_solict(dev, &ifp->addr); 3755 3754 ··· 3764 3763 idev->cnf.accept_dad < 1 || 3765 3764 !(ifp->flags&IFA_F_TENTATIVE) || 3766 3765 ifp->flags & IFA_F_NODAD) { 3766 + bump_id = ifp->flags & IFA_F_TENTATIVE; 3767 3767 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3768 3768 spin_unlock(&ifp->lock); 3769 3769 read_unlock_bh(&idev->lock); 3770 3770 3771 - addrconf_dad_completed(ifp); 3771 + addrconf_dad_completed(ifp, bump_id); 3772 3772 return; 3773 3773 } 3774 3774 ··· 3829 3827 struct inet6_ifaddr, 3830 3828 dad_work); 3831 3829 struct inet6_dev *idev = ifp->idev; 3830 + bool bump_id, disable_ipv6 = false; 3832 3831 struct in6_addr mcaddr; 3833 - bool disable_ipv6 = false; 3834 3832 3835 3833 enum { 3836 3834 DAD_PROCESS, ··· 3900 3898 * DAD was successful 3901 3899 */ 3902 3900 3901 + bump_id = ifp->flags & IFA_F_TENTATIVE; 3903 3902 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3904 3903 spin_unlock(&ifp->lock); 3905 3904 write_unlock_bh(&idev->lock); 3906 3905 3907 - addrconf_dad_completed(ifp); 
3906 + addrconf_dad_completed(ifp, bump_id); 3908 3907 3909 3908 goto out; 3910 3909 } ··· 3942 3939 return true; 3943 3940 } 3944 3941 3945 - static void addrconf_dad_completed(struct inet6_ifaddr *ifp) 3942 + static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) 3946 3943 { 3947 3944 struct net_device *dev = ifp->idev->dev; 3948 3945 struct in6_addr lladdr; ··· 3994 3991 spin_unlock(&ifp->lock); 3995 3992 write_unlock_bh(&ifp->idev->lock); 3996 3993 } 3994 + 3995 + if (bump_id) 3996 + rt_genid_bump_ipv6(dev_net(dev)); 3997 3997 } 3998 3998 3999 3999 static void addrconf_dad_run(struct inet6_dev *idev)
+1 -1
net/ipv6/udp.c
··· 512 512 return; 513 513 } 514 514 515 - static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 515 + int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 516 516 { 517 517 int rc; 518 518
+1 -1
net/ipv6/udp_impl.h
··· 26 26 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 27 27 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 28 28 int flags, int *addr_len); 29 - int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29 + int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 30 30 void udpv6_destroy_sock(struct sock *sk); 31 31 32 32 #ifdef CONFIG_PROC_FS
+1 -1
net/l2tp/l2tp_eth.c
··· 97 97 unsigned int len = skb->len; 98 98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len); 99 99 100 - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 100 + if (likely(ret == NET_XMIT_SUCCESS)) { 101 101 atomic_long_add(len, &priv->tx_bytes); 102 102 atomic_long_inc(&priv->tx_packets); 103 103 } else {
+1 -1
net/sched/cls_api.c
··· 112 112 113 113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; 114 114 it_chain = &tp->next) 115 - tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false); 115 + tfilter_notify(net, oskb, n, tp, 0, event, false); 116 116 } 117 117 118 118 /* Select new prio value from the range, managed by kernel. */
+3 -2
net/tipc/link.c
··· 1492 1492 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1493 1493 l->tolerance = peers_tol; 1494 1494 1495 - if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI, 1496 - TIPC_MAX_LINK_PRI)) { 1495 + /* Update own prio if peer indicates a different value */ 1496 + if ((peers_prio != l->priority) && 1497 + in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { 1497 1498 l->priority = peers_prio; 1498 1499 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1499 1500 }
+5 -5
net/tipc/monitor.c
··· 455 455 int i, applied_bef; 456 456 457 457 state->probing = false; 458 - if (!dlen) 459 - return; 460 458 461 459 /* Sanity check received domain record */ 462 - if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) { 463 - pr_warn_ratelimited("Received illegal domain record\n"); 460 + if (dlen < dom_rec_len(arrv_dom, 0)) 464 461 return; 465 - } 462 + if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) 463 + return; 464 + if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) 465 + return; 466 466 467 467 /* Synch generation numbers with peer if link just came up */ 468 468 if (!state->synched) {
+1 -1
net/tipc/socket.c
··· 184 184 185 185 static bool tsk_conn_cong(struct tipc_sock *tsk) 186 186 { 187 - return tsk->snt_unacked >= tsk->snd_win; 187 + return tsk->snt_unacked > tsk->snd_win; 188 188 } 189 189 190 190 /* tsk_blocks(): translate a buffer size in bytes to number of