Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' into for-next

Back-merge 4.17-rc3 fixes for further development.
This will bump the base to 4.17-rc2, too.

Signed-off-by: Takashi Iwai <tiwai@suse.de>

+5903 -2614
+13
Documentation/core-api/kernel-api.rst
··· 136 136 .. kernel-doc:: lib/list_sort.c 137 137 :export: 138 138 139 + Text Searching 140 + -------------- 141 + 142 + .. kernel-doc:: lib/textsearch.c 143 + :doc: ts_intro 144 + 145 + .. kernel-doc:: lib/textsearch.c 146 + :export: 147 + 148 + .. kernel-doc:: include/linux/textsearch.h 149 + :functions: textsearch_find textsearch_next \ 150 + textsearch_get_pattern textsearch_get_pattern_len 151 + 139 152 UUID/GUID 140 153 --------- 141 154
+5 -18
Documentation/devicetree/bindings/thermal/exynos-thermal.txt
··· 49 49 - samsung,exynos5433-tmu: 8 50 50 - samsung,exynos7-tmu: 8 51 51 52 - Following properties are mandatory (depending on SoC): 53 - - samsung,tmu_gain: Gain value for internal TMU operation. 54 - - samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage 55 - - samsung,tmu_noise_cancel_mode: Mode for noise cancellation 56 - - samsung,tmu_efuse_value: Default level of temperature - it is needed when 57 - in factory fusing produced wrong value 58 - - samsung,tmu_min_efuse_value: Minimum temperature fused value 59 - - samsung,tmu_max_efuse_value: Maximum temperature fused value 60 - - samsung,tmu_first_point_trim: First point trimming value 61 - - samsung,tmu_second_point_trim: Second point trimming value 62 - - samsung,tmu_default_temp_offset: Default temperature offset 63 - - samsung,tmu_cal_type: Callibration type 64 - 65 52 ** Optional properties: 66 53 67 54 - vtmu-supply: This entry is optional and provides the regulator node supplying ··· 65 78 clocks = <&clock 383>; 66 79 clock-names = "tmu_apbif"; 67 80 vtmu-supply = <&tmu_regulator_node>; 68 - #include "exynos4412-tmu-sensor-conf.dtsi" 81 + #thermal-sensor-cells = <0>; 69 82 }; 70 83 71 84 Example 2): ··· 76 89 interrupts = <0 58 0>; 77 90 clocks = <&clock 21>; 78 91 clock-names = "tmu_apbif"; 79 - #include "exynos5440-tmu-sensor-conf.dtsi" 92 + #thermal-sensor-cells = <0>; 80 93 }; 81 94 82 95 Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") ··· 86 99 interrupts = <0 184 0>; 87 100 clocks = <&clock 318>, <&clock 318>; 88 101 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 89 - #include "exynos4412-tmu-sensor-conf.dtsi" 102 + #thermal-sensor-cells = <0>; 90 103 }; 91 104 92 105 tmu_cpu3: tmu@1006c000 { ··· 95 108 interrupts = <0 185 0>; 96 109 clocks = <&clock 318>, <&clock 319>; 97 110 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 98 - #include "exynos4412-tmu-sensor-conf.dtsi" 111 + #thermal-sensor-cells = <0>; 99 112 }; 100 113 101 114 tmu_gpu: 
tmu@100a0000 { ··· 104 117 interrupts = <0 215 0>; 105 118 clocks = <&clock 319>, <&clock 318>; 106 119 clock-names = "tmu_apbif", "tmu_triminfo_apbif"; 107 - #include "exynos4412-tmu-sensor-conf.dtsi" 120 + #thermal-sensor-cells = <0>; 108 121 }; 109 122 110 123 Note: For multi-instance tmu each instance should have an alias correctly
+1 -15
Documentation/devicetree/bindings/thermal/thermal.txt
··· 55 55 the different fan speeds possible. Cooling states are referred to by 56 56 single unsigned integers, where larger numbers mean greater heat 57 57 dissipation. The precise set of cooling states associated with a device 58 - (as referred to by the cooling-min-level and cooling-max-level 59 - properties) should be defined in a particular device's binding. 58 + should be defined in a particular device's binding. 60 59 For more examples of cooling devices, refer to the example sections below. 61 60 62 61 Required properties: ··· 67 68 the maximum cooling state requested in the reference. 68 69 See Cooling device maps section below for more details 69 70 on how consumers refer to cooling devices. 70 - 71 - Optional properties: 72 - - cooling-min-level: An integer indicating the smallest 73 - Type: unsigned cooling state accepted. Typically 0. 74 - Size: one cell 75 - 76 - - cooling-max-level: An integer indicating the largest 77 - Type: unsigned cooling state accepted. 78 - Size: one cell 79 71 80 72 * Trip points 81 73 ··· 216 226 396000 950000 217 227 198000 850000 218 228 >; 219 - cooling-min-level = <0>; 220 - cooling-max-level = <3>; 221 229 #cooling-cells = <2>; /* min followed by max */ 222 230 }; 223 231 ... ··· 229 241 */ 230 242 fan0: fan@48 { 231 243 ... 232 - cooling-min-level = <0>; 233 - cooling-max-level = <9>; 234 244 #cooling-cells = <2>; /* min followed by max */ 235 245 }; 236 246 };
+21
Documentation/devicetree/bindings/timer/nuvoton,npcm7xx-timer.txt
··· 1 + Nuvoton NPCM7xx timer 2 + 3 + Nuvoton NPCM7xx have three timer modules, each timer module provides five 24-bit 4 + timer counters. 5 + 6 + Required properties: 7 + - compatible : "nuvoton,npcm750-timer" for Poleg NPCM750. 8 + - reg : Offset and length of the register set for the device. 9 + - interrupts : Contain the timer interrupt with flags for 10 + falling edge. 11 + - clocks : phandle of timer reference clock (usually a 25 MHz clock). 12 + 13 + Example: 14 + 15 + timer@f0008000 { 16 + compatible = "nuvoton,npcm750-timer"; 17 + interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; 18 + reg = <0xf0008000 0x50>; 19 + clocks = <&clk NPCM7XX_CLK_TIMER>; 20 + }; 21 +
+1 -1
Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
··· 15 15 - interrupts : Should be the clock event device interrupt. 16 16 - clocks : The clocks provided by the SoC to drive the timer, must contain 17 17 an entry for each entry in clock-names. 18 - - clock-names : Must include the following entries: "igp" and "per". 18 + - clock-names : Must include the following entries: "ipg" and "per". 19 19 20 20 Example: 21 21 tpm5: tpm@40260000 {
+29 -12
Documentation/livepatch/shadow-vars.txt
··· 34 34 - data[] - storage for shadow data 35 35 36 36 It is important to note that the klp_shadow_alloc() and 37 - klp_shadow_get_or_alloc() calls, described below, store a *copy* of the 38 - data that the functions are provided. Callers should provide whatever 39 - mutual exclusion is required of the shadow data. 37 + klp_shadow_get_or_alloc() are zeroing the variable by default. 38 + They also allow to call a custom constructor function when a non-zero 39 + value is needed. Callers should provide whatever mutual exclusion 40 + is required. 41 + 42 + Note that the constructor is called under klp_shadow_lock spinlock. It allows 43 + to do actions that can be done only once when a new variable is allocated. 40 44 41 45 * klp_shadow_get() - retrieve a shadow variable data pointer 42 46 - search hashtable for <obj, id> pair ··· 51 47 - WARN and return NULL 52 48 - if <obj, id> doesn't already exist 53 49 - allocate a new shadow variable 54 - - copy data into the new shadow variable 50 + - initialize the variable using a custom constructor and data when provided 55 51 - add <obj, id> to the global hashtable 56 52 57 53 * klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable ··· 60 56 - return existing shadow variable 61 57 - if <obj, id> doesn't already exist 62 58 - allocate a new shadow variable 63 - - copy data into the new shadow variable 59 + - initialize the variable using a custom constructor and data when provided 64 60 - add <obj, id> pair to the global hashtable 65 61 66 62 * klp_shadow_free() - detach and free a <obj, id> shadow variable 67 63 - find and remove a <obj, id> reference from global hashtable 68 - - if found, free shadow variable 64 + - if found 65 + - call destructor function if defined 66 + - free shadow variable 69 67 70 68 * klp_shadow_free_all() - detach and free all <*, id> shadow variables 71 69 - find and remove any <*, id> references from global hashtable 72 - - if found, free shadow variable 70 + - if found 71 + - 
call destructor function if defined 72 + - free shadow variable 73 73 74 74 75 75 2. Use cases ··· 115 107 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 116 108 117 109 /* Attach a corresponding shadow variable, then initialize it */ 118 - ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp); 110 + ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp, 111 + NULL, NULL); 119 112 if (!ps_lock) 120 113 goto shadow_fail; 121 114 spin_lock_init(ps_lock); ··· 140 131 141 132 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 142 133 { 143 - klp_shadow_free(sta, PS_LOCK); 134 + klp_shadow_free(sta, PS_LOCK, NULL); 144 135 kfree(sta); 145 136 ... 146 137 ··· 157 148 For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is 158 149 inside ieee80211_sta_ps_deliver_wakeup(): 159 150 151 + int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data) 152 + { 153 + spinlock_t *lock = shadow_data; 154 + 155 + spin_lock_init(lock); 156 + return 0; 157 + } 158 + 160 159 #define PS_LOCK 1 161 160 void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 162 161 { 163 - DEFINE_SPINLOCK(ps_lock_fallback); 164 162 spinlock_t *ps_lock; 165 163 166 164 /* sync with ieee80211_tx_h_unicast_ps_buf */ 167 165 ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK, 168 - &ps_lock_fallback, sizeof(ps_lock_fallback), 169 - GFP_ATOMIC); 166 + sizeof(*ps_lock), GFP_ATOMIC, 167 + ps_lock_shadow_ctor, NULL); 168 + 170 169 if (ps_lock) 171 170 spin_lock(ps_lock); 172 171 ...
+3 -3
Documentation/networking/filter.txt
··· 169 169 BPF engine and instruction set 170 170 ------------------------------ 171 171 172 - Under tools/net/ there's a small helper tool called bpf_asm which can 172 + Under tools/bpf/ there's a small helper tool called bpf_asm which can 173 173 be used to write low-level filters for example scenarios mentioned in the 174 174 previous section. Asm-like syntax mentioned here has been implemented in 175 175 bpf_asm and will be used for further explanations (instead of dealing with ··· 359 359 In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF 360 360 filters that might not be obvious at first, it's good to test filters before 361 361 attaching to a live system. For that purpose, there's a small tool called 362 - bpf_dbg under tools/net/ in the kernel source directory. This debugger allows 362 + bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows 363 363 for testing BPF filters against given pcap files, single stepping through the 364 364 BPF code on the pcap's packets and to do BPF machine register dumps. 365 365 ··· 483 483 [ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00 484 484 [ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3 485 485 486 - In the kernel source tree under tools/net/, there's bpf_jit_disasm for 486 + In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for 487 487 generating disassembly out of the kernel log's hexdump: 488 488 489 489 # ./bpf_jit_disasm
+4 -4
Documentation/networking/ip-sysctl.txt
··· 1390 1390 Default: 2 (as specified by RFC3810 9.1) 1391 1391 Minimum: 1 (as specified by RFC6636 4.5) 1392 1392 1393 - max_dst_opts_cnt - INTEGER 1393 + max_dst_opts_number - INTEGER 1394 1394 Maximum number of non-padding TLVs allowed in a Destination 1395 1395 options extension header. If this value is less than zero 1396 1396 then unknown options are disallowed and the number of known 1397 1397 TLVs allowed is the absolute value of this number. 1398 1398 Default: 8 1399 1399 1400 - max_hbh_opts_cnt - INTEGER 1400 + max_hbh_opts_number - INTEGER 1401 1401 Maximum number of non-padding TLVs allowed in a Hop-by-Hop 1402 1402 options extension header. If this value is less than zero 1403 1403 then unknown options are disallowed and the number of known 1404 1404 TLVs allowed is the absolute value of this number. 1405 1405 Default: 8 1406 1406 1407 - max dst_opts_len - INTEGER 1407 + max_dst_opts_length - INTEGER 1408 1408 Maximum length allowed for a Destination options extension 1409 1409 header. 1410 1410 Default: INT_MAX (unlimited) 1411 1411 1412 - max hbh_opts_len - INTEGER 1412 + max_hbh_length - INTEGER 1413 1413 Maximum length allowed for a Hop-by-Hop options extension 1414 1414 header. 1415 1415 Default: INT_MAX (unlimited)
+35 -6
MAINTAINERS
··· 1373 1373 F: drivers/net/ethernet/amd/am79c961a.* 1374 1374 1375 1375 ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT 1376 - M: Uwe Kleine-König <kernel@pengutronix.de> 1376 + M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> 1377 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 1377 1378 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1378 1379 S: Maintained 1379 1380 N: efm32 ··· 1402 1401 1403 1402 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 1404 1403 M: Shawn Guo <shawnguo@kernel.org> 1405 - M: Sascha Hauer <kernel@pengutronix.de> 1404 + M: Sascha Hauer <s.hauer@pengutronix.de> 1405 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 1406 1406 R: Fabio Estevam <fabio.estevam@nxp.com> 1407 1407 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1408 1408 S: Maintained ··· 1418 1416 1419 1417 ARM/FREESCALE VYBRID ARM ARCHITECTURE 1420 1418 M: Shawn Guo <shawnguo@kernel.org> 1421 - M: Sascha Hauer <kernel@pengutronix.de> 1419 + M: Sascha Hauer <s.hauer@pengutronix.de> 1420 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 1422 1421 R: Stefan Agner <stefan@agner.ch> 1423 1422 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1424 1423 S: Maintained ··· 4248 4245 4249 4246 DEVICE DIRECT ACCESS (DAX) 4250 4247 M: Dan Williams <dan.j.williams@intel.com> 4248 + M: Dave Jiang <dave.jiang@intel.com> 4249 + M: Ross Zwisler <ross.zwisler@linux.intel.com> 4250 + M: Vishal Verma <vishal.l.verma@intel.com> 4251 4251 L: linux-nvdimm@lists.01.org 4252 4252 S: Supported 4253 4253 F: drivers/dax/ ··· 5658 5652 F: Documentation/devicetree/bindings/net/fsl-fec.txt 5659 5653 5660 5654 FREESCALE IMX / MXC FRAMEBUFFER DRIVER 5661 - M: Sascha Hauer <kernel@pengutronix.de> 5655 + M: Sascha Hauer <s.hauer@pengutronix.de> 5656 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 5662 5657 L: linux-fbdev@vger.kernel.org 5663 5658 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5664 5659 S: 
Maintained ··· 5790 5783 F: fs/crypto/ 5791 5784 F: include/linux/fscrypt*.h 5792 5785 F: Documentation/filesystems/fscrypt.rst 5786 + 5787 + FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE 5788 + M: Jan Kara <jack@suse.cz> 5789 + R: Amir Goldstein <amir73il@gmail.com> 5790 + L: linux-fsdevel@vger.kernel.org 5791 + S: Maintained 5792 + F: fs/notify/ 5793 + F: include/linux/fsnotify*.h 5793 5794 5794 5795 FUJITSU LAPTOP EXTRAS 5795 5796 M: Jonathan Woithe <jwoithe@just42.net> ··· 6271 6256 F: drivers/media/usb/hdpvr/ 6272 6257 6273 6258 HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER 6274 - M: Jimmy Vance <jimmy.vance@hpe.com> 6259 + M: Jerry Hoemann <jerry.hoemann@hpe.com> 6275 6260 S: Supported 6276 6261 F: Documentation/watchdog/hpwdt.txt 6277 6262 F: drivers/watchdog/hpwdt.c ··· 8063 8048 8064 8049 LIBNVDIMM BLK: MMIO-APERTURE DRIVER 8065 8050 M: Ross Zwisler <ross.zwisler@linux.intel.com> 8051 + M: Dan Williams <dan.j.williams@intel.com> 8052 + M: Vishal Verma <vishal.l.verma@intel.com> 8053 + M: Dave Jiang <dave.jiang@intel.com> 8066 8054 L: linux-nvdimm@lists.01.org 8067 8055 Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8068 8056 S: Supported ··· 8074 8056 8075 8057 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE 8076 8058 M: Vishal Verma <vishal.l.verma@intel.com> 8059 + M: Dan Williams <dan.j.williams@intel.com> 8060 + M: Ross Zwisler <ross.zwisler@linux.intel.com> 8061 + M: Dave Jiang <dave.jiang@intel.com> 8077 8062 L: linux-nvdimm@lists.01.org 8078 8063 Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8079 8064 S: Supported ··· 8084 8063 8085 8064 LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER 8086 8065 M: Ross Zwisler <ross.zwisler@linux.intel.com> 8066 + M: Dan Williams <dan.j.williams@intel.com> 8067 + M: Vishal Verma <vishal.l.verma@intel.com> 8068 + M: Dave Jiang <dave.jiang@intel.com> 8087 8069 L: linux-nvdimm@lists.01.org 8088 8070 Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8089 8071 S: Supported ··· 8102 8078 8103 
8079 LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM 8104 8080 M: Dan Williams <dan.j.williams@intel.com> 8081 + M: Ross Zwisler <ross.zwisler@linux.intel.com> 8082 + M: Vishal Verma <vishal.l.verma@intel.com> 8083 + M: Dave Jiang <dave.jiang@intel.com> 8105 8084 L: linux-nvdimm@lists.01.org 8106 8085 Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 8107 8086 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git ··· 9792 9765 F: tools/testing/selftests/net/ 9793 9766 F: lib/net_utils.c 9794 9767 F: lib/random32.c 9768 + F: Documentation/networking/ 9795 9769 9796 9770 NETWORKING [IPSEC] 9797 9771 M: Steffen Klassert <steffen.klassert@secunet.com> ··· 12844 12816 12845 12817 SIOX 12846 12818 M: Gavin Schenk <g.schenk@eckelmann.de> 12847 - M: Uwe Kleine-König <kernel@pengutronix.de> 12819 + M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> 12820 + R: Pengutronix Kernel Team <kernel@pengutronix.de> 12848 12821 S: Supported 12849 12822 F: drivers/siox/* 12850 12823 F: include/trace/events/siox.h
+1 -1
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 17 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc1 5 + EXTRAVERSION = -rc2 6 6 NAME = Fearless Coyote 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm64/kernel/traps.c
··· 366 366 } 367 367 368 368 /* Force signals we don't understand to SIGKILL */ 369 - if (WARN_ON(signal != SIGKILL || 369 + if (WARN_ON(signal != SIGKILL && 370 370 siginfo_layout(signal, code) != SIL_FAULT)) { 371 371 signal = SIGKILL; 372 372 }
+2 -2
arch/arm64/mm/kasan_init.c
··· 204 204 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 205 205 206 206 kasan_map_populate(kimg_shadow_start, kimg_shadow_end, 207 - pfn_to_nid(virt_to_pfn(lm_alias(_text)))); 207 + early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); 208 208 209 209 kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, 210 210 (void *)mod_shadow_start); ··· 224 224 225 225 kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), 226 226 (unsigned long)kasan_mem_to_shadow(end), 227 - pfn_to_nid(virt_to_pfn(start))); 227 + early_pfn_to_nid(virt_to_pfn(start))); 228 228 } 229 229 230 230 /*
+6
arch/mips/boot/dts/img/boston.dts
··· 51 51 ranges = <0x02000000 0 0x40000000 52 52 0x40000000 0 0x40000000>; 53 53 54 + bus-range = <0x00 0xff>; 55 + 54 56 interrupt-map-mask = <0 0 0 7>; 55 57 interrupt-map = <0 0 0 1 &pci0_intc 1>, 56 58 <0 0 0 2 &pci0_intc 2>, ··· 81 79 ranges = <0x02000000 0 0x20000000 82 80 0x20000000 0 0x20000000>; 83 81 82 + bus-range = <0x00 0xff>; 83 + 84 84 interrupt-map-mask = <0 0 0 7>; 85 85 interrupt-map = <0 0 0 1 &pci1_intc 1>, 86 86 <0 0 0 2 &pci1_intc 2>, ··· 110 106 111 107 ranges = <0x02000000 0 0x16000000 112 108 0x16000000 0 0x100000>; 109 + 110 + bus-range = <0x00 0xff>; 113 111 114 112 interrupt-map-mask = <0 0 0 7>; 115 113 interrupt-map = <0 0 0 1 &pci2_intc 1>,
+3 -1
arch/mips/include/asm/io.h
··· 307 307 #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) 308 308 #define war_io_reorder_wmb() wmb() 309 309 #else 310 - #define war_io_reorder_wmb() do { } while (0) 310 + #define war_io_reorder_wmb() barrier() 311 311 #endif 312 312 313 313 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ ··· 377 377 BUG(); \ 378 378 } \ 379 379 \ 380 + /* prevent prefetching of coherent DMA data prematurely */ \ 381 + rmb(); \ 380 382 return pfx##ioswab##bwlq(__mem, __val); \ 381 383 } 382 384
+9 -2
arch/mips/include/asm/uaccess.h
··· 654 654 { 655 655 __kernel_size_t res; 656 656 657 + #ifdef CONFIG_CPU_MICROMIPS 658 + /* micromips memset / bzero also clobbers t7 & t8 */ 659 + #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" 660 + #else 661 + #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" 662 + #endif /* CONFIG_CPU_MICROMIPS */ 663 + 657 664 if (eva_kernel_access()) { 658 665 __asm__ __volatile__( 659 666 "move\t$4, %1\n\t" ··· 670 663 "move\t%0, $6" 671 664 : "=r" (res) 672 665 : "r" (addr), "r" (size) 673 - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 666 + : bzero_clobbers); 674 667 } else { 675 668 might_fault(); 676 669 __asm__ __volatile__( ··· 681 674 "move\t%0, $6" 682 675 : "=r" (res) 683 676 : "r" (addr), "r" (size) 684 - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 677 + : bzero_clobbers); 685 678 } 686 679 687 680 return res;
+8 -3
arch/mips/lib/memset.S
··· 219 219 1: PTR_ADDIU a0, 1 /* fill bytewise */ 220 220 R10KCBARRIER(0(ra)) 221 221 bne t1, a0, 1b 222 - sb a1, -1(a0) 222 + EX(sb, a1, -1(a0), .Lsmall_fixup\@) 223 223 224 224 2: jr ra /* done */ 225 225 move a2, zero ··· 252 252 PTR_L t0, TI_TASK($28) 253 253 andi a2, STORMASK 254 254 LONG_L t0, THREAD_BUADDR(t0) 255 - LONG_ADDU a2, t1 255 + LONG_ADDU a2, a0 256 256 jr ra 257 257 LONG_SUBU a2, t0 258 258 259 259 .Llast_fixup\@: 260 260 jr ra 261 - andi v1, a2, STORMASK 261 + nop 262 + 263 + .Lsmall_fixup\@: 264 + PTR_SUBU a2, t1, a0 265 + jr ra 266 + PTR_ADDIU a2, 1 262 267 263 268 .endm 264 269
+1 -1
arch/parisc/kernel/Makefile
··· 23 23 obj-$(CONFIG_PA11) += pci-dma.o 24 24 obj-$(CONFIG_PCI) += pci.o 25 25 obj-$(CONFIG_MODULES) += module.o 26 - obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o 26 + obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o 27 27 obj-$(CONFIG_STACKTRACE)+= stacktrace.o 28 28 obj-$(CONFIG_AUDIT) += audit.o 29 29 obj64-$(CONFIG_AUDIT) += compat_audit.o
+2 -1
arch/powerpc/kernel/eeh_pe.c
··· 807 807 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); 808 808 809 809 /* PCI Command: 0x4 */ 810 - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); 810 + eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | 811 + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 811 812 812 813 /* Check the PCIe link is ready */ 813 814 eeh_bridge_check_link(edev);
+2 -2
arch/powerpc/kernel/idle_book3s.S
··· 553 553 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 554 554 lbz r0,HSTATE_HWTHREAD_STATE(r13) 555 555 cmpwi r0,KVM_HWTHREAD_IN_KERNEL 556 - beq 1f 556 + beq 0f 557 557 li r0,KVM_HWTHREAD_IN_KERNEL 558 558 stb r0,HSTATE_HWTHREAD_STATE(r13) 559 559 /* Order setting hwthread_state vs. testing hwthread_req */ 560 560 sync 561 - lbz r0,HSTATE_HWTHREAD_REQ(r13) 561 + 0: lbz r0,HSTATE_HWTHREAD_REQ(r13) 562 562 cmpwi r0,0 563 563 beq 1f 564 564 b kvm_start_guest
+11
arch/powerpc/kernel/setup_64.c
··· 890 890 return; 891 891 892 892 l1d_size = ppc64_caches.l1d.size; 893 + 894 + /* 895 + * If there is no d-cache-size property in the device tree, l1d_size 896 + * could be zero. That leads to the loop in the asm wrapping around to 897 + * 2^64-1, and then walking off the end of the fallback area and 898 + * eventually causing a page fault which is fatal. Just default to 899 + * something vaguely sane. 900 + */ 901 + if (!l1d_size) 902 + l1d_size = (64 * 1024); 903 + 893 904 limit = min(ppc64_bolted_size(), ppc64_rma_size); 894 905 895 906 /*
+1 -1
arch/powerpc/lib/feature-fixups.c
··· 55 55 unsigned int *target = (unsigned int *)branch_target(src); 56 56 57 57 /* Branch within the section doesn't need translating */ 58 - if (target < alt_start || target >= alt_end) { 58 + if (target < alt_start || target > alt_end) { 59 59 instr = translate_branch(dest, src); 60 60 if (!instr) 61 61 return 1;
+1 -1
arch/powerpc/platforms/cell/spufs/sched.c
··· 1093 1093 LOAD_INT(c), LOAD_FRAC(c), 1094 1094 count_active_contexts(), 1095 1095 atomic_read(&nr_spu_contexts), 1096 - idr_get_cursor(&task_active_pid_ns(current)->idr)); 1096 + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); 1097 1097 return 0; 1098 1098 } 1099 1099
+4
arch/powerpc/sysdev/xive/native.c
··· 389 389 if (xive_pool_vps == XIVE_INVALID_VP) 390 390 return; 391 391 392 + /* Check if pool VP already active, if it is, pull it */ 393 + if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP) 394 + in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); 395 + 392 396 /* Enable the pool VP */ 393 397 vp = xive_pool_vps + cpu; 394 398 pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
+1
arch/s390/Kbuild
··· 8 8 obj-y += net/ 9 9 obj-$(CONFIG_PCI) += pci/ 10 10 obj-$(CONFIG_NUMA) += numa/ 11 + obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/
+24 -8
arch/s390/Kconfig
··· 47 47 config ARCH_SUPPORTS_DEBUG_PAGEALLOC 48 48 def_bool y 49 49 50 - config KEXEC 51 - def_bool y 52 - select KEXEC_CORE 53 - 54 50 config AUDIT_ARCH 55 51 def_bool y 56 52 ··· 286 290 older machines. 287 291 288 292 config MARCH_Z14 289 - bool "IBM z14" 293 + bool "IBM z14 ZR1 and z14" 290 294 select HAVE_MARCH_Z14_FEATURES 291 295 help 292 - Select this to enable optimizations for IBM z14 (3906 series). 293 - The kernel will be slightly faster but will not work on older 294 - machines. 296 + Select this to enable optimizations for IBM z14 ZR1 and z14 (3907 297 + and 3906 series). The kernel will be slightly faster but will not 298 + work on older machines. 295 299 296 300 endchoice 297 301 ··· 520 524 source kernel/Kconfig.preempt 521 525 522 526 source kernel/Kconfig.hz 527 + 528 + config KEXEC 529 + def_bool y 530 + select KEXEC_CORE 531 + 532 + config KEXEC_FILE 533 + bool "kexec file based system call" 534 + select KEXEC_CORE 535 + select BUILD_BIN2C 536 + depends on CRYPTO 537 + depends on CRYPTO_SHA256 538 + depends on CRYPTO_SHA256_S390 539 + help 540 + Enable the kexec file based system call. In contrast to the normal 541 + kexec system call this system call takes file descriptors for the 542 + kernel and initramfs as arguments. 543 + 544 + config ARCH_HAS_KEXEC_PURGATORY 545 + def_bool y 546 + depends on KEXEC_FILE 523 547 524 548 config ARCH_RANDOM 525 549 def_bool y
-6
arch/s390/boot/Makefile
··· 3 3 # Makefile for the linux s390-specific parts of the memory manager. 4 4 # 5 5 6 - COMPILE_VERSION := __linux_compile_version_id__`hostname | \ 7 - tr -c '[0-9A-Za-z]' '_'`__`date | \ 8 - tr -c '[0-9A-Za-z]' '_'`_t 9 - 10 - ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. 11 - 12 6 targets := image 13 7 targets += bzImage 14 8 subdir- := compressed
+1
arch/s390/boot/compressed/.gitignore
··· 1 1 sizes.h 2 2 vmlinux 3 3 vmlinux.lds 4 + vmlinux.bin.full
+20 -10
arch/s390/configs/default_defconfig arch/s390/configs/debug_defconfig
··· 24 24 CONFIG_CGROUP_DEVICE=y 25 25 CONFIG_CGROUP_CPUACCT=y 26 26 CONFIG_CGROUP_PERF=y 27 - CONFIG_CHECKPOINT_RESTORE=y 28 27 CONFIG_NAMESPACES=y 29 28 CONFIG_USER_NS=y 30 29 CONFIG_SCHED_AUTOGROUP=y 31 30 CONFIG_BLK_DEV_INITRD=y 32 31 CONFIG_EXPERT=y 33 32 # CONFIG_SYSFS_SYSCALL is not set 33 + CONFIG_CHECKPOINT_RESTORE=y 34 34 CONFIG_BPF_SYSCALL=y 35 35 CONFIG_USERFAULTFD=y 36 36 # CONFIG_COMPAT_BRK is not set ··· 59 59 CONFIG_DEFAULT_DEADLINE=y 60 60 CONFIG_LIVEPATCH=y 61 61 CONFIG_TUNE_ZEC12=y 62 - CONFIG_NR_CPUS=256 62 + CONFIG_NR_CPUS=512 63 63 CONFIG_NUMA=y 64 64 CONFIG_PREEMPT=y 65 65 CONFIG_HZ_100=y 66 + CONFIG_KEXEC_FILE=y 66 67 CONFIG_MEMORY_HOTPLUG=y 67 68 CONFIG_MEMORY_HOTREMOVE=y 68 69 CONFIG_KSM=y ··· 306 305 CONFIG_IP6_NF_NAT=m 307 306 CONFIG_IP6_NF_TARGET_MASQUERADE=m 308 307 CONFIG_NF_TABLES_BRIDGE=m 309 - CONFIG_NET_SCTPPROBE=m 310 308 CONFIG_RDS=m 311 309 CONFIG_RDS_RDMA=m 312 310 CONFIG_RDS_TCP=m ··· 364 364 CONFIG_NET_ACT_SKBEDIT=m 365 365 CONFIG_NET_ACT_CSUM=m 366 366 CONFIG_DNS_RESOLVER=y 367 + CONFIG_OPENVSWITCH=m 367 368 CONFIG_NETLINK_DIAG=m 368 369 CONFIG_CGROUP_NET_PRIO=y 369 370 CONFIG_BPF_JIT=y 370 371 CONFIG_NET_PKTGEN=m 371 - CONFIG_NET_TCPPROBE=m 372 372 CONFIG_DEVTMPFS=y 373 373 CONFIG_DMA_CMA=y 374 374 CONFIG_CMA_SIZE_MBYTES=0 ··· 380 380 CONFIG_BLK_DEV_NBD=m 381 381 CONFIG_BLK_DEV_RAM=y 382 382 CONFIG_BLK_DEV_RAM_SIZE=32768 383 - CONFIG_BLK_DEV_RAM_DAX=y 384 383 CONFIG_VIRTIO_BLK=y 385 384 CONFIG_BLK_DEV_RBD=m 385 + CONFIG_BLK_DEV_NVME=m 386 386 CONFIG_ENCLOSURE_SERVICES=m 387 387 CONFIG_GENWQE=m 388 388 CONFIG_RAID_ATTRS=m ··· 461 461 CONFIG_PPPOL2TP=m 462 462 CONFIG_PPP_ASYNC=m 463 463 CONFIG_PPP_SYNC_TTY=m 464 + CONFIG_INPUT_EVDEV=y 464 465 # CONFIG_INPUT_KEYBOARD is not set 465 466 # CONFIG_INPUT_MOUSE is not set 466 467 # CONFIG_SERIO is not set ··· 475 474 CONFIG_WATCHDOG_NOWAYOUT=y 476 475 CONFIG_SOFT_WATCHDOG=m 477 476 CONFIG_DIAG288_WATCHDOG=m 477 + CONFIG_DRM=y 478 + CONFIG_DRM_VIRTIO_GPU=y 479 + 
CONFIG_FRAMEBUFFER_CONSOLE=y 478 480 # CONFIG_HID is not set 479 481 # CONFIG_USB_SUPPORT is not set 480 482 CONFIG_INFINIBAND=m ··· 486 482 CONFIG_MLX5_INFINIBAND=m 487 483 CONFIG_VFIO=m 488 484 CONFIG_VFIO_PCI=m 485 + CONFIG_VIRTIO_PCI=m 489 486 CONFIG_VIRTIO_BALLOON=m 487 + CONFIG_VIRTIO_INPUT=y 490 488 CONFIG_EXT4_FS=y 491 489 CONFIG_EXT4_FS_POSIX_ACL=y 492 490 CONFIG_EXT4_FS_SECURITY=y ··· 647 641 CONFIG_TEST_BPF=m 648 642 CONFIG_BUG_ON_DATA_CORRUPTION=y 649 643 CONFIG_S390_PTDUMP=y 644 + CONFIG_PERSISTENT_KEYRINGS=y 645 + CONFIG_BIG_KEYS=y 650 646 CONFIG_ENCRYPTED_KEYS=m 651 647 CONFIG_SECURITY=y 652 648 CONFIG_SECURITY_NETWORK=y ··· 657 649 CONFIG_SECURITY_SELINUX_BOOTPARAM=y 658 650 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 659 651 CONFIG_SECURITY_SELINUX_DISABLE=y 652 + CONFIG_INTEGRITY_SIGNATURE=y 653 + CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 660 654 CONFIG_IMA=y 655 + CONFIG_IMA_DEFAULT_HASH_SHA256=y 656 + CONFIG_IMA_WRITE_POLICY=y 661 657 CONFIG_IMA_APPRAISE=y 662 - CONFIG_CRYPTO_RSA=m 663 658 CONFIG_CRYPTO_DH=m 664 659 CONFIG_CRYPTO_ECDH=m 665 660 CONFIG_CRYPTO_USER=m 661 + # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 666 662 CONFIG_CRYPTO_PCRYPT=m 667 663 CONFIG_CRYPTO_CRYPTD=m 668 664 CONFIG_CRYPTO_MCRYPTD=m 669 665 CONFIG_CRYPTO_TEST=m 670 - CONFIG_CRYPTO_GCM=m 671 666 CONFIG_CRYPTO_CHACHA20POLY1305=m 672 667 CONFIG_CRYPTO_LRW=m 673 668 CONFIG_CRYPTO_PCBC=m ··· 718 707 CONFIG_CRYPTO_AES_S390=m 719 708 CONFIG_CRYPTO_GHASH_S390=m 720 709 CONFIG_CRYPTO_CRC32_S390=y 721 - CONFIG_ASYMMETRIC_KEY_TYPE=y 722 - CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m 723 - CONFIG_X509_CERTIFICATE_PARSER=m 710 + CONFIG_PKCS7_MESSAGE_PARSER=y 711 + CONFIG_SYSTEM_TRUSTED_KEYRING=y 724 712 CONFIG_CRC7=m 725 713 CONFIG_CRC8=m 726 714 CONFIG_RANDOM32_SELFTEST=y
-661
arch/s390/configs/gcov_defconfig
··· 1 - CONFIG_SYSVIPC=y 2 - CONFIG_POSIX_MQUEUE=y 3 - CONFIG_AUDIT=y 4 - CONFIG_NO_HZ_IDLE=y 5 - CONFIG_HIGH_RES_TIMERS=y 6 - CONFIG_BSD_PROCESS_ACCT=y 7 - CONFIG_BSD_PROCESS_ACCT_V3=y 8 - CONFIG_TASKSTATS=y 9 - CONFIG_TASK_DELAY_ACCT=y 10 - CONFIG_TASK_XACCT=y 11 - CONFIG_TASK_IO_ACCOUNTING=y 12 - CONFIG_IKCONFIG=y 13 - CONFIG_IKCONFIG_PROC=y 14 - CONFIG_NUMA_BALANCING=y 15 - # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set 16 - CONFIG_MEMCG=y 17 - CONFIG_MEMCG_SWAP=y 18 - CONFIG_BLK_CGROUP=y 19 - CONFIG_CFS_BANDWIDTH=y 20 - CONFIG_RT_GROUP_SCHED=y 21 - CONFIG_CGROUP_PIDS=y 22 - CONFIG_CGROUP_FREEZER=y 23 - CONFIG_CGROUP_HUGETLB=y 24 - CONFIG_CPUSETS=y 25 - CONFIG_CGROUP_DEVICE=y 26 - CONFIG_CGROUP_CPUACCT=y 27 - CONFIG_CGROUP_PERF=y 28 - CONFIG_CHECKPOINT_RESTORE=y 29 - CONFIG_NAMESPACES=y 30 - CONFIG_USER_NS=y 31 - CONFIG_SCHED_AUTOGROUP=y 32 - CONFIG_BLK_DEV_INITRD=y 33 - CONFIG_EXPERT=y 34 - # CONFIG_SYSFS_SYSCALL is not set 35 - CONFIG_BPF_SYSCALL=y 36 - CONFIG_USERFAULTFD=y 37 - # CONFIG_COMPAT_BRK is not set 38 - CONFIG_PROFILING=y 39 - CONFIG_OPROFILE=m 40 - CONFIG_KPROBES=y 41 - CONFIG_JUMP_LABEL=y 42 - CONFIG_GCOV_KERNEL=y 43 - CONFIG_GCOV_PROFILE_ALL=y 44 - CONFIG_MODULES=y 45 - CONFIG_MODULE_FORCE_LOAD=y 46 - CONFIG_MODULE_UNLOAD=y 47 - CONFIG_MODULE_FORCE_UNLOAD=y 48 - CONFIG_MODVERSIONS=y 49 - CONFIG_MODULE_SRCVERSION_ALL=y 50 - CONFIG_BLK_DEV_INTEGRITY=y 51 - CONFIG_BLK_DEV_THROTTLING=y 52 - CONFIG_BLK_WBT=y 53 - CONFIG_BLK_WBT_SQ=y 54 - CONFIG_PARTITION_ADVANCED=y 55 - CONFIG_IBM_PARTITION=y 56 - CONFIG_BSD_DISKLABEL=y 57 - CONFIG_MINIX_SUBPARTITION=y 58 - CONFIG_SOLARIS_X86_PARTITION=y 59 - CONFIG_UNIXWARE_DISKLABEL=y 60 - CONFIG_CFQ_GROUP_IOSCHED=y 61 - CONFIG_DEFAULT_DEADLINE=y 62 - CONFIG_LIVEPATCH=y 63 - CONFIG_TUNE_ZEC12=y 64 - CONFIG_NR_CPUS=512 65 - CONFIG_NUMA=y 66 - CONFIG_HZ_100=y 67 - CONFIG_MEMORY_HOTPLUG=y 68 - CONFIG_MEMORY_HOTREMOVE=y 69 - CONFIG_KSM=y 70 - CONFIG_TRANSPARENT_HUGEPAGE=y 71 - CONFIG_CLEANCACHE=y 72 - 
CONFIG_FRONTSWAP=y 73 - CONFIG_MEM_SOFT_DIRTY=y 74 - CONFIG_ZSWAP=y 75 - CONFIG_ZBUD=m 76 - CONFIG_ZSMALLOC=m 77 - CONFIG_ZSMALLOC_STAT=y 78 - CONFIG_DEFERRED_STRUCT_PAGE_INIT=y 79 - CONFIG_IDLE_PAGE_TRACKING=y 80 - CONFIG_PCI=y 81 - CONFIG_HOTPLUG_PCI=y 82 - CONFIG_HOTPLUG_PCI_S390=y 83 - CONFIG_CHSC_SCH=y 84 - CONFIG_CRASH_DUMP=y 85 - CONFIG_BINFMT_MISC=m 86 - CONFIG_HIBERNATION=y 87 - CONFIG_NET=y 88 - CONFIG_PACKET=y 89 - CONFIG_PACKET_DIAG=m 90 - CONFIG_UNIX=y 91 - CONFIG_UNIX_DIAG=m 92 - CONFIG_XFRM_USER=m 93 - CONFIG_NET_KEY=m 94 - CONFIG_SMC=m 95 - CONFIG_SMC_DIAG=m 96 - CONFIG_INET=y 97 - CONFIG_IP_MULTICAST=y 98 - CONFIG_IP_ADVANCED_ROUTER=y 99 - CONFIG_IP_MULTIPLE_TABLES=y 100 - CONFIG_IP_ROUTE_MULTIPATH=y 101 - CONFIG_IP_ROUTE_VERBOSE=y 102 - CONFIG_NET_IPIP=m 103 - CONFIG_NET_IPGRE_DEMUX=m 104 - CONFIG_NET_IPGRE=m 105 - CONFIG_NET_IPGRE_BROADCAST=y 106 - CONFIG_IP_MROUTE=y 107 - CONFIG_IP_MROUTE_MULTIPLE_TABLES=y 108 - CONFIG_IP_PIMSM_V1=y 109 - CONFIG_IP_PIMSM_V2=y 110 - CONFIG_SYN_COOKIES=y 111 - CONFIG_NET_IPVTI=m 112 - CONFIG_INET_AH=m 113 - CONFIG_INET_ESP=m 114 - CONFIG_INET_IPCOMP=m 115 - CONFIG_INET_XFRM_MODE_TRANSPORT=m 116 - CONFIG_INET_XFRM_MODE_TUNNEL=m 117 - CONFIG_INET_XFRM_MODE_BEET=m 118 - CONFIG_INET_DIAG=m 119 - CONFIG_INET_UDP_DIAG=m 120 - CONFIG_TCP_CONG_ADVANCED=y 121 - CONFIG_TCP_CONG_HSTCP=m 122 - CONFIG_TCP_CONG_HYBLA=m 123 - CONFIG_TCP_CONG_SCALABLE=m 124 - CONFIG_TCP_CONG_LP=m 125 - CONFIG_TCP_CONG_VENO=m 126 - CONFIG_TCP_CONG_YEAH=m 127 - CONFIG_TCP_CONG_ILLINOIS=m 128 - CONFIG_IPV6_ROUTER_PREF=y 129 - CONFIG_INET6_AH=m 130 - CONFIG_INET6_ESP=m 131 - CONFIG_INET6_IPCOMP=m 132 - CONFIG_IPV6_MIP6=m 133 - CONFIG_INET6_XFRM_MODE_TRANSPORT=m 134 - CONFIG_INET6_XFRM_MODE_TUNNEL=m 135 - CONFIG_INET6_XFRM_MODE_BEET=m 136 - CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m 137 - CONFIG_IPV6_VTI=m 138 - CONFIG_IPV6_SIT=m 139 - CONFIG_IPV6_GRE=m 140 - CONFIG_IPV6_MULTIPLE_TABLES=y 141 - CONFIG_IPV6_SUBTREES=y 142 - CONFIG_NETFILTER=y 143 - 
CONFIG_NF_CONNTRACK=m 144 - CONFIG_NF_CONNTRACK_SECMARK=y 145 - CONFIG_NF_CONNTRACK_EVENTS=y 146 - CONFIG_NF_CONNTRACK_TIMEOUT=y 147 - CONFIG_NF_CONNTRACK_TIMESTAMP=y 148 - CONFIG_NF_CONNTRACK_AMANDA=m 149 - CONFIG_NF_CONNTRACK_FTP=m 150 - CONFIG_NF_CONNTRACK_H323=m 151 - CONFIG_NF_CONNTRACK_IRC=m 152 - CONFIG_NF_CONNTRACK_NETBIOS_NS=m 153 - CONFIG_NF_CONNTRACK_SNMP=m 154 - CONFIG_NF_CONNTRACK_PPTP=m 155 - CONFIG_NF_CONNTRACK_SANE=m 156 - CONFIG_NF_CONNTRACK_SIP=m 157 - CONFIG_NF_CONNTRACK_TFTP=m 158 - CONFIG_NF_CT_NETLINK=m 159 - CONFIG_NF_CT_NETLINK_TIMEOUT=m 160 - CONFIG_NF_TABLES=m 161 - CONFIG_NFT_EXTHDR=m 162 - CONFIG_NFT_META=m 163 - CONFIG_NFT_CT=m 164 - CONFIG_NFT_COUNTER=m 165 - CONFIG_NFT_LOG=m 166 - CONFIG_NFT_LIMIT=m 167 - CONFIG_NFT_NAT=m 168 - CONFIG_NFT_COMPAT=m 169 - CONFIG_NFT_HASH=m 170 - CONFIG_NETFILTER_XT_SET=m 171 - CONFIG_NETFILTER_XT_TARGET_AUDIT=m 172 - CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 173 - CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 174 - CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 175 - CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m 176 - CONFIG_NETFILTER_XT_TARGET_CT=m 177 - CONFIG_NETFILTER_XT_TARGET_DSCP=m 178 - CONFIG_NETFILTER_XT_TARGET_HMARK=m 179 - CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m 180 - CONFIG_NETFILTER_XT_TARGET_LOG=m 181 - CONFIG_NETFILTER_XT_TARGET_MARK=m 182 - CONFIG_NETFILTER_XT_TARGET_NFLOG=m 183 - CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 184 - CONFIG_NETFILTER_XT_TARGET_TEE=m 185 - CONFIG_NETFILTER_XT_TARGET_TPROXY=m 186 - CONFIG_NETFILTER_XT_TARGET_TRACE=m 187 - CONFIG_NETFILTER_XT_TARGET_SECMARK=m 188 - CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 189 - CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 190 - CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m 191 - CONFIG_NETFILTER_XT_MATCH_BPF=m 192 - CONFIG_NETFILTER_XT_MATCH_CLUSTER=m 193 - CONFIG_NETFILTER_XT_MATCH_COMMENT=m 194 - CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m 195 - CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m 196 - CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 197 - CONFIG_NETFILTER_XT_MATCH_CONNMARK=m 198 - 
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 199 - CONFIG_NETFILTER_XT_MATCH_CPU=m 200 - CONFIG_NETFILTER_XT_MATCH_DCCP=m 201 - CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m 202 - CONFIG_NETFILTER_XT_MATCH_DSCP=m 203 - CONFIG_NETFILTER_XT_MATCH_ESP=m 204 - CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 205 - CONFIG_NETFILTER_XT_MATCH_HELPER=m 206 - CONFIG_NETFILTER_XT_MATCH_IPRANGE=m 207 - CONFIG_NETFILTER_XT_MATCH_IPVS=m 208 - CONFIG_NETFILTER_XT_MATCH_LENGTH=m 209 - CONFIG_NETFILTER_XT_MATCH_LIMIT=m 210 - CONFIG_NETFILTER_XT_MATCH_MAC=m 211 - CONFIG_NETFILTER_XT_MATCH_MARK=m 212 - CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 213 - CONFIG_NETFILTER_XT_MATCH_NFACCT=m 214 - CONFIG_NETFILTER_XT_MATCH_OSF=m 215 - CONFIG_NETFILTER_XT_MATCH_OWNER=m 216 - CONFIG_NETFILTER_XT_MATCH_POLICY=m 217 - CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m 218 - CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 219 - CONFIG_NETFILTER_XT_MATCH_QUOTA=m 220 - CONFIG_NETFILTER_XT_MATCH_RATEEST=m 221 - CONFIG_NETFILTER_XT_MATCH_REALM=m 222 - CONFIG_NETFILTER_XT_MATCH_RECENT=m 223 - CONFIG_NETFILTER_XT_MATCH_STATE=m 224 - CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 225 - CONFIG_NETFILTER_XT_MATCH_STRING=m 226 - CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 227 - CONFIG_NETFILTER_XT_MATCH_TIME=m 228 - CONFIG_NETFILTER_XT_MATCH_U32=m 229 - CONFIG_IP_SET=m 230 - CONFIG_IP_SET_BITMAP_IP=m 231 - CONFIG_IP_SET_BITMAP_IPMAC=m 232 - CONFIG_IP_SET_BITMAP_PORT=m 233 - CONFIG_IP_SET_HASH_IP=m 234 - CONFIG_IP_SET_HASH_IPPORT=m 235 - CONFIG_IP_SET_HASH_IPPORTIP=m 236 - CONFIG_IP_SET_HASH_IPPORTNET=m 237 - CONFIG_IP_SET_HASH_NETPORTNET=m 238 - CONFIG_IP_SET_HASH_NET=m 239 - CONFIG_IP_SET_HASH_NETNET=m 240 - CONFIG_IP_SET_HASH_NETPORT=m 241 - CONFIG_IP_SET_HASH_NETIFACE=m 242 - CONFIG_IP_SET_LIST_SET=m 243 - CONFIG_IP_VS=m 244 - CONFIG_IP_VS_PROTO_TCP=y 245 - CONFIG_IP_VS_PROTO_UDP=y 246 - CONFIG_IP_VS_PROTO_ESP=y 247 - CONFIG_IP_VS_PROTO_AH=y 248 - CONFIG_IP_VS_RR=m 249 - CONFIG_IP_VS_WRR=m 250 - CONFIG_IP_VS_LC=m 251 - CONFIG_IP_VS_WLC=m 252 - CONFIG_IP_VS_LBLC=m 253 - 
CONFIG_IP_VS_LBLCR=m 254 - CONFIG_IP_VS_DH=m 255 - CONFIG_IP_VS_SH=m 256 - CONFIG_IP_VS_SED=m 257 - CONFIG_IP_VS_NQ=m 258 - CONFIG_IP_VS_FTP=m 259 - CONFIG_IP_VS_PE_SIP=m 260 - CONFIG_NF_CONNTRACK_IPV4=m 261 - CONFIG_NF_TABLES_IPV4=m 262 - CONFIG_NFT_CHAIN_ROUTE_IPV4=m 263 - CONFIG_NF_TABLES_ARP=m 264 - CONFIG_NFT_CHAIN_NAT_IPV4=m 265 - CONFIG_IP_NF_IPTABLES=m 266 - CONFIG_IP_NF_MATCH_AH=m 267 - CONFIG_IP_NF_MATCH_ECN=m 268 - CONFIG_IP_NF_MATCH_RPFILTER=m 269 - CONFIG_IP_NF_MATCH_TTL=m 270 - CONFIG_IP_NF_FILTER=m 271 - CONFIG_IP_NF_TARGET_REJECT=m 272 - CONFIG_IP_NF_NAT=m 273 - CONFIG_IP_NF_TARGET_MASQUERADE=m 274 - CONFIG_IP_NF_MANGLE=m 275 - CONFIG_IP_NF_TARGET_CLUSTERIP=m 276 - CONFIG_IP_NF_TARGET_ECN=m 277 - CONFIG_IP_NF_TARGET_TTL=m 278 - CONFIG_IP_NF_RAW=m 279 - CONFIG_IP_NF_SECURITY=m 280 - CONFIG_IP_NF_ARPTABLES=m 281 - CONFIG_IP_NF_ARPFILTER=m 282 - CONFIG_IP_NF_ARP_MANGLE=m 283 - CONFIG_NF_CONNTRACK_IPV6=m 284 - CONFIG_NF_TABLES_IPV6=m 285 - CONFIG_NFT_CHAIN_ROUTE_IPV6=m 286 - CONFIG_NFT_CHAIN_NAT_IPV6=m 287 - CONFIG_IP6_NF_IPTABLES=m 288 - CONFIG_IP6_NF_MATCH_AH=m 289 - CONFIG_IP6_NF_MATCH_EUI64=m 290 - CONFIG_IP6_NF_MATCH_FRAG=m 291 - CONFIG_IP6_NF_MATCH_OPTS=m 292 - CONFIG_IP6_NF_MATCH_HL=m 293 - CONFIG_IP6_NF_MATCH_IPV6HEADER=m 294 - CONFIG_IP6_NF_MATCH_MH=m 295 - CONFIG_IP6_NF_MATCH_RPFILTER=m 296 - CONFIG_IP6_NF_MATCH_RT=m 297 - CONFIG_IP6_NF_TARGET_HL=m 298 - CONFIG_IP6_NF_FILTER=m 299 - CONFIG_IP6_NF_TARGET_REJECT=m 300 - CONFIG_IP6_NF_MANGLE=m 301 - CONFIG_IP6_NF_RAW=m 302 - CONFIG_IP6_NF_SECURITY=m 303 - CONFIG_IP6_NF_NAT=m 304 - CONFIG_IP6_NF_TARGET_MASQUERADE=m 305 - CONFIG_NF_TABLES_BRIDGE=m 306 - CONFIG_NET_SCTPPROBE=m 307 - CONFIG_RDS=m 308 - CONFIG_RDS_RDMA=m 309 - CONFIG_RDS_TCP=m 310 - CONFIG_L2TP=m 311 - CONFIG_L2TP_DEBUGFS=m 312 - CONFIG_L2TP_V3=y 313 - CONFIG_L2TP_IP=m 314 - CONFIG_L2TP_ETH=m 315 - CONFIG_BRIDGE=m 316 - CONFIG_VLAN_8021Q=m 317 - CONFIG_VLAN_8021Q_GVRP=y 318 - CONFIG_NET_SCHED=y 319 - CONFIG_NET_SCH_CBQ=m 320 - 
CONFIG_NET_SCH_HTB=m 321 - CONFIG_NET_SCH_HFSC=m 322 - CONFIG_NET_SCH_PRIO=m 323 - CONFIG_NET_SCH_MULTIQ=m 324 - CONFIG_NET_SCH_RED=m 325 - CONFIG_NET_SCH_SFB=m 326 - CONFIG_NET_SCH_SFQ=m 327 - CONFIG_NET_SCH_TEQL=m 328 - CONFIG_NET_SCH_TBF=m 329 - CONFIG_NET_SCH_GRED=m 330 - CONFIG_NET_SCH_DSMARK=m 331 - CONFIG_NET_SCH_NETEM=m 332 - CONFIG_NET_SCH_DRR=m 333 - CONFIG_NET_SCH_MQPRIO=m 334 - CONFIG_NET_SCH_CHOKE=m 335 - CONFIG_NET_SCH_QFQ=m 336 - CONFIG_NET_SCH_CODEL=m 337 - CONFIG_NET_SCH_FQ_CODEL=m 338 - CONFIG_NET_SCH_INGRESS=m 339 - CONFIG_NET_SCH_PLUG=m 340 - CONFIG_NET_CLS_BASIC=m 341 - CONFIG_NET_CLS_TCINDEX=m 342 - CONFIG_NET_CLS_ROUTE4=m 343 - CONFIG_NET_CLS_FW=m 344 - CONFIG_NET_CLS_U32=m 345 - CONFIG_CLS_U32_PERF=y 346 - CONFIG_CLS_U32_MARK=y 347 - CONFIG_NET_CLS_RSVP=m 348 - CONFIG_NET_CLS_RSVP6=m 349 - CONFIG_NET_CLS_FLOW=m 350 - CONFIG_NET_CLS_CGROUP=y 351 - CONFIG_NET_CLS_BPF=m 352 - CONFIG_NET_CLS_ACT=y 353 - CONFIG_NET_ACT_POLICE=m 354 - CONFIG_NET_ACT_GACT=m 355 - CONFIG_GACT_PROB=y 356 - CONFIG_NET_ACT_MIRRED=m 357 - CONFIG_NET_ACT_IPT=m 358 - CONFIG_NET_ACT_NAT=m 359 - CONFIG_NET_ACT_PEDIT=m 360 - CONFIG_NET_ACT_SIMP=m 361 - CONFIG_NET_ACT_SKBEDIT=m 362 - CONFIG_NET_ACT_CSUM=m 363 - CONFIG_DNS_RESOLVER=y 364 - CONFIG_NETLINK_DIAG=m 365 - CONFIG_CGROUP_NET_PRIO=y 366 - CONFIG_BPF_JIT=y 367 - CONFIG_NET_PKTGEN=m 368 - CONFIG_NET_TCPPROBE=m 369 - CONFIG_DEVTMPFS=y 370 - CONFIG_DMA_CMA=y 371 - CONFIG_CMA_SIZE_MBYTES=0 372 - CONFIG_CONNECTOR=y 373 - CONFIG_ZRAM=m 374 - CONFIG_BLK_DEV_LOOP=m 375 - CONFIG_BLK_DEV_CRYPTOLOOP=m 376 - CONFIG_BLK_DEV_DRBD=m 377 - CONFIG_BLK_DEV_NBD=m 378 - CONFIG_BLK_DEV_RAM=y 379 - CONFIG_BLK_DEV_RAM_SIZE=32768 380 - CONFIG_BLK_DEV_RAM_DAX=y 381 - CONFIG_VIRTIO_BLK=y 382 - CONFIG_ENCLOSURE_SERVICES=m 383 - CONFIG_GENWQE=m 384 - CONFIG_RAID_ATTRS=m 385 - CONFIG_SCSI=y 386 - CONFIG_BLK_DEV_SD=y 387 - CONFIG_CHR_DEV_ST=m 388 - CONFIG_CHR_DEV_OSST=m 389 - CONFIG_BLK_DEV_SR=m 390 - CONFIG_CHR_DEV_SG=y 391 - CONFIG_CHR_DEV_SCH=m 
392 - CONFIG_SCSI_ENCLOSURE=m 393 - CONFIG_SCSI_CONSTANTS=y 394 - CONFIG_SCSI_LOGGING=y 395 - CONFIG_SCSI_SPI_ATTRS=m 396 - CONFIG_SCSI_FC_ATTRS=y 397 - CONFIG_SCSI_SAS_LIBSAS=m 398 - CONFIG_SCSI_SRP_ATTRS=m 399 - CONFIG_ISCSI_TCP=m 400 - CONFIG_SCSI_DEBUG=m 401 - CONFIG_ZFCP=y 402 - CONFIG_SCSI_VIRTIO=m 403 - CONFIG_SCSI_DH=y 404 - CONFIG_SCSI_DH_RDAC=m 405 - CONFIG_SCSI_DH_HP_SW=m 406 - CONFIG_SCSI_DH_EMC=m 407 - CONFIG_SCSI_DH_ALUA=m 408 - CONFIG_SCSI_OSD_INITIATOR=m 409 - CONFIG_SCSI_OSD_ULD=m 410 - CONFIG_MD=y 411 - CONFIG_BLK_DEV_MD=y 412 - CONFIG_MD_LINEAR=m 413 - CONFIG_MD_MULTIPATH=m 414 - CONFIG_MD_FAULTY=m 415 - CONFIG_BLK_DEV_DM=m 416 - CONFIG_DM_CRYPT=m 417 - CONFIG_DM_SNAPSHOT=m 418 - CONFIG_DM_THIN_PROVISIONING=m 419 - CONFIG_DM_MIRROR=m 420 - CONFIG_DM_LOG_USERSPACE=m 421 - CONFIG_DM_RAID=m 422 - CONFIG_DM_ZERO=m 423 - CONFIG_DM_MULTIPATH=m 424 - CONFIG_DM_MULTIPATH_QL=m 425 - CONFIG_DM_MULTIPATH_ST=m 426 - CONFIG_DM_DELAY=m 427 - CONFIG_DM_UEVENT=y 428 - CONFIG_DM_FLAKEY=m 429 - CONFIG_DM_VERITY=m 430 - CONFIG_DM_SWITCH=m 431 - CONFIG_NETDEVICES=y 432 - CONFIG_BONDING=m 433 - CONFIG_DUMMY=m 434 - CONFIG_EQUALIZER=m 435 - CONFIG_IFB=m 436 - CONFIG_MACVLAN=m 437 - CONFIG_MACVTAP=m 438 - CONFIG_VXLAN=m 439 - CONFIG_TUN=m 440 - CONFIG_VETH=m 441 - CONFIG_VIRTIO_NET=m 442 - CONFIG_NLMON=m 443 - # CONFIG_NET_VENDOR_ARC is not set 444 - # CONFIG_NET_VENDOR_CHELSIO is not set 445 - # CONFIG_NET_VENDOR_INTEL is not set 446 - # CONFIG_NET_VENDOR_MARVELL is not set 447 - CONFIG_MLX4_EN=m 448 - CONFIG_MLX5_CORE=m 449 - CONFIG_MLX5_CORE_EN=y 450 - # CONFIG_NET_VENDOR_NATSEMI is not set 451 - CONFIG_PPP=m 452 - CONFIG_PPP_BSDCOMP=m 453 - CONFIG_PPP_DEFLATE=m 454 - CONFIG_PPP_MPPE=m 455 - CONFIG_PPPOE=m 456 - CONFIG_PPTP=m 457 - CONFIG_PPPOL2TP=m 458 - CONFIG_PPP_ASYNC=m 459 - CONFIG_PPP_SYNC_TTY=m 460 - # CONFIG_INPUT_KEYBOARD is not set 461 - # CONFIG_INPUT_MOUSE is not set 462 - # CONFIG_SERIO is not set 463 - CONFIG_LEGACY_PTY_COUNT=0 464 - 
CONFIG_HW_RANDOM_VIRTIO=m 465 - CONFIG_RAW_DRIVER=m 466 - CONFIG_HANGCHECK_TIMER=m 467 - CONFIG_TN3270_FS=y 468 - # CONFIG_HWMON is not set 469 - CONFIG_WATCHDOG=y 470 - CONFIG_WATCHDOG_NOWAYOUT=y 471 - CONFIG_SOFT_WATCHDOG=m 472 - CONFIG_DIAG288_WATCHDOG=m 473 - # CONFIG_HID is not set 474 - # CONFIG_USB_SUPPORT is not set 475 - CONFIG_INFINIBAND=m 476 - CONFIG_INFINIBAND_USER_ACCESS=m 477 - CONFIG_MLX4_INFINIBAND=m 478 - CONFIG_MLX5_INFINIBAND=m 479 - CONFIG_VFIO=m 480 - CONFIG_VFIO_PCI=m 481 - CONFIG_VIRTIO_BALLOON=m 482 - CONFIG_EXT4_FS=y 483 - CONFIG_EXT4_FS_POSIX_ACL=y 484 - CONFIG_EXT4_FS_SECURITY=y 485 - CONFIG_EXT4_ENCRYPTION=y 486 - CONFIG_JBD2_DEBUG=y 487 - CONFIG_JFS_FS=m 488 - CONFIG_JFS_POSIX_ACL=y 489 - CONFIG_JFS_SECURITY=y 490 - CONFIG_JFS_STATISTICS=y 491 - CONFIG_XFS_FS=y 492 - CONFIG_XFS_QUOTA=y 493 - CONFIG_XFS_POSIX_ACL=y 494 - CONFIG_XFS_RT=y 495 - CONFIG_GFS2_FS=m 496 - CONFIG_GFS2_FS_LOCKING_DLM=y 497 - CONFIG_OCFS2_FS=m 498 - CONFIG_BTRFS_FS=y 499 - CONFIG_BTRFS_FS_POSIX_ACL=y 500 - CONFIG_NILFS2_FS=m 501 - CONFIG_FS_DAX=y 502 - CONFIG_EXPORTFS_BLOCK_OPS=y 503 - CONFIG_FANOTIFY=y 504 - CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 505 - CONFIG_QUOTA_NETLINK_INTERFACE=y 506 - CONFIG_QFMT_V1=m 507 - CONFIG_QFMT_V2=m 508 - CONFIG_AUTOFS4_FS=m 509 - CONFIG_FUSE_FS=y 510 - CONFIG_CUSE=m 511 - CONFIG_OVERLAY_FS=m 512 - CONFIG_OVERLAY_FS_REDIRECT_DIR=y 513 - CONFIG_FSCACHE=m 514 - CONFIG_CACHEFILES=m 515 - CONFIG_ISO9660_FS=y 516 - CONFIG_JOLIET=y 517 - CONFIG_ZISOFS=y 518 - CONFIG_UDF_FS=m 519 - CONFIG_MSDOS_FS=m 520 - CONFIG_VFAT_FS=m 521 - CONFIG_NTFS_FS=m 522 - CONFIG_NTFS_RW=y 523 - CONFIG_PROC_KCORE=y 524 - CONFIG_TMPFS=y 525 - CONFIG_TMPFS_POSIX_ACL=y 526 - CONFIG_HUGETLBFS=y 527 - CONFIG_CONFIGFS_FS=m 528 - CONFIG_ECRYPT_FS=m 529 - CONFIG_CRAMFS=m 530 - CONFIG_SQUASHFS=m 531 - CONFIG_SQUASHFS_XATTR=y 532 - CONFIG_SQUASHFS_LZO=y 533 - CONFIG_SQUASHFS_XZ=y 534 - CONFIG_ROMFS_FS=m 535 - CONFIG_NFS_FS=m 536 - CONFIG_NFS_V3_ACL=y 537 - CONFIG_NFS_V4=m 
538 - CONFIG_NFS_SWAP=y 539 - CONFIG_NFSD=m 540 - CONFIG_NFSD_V3_ACL=y 541 - CONFIG_NFSD_V4=y 542 - CONFIG_NFSD_V4_SECURITY_LABEL=y 543 - CONFIG_CIFS=m 544 - CONFIG_CIFS_STATS=y 545 - CONFIG_CIFS_STATS2=y 546 - CONFIG_CIFS_WEAK_PW_HASH=y 547 - CONFIG_CIFS_UPCALL=y 548 - CONFIG_CIFS_XATTR=y 549 - CONFIG_CIFS_POSIX=y 550 - # CONFIG_CIFS_DEBUG is not set 551 - CONFIG_CIFS_DFS_UPCALL=y 552 - CONFIG_NLS_DEFAULT="utf8" 553 - CONFIG_NLS_CODEPAGE_437=m 554 - CONFIG_NLS_CODEPAGE_850=m 555 - CONFIG_NLS_ASCII=m 556 - CONFIG_NLS_ISO8859_1=m 557 - CONFIG_NLS_ISO8859_15=m 558 - CONFIG_NLS_UTF8=m 559 - CONFIG_DLM=m 560 - CONFIG_PRINTK_TIME=y 561 - CONFIG_DEBUG_INFO=y 562 - CONFIG_DEBUG_INFO_DWARF4=y 563 - CONFIG_GDB_SCRIPTS=y 564 - # CONFIG_ENABLE_MUST_CHECK is not set 565 - CONFIG_FRAME_WARN=1024 566 - CONFIG_UNUSED_SYMBOLS=y 567 - CONFIG_MAGIC_SYSRQ=y 568 - CONFIG_DEBUG_MEMORY_INIT=y 569 - CONFIG_PANIC_ON_OOPS=y 570 - CONFIG_RCU_TORTURE_TEST=m 571 - CONFIG_RCU_CPU_STALL_TIMEOUT=60 572 - CONFIG_LATENCYTOP=y 573 - CONFIG_SCHED_TRACER=y 574 - CONFIG_FTRACE_SYSCALLS=y 575 - CONFIG_STACK_TRACER=y 576 - CONFIG_BLK_DEV_IO_TRACE=y 577 - CONFIG_FUNCTION_PROFILER=y 578 - CONFIG_HIST_TRIGGERS=y 579 - CONFIG_LKDTM=m 580 - CONFIG_PERCPU_TEST=m 581 - CONFIG_ATOMIC64_SELFTEST=y 582 - CONFIG_TEST_BPF=m 583 - CONFIG_BUG_ON_DATA_CORRUPTION=y 584 - CONFIG_S390_PTDUMP=y 585 - CONFIG_PERSISTENT_KEYRINGS=y 586 - CONFIG_BIG_KEYS=y 587 - CONFIG_ENCRYPTED_KEYS=m 588 - CONFIG_SECURITY=y 589 - CONFIG_SECURITY_NETWORK=y 590 - CONFIG_SECURITY_SELINUX=y 591 - CONFIG_SECURITY_SELINUX_BOOTPARAM=y 592 - CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 593 - CONFIG_SECURITY_SELINUX_DISABLE=y 594 - CONFIG_INTEGRITY_SIGNATURE=y 595 - CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 596 - CONFIG_IMA=y 597 - CONFIG_IMA_WRITE_POLICY=y 598 - CONFIG_IMA_APPRAISE=y 599 - CONFIG_CRYPTO_DH=m 600 - CONFIG_CRYPTO_ECDH=m 601 - CONFIG_CRYPTO_USER=m 602 - # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 603 - CONFIG_CRYPTO_PCRYPT=m 604 - 
CONFIG_CRYPTO_CRYPTD=m 605 - CONFIG_CRYPTO_MCRYPTD=m 606 - CONFIG_CRYPTO_TEST=m 607 - CONFIG_CRYPTO_CHACHA20POLY1305=m 608 - CONFIG_CRYPTO_LRW=m 609 - CONFIG_CRYPTO_PCBC=m 610 - CONFIG_CRYPTO_KEYWRAP=m 611 - CONFIG_CRYPTO_XCBC=m 612 - CONFIG_CRYPTO_VMAC=m 613 - CONFIG_CRYPTO_CRC32=m 614 - CONFIG_CRYPTO_MICHAEL_MIC=m 615 - CONFIG_CRYPTO_RMD128=m 616 - CONFIG_CRYPTO_RMD160=m 617 - CONFIG_CRYPTO_RMD256=m 618 - CONFIG_CRYPTO_RMD320=m 619 - CONFIG_CRYPTO_SHA512=m 620 - CONFIG_CRYPTO_SHA3=m 621 - CONFIG_CRYPTO_TGR192=m 622 - CONFIG_CRYPTO_WP512=m 623 - CONFIG_CRYPTO_AES_TI=m 624 - CONFIG_CRYPTO_ANUBIS=m 625 - CONFIG_CRYPTO_BLOWFISH=m 626 - CONFIG_CRYPTO_CAMELLIA=m 627 - CONFIG_CRYPTO_CAST5=m 628 - CONFIG_CRYPTO_CAST6=m 629 - CONFIG_CRYPTO_FCRYPT=m 630 - CONFIG_CRYPTO_KHAZAD=m 631 - CONFIG_CRYPTO_SALSA20=m 632 - CONFIG_CRYPTO_SEED=m 633 - CONFIG_CRYPTO_SERPENT=m 634 - CONFIG_CRYPTO_TEA=m 635 - CONFIG_CRYPTO_TWOFISH=m 636 - CONFIG_CRYPTO_842=m 637 - CONFIG_CRYPTO_LZ4=m 638 - CONFIG_CRYPTO_LZ4HC=m 639 - CONFIG_CRYPTO_ANSI_CPRNG=m 640 - CONFIG_CRYPTO_USER_API_HASH=m 641 - CONFIG_CRYPTO_USER_API_SKCIPHER=m 642 - CONFIG_CRYPTO_USER_API_RNG=m 643 - CONFIG_CRYPTO_USER_API_AEAD=m 644 - CONFIG_ZCRYPT=m 645 - CONFIG_PKEY=m 646 - CONFIG_CRYPTO_PAES_S390=m 647 - CONFIG_CRYPTO_SHA1_S390=m 648 - CONFIG_CRYPTO_SHA256_S390=m 649 - CONFIG_CRYPTO_SHA512_S390=m 650 - CONFIG_CRYPTO_DES_S390=m 651 - CONFIG_CRYPTO_AES_S390=m 652 - CONFIG_CRYPTO_GHASH_S390=m 653 - CONFIG_CRYPTO_CRC32_S390=y 654 - CONFIG_CRC7=m 655 - CONFIG_CRC8=m 656 - CONFIG_CORDIC=m 657 - CONFIG_CMM=m 658 - CONFIG_APPLDATA_BASE=y 659 - CONFIG_KVM=m 660 - CONFIG_KVM_S390_UCONTROL=y 661 - CONFIG_VHOST_NET=m
+15 -5
arch/s390/configs/performance_defconfig
··· 25 25 CONFIG_CGROUP_DEVICE=y 26 26 CONFIG_CGROUP_CPUACCT=y 27 27 CONFIG_CGROUP_PERF=y 28 - CONFIG_CHECKPOINT_RESTORE=y 29 28 CONFIG_NAMESPACES=y 30 29 CONFIG_USER_NS=y 31 30 CONFIG_SCHED_AUTOGROUP=y 32 31 CONFIG_BLK_DEV_INITRD=y 33 32 CONFIG_EXPERT=y 34 33 # CONFIG_SYSFS_SYSCALL is not set 34 + CONFIG_CHECKPOINT_RESTORE=y 35 35 CONFIG_BPF_SYSCALL=y 36 36 CONFIG_USERFAULTFD=y 37 37 # CONFIG_COMPAT_BRK is not set ··· 45 45 CONFIG_MODULE_FORCE_UNLOAD=y 46 46 CONFIG_MODVERSIONS=y 47 47 CONFIG_MODULE_SRCVERSION_ALL=y 48 + CONFIG_MODULE_SIG=y 49 + CONFIG_MODULE_SIG_SHA256=y 48 50 CONFIG_BLK_DEV_INTEGRITY=y 49 51 CONFIG_BLK_DEV_THROTTLING=y 50 52 CONFIG_BLK_WBT=y ··· 64 62 CONFIG_NR_CPUS=512 65 63 CONFIG_NUMA=y 66 64 CONFIG_HZ_100=y 65 + CONFIG_KEXEC_FILE=y 67 66 CONFIG_MEMORY_HOTPLUG=y 68 67 CONFIG_MEMORY_HOTREMOVE=y 69 68 CONFIG_KSM=y ··· 304 301 CONFIG_IP6_NF_NAT=m 305 302 CONFIG_IP6_NF_TARGET_MASQUERADE=m 306 303 CONFIG_NF_TABLES_BRIDGE=m 307 - CONFIG_NET_SCTPPROBE=m 308 304 CONFIG_RDS=m 309 305 CONFIG_RDS_RDMA=m 310 306 CONFIG_RDS_TCP=m ··· 361 359 CONFIG_NET_ACT_SKBEDIT=m 362 360 CONFIG_NET_ACT_CSUM=m 363 361 CONFIG_DNS_RESOLVER=y 362 + CONFIG_OPENVSWITCH=m 364 363 CONFIG_NETLINK_DIAG=m 365 364 CONFIG_CGROUP_NET_PRIO=y 366 365 CONFIG_BPF_JIT=y 367 366 CONFIG_NET_PKTGEN=m 368 - CONFIG_NET_TCPPROBE=m 369 367 CONFIG_DEVTMPFS=y 370 368 CONFIG_DMA_CMA=y 371 369 CONFIG_CMA_SIZE_MBYTES=0 ··· 377 375 CONFIG_BLK_DEV_NBD=m 378 376 CONFIG_BLK_DEV_RAM=y 379 377 CONFIG_BLK_DEV_RAM_SIZE=32768 380 - CONFIG_BLK_DEV_RAM_DAX=y 381 378 CONFIG_VIRTIO_BLK=y 379 + CONFIG_BLK_DEV_RBD=m 380 + CONFIG_BLK_DEV_NVME=m 382 381 CONFIG_ENCLOSURE_SERVICES=m 383 382 CONFIG_GENWQE=m 384 383 CONFIG_RAID_ATTRS=m ··· 458 455 CONFIG_PPPOL2TP=m 459 456 CONFIG_PPP_ASYNC=m 460 457 CONFIG_PPP_SYNC_TTY=m 458 + CONFIG_INPUT_EVDEV=y 461 459 # CONFIG_INPUT_KEYBOARD is not set 462 460 # CONFIG_INPUT_MOUSE is not set 463 461 # CONFIG_SERIO is not set ··· 472 468 CONFIG_WATCHDOG_NOWAYOUT=y 473 469 
CONFIG_SOFT_WATCHDOG=m 474 470 CONFIG_DIAG288_WATCHDOG=m 471 + CONFIG_DRM=y 472 + CONFIG_DRM_VIRTIO_GPU=y 473 + CONFIG_FRAMEBUFFER_CONSOLE=y 475 474 # CONFIG_HID is not set 476 475 # CONFIG_USB_SUPPORT is not set 477 476 CONFIG_INFINIBAND=m ··· 483 476 CONFIG_MLX5_INFINIBAND=m 484 477 CONFIG_VFIO=m 485 478 CONFIG_VFIO_PCI=m 479 + CONFIG_VIRTIO_PCI=m 486 480 CONFIG_VIRTIO_BALLOON=m 481 + CONFIG_VIRTIO_INPUT=y 487 482 CONFIG_EXT4_FS=y 488 483 CONFIG_EXT4_FS_POSIX_ACL=y 489 484 CONFIG_EXT4_FS_SECURITY=y ··· 516 507 CONFIG_FUSE_FS=y 517 508 CONFIG_CUSE=m 518 509 CONFIG_OVERLAY_FS=m 519 - CONFIG_OVERLAY_FS_REDIRECT_DIR=y 520 510 CONFIG_FSCACHE=m 521 511 CONFIG_CACHEFILES=m 522 512 CONFIG_ISO9660_FS=y ··· 600 592 CONFIG_INTEGRITY_SIGNATURE=y 601 593 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 602 594 CONFIG_IMA=y 595 + CONFIG_IMA_DEFAULT_HASH_SHA256=y 603 596 CONFIG_IMA_WRITE_POLICY=y 604 597 CONFIG_IMA_APPRAISE=y 598 + CONFIG_CRYPTO_FIPS=y 605 599 CONFIG_CRYPTO_DH=m 606 600 CONFIG_CRYPTO_ECDH=m 607 601 CONFIG_CRYPTO_USER=m
+11 -2
arch/s390/defconfig
··· 8 8 CONFIG_TASK_DELAY_ACCT=y 9 9 CONFIG_TASK_XACCT=y 10 10 CONFIG_TASK_IO_ACCOUNTING=y 11 + # CONFIG_CPU_ISOLATION is not set 11 12 CONFIG_IKCONFIG=y 12 13 CONFIG_IKCONFIG_PROC=y 13 14 CONFIG_CGROUPS=y ··· 24 23 CONFIG_CGROUP_DEVICE=y 25 24 CONFIG_CGROUP_CPUACCT=y 26 25 CONFIG_CGROUP_PERF=y 27 - CONFIG_CHECKPOINT_RESTORE=y 28 26 CONFIG_NAMESPACES=y 29 27 CONFIG_USER_NS=y 30 28 CONFIG_BLK_DEV_INITRD=y 31 29 CONFIG_EXPERT=y 32 30 # CONFIG_SYSFS_SYSCALL is not set 31 + CONFIG_CHECKPOINT_RESTORE=y 33 32 CONFIG_BPF_SYSCALL=y 34 33 CONFIG_USERFAULTFD=y 35 34 # CONFIG_COMPAT_BRK is not set ··· 48 47 CONFIG_NR_CPUS=256 49 48 CONFIG_NUMA=y 50 49 CONFIG_HZ_100=y 50 + CONFIG_KEXEC_FILE=y 51 51 CONFIG_MEMORY_HOTPLUG=y 52 52 CONFIG_MEMORY_HOTREMOVE=y 53 53 CONFIG_KSM=y ··· 131 129 CONFIG_TUN=m 132 130 CONFIG_VIRTIO_NET=y 133 131 # CONFIG_NET_VENDOR_ALACRITECH is not set 132 + # CONFIG_NET_VENDOR_CORTINA is not set 134 133 # CONFIG_NET_VENDOR_SOLARFLARE is not set 134 + # CONFIG_NET_VENDOR_SOCIONEXT is not set 135 135 # CONFIG_NET_VENDOR_SYNOPSYS is not set 136 136 # CONFIG_INPUT is not set 137 137 # CONFIG_SERIO is not set 138 + # CONFIG_VT is not set 138 139 CONFIG_DEVKMEM=y 139 140 CONFIG_RAW_DRIVER=m 140 141 CONFIG_VIRTIO_BALLOON=y ··· 182 177 CONFIG_STACK_TRACER=y 183 178 CONFIG_BLK_DEV_IO_TRACE=y 184 179 CONFIG_FUNCTION_PROFILER=y 185 - CONFIG_KPROBES_SANITY_TEST=y 180 + # CONFIG_RUNTIME_TESTING_MENU is not set 186 181 CONFIG_S390_PTDUMP=y 187 182 CONFIG_CRYPTO_CRYPTD=m 183 + CONFIG_CRYPTO_AUTHENC=m 188 184 CONFIG_CRYPTO_TEST=m 189 185 CONFIG_CRYPTO_CCM=m 190 186 CONFIG_CRYPTO_GCM=m 191 187 CONFIG_CRYPTO_CBC=y 188 + CONFIG_CRYPTO_CFB=m 192 189 CONFIG_CRYPTO_CTS=m 193 190 CONFIG_CRYPTO_LRW=m 194 191 CONFIG_CRYPTO_PCBC=m ··· 220 213 CONFIG_CRYPTO_SALSA20=m 221 214 CONFIG_CRYPTO_SEED=m 222 215 CONFIG_CRYPTO_SERPENT=m 216 + CONFIG_CRYPTO_SM4=m 217 + CONFIG_CRYPTO_SPECK=m 223 218 CONFIG_CRYPTO_TEA=m 224 219 CONFIG_CRYPTO_TWOFISH=m 225 220 CONFIG_CRYPTO_DEFLATE=m
+1 -1
arch/s390/hypfs/inode.c
··· 320 320 321 321 if (sb->s_root) 322 322 hypfs_delete_tree(sb->s_root); 323 - if (sb_info->update_file) 323 + if (sb_info && sb_info->update_file) 324 324 hypfs_remove(sb_info->update_file); 325 325 kfree(sb->s_fs_info); 326 326 sb->s_fs_info = NULL;
+23
arch/s390/include/asm/kexec.h
··· 46 46 static inline void crash_setup_regs(struct pt_regs *newregs, 47 47 struct pt_regs *oldregs) { } 48 48 49 + struct kimage; 50 + struct s390_load_data { 51 + /* Pointer to the kernel buffer. Used to register cmdline etc.. */ 52 + void *kernel_buf; 53 + 54 + /* Total size of loaded segments in memory. Used as an offset. */ 55 + size_t memsz; 56 + 57 + /* Load address of initrd. Used to register INITRD_START in kernel. */ 58 + unsigned long initrd_load_addr; 59 + }; 60 + 61 + int kexec_file_add_purgatory(struct kimage *image, 62 + struct s390_load_data *data); 63 + int kexec_file_add_initrd(struct kimage *image, 64 + struct s390_load_data *data, 65 + char *initrd, unsigned long initrd_len); 66 + int *kexec_file_update_kernel(struct kimage *iamge, 67 + struct s390_load_data *data); 68 + 69 + extern const struct kexec_file_ops s390_kexec_image_ops; 70 + extern const struct kexec_file_ops s390_kexec_elf_ops; 71 + 49 72 #endif /*_S390_KEXEC_H */
+17
arch/s390/include/asm/purgatory.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright IBM Corp. 2018 4 + * 5 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 6 + */ 7 + 8 + #ifndef _S390_PURGATORY_H_ 9 + #define _S390_PURGATORY_H_ 10 + #ifndef __ASSEMBLY__ 11 + 12 + #include <linux/purgatory.h> 13 + 14 + int verify_sha256_digest(void); 15 + 16 + #endif /* __ASSEMBLY__ */ 17 + #endif /* _S390_PURGATORY_H_ */
+27 -13
arch/s390/include/asm/setup.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * S390 version 4 - * Copyright IBM Corp. 1999, 2010 4 + * Copyright IBM Corp. 1999, 2017 5 5 */ 6 6 #ifndef _ASM_S390_SETUP_H 7 7 #define _ASM_S390_SETUP_H ··· 37 37 #define LPP_MAGIC _BITUL(31) 38 38 #define LPP_PID_MASK _AC(0xffffffff, UL) 39 39 40 + /* Offsets to entry points in kernel/head.S */ 41 + 42 + #define STARTUP_NORMAL_OFFSET 0x10000 43 + #define STARTUP_KDUMP_OFFSET 0x10010 44 + 45 + /* Offsets to parameters in kernel/head.S */ 46 + 47 + #define IPL_DEVICE_OFFSET 0x10400 48 + #define INITRD_START_OFFSET 0x10408 49 + #define INITRD_SIZE_OFFSET 0x10410 50 + #define OLDMEM_BASE_OFFSET 0x10418 51 + #define OLDMEM_SIZE_OFFSET 0x10420 52 + #define COMMAND_LINE_OFFSET 0x10480 53 + 40 54 #ifndef __ASSEMBLY__ 41 55 42 56 #include <asm/lowcore.h> 43 57 #include <asm/types.h> 44 58 45 - #define IPL_DEVICE (*(unsigned long *) (0x10400)) 46 - #define INITRD_START (*(unsigned long *) (0x10408)) 47 - #define INITRD_SIZE (*(unsigned long *) (0x10410)) 48 - #define OLDMEM_BASE (*(unsigned long *) (0x10418)) 49 - #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 50 - #define COMMAND_LINE ((char *) (0x10480)) 59 + #define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET)) 60 + #define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET)) 61 + #define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET)) 62 + #define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET)) 63 + #define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) 64 + #define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) 51 65 52 66 extern int memory_end_set; 53 67 extern unsigned long memory_end; ··· 135 121 136 122 #else /* __ASSEMBLY__ */ 137 123 138 - #define IPL_DEVICE 0x10400 139 - #define INITRD_START 0x10408 140 - #define INITRD_SIZE 0x10410 141 - #define OLDMEM_BASE 0x10418 142 - #define OLDMEM_SIZE 0x10420 143 - #define COMMAND_LINE 0x10480 124 + #define IPL_DEVICE (IPL_DEVICE_OFFSET) 125 + #define INITRD_START 
(INITRD_START_OFFSET) 126 + #define INITRD_SIZE (INITRD_SIZE_OFFSET) 127 + #define OLDMEM_BASE (OLDMEM_BASE_OFFSET) 128 + #define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET) 129 + #define COMMAND_LINE (COMMAND_LINE_OFFSET) 144 130 145 131 #endif /* __ASSEMBLY__ */ 146 132 #endif /* _ASM_S390_SETUP_H */
+16 -7
arch/s390/include/uapi/asm/signal.h
··· 97 97 #include <asm-generic/signal-defs.h> 98 98 99 99 #ifndef __KERNEL__ 100 - /* Here we must cater to libcs that poke about in kernel headers. */ 101 100 101 + /* 102 + * There are two system calls in regard to sigaction, sys_rt_sigaction 103 + * and sys_sigaction. Internally the kernel uses the struct old_sigaction 104 + * for the older sys_sigaction system call, and the kernel version of the 105 + * struct sigaction for the newer sys_rt_sigaction. 106 + * 107 + * The uapi definition for struct sigaction has made a strange distinction 108 + * between 31-bit and 64-bit in the past. For 64-bit the uapi structure 109 + * looks like the kernel struct sigaction, but for 31-bit it used to 110 + * look like the kernel struct old_sigaction. That practically made the 111 + * structure unusable for either system call. To get around this problem 112 + * the glibc always had its own definitions for the sigaction structures. 113 + * 114 + * The current struct sigaction uapi definition below is suitable for the 115 + * sys_rt_sigaction system call only. 116 + */ 102 117 struct sigaction { 103 118 union { 104 119 __sighandler_t _sa_handler; 105 120 void (*_sa_sigaction)(int, struct siginfo *, void *); 106 121 } _u; 107 - #ifndef __s390x__ /* lovely */ 108 - sigset_t sa_mask; 109 - unsigned long sa_flags; 110 - void (*sa_restorer)(void); 111 - #else /* __s390x__ */ 112 122 unsigned long sa_flags; 113 123 void (*sa_restorer)(void); 114 124 sigset_t sa_mask; 115 - #endif /* __s390x__ */ 116 125 }; 117 126 118 127 #define sa_handler _u._sa_handler
+3
arch/s390/kernel/Makefile
··· 82 82 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 83 83 obj-$(CONFIG_UPROBES) += uprobes.o 84 84 85 + obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o 86 + obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o 87 + 85 88 obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 86 89 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o 87 90
+5
arch/s390/kernel/asm-offsets.c
··· 10 10 #include <linux/kbuild.h> 11 11 #include <linux/kvm_host.h> 12 12 #include <linux/sched.h> 13 + #include <linux/purgatory.h> 13 14 #include <asm/idle.h> 14 15 #include <asm/vdso.h> 15 16 #include <asm/pgtable.h> ··· 205 204 OFFSET(__GMAP_ASCE, gmap, asce); 206 205 OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); 207 206 OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); 207 + /* kexec_sha_region */ 208 + OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); 209 + OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len); 210 + DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region)); 208 211 return 0; 209 212 }
+1
arch/s390/kernel/compat_wrapper.c
··· 182 182 COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); 183 183 COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); 184 184 COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); 185 + COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+147
arch/s390/kernel/kexec_elf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * ELF loader for kexec_file_load system call. 4 + * 5 + * Copyright IBM Corp. 2018 6 + * 7 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 8 + */ 9 + 10 + #include <linux/errno.h> 11 + #include <linux/kernel.h> 12 + #include <linux/kexec.h> 13 + #include <asm/setup.h> 14 + 15 + static int kexec_file_add_elf_kernel(struct kimage *image, 16 + struct s390_load_data *data, 17 + char *kernel, unsigned long kernel_len) 18 + { 19 + struct kexec_buf buf; 20 + const Elf_Ehdr *ehdr; 21 + const Elf_Phdr *phdr; 22 + int i, ret; 23 + 24 + ehdr = (Elf_Ehdr *)kernel; 25 + buf.image = image; 26 + 27 + phdr = (void *)ehdr + ehdr->e_phoff; 28 + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 29 + if (phdr->p_type != PT_LOAD) 30 + continue; 31 + 32 + buf.buffer = kernel + phdr->p_offset; 33 + buf.bufsz = phdr->p_filesz; 34 + 35 + buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); 36 + buf.memsz = phdr->p_memsz; 37 + 38 + if (phdr->p_paddr == 0) { 39 + data->kernel_buf = buf.buffer; 40 + data->memsz += STARTUP_NORMAL_OFFSET; 41 + 42 + buf.buffer += STARTUP_NORMAL_OFFSET; 43 + buf.bufsz -= STARTUP_NORMAL_OFFSET; 44 + 45 + buf.mem += STARTUP_NORMAL_OFFSET; 46 + buf.memsz -= STARTUP_NORMAL_OFFSET; 47 + } 48 + 49 + if (image->type == KEXEC_TYPE_CRASH) 50 + buf.mem += crashk_res.start; 51 + 52 + ret = kexec_add_buffer(&buf); 53 + if (ret) 54 + return ret; 55 + 56 + data->memsz += buf.memsz; 57 + } 58 + 59 + return 0; 60 + } 61 + 62 + static void *s390_elf_load(struct kimage *image, 63 + char *kernel, unsigned long kernel_len, 64 + char *initrd, unsigned long initrd_len, 65 + char *cmdline, unsigned long cmdline_len) 66 + { 67 + struct s390_load_data data = {0}; 68 + const Elf_Ehdr *ehdr; 69 + const Elf_Phdr *phdr; 70 + size_t size; 71 + int i, ret; 72 + 73 + /* image->fobs->probe already checked for valid ELF magic number. 
*/ 74 + ehdr = (Elf_Ehdr *)kernel; 75 + 76 + if (ehdr->e_type != ET_EXEC || 77 + ehdr->e_ident[EI_CLASS] != ELFCLASS64 || 78 + !elf_check_arch(ehdr)) 79 + return ERR_PTR(-EINVAL); 80 + 81 + if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr)) 82 + return ERR_PTR(-EINVAL); 83 + 84 + size = ehdr->e_ehsize + ehdr->e_phoff; 85 + size += ehdr->e_phentsize * ehdr->e_phnum; 86 + if (size > kernel_len) 87 + return ERR_PTR(-EINVAL); 88 + 89 + phdr = (void *)ehdr + ehdr->e_phoff; 90 + size = ALIGN(size, phdr->p_align); 91 + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 92 + if (phdr->p_type == PT_INTERP) 93 + return ERR_PTR(-EINVAL); 94 + 95 + if (phdr->p_offset > kernel_len) 96 + return ERR_PTR(-EINVAL); 97 + 98 + size += ALIGN(phdr->p_filesz, phdr->p_align); 99 + } 100 + 101 + if (size > kernel_len) 102 + return ERR_PTR(-EINVAL); 103 + 104 + ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len); 105 + if (ret) 106 + return ERR_PTR(ret); 107 + 108 + if (!data.memsz) 109 + return ERR_PTR(-EINVAL); 110 + 111 + if (initrd) { 112 + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); 113 + if (ret) 114 + return ERR_PTR(ret); 115 + } 116 + 117 + ret = kexec_file_add_purgatory(image, &data); 118 + if (ret) 119 + return ERR_PTR(ret); 120 + 121 + return kexec_file_update_kernel(image, &data); 122 + } 123 + 124 + static int s390_elf_probe(const char *buf, unsigned long len) 125 + { 126 + const Elf_Ehdr *ehdr; 127 + 128 + if (len < sizeof(Elf_Ehdr)) 129 + return -ENOEXEC; 130 + 131 + ehdr = (Elf_Ehdr *)buf; 132 + 133 + /* Only check the ELF magic number here and do proper validity check 134 + * in the loader. Any check here that fails would send the erroneous 135 + * ELF file to the image loader that does not care what it gets. 136 + * (Most likely) causing behavior not intended by the user. 
137 + */ 138 + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) 139 + return -ENOEXEC; 140 + 141 + return 0; 142 + } 143 + 144 + const struct kexec_file_ops s390_kexec_elf_ops = { 145 + .probe = s390_elf_probe, 146 + .load = s390_elf_load, 147 + };
+76
arch/s390/kernel/kexec_image.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Image loader for kexec_file_load system call. 4 + * 5 + * Copyright IBM Corp. 2018 6 + * 7 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 8 + */ 9 + 10 + #include <linux/errno.h> 11 + #include <linux/kernel.h> 12 + #include <linux/kexec.h> 13 + #include <asm/setup.h> 14 + 15 + static int kexec_file_add_image_kernel(struct kimage *image, 16 + struct s390_load_data *data, 17 + char *kernel, unsigned long kernel_len) 18 + { 19 + struct kexec_buf buf; 20 + int ret; 21 + 22 + buf.image = image; 23 + 24 + buf.buffer = kernel + STARTUP_NORMAL_OFFSET; 25 + buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET; 26 + 27 + buf.mem = STARTUP_NORMAL_OFFSET; 28 + if (image->type == KEXEC_TYPE_CRASH) 29 + buf.mem += crashk_res.start; 30 + buf.memsz = buf.bufsz; 31 + 32 + ret = kexec_add_buffer(&buf); 33 + 34 + data->kernel_buf = kernel; 35 + data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET; 36 + 37 + return ret; 38 + } 39 + 40 + static void *s390_image_load(struct kimage *image, 41 + char *kernel, unsigned long kernel_len, 42 + char *initrd, unsigned long initrd_len, 43 + char *cmdline, unsigned long cmdline_len) 44 + { 45 + struct s390_load_data data = {0}; 46 + int ret; 47 + 48 + ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len); 49 + if (ret) 50 + return ERR_PTR(ret); 51 + 52 + if (initrd) { 53 + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); 54 + if (ret) 55 + return ERR_PTR(ret); 56 + } 57 + 58 + ret = kexec_file_add_purgatory(image, &data); 59 + if (ret) 60 + return ERR_PTR(ret); 61 + 62 + return kexec_file_update_kernel(image, &data); 63 + } 64 + 65 + static int s390_image_probe(const char *buf, unsigned long len) 66 + { 67 + /* Can't reliably tell if an image is valid. Therefore give the 68 + * user whatever he wants. 69 + */ 70 + return 0; 71 + } 72 + 73 + const struct kexec_file_ops s390_kexec_image_ops = { 74 + .probe = s390_image_probe, 75 + .load = s390_image_load, 76 + };
+245
arch/s390/kernel/machine_kexec_file.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * s390 code for kexec_file_load system call 4 + * 5 + * Copyright IBM Corp. 2018 6 + * 7 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 8 + */ 9 + 10 + #include <linux/elf.h> 11 + #include <linux/kexec.h> 12 + #include <asm/setup.h> 13 + 14 + const struct kexec_file_ops * const kexec_file_loaders[] = { 15 + &s390_kexec_elf_ops, 16 + &s390_kexec_image_ops, 17 + NULL, 18 + }; 19 + 20 + int *kexec_file_update_kernel(struct kimage *image, 21 + struct s390_load_data *data) 22 + { 23 + unsigned long *loc; 24 + 25 + if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) 26 + return ERR_PTR(-EINVAL); 27 + 28 + if (image->cmdline_buf_len) 29 + memcpy(data->kernel_buf + COMMAND_LINE_OFFSET, 30 + image->cmdline_buf, image->cmdline_buf_len); 31 + 32 + if (image->type == KEXEC_TYPE_CRASH) { 33 + loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET); 34 + *loc = crashk_res.start; 35 + 36 + loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET); 37 + *loc = crashk_res.end - crashk_res.start + 1; 38 + } 39 + 40 + if (image->initrd_buf) { 41 + loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET); 42 + *loc = data->initrd_load_addr; 43 + 44 + loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET); 45 + *loc = image->initrd_buf_len; 46 + } 47 + 48 + return NULL; 49 + } 50 + 51 + static int kexec_file_update_purgatory(struct kimage *image) 52 + { 53 + u64 entry, type; 54 + int ret; 55 + 56 + if (image->type == KEXEC_TYPE_CRASH) { 57 + entry = STARTUP_KDUMP_OFFSET; 58 + type = KEXEC_TYPE_CRASH; 59 + } else { 60 + entry = STARTUP_NORMAL_OFFSET; 61 + type = KEXEC_TYPE_DEFAULT; 62 + } 63 + 64 + ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, 65 + sizeof(entry), false); 66 + if (ret) 67 + return ret; 68 + 69 + ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, 70 + sizeof(type), false); 71 + if (ret) 72 + return ret; 73 + 74 + if (image->type == 
KEXEC_TYPE_CRASH) { 75 + u64 crash_size; 76 + 77 + ret = kexec_purgatory_get_set_symbol(image, "crash_start", 78 + &crashk_res.start, 79 + sizeof(crashk_res.start), 80 + false); 81 + if (ret) 82 + return ret; 83 + 84 + crash_size = crashk_res.end - crashk_res.start + 1; 85 + ret = kexec_purgatory_get_set_symbol(image, "crash_size", 86 + &crash_size, 87 + sizeof(crash_size), 88 + false); 89 + } 90 + return ret; 91 + } 92 + 93 + int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data) 94 + { 95 + struct kexec_buf buf; 96 + int ret; 97 + 98 + buf.image = image; 99 + 100 + data->memsz = ALIGN(data->memsz, PAGE_SIZE); 101 + buf.mem = data->memsz; 102 + if (image->type == KEXEC_TYPE_CRASH) 103 + buf.mem += crashk_res.start; 104 + 105 + ret = kexec_load_purgatory(image, &buf); 106 + if (ret) 107 + return ret; 108 + 109 + ret = kexec_file_update_purgatory(image); 110 + return ret; 111 + } 112 + 113 + int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data, 114 + char *initrd, unsigned long initrd_len) 115 + { 116 + struct kexec_buf buf; 117 + int ret; 118 + 119 + buf.image = image; 120 + 121 + buf.buffer = initrd; 122 + buf.bufsz = initrd_len; 123 + 124 + data->memsz = ALIGN(data->memsz, PAGE_SIZE); 125 + buf.mem = data->memsz; 126 + if (image->type == KEXEC_TYPE_CRASH) 127 + buf.mem += crashk_res.start; 128 + buf.memsz = buf.bufsz; 129 + 130 + data->initrd_load_addr = buf.mem; 131 + data->memsz += buf.memsz; 132 + 133 + ret = kexec_add_buffer(&buf); 134 + return ret; 135 + } 136 + 137 + /* 138 + * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole 139 + * and provide kbuf->mem by hand. 
140 + */ 141 + int arch_kexec_walk_mem(struct kexec_buf *kbuf, 142 + int (*func)(struct resource *, void *)) 143 + { 144 + return 1; 145 + } 146 + 147 + int arch_kexec_apply_relocations_add(struct purgatory_info *pi, 148 + Elf_Shdr *section, 149 + const Elf_Shdr *relsec, 150 + const Elf_Shdr *symtab) 151 + { 152 + Elf_Rela *relas; 153 + int i; 154 + 155 + relas = (void *)pi->ehdr + relsec->sh_offset; 156 + 157 + for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { 158 + const Elf_Sym *sym; /* symbol to relocate */ 159 + unsigned long addr; /* final location after relocation */ 160 + unsigned long val; /* relocated symbol value */ 161 + void *loc; /* tmp location to modify */ 162 + 163 + sym = (void *)pi->ehdr + symtab->sh_offset; 164 + sym += ELF64_R_SYM(relas[i].r_info); 165 + 166 + if (sym->st_shndx == SHN_UNDEF) 167 + return -ENOEXEC; 168 + 169 + if (sym->st_shndx == SHN_COMMON) 170 + return -ENOEXEC; 171 + 172 + if (sym->st_shndx >= pi->ehdr->e_shnum && 173 + sym->st_shndx != SHN_ABS) 174 + return -ENOEXEC; 175 + 176 + loc = pi->purgatory_buf; 177 + loc += section->sh_offset; 178 + loc += relas[i].r_offset; 179 + 180 + val = sym->st_value; 181 + if (sym->st_shndx != SHN_ABS) 182 + val += pi->sechdrs[sym->st_shndx].sh_addr; 183 + val += relas[i].r_addend; 184 + 185 + addr = section->sh_addr + relas[i].r_offset; 186 + 187 + switch (ELF64_R_TYPE(relas[i].r_info)) { 188 + case R_390_8: /* Direct 8 bit. */ 189 + *(u8 *)loc = val; 190 + break; 191 + case R_390_12: /* Direct 12 bit. */ 192 + *(u16 *)loc &= 0xf000; 193 + *(u16 *)loc |= val & 0xfff; 194 + break; 195 + case R_390_16: /* Direct 16 bit. */ 196 + *(u16 *)loc = val; 197 + break; 198 + case R_390_20: /* Direct 20 bit. */ 199 + *(u32 *)loc &= 0xf00000ff; 200 + *(u32 *)loc |= (val & 0xfff) << 16; /* DL */ 201 + *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */ 202 + break; 203 + case R_390_32: /* Direct 32 bit. */ 204 + *(u32 *)loc = val; 205 + break; 206 + case R_390_64: /* Direct 64 bit. 
*/ 207 + *(u64 *)loc = val; 208 + break; 209 + case R_390_PC16: /* PC relative 16 bit. */ 210 + *(u16 *)loc = (val - addr); 211 + break; 212 + case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ 213 + *(u16 *)loc = (val - addr) >> 1; 214 + break; 215 + case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ 216 + *(u32 *)loc = (val - addr) >> 1; 217 + break; 218 + case R_390_PC32: /* PC relative 32 bit. */ 219 + *(u32 *)loc = (val - addr); 220 + break; 221 + case R_390_PC64: /* PC relative 64 bit. */ 222 + *(u64 *)loc = (val - addr); 223 + break; 224 + default: 225 + break; 226 + } 227 + } 228 + return 0; 229 + } 230 + 231 + int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, 232 + unsigned long buf_len) 233 + { 234 + /* A kernel must be at least large enough to contain head.S. During 235 + * load memory in head.S will be accessed, e.g. to register the next 236 + * command line. If the next kernel were smaller the current kernel 237 + * will panic at load. 238 + * 239 + * 0x11000 = sizeof(head.S) 240 + */ 241 + if (buf_len < 0x11000) 242 + return -ENOEXEC; 243 + 244 + return kexec_image_probe_default(image, buf, buf_len); 245 + }
+1
arch/s390/kernel/nospec-branch.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/module.h> 3 3 #include <linux/device.h> 4 + #include <linux/cpu.h> 4 5 #include <asm/nospec-branch.h> 5 6 6 7 static int __init nobp_setup_early(char *str)
+1
arch/s390/kernel/perf_cpum_cf_events.c
··· 583 583 model = cpumcf_z13_pmu_event_attr; 584 584 break; 585 585 case 0x3906: 586 + case 0x3907: 586 587 model = cpumcf_z14_pmu_event_attr; 587 588 break; 588 589 default:
+1
arch/s390/kernel/setup.c
··· 821 821 strcpy(elf_platform, "z13"); 822 822 break; 823 823 case 0x3906: 824 + case 0x3907: 824 825 strcpy(elf_platform, "z14"); 825 826 break; 826 827 }
+1
arch/s390/kernel/syscalls/syscall.tbl
··· 388 388 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage 389 389 379 common statx sys_statx compat_sys_statx 390 390 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi 391 + 381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
+2
arch/s390/purgatory/.gitignore
··· 1 + kexec-purgatory.c 2 + purgatory.ro
+37
arch/s390/purgatory/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + OBJECT_FILES_NON_STANDARD := y 4 + 5 + purgatory-y := head.o purgatory.o string.o sha256.o mem.o 6 + 7 + targets += $(purgatory-y) purgatory.ro kexec-purgatory.c 8 + PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) 9 + 10 + $(obj)/sha256.o: $(srctree)/lib/sha256.c 11 + $(call if_changed_rule,cc_o_c) 12 + 13 + $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S 14 + $(call if_changed_rule,as_o_S) 15 + 16 + $(obj)/string.o: $(srctree)/arch/s390/lib/string.c 17 + $(call if_changed_rule,cc_o_c) 18 + 19 + LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib 20 + LDFLAGS_purgatory.ro += -z nodefaultlib 21 + KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes 22 + KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare 23 + KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding 24 + KBUILD_CFLAGS += -c -MD -Os -m64 25 + KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 26 + 27 + $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 28 + $(call if_changed,ld) 29 + 30 + CMD_BIN2C = $(objtree)/scripts/basic/bin2c 31 + quiet_cmd_bin2c = BIN2C $@ 32 + cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ 33 + 34 + $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE 35 + $(call if_changed,bin2c) 36 + 37 + obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
+279
arch/s390/purgatory/head.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Purgatory setup code 4 + * 5 + * Copyright IBM Corp. 2018 6 + * 7 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 8 + */ 9 + 10 + #include <linux/linkage.h> 11 + #include <asm/asm-offsets.h> 12 + #include <asm/page.h> 13 + #include <asm/sigp.h> 14 + 15 + /* The purgatory is the code running between two kernels. It's main purpose 16 + * is to verify that the next kernel was not corrupted after load and to 17 + * start it. 18 + * 19 + * If the next kernel is a crash kernel there are some peculiarities to 20 + * consider: 21 + * 22 + * First the purgatory is called twice. Once only to verify the 23 + * sha digest. So if the crash kernel got corrupted the old kernel can try 24 + * to trigger a stand-alone dumper. And once to actually load the crash kernel. 25 + * 26 + * Second the purgatory also has to swap the crash memory region with its 27 + * destination at address 0. As the purgatory is part of crash memory this 28 + * requires some finesse. The tactic here is that the purgatory first copies 29 + * itself to the end of the destination and then swaps the rest of the 30 + * memory running from there. 
31 + */ 32 + 33 + #define bufsz purgatory_end-stack 34 + 35 + .macro MEMCPY dst,src,len 36 + lgr %r0,\dst 37 + lgr %r1,\len 38 + lgr %r2,\src 39 + lgr %r3,\len 40 + 41 + 20: mvcle %r0,%r2,0 42 + jo 20b 43 + .endm 44 + 45 + .macro MEMSWAP dst,src,buf,len 46 + 10: cghi \len,bufsz 47 + jh 11f 48 + lgr %r4,\len 49 + j 12f 50 + 11: lghi %r4,bufsz 51 + 52 + 12: MEMCPY \buf,\dst,%r4 53 + MEMCPY \dst,\src,%r4 54 + MEMCPY \src,\buf,%r4 55 + 56 + agr \dst,%r4 57 + agr \src,%r4 58 + sgr \len,%r4 59 + 60 + cghi \len,0 61 + jh 10b 62 + .endm 63 + 64 + .macro START_NEXT_KERNEL base 65 + lg %r4,kernel_entry-\base(%r13) 66 + lg %r5,load_psw_mask-\base(%r13) 67 + ogr %r4,%r5 68 + stg %r4,0(%r0) 69 + 70 + xgr %r0,%r0 71 + diag %r0,%r0,0x308 72 + .endm 73 + 74 + .text 75 + .align PAGE_SIZE 76 + ENTRY(purgatory_start) 77 + /* The purgatory might be called after a diag308 so better set 78 + * architecture and addressing mode. 79 + */ 80 + lhi %r1,1 81 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE 82 + sam64 83 + 84 + larl %r5,gprregs 85 + stmg %r6,%r15,0(%r5) 86 + 87 + basr %r13,0 88 + .base_crash: 89 + 90 + /* Setup stack */ 91 + larl %r15,purgatory_end 92 + aghi %r15,-160 93 + 94 + /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called 95 + * directly with a flag passed in %r2 whether the purgatory shall do 96 + * checksum verification only (%r2 = 0 -> verification only). 
97 + * 98 + * Check now and preserve over C function call by storing in 99 + * %r10 whith 100 + * 1 -> checksum verification only 101 + * 0 -> load new kernel 102 + */ 103 + lghi %r10,0 104 + lg %r11,kernel_type-.base_crash(%r13) 105 + cghi %r11,1 /* KEXEC_TYPE_CRASH */ 106 + jne .do_checksum_verification 107 + cghi %r2,0 /* checksum verification only */ 108 + jne .do_checksum_verification 109 + lghi %r10,1 110 + 111 + .do_checksum_verification: 112 + brasl %r14,verify_sha256_digest 113 + 114 + cghi %r10,1 /* checksum verification only */ 115 + je .return_old_kernel 116 + cghi %r2,0 /* checksum match */ 117 + jne .disabled_wait 118 + 119 + /* If the next kernel is a crash kernel the purgatory has to swap 120 + * the mem regions first. 121 + */ 122 + cghi %r11,1 /* KEXEC_TYPE_CRASH */ 123 + je .start_crash_kernel 124 + 125 + /* start normal kernel */ 126 + START_NEXT_KERNEL .base_crash 127 + 128 + .return_old_kernel: 129 + lmg %r6,%r15,gprregs-.base_crash(%r13) 130 + br %r14 131 + 132 + .disabled_wait: 133 + lpswe disabled_wait_psw-.base_crash(%r13) 134 + 135 + .start_crash_kernel: 136 + /* Location of purgatory_start in crash memory */ 137 + lgr %r8,%r13 138 + aghi %r8,-(.base_crash-purgatory_start) 139 + 140 + /* Destination for this code i.e. end of memory to be swapped. */ 141 + lg %r9,crash_size-.base_crash(%r13) 142 + aghi %r9,-(purgatory_end-purgatory_start) 143 + 144 + /* Destination in crash memory, i.e. same as r9 but in crash memory. */ 145 + lg %r10,crash_start-.base_crash(%r13) 146 + agr %r10,%r9 147 + 148 + /* Buffer location (in crash memory) and size. As the purgatory is 149 + * behind the point of no return it can re-use the stack as buffer. 150 + */ 151 + lghi %r11,bufsz 152 + larl %r12,stack 153 + 154 + MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ 155 + MEMCPY %r9,%r8,%r11 /* self -> dst */ 156 + 157 + /* Jump to new location. 
*/ 158 + lgr %r7,%r9 159 + aghi %r7,.jump_to_dst-purgatory_start 160 + br %r7 161 + 162 + .jump_to_dst: 163 + basr %r13,0 164 + .base_dst: 165 + 166 + /* clear buffer */ 167 + MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ 168 + 169 + /* Load new buffer location after jump */ 170 + larl %r7,stack 171 + aghi %r10,stack-purgatory_start 172 + MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ 173 + 174 + /* Now the code is set up to run from its designated location. Start 175 + * swapping the rest of crash memory now. 176 + * 177 + * The registers will be used as follow: 178 + * 179 + * %r0-%r4 reserved for macros defined above 180 + * %r5-%r6 tmp registers 181 + * %r7 pointer to current struct sha region 182 + * %r8 index to iterate over all sha regions 183 + * %r9 pointer in crash memory 184 + * %r10 pointer in old kernel 185 + * %r11 total size (still) to be moved 186 + * %r12 pointer to buffer 187 + */ 188 + lgr %r12,%r7 189 + lgr %r11,%r9 190 + lghi %r10,0 191 + lg %r9,crash_start-.base_dst(%r13) 192 + lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ 193 + larl %r7,purgatory_sha_regions 194 + 195 + j .loop_first 196 + 197 + /* Loop over all purgatory_sha_regions. 
*/ 198 + .loop_next: 199 + aghi %r8,-1 200 + cghi %r8,0 201 + je .loop_out 202 + 203 + aghi %r7,__KEXEC_SHA_REGION_SIZE 204 + 205 + .loop_first: 206 + lg %r5,__KEXEC_SHA_REGION_START(%r7) 207 + cghi %r5,0 208 + je .loop_next 209 + 210 + /* Copy [end last sha region, start current sha region) */ 211 + /* Note: kexec_sha_region->start points in crash memory */ 212 + sgr %r5,%r9 213 + MEMCPY %r9,%r10,%r5 214 + 215 + agr %r9,%r5 216 + agr %r10,%r5 217 + sgr %r11,%r5 218 + 219 + /* Swap sha region */ 220 + lg %r6,__KEXEC_SHA_REGION_LEN(%r7) 221 + MEMSWAP %r9,%r10,%r12,%r6 222 + sg %r11,__KEXEC_SHA_REGION_LEN(%r7) 223 + j .loop_next 224 + 225 + .loop_out: 226 + /* Copy rest of crash memory */ 227 + MEMCPY %r9,%r10,%r11 228 + 229 + /* start crash kernel */ 230 + START_NEXT_KERNEL .base_dst 231 + 232 + 233 + load_psw_mask: 234 + .long 0x00080000,0x80000000 235 + 236 + .align 8 237 + disabled_wait_psw: 238 + .quad 0x0002000180000000 239 + .quad 0x0000000000000000 + .do_checksum_verification 240 + 241 + gprregs: 242 + .rept 10 243 + .quad 0 244 + .endr 245 + 246 + purgatory_sha256_digest: 247 + .global purgatory_sha256_digest 248 + .rept 32 /* SHA256_DIGEST_SIZE */ 249 + .byte 0 250 + .endr 251 + 252 + purgatory_sha_regions: 253 + .global purgatory_sha_regions 254 + .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */ 255 + .byte 0 256 + .endr 257 + 258 + kernel_entry: 259 + .global kernel_entry 260 + .quad 0 261 + 262 + kernel_type: 263 + .global kernel_type 264 + .quad 0 265 + 266 + crash_start: 267 + .global crash_start 268 + .quad 0 269 + 270 + crash_size: 271 + .global crash_size 272 + .quad 0 273 + 274 + .align PAGE_SIZE 275 + stack: 276 + /* The buffer to move this code must be as big as the code. */ 277 + .skip stack-purgatory_start 278 + .align PAGE_SIZE 279 + purgatory_end:
+42
arch/s390/purgatory/purgatory.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Purgatory code running between two kernels. 4 + * 5 + * Copyright IBM Corp. 2018 6 + * 7 + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> 8 + */ 9 + 10 + #include <linux/kexec.h> 11 + #include <linux/sha256.h> 12 + #include <linux/string.h> 13 + #include <asm/purgatory.h> 14 + 15 + struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; 16 + u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; 17 + 18 + u64 kernel_entry; 19 + u64 kernel_type; 20 + 21 + u64 crash_start; 22 + u64 crash_size; 23 + 24 + int verify_sha256_digest(void) 25 + { 26 + struct kexec_sha_region *ptr, *end; 27 + u8 digest[SHA256_DIGEST_SIZE]; 28 + struct sha256_state sctx; 29 + 30 + sha256_init(&sctx); 31 + end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); 32 + 33 + for (ptr = purgatory_sha_regions; ptr < end; ptr++) 34 + sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); 35 + 36 + sha256_final(&sctx, digest); 37 + 38 + if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) 39 + return 1; 40 + 41 + return 0; 42 + }
+37
arch/x86/events/intel/uncore_snbep.c
··· 3028 3028 .format_group = &hswep_uncore_cbox_format_group, 3029 3029 }; 3030 3030 3031 + static struct intel_uncore_type bdx_uncore_sbox = { 3032 + .name = "sbox", 3033 + .num_counters = 4, 3034 + .num_boxes = 4, 3035 + .perf_ctr_bits = 48, 3036 + .event_ctl = HSWEP_S0_MSR_PMON_CTL0, 3037 + .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, 3038 + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 3039 + .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, 3040 + .msr_offset = HSWEP_SBOX_MSR_OFFSET, 3041 + .ops = &hswep_uncore_sbox_msr_ops, 3042 + .format_group = &hswep_uncore_sbox_format_group, 3043 + }; 3044 + 3045 + #define BDX_MSR_UNCORE_SBOX 3 3046 + 3031 3047 static struct intel_uncore_type *bdx_msr_uncores[] = { 3032 3048 &bdx_uncore_ubox, 3033 3049 &bdx_uncore_cbox, 3034 3050 &hswep_uncore_pcu, 3051 + &bdx_uncore_sbox, 3035 3052 NULL, 3036 3053 }; 3037 3054 ··· 3060 3043 3061 3044 void bdx_uncore_cpu_init(void) 3062 3045 { 3046 + int pkg = topology_phys_to_logical_pkg(0); 3047 + 3063 3048 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 3064 3049 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 3065 3050 uncore_msr_uncores = bdx_msr_uncores; 3066 3051 3052 + /* BDX-DE doesn't have SBOX */ 3053 + if (boot_cpu_data.x86_model == 86) { 3054 + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; 3055 + /* Detect systems with no SBOXes */ 3056 + } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { 3057 + struct pci_dev *pdev; 3058 + u32 capid4; 3059 + 3060 + pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; 3061 + pci_read_config_dword(pdev, 0x94, &capid4); 3062 + if (((capid4 >> 6) & 0x3) == 0) 3063 + bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; 3064 + } 3067 3065 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; 3068 3066 } 3069 3067 ··· 3295 3263 { /* QPI Port 2 filter */ 3296 3264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), 3297 3265 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), 3266 + }, 3267 + { /* PCU.3 (for Capability registers) */ 3268 
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), 3269 + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 3270 + HSWEP_PCI_PCU_3), 3298 3271 }, 3299 3272 { /* end: all zeroes */ } 3300 3273 };
-2
arch/x86/include/asm/asm.h
··· 136 136 #endif 137 137 138 138 #ifndef __ASSEMBLY__ 139 - #ifndef __BPF__ 140 139 /* 141 140 * This output constraint should be used for any inline asm which has a "call" 142 141 * instruction. Otherwise the asm may be inserted before the frame pointer ··· 144 145 */ 145 146 register unsigned long current_stack_pointer asm(_ASM_SP); 146 147 #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) 147 - #endif 148 148 #endif 149 149 150 150 #endif /* _ASM_X86_ASM_H */
+1
arch/x86/include/asm/kvm_host.h
··· 1013 1013 1014 1014 bool (*has_wbinvd_exit)(void); 1015 1015 1016 + u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); 1016 1017 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); 1017 1018 1018 1019 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
-2
arch/x86/include/asm/processor.h
··· 749 749 extern void enable_sep_cpu(void); 750 750 extern int sysenter_setup(void); 751 751 752 - extern void early_trap_init(void); 753 752 void early_trap_pf_init(void); 754 753 755 754 /* Defined in head.S */ 756 755 extern struct desc_ptr early_gdt_descr; 757 756 758 - extern void cpu_set_gdt(int); 759 757 extern void switch_to_new_gdt(int); 760 758 extern void load_direct_gdt(int); 761 759 extern void load_fixmap_gdt(int);
+4
arch/x86/kernel/acpi/boot.c
··· 215 215 apic_id = processor->local_apic_id; 216 216 enabled = processor->lapic_flags & ACPI_MADT_ENABLED; 217 217 218 + /* Ignore invalid ID */ 219 + if (apic_id == 0xffffffff) 220 + return 0; 221 + 218 222 /* 219 223 * We need to register disabled CPU as well to permit 220 224 * counting disabled CPUs. This allows us to size
+2 -3
arch/x86/kernel/kexec-bzimage64.c
··· 398 398 * little bit simple 399 399 */ 400 400 efi_map_sz = efi_get_runtime_map_size(); 401 - efi_map_sz = ALIGN(efi_map_sz, 16); 402 401 params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + 403 402 MAX_ELFCOREHDR_STR_LEN; 404 403 params_cmdline_sz = ALIGN(params_cmdline_sz, 16); 405 - kbuf.bufsz = params_cmdline_sz + efi_map_sz + 404 + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + 406 405 sizeof(struct setup_data) + 407 406 sizeof(struct efi_setup_data); 408 407 ··· 409 410 if (!params) 410 411 return ERR_PTR(-ENOMEM); 411 412 efi_map_offset = params_cmdline_sz; 412 - efi_setup_data_offset = efi_map_offset + efi_map_sz; 413 + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); 413 414 414 415 /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ 415 416 setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
+1 -1
arch/x86/kernel/ldt.c
··· 166 166 */ 167 167 pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); 168 168 /* Filter out unsuppored __PAGE_KERNEL* bits: */ 169 - pgprot_val(pte_prot) |= __supported_pte_mask; 169 + pgprot_val(pte_prot) &= __supported_pte_mask; 170 170 pte = pfn_pte(pfn, pte_prot); 171 171 set_pte_at(mm, va, ptep, pte); 172 172 pte_unmap_unlock(ptep, ptl);
-90
arch/x86/kernel/pci-nommu.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* Fallback functions when the main IOMMU code is not compiled in. This 3 - code is roughly equivalent to i386. */ 4 - #include <linux/dma-direct.h> 5 - #include <linux/scatterlist.h> 6 - #include <linux/string.h> 7 - #include <linux/gfp.h> 8 - #include <linux/pci.h> 9 - #include <linux/mm.h> 10 - 11 - #include <asm/processor.h> 12 - #include <asm/iommu.h> 13 - #include <asm/dma.h> 14 - 15 - #define NOMMU_MAPPING_ERROR 0 16 - 17 - static int 18 - check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 19 - { 20 - if (hwdev && !dma_capable(hwdev, bus, size)) { 21 - if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) 22 - printk(KERN_ERR 23 - "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", 24 - name, (long long)bus, size, 25 - (long long)*hwdev->dma_mask); 26 - return 0; 27 - } 28 - return 1; 29 - } 30 - 31 - static dma_addr_t nommu_map_page(struct device *dev, struct page *page, 32 - unsigned long offset, size_t size, 33 - enum dma_data_direction dir, 34 - unsigned long attrs) 35 - { 36 - dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset; 37 - WARN_ON(size == 0); 38 - if (!check_addr("map_single", dev, bus, size)) 39 - return NOMMU_MAPPING_ERROR; 40 - return bus; 41 - } 42 - 43 - /* Map a set of buffers described by scatterlist in streaming 44 - * mode for DMA. This is the scatter-gather version of the 45 - * above pci_map_single interface. Here the scatter gather list 46 - * elements are each tagged with the appropriate dma address 47 - * and length. They are obtained via sg_dma_{address,length}(SG). 48 - * 49 - * NOTE: An implementation may be able to use a smaller number of 50 - * DMA address/length pairs than there are SG table elements. 51 - * (for example via virtual mapping capabilities) 52 - * The routine returns the number of addr/length pairs actually 53 - * used, at most nents. 54 - * 55 - * Device ownership issues as mentioned above for pci_map_single are 56 - * the same here. 
57 - */ 58 - static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 59 - int nents, enum dma_data_direction dir, 60 - unsigned long attrs) 61 - { 62 - struct scatterlist *s; 63 - int i; 64 - 65 - WARN_ON(nents == 0 || sg[0].length == 0); 66 - 67 - for_each_sg(sg, s, nents, i) { 68 - BUG_ON(!sg_page(s)); 69 - s->dma_address = sg_phys(s); 70 - if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) 71 - return 0; 72 - s->dma_length = s->length; 73 - } 74 - return nents; 75 - } 76 - 77 - static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) 78 - { 79 - return dma_addr == NOMMU_MAPPING_ERROR; 80 - } 81 - 82 - const struct dma_map_ops nommu_dma_ops = { 83 - .alloc = dma_generic_alloc_coherent, 84 - .free = dma_generic_free_coherent, 85 - .map_sg = nommu_map_sg, 86 - .map_page = nommu_map_page, 87 - .is_phys = 1, 88 - .mapping_error = nommu_mapping_error, 89 - .dma_supported = x86_dma_supported, 90 - };
+40 -5
arch/x86/kernel/smpboot.c
··· 77 77 #include <asm/i8259.h> 78 78 #include <asm/misc.h> 79 79 #include <asm/qspinlock.h> 80 + #include <asm/intel-family.h> 81 + #include <asm/cpu_device_id.h> 80 82 81 83 /* Number of siblings per CPU package */ 82 84 int smp_num_siblings = 1; ··· 392 390 return false; 393 391 } 394 392 393 + /* 394 + * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. 395 + * 396 + * These are Intel CPUs that enumerate an LLC that is shared by 397 + * multiple NUMA nodes. The LLC on these systems is shared for 398 + * off-package data access but private to the NUMA node (half 399 + * of the package) for on-package access. 400 + * 401 + * CPUID (the source of the information about the LLC) can only 402 + * enumerate the cache as being shared *or* unshared, but not 403 + * this particular configuration. The CPU in this case enumerates 404 + * the cache to be shared across the entire package (spanning both 405 + * NUMA nodes). 406 + */ 407 + 408 + static const struct x86_cpu_id snc_cpu[] = { 409 + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X }, 410 + {} 411 + }; 412 + 395 413 static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 396 414 { 397 415 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 398 416 399 - if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && 400 - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) 401 - return topology_sane(c, o, "llc"); 417 + /* Do not match if we do not have a valid APICID for cpu: */ 418 + if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) 419 + return false; 402 420 403 - return false; 421 + /* Do not match if LLC id does not match: */ 422 + if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2)) 423 + return false; 424 + 425 + /* 426 + * Allow the SNC topology without warning. Return of false 427 + * means 'c' does not share the LLC of 'o'. This will be 428 + * reflected to userspace. 
429 + */ 430 + if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) 431 + return false; 432 + 433 + return topology_sane(c, o, "llc"); 404 434 } 405 435 406 436 /* ··· 490 456 491 457 /* 492 458 * Set if a package/die has multiple NUMA nodes inside. 493 - * AMD Magny-Cours and Intel Cluster-on-Die have this. 459 + * AMD Magny-Cours, Intel Cluster-on-Die, and Intel 460 + * Sub-NUMA Clustering have this. 494 461 */ 495 462 static bool x86_has_numa_in_package; 496 463
+1 -1
arch/x86/kernel/tsc.c
··· 317 317 hpet2 -= hpet1; 318 318 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 319 319 do_div(tmp, 1000000); 320 - do_div(deltatsc, tmp); 320 + deltatsc = div64_u64(deltatsc, tmp); 321 321 322 322 return (unsigned long) deltatsc; 323 323 }
+18 -13
arch/x86/kvm/svm.c
··· 1423 1423 seg->base = 0; 1424 1424 } 1425 1425 1426 + static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) 1427 + { 1428 + struct vcpu_svm *svm = to_svm(vcpu); 1429 + 1430 + if (is_guest_mode(vcpu)) 1431 + return svm->nested.hsave->control.tsc_offset; 1432 + 1433 + return vcpu->arch.tsc_offset; 1434 + } 1435 + 1426 1436 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 1427 1437 { 1428 1438 struct vcpu_svm *svm = to_svm(vcpu); 1429 1439 u64 g_tsc_offset = 0; 1430 1440 1431 1441 if (is_guest_mode(vcpu)) { 1442 + /* Write L1's TSC offset. */ 1432 1443 g_tsc_offset = svm->vmcb->control.tsc_offset - 1433 1444 svm->nested.hsave->control.tsc_offset; 1434 1445 svm->nested.hsave->control.tsc_offset = offset; ··· 3333 3322 /* Restore the original control entries */ 3334 3323 copy_vmcb_control_area(vmcb, hsave); 3335 3324 3325 + svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; 3336 3326 kvm_clear_exception_queue(&svm->vcpu); 3337 3327 kvm_clear_interrupt_queue(&svm->vcpu); 3338 3328 ··· 3494 3482 /* We don't want to see VMMCALLs from a nested guest */ 3495 3483 clr_intercept(svm, INTERCEPT_VMMCALL); 3496 3484 3485 + svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; 3486 + svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; 3487 + 3497 3488 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; 3498 3489 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; 3499 3490 svm->vmcb->control.int_state = nested_vmcb->control.int_state; 3500 - svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; 3501 3491 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; 3502 3492 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; 3503 3493 ··· 4049 4035 struct vcpu_svm *svm = to_svm(vcpu); 4050 4036 4051 4037 switch (msr_info->index) { 4052 - case MSR_IA32_TSC: { 4053 - msr_info->data = svm->vmcb->control.tsc_offset + 4054 - kvm_scale_tsc(vcpu, rdtsc()); 4055 - 4056 - break; 4057 - } 
4058 4038 case MSR_STAR: 4059 4039 msr_info->data = svm->vmcb->save.star; 4060 4040 break; ··· 4200 4192 vcpu->arch.pat = data; 4201 4193 svm->vmcb->save.g_pat = data; 4202 4194 mark_dirty(svm->vmcb, VMCB_NPT); 4203 - break; 4204 - case MSR_IA32_TSC: 4205 - kvm_write_tsc(vcpu, msr); 4206 4195 break; 4207 4196 case MSR_IA32_SPEC_CTRL: 4208 4197 if (!msr->host_initiated && ··· 5270 5265 } 5271 5266 5272 5267 if (!ret && svm) { 5273 - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, 5274 - host_irq, e->gsi, 5275 - vcpu_info.vector, 5268 + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, 5269 + e->gsi, vcpu_info.vector, 5276 5270 vcpu_info.pi_desc_addr, set); 5277 5271 } 5278 5272 ··· 7106 7102 7107 7103 .has_wbinvd_exit = svm_has_wbinvd_exit, 7108 7104 7105 + .read_l1_tsc_offset = svm_read_l1_tsc_offset, 7109 7106 .write_tsc_offset = svm_write_tsc_offset, 7110 7107 7111 7108 .set_tdp_cr3 = set_tdp_cr3,
+55 -40
arch/x86/kvm/vmx.c
··· 2880 2880 vmx_update_msr_bitmap(&vmx->vcpu); 2881 2881 } 2882 2882 2883 - /* 2884 - * reads and returns guest's timestamp counter "register" 2885 - * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset 2886 - * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 2887 - */ 2888 - static u64 guest_read_tsc(struct kvm_vcpu *vcpu) 2883 + static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) 2889 2884 { 2890 - u64 host_tsc, tsc_offset; 2885 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 2891 2886 2892 - host_tsc = rdtsc(); 2893 - tsc_offset = vmcs_read64(TSC_OFFSET); 2894 - return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; 2887 + if (is_guest_mode(vcpu) && 2888 + (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) 2889 + return vcpu->arch.tsc_offset - vmcs12->tsc_offset; 2890 + 2891 + return vcpu->arch.tsc_offset; 2895 2892 } 2896 2893 2897 2894 /* ··· 3521 3524 #endif 3522 3525 case MSR_EFER: 3523 3526 return kvm_get_msr_common(vcpu, msr_info); 3524 - case MSR_IA32_TSC: 3525 - msr_info->data = guest_read_tsc(vcpu); 3526 - break; 3527 3527 case MSR_IA32_SPEC_CTRL: 3528 3528 if (!msr_info->host_initiated && 3529 3529 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && ··· 3639 3645 (data & MSR_IA32_BNDCFGS_RSVD)) 3640 3646 return 1; 3641 3647 vmcs_write64(GUEST_BNDCFGS, data); 3642 - break; 3643 - case MSR_IA32_TSC: 3644 - kvm_write_tsc(vcpu, msr_info); 3645 3648 break; 3646 3649 case MSR_IA32_SPEC_CTRL: 3647 3650 if (!msr_info->host_initiated && ··· 10599 10608 return true; 10600 10609 } 10601 10610 10611 + static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, 10612 + struct vmcs12 *vmcs12) 10613 + { 10614 + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 10615 + !page_address_valid(vcpu, vmcs12->apic_access_addr)) 10616 + return -EINVAL; 10617 + else 10618 + return 0; 10619 + } 10620 + 10602 10621 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 10603 10622 struct vmcs12 *vmcs12) 
10604 10623 { ··· 11177 11176 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 11178 11177 } 11179 11178 11180 - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 11181 - vmcs_write64(TSC_OFFSET, 11182 - vcpu->arch.tsc_offset + vmcs12->tsc_offset); 11183 - else 11184 - vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 11179 + vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 11180 + 11185 11181 if (kvm_has_tsc_control) 11186 11182 decache_tsc_multiplier(vmx); 11187 11183 ··· 11295 11297 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 11296 11298 11297 11299 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) 11300 + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 11301 + 11302 + if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) 11298 11303 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 11299 11304 11300 11305 if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) ··· 11421 11420 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 11422 11421 u32 msr_entry_idx; 11423 11422 u32 exit_qual; 11423 + int r; 11424 11424 11425 11425 enter_guest_mode(vcpu); 11426 11426 ··· 11431 11429 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 11432 11430 vmx_segment_cache_clear(vmx); 11433 11431 11434 - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { 11435 - leave_guest_mode(vcpu); 11436 - vmx_switch_vmcs(vcpu, &vmx->vmcs01); 11437 - nested_vmx_entry_failure(vcpu, vmcs12, 11438 - EXIT_REASON_INVALID_STATE, exit_qual); 11439 - return 1; 11440 - } 11432 + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 11433 + vcpu->arch.tsc_offset += vmcs12->tsc_offset; 11434 + 11435 + r = EXIT_REASON_INVALID_STATE; 11436 + if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) 11437 + goto fail; 11441 11438 11442 11439 nested_get_vmcs12_pages(vcpu, vmcs12); 11443 11440 11441 + r = EXIT_REASON_MSR_LOAD_FAIL; 11444 11442 msr_entry_idx = nested_vmx_load_msr(vcpu, 11445 11443 vmcs12->vm_entry_msr_load_addr, 11446 11444 vmcs12->vm_entry_msr_load_count); 11447 - if 
(msr_entry_idx) { 11448 - leave_guest_mode(vcpu); 11449 - vmx_switch_vmcs(vcpu, &vmx->vmcs01); 11450 - nested_vmx_entry_failure(vcpu, vmcs12, 11451 - EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); 11452 - return 1; 11453 - } 11445 + if (msr_entry_idx) 11446 + goto fail; 11454 11447 11455 11448 /* 11456 11449 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point ··· 11454 11457 * the success flag) when L2 exits (see nested_vmx_vmexit()). 11455 11458 */ 11456 11459 return 0; 11460 + 11461 + fail: 11462 + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 11463 + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 11464 + leave_guest_mode(vcpu); 11465 + vmx_switch_vmcs(vcpu, &vmx->vmcs01); 11466 + nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual); 11467 + return 1; 11457 11468 } 11458 11469 11459 11470 /* ··· 12033 12028 12034 12029 leave_guest_mode(vcpu); 12035 12030 12031 + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) 12032 + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 12033 + 12036 12034 if (likely(!vmx->fail)) { 12037 12035 if (exit_reason == -1) 12038 12036 sync_vmcs12(vcpu, vmcs12); ··· 12232 12224 12233 12225 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) 12234 12226 { 12235 - struct vcpu_vmx *vmx = to_vmx(vcpu); 12236 - u64 tscl = rdtsc(); 12237 - u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 12238 - u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 12227 + struct vcpu_vmx *vmx; 12228 + u64 tscl, guest_tscl, delta_tsc; 12229 + 12230 + if (kvm_mwait_in_guest(vcpu->kvm)) 12231 + return -EOPNOTSUPP; 12232 + 12233 + vmx = to_vmx(vcpu); 12234 + tscl = rdtsc(); 12235 + guest_tscl = kvm_read_l1_tsc(vcpu, tscl); 12236 + delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; 12239 12237 12240 12238 /* Convert to host delta tsc if tsc scaling is enabled */ 12241 12239 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && ··· 12547 12533 
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); 12548 12534 vcpu_info.vector = irq.vector; 12549 12535 12550 - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, 12536 + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, 12551 12537 vcpu_info.vector, vcpu_info.pi_desc_addr, set); 12552 12538 12553 12539 if (set) ··· 12726 12712 12727 12713 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, 12728 12714 12715 + .read_l1_tsc_offset = vmx_read_l1_tsc_offset, 12729 12716 .write_tsc_offset = vmx_write_tsc_offset, 12730 12717 12731 12718 .set_tdp_cr3 = vmx_set_cr3,
+12 -3
arch/x86/kvm/x86.c
··· 1490 1490 1491 1491 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) 1492 1492 { 1493 - u64 curr_offset = vcpu->arch.tsc_offset; 1493 + u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); 1494 1494 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; 1495 1495 } 1496 1496 ··· 1532 1532 1533 1533 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 1534 1534 { 1535 - return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); 1535 + u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); 1536 + 1537 + return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); 1536 1538 } 1537 1539 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 1538 1540 ··· 2364 2362 return 1; 2365 2363 vcpu->arch.smbase = data; 2366 2364 break; 2365 + case MSR_IA32_TSC: 2366 + kvm_write_tsc(vcpu, msr_info); 2367 + break; 2367 2368 case MSR_SMI_COUNT: 2368 2369 if (!msr_info->host_initiated) 2369 2370 return 1; ··· 2610 2605 case MSR_IA32_UCODE_REV: 2611 2606 msr_info->data = vcpu->arch.microcode_version; 2612 2607 break; 2608 + case MSR_IA32_TSC: 2609 + msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; 2610 + break; 2613 2611 case MSR_MTRRcap: 2614 2612 case 0x200 ... 0x2ff: 2615 2613 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); ··· 2827 2819 static inline bool kvm_can_mwait_in_guest(void) 2828 2820 { 2829 2821 return boot_cpu_has(X86_FEATURE_MWAIT) && 2830 - !boot_cpu_has_bug(X86_BUG_MONITOR); 2822 + !boot_cpu_has_bug(X86_BUG_MONITOR) && 2823 + boot_cpu_has(X86_FEATURE_ARAT); 2831 2824 } 2832 2825 2833 2826 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+6 -5
arch/x86/mm/dump_pagetables.c
··· 18 18 #include <linux/init.h> 19 19 #include <linux/sched.h> 20 20 #include <linux/seq_file.h> 21 + #include <linux/highmem.h> 21 22 22 23 #include <asm/pgtable.h> 23 24 ··· 335 334 pgprotval_t eff_in, unsigned long P) 336 335 { 337 336 int i; 338 - pte_t *start; 337 + pte_t *pte; 339 338 pgprotval_t prot, eff; 340 339 341 - start = (pte_t *)pmd_page_vaddr(addr); 342 340 for (i = 0; i < PTRS_PER_PTE; i++) { 343 - prot = pte_flags(*start); 344 - eff = effective_prot(eff_in, prot); 345 341 st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); 342 + pte = pte_offset_map(&addr, st->current_address); 343 + prot = pte_flags(*pte); 344 + eff = effective_prot(eff_in, prot); 346 345 note_page(m, st, __pgprot(prot), eff, 5); 347 - start++; 346 + pte_unmap(pte); 348 347 } 349 348 } 350 349 #ifdef CONFIG_KASAN
+1 -1
arch/x86/power/hibernate_64.c
··· 98 98 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); 99 99 } else { 100 100 /* No p4d for 4-level paging: point the pgd to the pud page table */ 101 - pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); 101 + pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); 102 102 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); 103 103 } 104 104
+2 -2
drivers/atm/iphase.c
··· 671 671 if ((vcc->pop) && (skb1->len != 0)) 672 672 { 673 673 vcc->pop(vcc, skb1); 674 - IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n", 674 + IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", 675 675 (long)skb1);) 676 676 } 677 677 else ··· 1665 1665 status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); 1666 1666 if (status & TRANSMIT_DONE){ 1667 1667 1668 - IF_EVENT(printk("Tansmit Done Intr logic run\n");) 1668 + IF_EVENT(printk("Transmit Done Intr logic run\n");) 1669 1669 spin_lock_irqsave(&iadev->tx_lock, flags); 1670 1670 ia_tx_poll(iadev); 1671 1671 spin_unlock_irqrestore(&iadev->tx_lock, flags);
+69 -32
drivers/block/rbd.c
··· 732 732 */ 733 733 enum { 734 734 Opt_queue_depth, 735 + Opt_lock_timeout, 735 736 Opt_last_int, 736 737 /* int args above */ 737 738 Opt_last_string, ··· 741 740 Opt_read_write, 742 741 Opt_lock_on_read, 743 742 Opt_exclusive, 743 + Opt_notrim, 744 744 Opt_err 745 745 }; 746 746 747 747 static match_table_t rbd_opts_tokens = { 748 748 {Opt_queue_depth, "queue_depth=%d"}, 749 + {Opt_lock_timeout, "lock_timeout=%d"}, 749 750 /* int args above */ 750 751 /* string args above */ 751 752 {Opt_read_only, "read_only"}, ··· 756 753 {Opt_read_write, "rw"}, /* Alternate spelling */ 757 754 {Opt_lock_on_read, "lock_on_read"}, 758 755 {Opt_exclusive, "exclusive"}, 756 + {Opt_notrim, "notrim"}, 759 757 {Opt_err, NULL} 760 758 }; 761 759 762 760 struct rbd_options { 763 761 int queue_depth; 762 + unsigned long lock_timeout; 764 763 bool read_only; 765 764 bool lock_on_read; 766 765 bool exclusive; 766 + bool trim; 767 767 }; 768 768 769 769 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ 770 + #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ 770 771 #define RBD_READ_ONLY_DEFAULT false 771 772 #define RBD_LOCK_ON_READ_DEFAULT false 772 773 #define RBD_EXCLUSIVE_DEFAULT false 774 + #define RBD_TRIM_DEFAULT true 773 775 774 776 static int parse_rbd_opts_token(char *c, void *private) 775 777 { ··· 804 796 } 805 797 rbd_opts->queue_depth = intval; 806 798 break; 799 + case Opt_lock_timeout: 800 + /* 0 is "wait forever" (i.e. 
infinite timeout) */ 801 + if (intval < 0 || intval > INT_MAX / 1000) { 802 + pr_err("lock_timeout out of range\n"); 803 + return -EINVAL; 804 + } 805 + rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000); 806 + break; 807 807 case Opt_read_only: 808 808 rbd_opts->read_only = true; 809 809 break; ··· 823 807 break; 824 808 case Opt_exclusive: 825 809 rbd_opts->exclusive = true; 810 + break; 811 + case Opt_notrim: 812 + rbd_opts->trim = false; 826 813 break; 827 814 default: 828 815 /* libceph prints "bad option" msg */ ··· 1411 1392 case OBJ_OP_DISCARD: 1412 1393 return true; 1413 1394 default: 1414 - rbd_assert(0); 1395 + BUG(); 1415 1396 } 1416 1397 } 1417 1398 ··· 2485 2466 } 2486 2467 return false; 2487 2468 default: 2488 - rbd_assert(0); 2469 + BUG(); 2489 2470 } 2490 2471 } 2491 2472 ··· 2513 2494 } 2514 2495 return false; 2515 2496 default: 2516 - rbd_assert(0); 2497 + BUG(); 2517 2498 } 2518 2499 } 2519 2500 ··· 3552 3533 /* 3553 3534 * lock_rwsem must be held for read 3554 3535 */ 3555 - static void rbd_wait_state_locked(struct rbd_device *rbd_dev) 3536 + static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire) 3556 3537 { 3557 3538 DEFINE_WAIT(wait); 3539 + unsigned long timeout; 3540 + int ret = 0; 3541 + 3542 + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) 3543 + return -EBLACKLISTED; 3544 + 3545 + if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) 3546 + return 0; 3547 + 3548 + if (!may_acquire) { 3549 + rbd_warn(rbd_dev, "exclusive lock required"); 3550 + return -EROFS; 3551 + } 3558 3552 3559 3553 do { 3560 3554 /* ··· 3579 3547 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait, 3580 3548 TASK_UNINTERRUPTIBLE); 3581 3549 up_read(&rbd_dev->lock_rwsem); 3582 - schedule(); 3550 + timeout = schedule_timeout(ceph_timeout_jiffies( 3551 + rbd_dev->opts->lock_timeout)); 3583 3552 down_read(&rbd_dev->lock_rwsem); 3584 - } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3585 - !test_bit(RBD_DEV_FLAG_BLACKLISTED, 
&rbd_dev->flags)); 3553 + if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3554 + ret = -EBLACKLISTED; 3555 + break; 3556 + } 3557 + if (!timeout) { 3558 + rbd_warn(rbd_dev, "timed out waiting for lock"); 3559 + ret = -ETIMEDOUT; 3560 + break; 3561 + } 3562 + } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED); 3586 3563 3587 3564 finish_wait(&rbd_dev->lock_waitq, &wait); 3565 + return ret; 3588 3566 } 3589 3567 3590 3568 static void rbd_queue_workfn(struct work_struct *work) ··· 3680 3638 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read); 3681 3639 if (must_be_locked) { 3682 3640 down_read(&rbd_dev->lock_rwsem); 3683 - if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED && 3684 - !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3685 - if (rbd_dev->opts->exclusive) { 3686 - rbd_warn(rbd_dev, "exclusive lock required"); 3687 - result = -EROFS; 3688 - goto err_unlock; 3689 - } 3690 - rbd_wait_state_locked(rbd_dev); 3691 - } 3692 - if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 3693 - result = -EBLACKLISTED; 3641 + result = rbd_wait_state_locked(rbd_dev, 3642 + !rbd_dev->opts->exclusive); 3643 + if (result) 3694 3644 goto err_unlock; 3695 - } 3696 3645 } 3697 3646 3698 3647 img_request = rbd_img_request_create(rbd_dev, op_type, snapc); ··· 3935 3902 { 3936 3903 struct gendisk *disk; 3937 3904 struct request_queue *q; 3938 - u64 segment_size; 3905 + unsigned int objset_bytes = 3906 + rbd_dev->layout.object_size * rbd_dev->layout.stripe_count; 3939 3907 int err; 3940 3908 3941 3909 /* create gendisk info */ ··· 3976 3942 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 3977 3943 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ 3978 3944 3979 - /* set io sizes to object size */ 3980 - segment_size = rbd_obj_bytes(&rbd_dev->header); 3981 - blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 3945 + blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); 3982 3946 q->limits.max_sectors = queue_max_hw_sectors(q); 3983 3947 
blk_queue_max_segments(q, USHRT_MAX); 3984 3948 blk_queue_max_segment_size(q, UINT_MAX); 3985 - blk_queue_io_min(q, segment_size); 3986 - blk_queue_io_opt(q, segment_size); 3949 + blk_queue_io_min(q, objset_bytes); 3950 + blk_queue_io_opt(q, objset_bytes); 3987 3951 3988 - /* enable the discard support */ 3989 - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 3990 - q->limits.discard_granularity = segment_size; 3991 - blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 3992 - blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 3952 + if (rbd_dev->opts->trim) { 3953 + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 3954 + q->limits.discard_granularity = objset_bytes; 3955 + blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); 3956 + blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); 3957 + } 3993 3958 3994 3959 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 3995 3960 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; ··· 5212 5179 5213 5180 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; 5214 5181 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; 5182 + rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; 5215 5183 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; 5216 5184 rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT; 5185 + rbd_opts->trim = RBD_TRIM_DEFAULT; 5217 5186 5218 5187 copts = ceph_parse_options(options, mon_addrs, 5219 5188 mon_addrs + mon_addrs_size - 1, ··· 5251 5216 5252 5217 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) 5253 5218 { 5219 + int ret; 5220 + 5254 5221 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) { 5255 5222 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled"); 5256 5223 return -EINVAL; ··· 5260 5223 5261 5224 /* FIXME: "rbd map --exclusive" should be in interruptible */ 5262 5225 down_read(&rbd_dev->lock_rwsem); 5263 - rbd_wait_state_locked(rbd_dev); 5226 + ret = rbd_wait_state_locked(rbd_dev, true); 5264 5227 up_read(&rbd_dev->lock_rwsem); 5265 
- if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) { 5228 + if (ret) { 5266 5229 rbd_warn(rbd_dev, "failed to acquire exclusive lock"); 5267 5230 return -EROFS; 5268 5231 }
+97 -31
drivers/char/random.c
··· 427 427 * its value (from 0->1->2). 428 428 */ 429 429 static int crng_init = 0; 430 - #define crng_ready() (likely(crng_init > 0)) 430 + #define crng_ready() (likely(crng_init > 1)) 431 431 static int crng_init_cnt = 0; 432 + static unsigned long crng_global_init_time = 0; 432 433 #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) 433 434 static void _extract_crng(struct crng_state *crng, 434 435 __u32 out[CHACHA20_BLOCK_WORDS]); ··· 788 787 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 789 788 } 790 789 790 + #ifdef CONFIG_NUMA 791 + static void numa_crng_init(void) 792 + { 793 + int i; 794 + struct crng_state *crng; 795 + struct crng_state **pool; 796 + 797 + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); 798 + for_each_online_node(i) { 799 + crng = kmalloc_node(sizeof(struct crng_state), 800 + GFP_KERNEL | __GFP_NOFAIL, i); 801 + spin_lock_init(&crng->lock); 802 + crng_initialize(crng); 803 + pool[i] = crng; 804 + } 805 + mb(); 806 + if (cmpxchg(&crng_node_pool, NULL, pool)) { 807 + for_each_node(i) 808 + kfree(pool[i]); 809 + kfree(pool); 810 + } 811 + } 812 + #else 813 + static void numa_crng_init(void) {} 814 + #endif 815 + 816 + /* 817 + * crng_fast_load() can be called by code in the interrupt service 818 + * path. So we can't afford to dilly-dally. 819 + */ 791 820 static int crng_fast_load(const char *cp, size_t len) 792 821 { 793 822 unsigned long flags; ··· 825 794 826 795 if (!spin_trylock_irqsave(&primary_crng.lock, flags)) 827 796 return 0; 828 - if (crng_ready()) { 797 + if (crng_init != 0) { 829 798 spin_unlock_irqrestore(&primary_crng.lock, flags); 830 799 return 0; 831 800 } ··· 841 810 wake_up_interruptible(&crng_init_wait); 842 811 pr_notice("random: fast init done\n"); 843 812 } 813 + return 1; 814 + } 815 + 816 + /* 817 + * crng_slow_load() is called by add_device_randomness, which has two 818 + * attributes. 
(1) We can't trust the buffer passed to it is 819 + * guaranteed to be unpredictable (so it might not have any entropy at 820 + * all), and (2) it doesn't have the performance constraints of 821 + * crng_fast_load(). 822 + * 823 + * So we do something more comprehensive which is guaranteed to touch 824 + * all of the primary_crng's state, and which uses a LFSR with a 825 + * period of 255 as part of the mixing algorithm. Finally, we do 826 + * *not* advance crng_init_cnt since buffer we may get may be something 827 + * like a fixed DMI table (for example), which might very well be 828 + * unique to the machine, but is otherwise unvarying. 829 + */ 830 + static int crng_slow_load(const char *cp, size_t len) 831 + { 832 + unsigned long flags; 833 + static unsigned char lfsr = 1; 834 + unsigned char tmp; 835 + unsigned i, max = CHACHA20_KEY_SIZE; 836 + const char * src_buf = cp; 837 + char * dest_buf = (char *) &primary_crng.state[4]; 838 + 839 + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) 840 + return 0; 841 + if (crng_init != 0) { 842 + spin_unlock_irqrestore(&primary_crng.lock, flags); 843 + return 0; 844 + } 845 + if (len > max) 846 + max = len; 847 + 848 + for (i = 0; i < max ; i++) { 849 + tmp = lfsr; 850 + lfsr >>= 1; 851 + if (tmp & 1) 852 + lfsr ^= 0xE1; 853 + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; 854 + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; 855 + lfsr += (tmp << 3) | (tmp >> 5); 856 + } 857 + spin_unlock_irqrestore(&primary_crng.lock, flags); 844 858 return 1; 845 859 } 846 860 ··· 907 831 _crng_backtrack_protect(&primary_crng, buf.block, 908 832 CHACHA20_KEY_SIZE); 909 833 } 910 - spin_lock_irqsave(&primary_crng.lock, flags); 834 + spin_lock_irqsave(&crng->lock, flags); 911 835 for (i = 0; i < 8; i++) { 912 836 unsigned long rv; 913 837 if (!arch_get_random_seed_long(&rv) && ··· 917 841 } 918 842 memzero_explicit(&buf, sizeof(buf)); 919 843 crng->init_time = jiffies; 920 - spin_unlock_irqrestore(&primary_crng.lock, flags); 
844 + spin_unlock_irqrestore(&crng->lock, flags); 921 845 if (crng == &primary_crng && crng_init < 2) { 922 846 invalidate_batched_entropy(); 847 + numa_crng_init(); 923 848 crng_init = 2; 924 849 process_random_ready_list(); 925 850 wake_up_interruptible(&crng_init_wait); ··· 933 856 { 934 857 unsigned long v, flags; 935 858 936 - if (crng_init > 1 && 937 - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) 859 + if (crng_ready() && 860 + (time_after(crng_global_init_time, crng->init_time) || 861 + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) 938 862 crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); 939 863 spin_lock_irqsave(&crng->lock, flags); 940 864 if (arch_get_random_long(&v)) ··· 1059 981 unsigned long time = random_get_entropy() ^ jiffies; 1060 982 unsigned long flags; 1061 983 1062 - if (!crng_ready()) { 1063 - crng_fast_load(buf, size); 1064 - return; 1065 - } 984 + if (!crng_ready() && size) 985 + crng_slow_load(buf, size); 1066 986 1067 987 trace_add_device_randomness(size, _RET_IP_); 1068 988 spin_lock_irqsave(&input_pool.lock, flags); ··· 1215 1139 fast_mix(fast_pool); 1216 1140 add_interrupt_bench(cycles); 1217 1141 1218 - if (!crng_ready()) { 1142 + if (unlikely(crng_init == 0)) { 1219 1143 if ((fast_pool->count >= 64) && 1220 1144 crng_fast_load((char *) fast_pool->pool, 1221 1145 sizeof(fast_pool->pool))) { ··· 1756 1680 */ 1757 1681 static int rand_initialize(void) 1758 1682 { 1759 - #ifdef CONFIG_NUMA 1760 - int i; 1761 - struct crng_state *crng; 1762 - struct crng_state **pool; 1763 - #endif 1764 - 1765 1683 init_std_data(&input_pool); 1766 1684 init_std_data(&blocking_pool); 1767 1685 crng_initialize(&primary_crng); 1768 - 1769 - #ifdef CONFIG_NUMA 1770 - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); 1771 - for_each_online_node(i) { 1772 - crng = kmalloc_node(sizeof(struct crng_state), 1773 - GFP_KERNEL | __GFP_NOFAIL, i); 1774 - spin_lock_init(&crng->lock); 1775 - 
crng_initialize(crng); 1776 - pool[i] = crng; 1777 - } 1778 - mb(); 1779 - crng_node_pool = pool; 1780 - #endif 1686 + crng_global_init_time = jiffies; 1781 1687 return 0; 1782 1688 } 1783 1689 early_initcall(rand_initialize); ··· 1932 1874 return -EPERM; 1933 1875 input_pool.entropy_count = 0; 1934 1876 blocking_pool.entropy_count = 0; 1877 + return 0; 1878 + case RNDRESEEDCRNG: 1879 + if (!capable(CAP_SYS_ADMIN)) 1880 + return -EPERM; 1881 + if (crng_init < 2) 1882 + return -ENODATA; 1883 + crng_reseed(&primary_crng, NULL); 1884 + crng_global_init_time = jiffies - 1; 1935 1885 return 0; 1936 1886 default: 1937 1887 return -EINVAL; ··· 2278 2212 { 2279 2213 struct entropy_store *poolp = &input_pool; 2280 2214 2281 - if (!crng_ready()) { 2215 + if (unlikely(crng_init == 0)) { 2282 2216 crng_fast_load(buffer, count); 2283 2217 return; 2284 2218 }
+8
drivers/clocksource/Kconfig
··· 133 133 help 134 134 Enables support for the VT8500 driver. 135 135 136 + config NPCM7XX_TIMER 137 + bool "NPCM7xx timer driver" if COMPILE_TEST 138 + depends on HAS_IOMEM 139 + select CLKSRC_MMIO 140 + help 141 + Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, 142 + While TIMER0 serves as clockevent and TIMER1 serves as clocksource. 143 + 136 144 config CADENCE_TTC_TIMER 137 145 bool "Cadence TTC timer driver" if COMPILE_TEST 138 146 depends on COMMON_CLK
+1
drivers/clocksource/Makefile
··· 56 56 obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o 57 57 obj-$(CONFIG_OWL_TIMER) += owl-timer.o 58 58 obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o 59 + obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o 59 60 60 61 obj-$(CONFIG_ARC_TIMERS) += arc_timer.o 61 62 obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+34 -11
drivers/clocksource/timer-imx-tpm.c
··· 17 17 #include <linux/of_irq.h> 18 18 #include <linux/sched_clock.h> 19 19 20 + #define TPM_PARAM 0x4 21 + #define TPM_PARAM_WIDTH_SHIFT 16 22 + #define TPM_PARAM_WIDTH_MASK (0xff << 16) 20 23 #define TPM_SC 0x10 21 24 #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) 22 25 #define TPM_SC_CMOD_DIV_DEFAULT 0x3 26 + #define TPM_SC_CMOD_DIV_MAX 0x7 27 + #define TPM_SC_TOF_MASK (0x1 << 7) 23 28 #define TPM_CNT 0x14 24 29 #define TPM_MOD 0x18 25 30 #define TPM_STATUS 0x1c ··· 34 29 #define TPM_C0SC_MODE_SHIFT 2 35 30 #define TPM_C0SC_MODE_MASK 0x3c 36 31 #define TPM_C0SC_MODE_SW_COMPARE 0x4 32 + #define TPM_C0SC_CHF_MASK (0x1 << 7) 37 33 #define TPM_C0V 0x24 38 34 35 + static int counter_width; 36 + static int rating; 39 37 static void __iomem *timer_base; 40 38 static struct clock_event_device clockevent_tpm; 41 39 ··· 91 83 tpm_delay_timer.freq = rate; 92 84 register_current_timer_delay(&tpm_delay_timer); 93 85 94 - sched_clock_register(tpm_read_sched_clock, 32, rate); 86 + sched_clock_register(tpm_read_sched_clock, counter_width, rate); 95 87 96 88 return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm", 97 - rate, 200, 32, clocksource_mmio_readl_up); 89 + rate, rating, counter_width, 90 + clocksource_mmio_readl_up); 98 91 } 99 92 100 93 static int tpm_set_next_event(unsigned long delta, ··· 114 105 * of writing CNT registers which may cause the min_delta event got 115 106 * missed, so we need add a ETIME check here in case it happened. 116 107 */ 117 - return (int)((next - now) <= 0) ? -ETIME : 0; 108 + return (int)(next - now) <= 0 ? 
-ETIME : 0; 118 109 } 119 110 120 111 static int tpm_set_state_oneshot(struct clock_event_device *evt) ··· 148 139 .set_state_oneshot = tpm_set_state_oneshot, 149 140 .set_next_event = tpm_set_next_event, 150 141 .set_state_shutdown = tpm_set_state_shutdown, 151 - .rating = 200, 152 142 }; 153 143 154 144 static int __init tpm_clockevent_init(unsigned long rate, int irq) ··· 157 149 ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, 158 150 "i.MX7ULP TPM Timer", &clockevent_tpm); 159 151 152 + clockevent_tpm.rating = rating; 160 153 clockevent_tpm.cpumask = cpumask_of(0); 161 154 clockevent_tpm.irq = irq; 162 - clockevents_config_and_register(&clockevent_tpm, 163 - rate, 300, 0xfffffffe); 155 + clockevents_config_and_register(&clockevent_tpm, rate, 300, 156 + GENMASK(counter_width - 1, 1)); 164 157 165 158 return ret; 166 159 } ··· 188 179 ipg = of_clk_get_by_name(np, "ipg"); 189 180 per = of_clk_get_by_name(np, "per"); 190 181 if (IS_ERR(ipg) || IS_ERR(per)) { 191 - pr_err("tpm: failed to get igp or per clk\n"); 182 + pr_err("tpm: failed to get ipg or per clk\n"); 192 183 ret = -ENODEV; 193 184 goto err_clk_get; 194 185 } ··· 206 197 goto err_per_clk_enable; 207 198 } 208 199 200 + counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK) 201 + >> TPM_PARAM_WIDTH_SHIFT; 202 + /* use rating 200 for 32-bit counter and 150 for 16-bit counter */ 203 + rating = counter_width == 0x20 ? 
200 : 150; 204 + 209 205 /* 210 206 * Initialize tpm module to a known state 211 207 * 1) Counter disabled ··· 219 205 * 4) Channel0 disabled 220 206 * 5) DMA transfers disabled 221 207 */ 208 + /* make sure counter is disabled */ 222 209 writel(0, timer_base + TPM_SC); 210 + /* TOF is W1C */ 211 + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); 223 212 writel(0, timer_base + TPM_CNT); 224 - writel(0, timer_base + TPM_C0SC); 213 + /* CHF is W1C */ 214 + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); 225 215 226 - /* increase per cnt, div 8 by default */ 227 - writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, 216 + /* 217 + * increase per cnt, 218 + * div 8 for 32-bit counter and div 128 for 16-bit counter 219 + */ 220 + writel(TPM_SC_CMOD_INC_PER_CNT | 221 + (counter_width == 0x20 ? 222 + TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX), 228 223 timer_base + TPM_SC); 229 224 230 225 /* set MOD register to maximum for free running mode */ 231 - writel(0xffffffff, timer_base + TPM_MOD); 226 + writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD); 232 227 233 228 rate = clk_get_rate(per) >> 3; 234 229 ret = tpm_clocksource_init(rate);
+215
drivers/clocksource/timer-npcm7xx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com 4 + * All rights reserved. 5 + * 6 + * Copyright 2017 Google, Inc. 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/sched.h> 11 + #include <linux/init.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/err.h> 14 + #include <linux/clk.h> 15 + #include <linux/io.h> 16 + #include <linux/clockchips.h> 17 + #include <linux/of_irq.h> 18 + #include <linux/of_address.h> 19 + #include "timer-of.h" 20 + 21 + /* Timers registers */ 22 + #define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */ 23 + #define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */ 24 + #define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */ 25 + #define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */ 26 + #define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */ 27 + #define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */ 28 + 29 + /* Timers control */ 30 + #define NPCM7XX_Tx_RESETINT 0x1f 31 + #define NPCM7XX_Tx_PERIOD BIT(27) 32 + #define NPCM7XX_Tx_INTEN BIT(29) 33 + #define NPCM7XX_Tx_COUNTEN BIT(30) 34 + #define NPCM7XX_Tx_ONESHOT 0x0 35 + #define NPCM7XX_Tx_OPER GENMASK(3, 27) 36 + #define NPCM7XX_Tx_MIN_PRESCALE 0x1 37 + #define NPCM7XX_Tx_TDR_MASK_BITS 24 38 + #define NPCM7XX_Tx_MAX_CNT 0xFFFFFF 39 + #define NPCM7XX_T0_CLR_INT 0x1 40 + #define NPCM7XX_Tx_CLR_CSR 0x0 41 + 42 + /* Timers operating mode */ 43 + #define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \ 44 + NPCM7XX_Tx_INTEN | \ 45 + NPCM7XX_Tx_MIN_PRESCALE) 46 + 47 + #define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \ 48 + NPCM7XX_Tx_INTEN | \ 49 + NPCM7XX_Tx_MIN_PRESCALE) 50 + 51 + #define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \ 52 + NPCM7XX_Tx_MIN_PRESCALE) 53 + 54 + #define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE) 55 + 56 + static 
int npcm7xx_timer_resume(struct clock_event_device *evt) 57 + { 58 + struct timer_of *to = to_timer_of(evt); 59 + u32 val; 60 + 61 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 62 + val |= NPCM7XX_Tx_COUNTEN; 63 + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); 64 + 65 + return 0; 66 + } 67 + 68 + static int npcm7xx_timer_shutdown(struct clock_event_device *evt) 69 + { 70 + struct timer_of *to = to_timer_of(evt); 71 + u32 val; 72 + 73 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 74 + val &= ~NPCM7XX_Tx_COUNTEN; 75 + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); 76 + 77 + return 0; 78 + } 79 + 80 + static int npcm7xx_timer_oneshot(struct clock_event_device *evt) 81 + { 82 + struct timer_of *to = to_timer_of(evt); 83 + u32 val; 84 + 85 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 86 + val &= ~NPCM7XX_Tx_OPER; 87 + 88 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 89 + val |= NPCM7XX_START_ONESHOT_Tx; 90 + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); 91 + 92 + return 0; 93 + } 94 + 95 + static int npcm7xx_timer_periodic(struct clock_event_device *evt) 96 + { 97 + struct timer_of *to = to_timer_of(evt); 98 + u32 val; 99 + 100 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 101 + val &= ~NPCM7XX_Tx_OPER; 102 + 103 + writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0); 104 + val |= NPCM7XX_START_PERIODIC_Tx; 105 + 106 + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); 107 + 108 + return 0; 109 + } 110 + 111 + static int npcm7xx_clockevent_set_next_event(unsigned long evt, 112 + struct clock_event_device *clk) 113 + { 114 + struct timer_of *to = to_timer_of(clk); 115 + u32 val; 116 + 117 + writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0); 118 + val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0); 119 + val |= NPCM7XX_START_Tx; 120 + writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0); 121 + 122 + return 0; 123 + } 124 + 125 + static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id) 126 + { 
127 + struct clock_event_device *evt = (struct clock_event_device *)dev_id; 128 + struct timer_of *to = to_timer_of(evt); 129 + 130 + writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR); 131 + 132 + evt->event_handler(evt); 133 + 134 + return IRQ_HANDLED; 135 + } 136 + 137 + static struct timer_of npcm7xx_to = { 138 + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, 139 + 140 + .clkevt = { 141 + .name = "npcm7xx-timer0", 142 + .features = CLOCK_EVT_FEAT_PERIODIC | 143 + CLOCK_EVT_FEAT_ONESHOT, 144 + .set_next_event = npcm7xx_clockevent_set_next_event, 145 + .set_state_shutdown = npcm7xx_timer_shutdown, 146 + .set_state_periodic = npcm7xx_timer_periodic, 147 + .set_state_oneshot = npcm7xx_timer_oneshot, 148 + .tick_resume = npcm7xx_timer_resume, 149 + .rating = 300, 150 + }, 151 + 152 + .of_irq = { 153 + .handler = npcm7xx_timer0_interrupt, 154 + .flags = IRQF_TIMER | IRQF_IRQPOLL, 155 + }, 156 + }; 157 + 158 + static void __init npcm7xx_clockevents_init(void) 159 + { 160 + writel(NPCM7XX_DEFAULT_CSR, 161 + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0); 162 + 163 + writel(NPCM7XX_Tx_RESETINT, 164 + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR); 165 + 166 + npcm7xx_to.clkevt.cpumask = cpumask_of(0); 167 + clockevents_config_and_register(&npcm7xx_to.clkevt, 168 + timer_of_rate(&npcm7xx_to), 169 + 0x1, NPCM7XX_Tx_MAX_CNT); 170 + } 171 + 172 + static void __init npcm7xx_clocksource_init(void) 173 + { 174 + u32 val; 175 + 176 + writel(NPCM7XX_DEFAULT_CSR, 177 + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); 178 + writel(NPCM7XX_Tx_MAX_CNT, 179 + timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1); 180 + 181 + val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); 182 + val |= NPCM7XX_START_Tx; 183 + writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1); 184 + 185 + clocksource_mmio_init(timer_of_base(&npcm7xx_to) + 186 + NPCM7XX_REG_TDR1, 187 + "npcm7xx-timer1", timer_of_rate(&npcm7xx_to), 188 + 200, (unsigned 
int)NPCM7XX_Tx_TDR_MASK_BITS, 189 + clocksource_mmio_readl_down); 190 + } 191 + 192 + static int __init npcm7xx_timer_init(struct device_node *np) 193 + { 194 + int ret; 195 + 196 + ret = timer_of_init(np, &npcm7xx_to); 197 + if (ret) 198 + return ret; 199 + 200 + /* Clock input is divided by PRESCALE + 1 before it is fed */ 201 + /* to the counter */ 202 + npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate / 203 + (NPCM7XX_Tx_MIN_PRESCALE + 1); 204 + 205 + npcm7xx_clocksource_init(); 206 + npcm7xx_clockevents_init(); 207 + 208 + pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ", 209 + timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to)); 210 + 211 + return 0; 212 + } 213 + 214 + TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init); 215 +
+2
drivers/dax/device.c
··· 19 19 #include <linux/dax.h> 20 20 #include <linux/fs.h> 21 21 #include <linux/mm.h> 22 + #include <linux/mman.h> 22 23 #include "dax-private.h" 23 24 #include "dax.h" 24 25 ··· 541 540 .release = dax_release, 542 541 .get_unmapped_area = dax_get_unmapped_area, 543 542 .mmap = dax_mmap, 543 + .mmap_supported_flags = MAP_SYNC, 544 544 }; 545 545 546 546 static void dev_dax_release(struct device *dev)
-7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
··· 138 138 lut = (struct drm_color_lut *)blob->data; 139 139 lut_size = blob->length / sizeof(struct drm_color_lut); 140 140 141 - if (__is_lut_linear(lut, lut_size)) { 142 - /* Set to bypass if lut is set to linear */ 143 - stream->out_transfer_func->type = TF_TYPE_BYPASS; 144 - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; 145 - return 0; 146 - } 147 - 148 141 gamma = dc_create_gamma(); 149 142 if (!gamma) 150 143 return -ENOMEM;
+10 -6
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 4743 4743 4744 4744 for (i=0; i < dep_table->count; i++) { 4745 4745 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4746 - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4747 - break; 4746 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; 4747 + return; 4748 4748 } 4749 4749 } 4750 - if (i == dep_table->count) 4750 + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 4751 4751 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4752 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 4753 + } 4752 4754 4753 4755 dep_table = table_info->vdd_dep_on_sclk; 4754 4756 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); 4755 4757 for (i=0; i < dep_table->count; i++) { 4756 4758 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 4757 - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4758 - break; 4759 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; 4760 + return; 4759 4761 } 4760 4762 } 4761 - if (i == dep_table->count) 4763 + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 4762 4764 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 4765 + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 4766 + } 4763 4767 } 4764 4768 4765 4769 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+3 -1
drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
··· 412 412 QuadraticInt_t ReservedEquation2; 413 413 QuadraticInt_t ReservedEquation3; 414 414 415 + uint16_t MinVoltageUlvGfx; 416 + uint16_t MinVoltageUlvSoc; 415 417 416 - uint32_t Reserved[15]; 418 + uint32_t Reserved[14]; 417 419 418 420 419 421
+32 -7
drivers/gpu/drm/drm_dp_dual_mode_helper.c
··· 350 350 { 351 351 uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; 352 352 ssize_t ret; 353 + int retry; 353 354 354 355 if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) 355 356 return 0; 356 357 357 - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 358 - &tmds_oen, sizeof(tmds_oen)); 359 - if (ret) { 360 - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", 361 - enable ? "enable" : "disable"); 362 - return ret; 358 + /* 359 + * LSPCON adapters in low-power state may ignore the first write, so 360 + * read back and verify the written value a few times. 361 + */ 362 + for (retry = 0; retry < 3; retry++) { 363 + uint8_t tmp; 364 + 365 + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 366 + &tmds_oen, sizeof(tmds_oen)); 367 + if (ret) { 368 + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", 369 + enable ? "enable" : "disable", 370 + retry + 1); 371 + return ret; 372 + } 373 + 374 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, 375 + &tmp, sizeof(tmp)); 376 + if (ret) { 377 + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", 378 + enable ? "enabling" : "disabling", 379 + retry + 1); 380 + return ret; 381 + } 382 + 383 + if (tmp == tmds_oen) 384 + return 0; 363 385 } 364 386 365 - return 0; 387 + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", 388 + enable ? "enabling" : "disabling"); 389 + 390 + return -EIO; 366 391 } 367 392 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); 368 393
+14 -59
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 18 18 #include <drm/drm_fb_helper.h> 19 19 #include <drm/drm_atomic.h> 20 20 #include <drm/drm_atomic_helper.h> 21 + #include <drm/drm_gem_framebuffer_helper.h> 21 22 #include <uapi/drm/exynos_drm.h> 22 23 23 24 #include "exynos_drm_drv.h" ··· 26 25 #include "exynos_drm_fbdev.h" 27 26 #include "exynos_drm_iommu.h" 28 27 #include "exynos_drm_crtc.h" 29 - 30 - #define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) 31 - 32 - /* 33 - * exynos specific framebuffer structure. 34 - * 35 - * @fb: drm framebuffer obejct. 36 - * @exynos_gem: array of exynos specific gem object containing a gem object. 37 - */ 38 - struct exynos_drm_fb { 39 - struct drm_framebuffer fb; 40 - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 41 - dma_addr_t dma_addr[MAX_FB_BUFFER]; 42 - }; 43 28 44 29 static int check_fb_gem_memory_type(struct drm_device *drm_dev, 45 30 struct exynos_drm_gem *exynos_gem) ··· 53 66 return 0; 54 67 } 55 68 56 - static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 57 - { 58 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 59 - unsigned int i; 60 - 61 - drm_framebuffer_cleanup(fb); 62 - 63 - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { 64 - struct drm_gem_object *obj; 65 - 66 - if (exynos_fb->exynos_gem[i] == NULL) 67 - continue; 68 - 69 - obj = &exynos_fb->exynos_gem[i]->base; 70 - drm_gem_object_unreference_unlocked(obj); 71 - } 72 - 73 - kfree(exynos_fb); 74 - exynos_fb = NULL; 75 - } 76 - 77 - static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, 78 - struct drm_file *file_priv, 79 - unsigned int *handle) 80 - { 81 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 82 - 83 - return drm_gem_handle_create(file_priv, 84 - &exynos_fb->exynos_gem[0]->base, handle); 85 - } 86 - 87 69 static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { 88 - .destroy = exynos_drm_fb_destroy, 89 - .create_handle = exynos_drm_fb_create_handle, 70 + .destroy = drm_gem_fb_destroy, 71 + .create_handle = 
drm_gem_fb_create_handle, 90 72 }; 91 73 92 74 struct drm_framebuffer * ··· 64 108 struct exynos_drm_gem **exynos_gem, 65 109 int count) 66 110 { 67 - struct exynos_drm_fb *exynos_fb; 111 + struct drm_framebuffer *fb; 68 112 int i; 69 113 int ret; 70 114 71 - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 72 - if (!exynos_fb) 115 + fb = kzalloc(sizeof(*fb), GFP_KERNEL); 116 + if (!fb) 73 117 return ERR_PTR(-ENOMEM); 74 118 75 119 for (i = 0; i < count; i++) { ··· 77 121 if (ret < 0) 78 122 goto err; 79 123 80 - exynos_fb->exynos_gem[i] = exynos_gem[i]; 81 - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr 82 - + mode_cmd->offsets[i]; 124 + fb->obj[i] = &exynos_gem[i]->base; 83 125 } 84 126 85 - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); 127 + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); 86 128 87 - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 129 + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); 88 130 if (ret < 0) { 89 131 DRM_ERROR("failed to initialize framebuffer\n"); 90 132 goto err; 91 133 } 92 134 93 - return &exynos_fb->fb; 135 + return fb; 94 136 95 137 err: 96 - kfree(exynos_fb); 138 + kfree(fb); 97 139 return ERR_PTR(ret); 98 140 } 99 141 ··· 145 191 146 192 dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) 147 193 { 148 - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 194 + struct exynos_drm_gem *exynos_gem; 149 195 150 196 if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) 151 197 return 0; 152 198 153 - return exynos_fb->dma_addr[index]; 199 + exynos_gem = to_exynos_gem(fb->obj[index]); 200 + return exynos_gem->dma_addr + fb->offsets[index]; 154 201 } 155 202 156 203 static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
+1
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 1080 1080 { 1081 1081 set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, 1082 1082 s->workload->pending_events); 1083 + patch_value(s, cmd_ptr(s, 0), MI_NOOP); 1083 1084 return 0; 1084 1085 } 1085 1086
+10
drivers/gpu/drm/i915/gvt/display.c
··· 169 169 static void emulate_monitor_status_change(struct intel_vgpu *vgpu) 170 170 { 171 171 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 172 + int pipe; 173 + 172 174 vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | 173 175 SDE_PORTC_HOTPLUG_CPT | 174 176 SDE_PORTD_HOTPLUG_CPT); ··· 268 266 /* Clear host CRT status, so guest couldn't detect this host CRT. */ 269 267 if (IS_BROADWELL(dev_priv)) 270 268 vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; 269 + 270 + /* Disable Primary/Sprite/Cursor plane */ 271 + for_each_pipe(dev_priv, pipe) { 272 + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; 273 + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; 274 + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; 275 + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; 276 + } 271 277 272 278 vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 273 279 }
+1
drivers/gpu/drm/i915/gvt/dmabuf.c
··· 323 323 struct intel_vgpu_fb_info *fb_info) 324 324 { 325 325 gvt_dmabuf->drm_format = fb_info->drm_format; 326 + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; 326 327 gvt_dmabuf->width = fb_info->width; 327 328 gvt_dmabuf->height = fb_info->height; 328 329 gvt_dmabuf->stride = fb_info->stride;
+9 -18
drivers/gpu/drm/i915/gvt/fb_decoder.c
··· 245 245 plane->hw_format = fmt; 246 246 247 247 plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; 248 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 249 - gvt_vgpu_err("invalid gma address: %lx\n", 250 - (unsigned long)plane->base); 248 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 251 249 return -EINVAL; 252 - } 253 250 254 251 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 255 252 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 256 - gvt_vgpu_err("invalid gma address: %lx\n", 257 - (unsigned long)plane->base); 253 + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", 254 + plane->base); 258 255 return -EINVAL; 259 256 } 260 257 ··· 368 371 alpha_plane, alpha_force); 369 372 370 373 plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; 371 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 372 - gvt_vgpu_err("invalid gma address: %lx\n", 373 - (unsigned long)plane->base); 374 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 374 375 return -EINVAL; 375 - } 376 376 377 377 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 378 378 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 379 - gvt_vgpu_err("invalid gma address: %lx\n", 380 - (unsigned long)plane->base); 379 + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", 380 + plane->base); 381 381 return -EINVAL; 382 382 } 383 383 ··· 470 476 plane->drm_format = drm_format; 471 477 472 478 plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; 473 - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { 474 - gvt_vgpu_err("invalid gma address: %lx\n", 475 - (unsigned long)plane->base); 479 + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) 476 480 return -EINVAL; 477 - } 478 481 479 482 plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); 480 483 if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { 481 - 
gvt_vgpu_err("invalid gma address: %lx\n", 482 - (unsigned long)plane->base); 484 + gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", 485 + plane->base); 483 486 return -EINVAL; 484 487 } 485 488
+45 -7
drivers/gpu/drm/i915/gvt/gtt.c
··· 530 530 false, 0, mm->vgpu); 531 531 } 532 532 533 + static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, 534 + struct intel_gvt_gtt_entry *entry, unsigned long index) 535 + { 536 + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; 537 + 538 + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); 539 + 540 + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); 541 + } 542 + 533 543 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, 534 544 struct intel_gvt_gtt_entry *entry, unsigned long index) 535 545 { ··· 1828 1818 return ret; 1829 1819 } 1830 1820 1821 + static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, 1822 + struct intel_gvt_gtt_entry *entry) 1823 + { 1824 + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1825 + unsigned long pfn; 1826 + 1827 + pfn = pte_ops->get_pfn(entry); 1828 + if (pfn != vgpu->gvt->gtt.scratch_mfn) 1829 + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, 1830 + pfn << PAGE_SHIFT); 1831 + } 1832 + 1831 1833 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1832 1834 void *p_data, unsigned int bytes) 1833 1835 { ··· 1866 1844 1867 1845 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1868 1846 bytes); 1869 - m = e; 1870 1847 1871 1848 if (ops->test_present(&e)) { 1872 1849 gfn = ops->get_pfn(&e); 1850 + m = e; 1873 1851 1874 1852 /* one PTE update may be issued in multiple writes and the 1875 1853 * first write may not construct a valid gfn ··· 1890 1868 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1891 1869 } else 1892 1870 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1893 - } else 1871 + } else { 1872 + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); 1873 + ggtt_invalidate_pte(vgpu, &m); 1894 1874 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1875 + ops->clear_present(&m); 1876 + } 1895 1877 1896 1878 out: 1897 1879 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ··· 2056 2030 return PTR_ERR(gtt->ggtt_mm); 2057 2031 } 2058 2032 2059 - 
intel_vgpu_reset_ggtt(vgpu); 2033 + intel_vgpu_reset_ggtt(vgpu, false); 2060 2034 2061 2035 return create_scratch_page_tree(vgpu); 2062 2036 } ··· 2341 2315 /** 2342 2316 * intel_vgpu_reset_ggtt - reset the GGTT entry 2343 2317 * @vgpu: a vGPU 2318 + * @invalidate_old: invalidate old entries 2344 2319 * 2345 2320 * This function is called at the vGPU create stage 2346 2321 * to reset all the GGTT entries. 2347 2322 * 2348 2323 */ 2349 - void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) 2324 + void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) 2350 2325 { 2351 2326 struct intel_gvt *gvt = vgpu->gvt; 2352 2327 struct drm_i915_private *dev_priv = gvt->dev_priv; 2353 2328 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 2354 2329 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; 2330 + struct intel_gvt_gtt_entry old_entry; 2355 2331 u32 index; 2356 2332 u32 num_entries; 2357 2333 ··· 2362 2334 2363 2335 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; 2364 2336 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; 2365 - while (num_entries--) 2337 + while (num_entries--) { 2338 + if (invalidate_old) { 2339 + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2340 + ggtt_invalidate_pte(vgpu, &old_entry); 2341 + } 2366 2342 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2343 + } 2367 2344 2368 2345 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; 2369 2346 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2370 - while (num_entries--) 2347 + while (num_entries--) { 2348 + if (invalidate_old) { 2349 + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); 2350 + ggtt_invalidate_pte(vgpu, &old_entry); 2351 + } 2371 2352 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); 2353 + } 2372 2354 2373 2355 ggtt_invalidate(dev_priv); 2374 2356 } ··· 2398 2360 * removing the shadow pages. 
2399 2361 */ 2400 2362 intel_vgpu_destroy_all_ppgtt_mm(vgpu); 2401 - intel_vgpu_reset_ggtt(vgpu); 2363 + intel_vgpu_reset_ggtt(vgpu, true); 2402 2364 }
+1 -1
drivers/gpu/drm/i915/gvt/gtt.h
··· 193 193 194 194 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); 195 195 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); 196 - void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 196 + void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); 197 197 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); 198 198 199 199 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+1
drivers/gpu/drm/i915/gvt/handlers.c
··· 1150 1150 switch (notification) { 1151 1151 case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: 1152 1152 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; 1153 + /* fall through */ 1153 1154 case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: 1154 1155 mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); 1155 1156 return PTR_ERR_OR_ZERO(mm);
+1 -1
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 1301 1301 1302 1302 } 1303 1303 1304 - return 0; 1304 + return -ENOTTY; 1305 1305 } 1306 1306 1307 1307 static ssize_t
+15 -12
drivers/gpu/drm/i915/i915_drv.c
··· 1105 1105 1106 1106 ret = i915_ggtt_probe_hw(dev_priv); 1107 1107 if (ret) 1108 - return ret; 1108 + goto err_perf; 1109 1109 1110 - /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1111 - * otherwise the vga fbdev driver falls over. */ 1110 + /* 1111 + * WARNING: Apparently we must kick fbdev drivers before vgacon, 1112 + * otherwise the vga fbdev driver falls over. 1113 + */ 1112 1114 ret = i915_kick_out_firmware_fb(dev_priv); 1113 1115 if (ret) { 1114 1116 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1115 - goto out_ggtt; 1117 + goto err_ggtt; 1116 1118 } 1117 1119 1118 1120 ret = i915_kick_out_vgacon(dev_priv); 1119 1121 if (ret) { 1120 1122 DRM_ERROR("failed to remove conflicting VGA console\n"); 1121 - goto out_ggtt; 1123 + goto err_ggtt; 1122 1124 } 1123 1125 1124 1126 ret = i915_ggtt_init_hw(dev_priv); 1125 1127 if (ret) 1126 - return ret; 1128 + goto err_ggtt; 1127 1129 1128 1130 ret = i915_ggtt_enable_hw(dev_priv); 1129 1131 if (ret) { 1130 1132 DRM_ERROR("failed to enable GGTT\n"); 1131 - goto out_ggtt; 1133 + goto err_ggtt; 1132 1134 } 1133 1135 1134 1136 pci_set_master(pdev); ··· 1141 1139 if (ret) { 1142 1140 DRM_ERROR("failed to set DMA mask\n"); 1143 1141 1144 - goto out_ggtt; 1142 + goto err_ggtt; 1145 1143 } 1146 1144 } 1147 1145 ··· 1159 1157 if (ret) { 1160 1158 DRM_ERROR("failed to set DMA mask\n"); 1161 1159 1162 - goto out_ggtt; 1160 + goto err_ggtt; 1163 1161 } 1164 1162 } 1165 1163 ··· 1192 1190 1193 1191 ret = intel_gvt_init(dev_priv); 1194 1192 if (ret) 1195 - goto out_ggtt; 1193 + goto err_ggtt; 1196 1194 1197 1195 return 0; 1198 1196 1199 - out_ggtt: 1197 + err_ggtt: 1200 1198 i915_ggtt_cleanup_hw(dev_priv); 1201 - 1199 + err_perf: 1200 + i915_perf_fini(dev_priv); 1202 1201 return ret; 1203 1202 } 1204 1203
+1 -1
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 728 728 729 729 err = radix_tree_insert(handles_vma, handle, vma); 730 730 if (unlikely(err)) { 731 - kfree(lut); 731 + kmem_cache_free(eb->i915->luts, lut); 732 732 goto err_obj; 733 733 } 734 734
+28 -11
drivers/gpu/drm/i915/i915_pmu.c
··· 473 473 spin_lock_irqsave(&i915->pmu.lock, flags); 474 474 spin_lock(&kdev->power.lock); 475 475 476 - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 477 - i915->pmu.suspended_jiffies_last = 478 - kdev->power.suspended_jiffies; 476 + /* 477 + * After the above branch intel_runtime_pm_get_if_in_use failed 478 + * to get the runtime PM reference we cannot assume we are in 479 + * runtime suspend since we can either: a) race with coming out 480 + * of it before we took the power.lock, or b) there are other 481 + * states than suspended which can bring us here. 482 + * 483 + * We need to double-check that we are indeed currently runtime 484 + * suspended and if not we cannot do better than report the last 485 + * known RC6 value. 486 + */ 487 + if (kdev->power.runtime_status == RPM_SUSPENDED) { 488 + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 489 + i915->pmu.suspended_jiffies_last = 490 + kdev->power.suspended_jiffies; 479 491 480 - val = kdev->power.suspended_jiffies - 481 - i915->pmu.suspended_jiffies_last; 482 - val += jiffies - kdev->power.accounting_timestamp; 492 + val = kdev->power.suspended_jiffies - 493 + i915->pmu.suspended_jiffies_last; 494 + val += jiffies - kdev->power.accounting_timestamp; 495 + 496 + val = jiffies_to_nsecs(val); 497 + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 498 + 499 + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 500 + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { 501 + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; 502 + } else { 503 + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; 504 + } 483 505 484 506 spin_unlock(&kdev->power.lock); 485 - 486 - val = jiffies_to_nsecs(val); 487 - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 488 - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 489 - 490 507 spin_unlock_irqrestore(&i915->pmu.lock, flags); 491 508 } 492 509
+1 -1
drivers/gpu/drm/i915/intel_audio.c
··· 729 729 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 730 730 u32 tmp; 731 731 732 - if (!IS_GEN9_BC(dev_priv)) 732 + if (!IS_GEN9(dev_priv)) 733 733 return; 734 734 735 735 i915_audio_component_get_power(kdev);
+9 -4
drivers/gpu/drm/i915/intel_bios.c
··· 1256 1256 return; 1257 1257 1258 1258 aux_channel = child->aux_channel; 1259 - ddc_pin = child->ddc_pin; 1260 1259 1261 1260 is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; 1262 1261 is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; ··· 1302 1303 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); 1303 1304 1304 1305 if (is_dvi) { 1305 - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); 1306 - 1307 - sanitize_ddc_pin(dev_priv, port); 1306 + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); 1307 + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { 1308 + info->alternate_ddc_pin = ddc_pin; 1309 + sanitize_ddc_pin(dev_priv, port); 1310 + } else { 1311 + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " 1312 + "sticking to defaults\n", 1313 + port_name(port), ddc_pin); 1314 + } 1308 1315 } 1309 1316 1310 1317 if (is_dp) {
+9
drivers/gpu/drm/i915/intel_lrc.c
··· 577 577 * know the next preemption status we see corresponds 578 578 * to this ELSP update. 579 579 */ 580 + GEM_BUG_ON(!execlists_is_active(execlists, 581 + EXECLISTS_ACTIVE_USER)); 580 582 GEM_BUG_ON(!port_count(&port[0])); 581 583 if (port_count(&port[0]) > 1) 582 584 goto unlock; ··· 740 738 memset(port, 0, sizeof(*port)); 741 739 port++; 742 740 } 741 + 742 + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); 743 743 } 744 744 745 745 static void execlists_cancel_requests(struct intel_engine_cs *engine) ··· 1005 1001 1006 1002 if (fw) 1007 1003 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); 1004 + 1005 + /* If the engine is now idle, so should be the flag; and vice versa. */ 1006 + GEM_BUG_ON(execlists_is_active(&engine->execlists, 1007 + EXECLISTS_ACTIVE_USER) == 1008 + !port_isset(engine->execlists.port)); 1008 1009 } 1009 1010 1010 1011 static void queue_request(struct intel_engine_cs *engine,
+2
drivers/gpu/drm/vc4/vc4_bo.c
··· 195 195 vc4_bo_set_label(obj, -1); 196 196 197 197 if (bo->validated_shader) { 198 + kfree(bo->validated_shader->uniform_addr_offsets); 198 199 kfree(bo->validated_shader->texture_samples); 199 200 kfree(bo->validated_shader); 200 201 bo->validated_shader = NULL; ··· 592 591 } 593 592 594 593 if (bo->validated_shader) { 594 + kfree(bo->validated_shader->uniform_addr_offsets); 595 595 kfree(bo->validated_shader->texture_samples); 596 596 kfree(bo->validated_shader); 597 597 bo->validated_shader = NULL;
+1
drivers/gpu/drm/vc4/vc4_validate_shaders.c
··· 942 942 fail: 943 943 kfree(validation_state.branch_targets); 944 944 if (validated_shader) { 945 + kfree(validated_shader->uniform_addr_offsets); 945 946 kfree(validated_shader->texture_samples); 946 947 kfree(validated_shader); 947 948 }
+3
drivers/hid/hid-ids.h
··· 525 525 #define I2C_VENDOR_ID_HANTICK 0x0911 526 526 #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 527 527 528 + #define I2C_VENDOR_ID_RAYD 0x2386 529 + #define I2C_PRODUCT_ID_RAYD_3118 0x3118 530 + 528 531 #define USB_VENDOR_ID_HANWANG 0x0b57 529 532 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 530 533 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
+17 -7
drivers/hid/hid-input.c
··· 387 387 break; 388 388 389 389 case POWER_SUPPLY_PROP_CAPACITY: 390 - if (dev->battery_report_type == HID_FEATURE_REPORT) { 390 + if (dev->battery_status != HID_BATTERY_REPORTED && 391 + !dev->battery_avoid_query) { 391 392 value = hidinput_query_battery_capacity(dev); 392 393 if (value < 0) 393 394 return value; ··· 404 403 break; 405 404 406 405 case POWER_SUPPLY_PROP_STATUS: 407 - if (!dev->battery_reported && 408 - dev->battery_report_type == HID_FEATURE_REPORT) { 406 + if (dev->battery_status != HID_BATTERY_REPORTED && 407 + !dev->battery_avoid_query) { 409 408 value = hidinput_query_battery_capacity(dev); 410 409 if (value < 0) 411 410 return value; 412 411 413 412 dev->battery_capacity = value; 414 - dev->battery_reported = true; 413 + dev->battery_status = HID_BATTERY_QUERIED; 415 414 } 416 415 417 - if (!dev->battery_reported) 416 + if (dev->battery_status == HID_BATTERY_UNKNOWN) 418 417 val->intval = POWER_SUPPLY_STATUS_UNKNOWN; 419 418 else if (dev->battery_capacity == 100) 420 419 val->intval = POWER_SUPPLY_STATUS_FULL; ··· 487 486 dev->battery_report_type = report_type; 488 487 dev->battery_report_id = field->report->id; 489 488 489 + /* 490 + * Stylus is normally not connected to the device and thus we 491 + * can't query the device and get meaningful battery strength. 492 + * We have to wait for the device to report it on its own. 
493 + */ 494 + dev->battery_avoid_query = report_type == HID_INPUT_REPORT && 495 + field->physical == HID_DG_STYLUS; 496 + 490 497 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); 491 498 if (IS_ERR(dev->battery)) { 492 499 error = PTR_ERR(dev->battery); ··· 539 530 540 531 capacity = hidinput_scale_battery_capacity(dev, value); 541 532 542 - if (!dev->battery_reported || capacity != dev->battery_capacity) { 533 + if (dev->battery_status != HID_BATTERY_REPORTED || 534 + capacity != dev->battery_capacity) { 543 535 dev->battery_capacity = capacity; 544 - dev->battery_reported = true; 536 + dev->battery_status = HID_BATTERY_REPORTED; 545 537 power_supply_changed(dev->battery); 546 538 } 547 539 }
+5
drivers/hid/hidraw.c
··· 192 192 int ret = 0, len; 193 193 unsigned char report_number; 194 194 195 + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { 196 + ret = -ENODEV; 197 + goto out; 198 + } 199 + 195 200 dev = hidraw_table[minor]->hid; 196 201 197 202 if (!dev->ll_driver->raw_request) {
+13
drivers/hid/i2c-hid/i2c-hid.c
··· 47 47 /* quirks to control the device */ 48 48 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 49 49 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 50 + #define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) 50 51 51 52 /* flags */ 52 53 #define I2C_HID_STARTED 0 ··· 172 171 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 173 172 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 174 173 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 174 + { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, 175 + I2C_HID_QUIRK_RESEND_REPORT_DESCR }, 175 176 { 0, 0 } 176 177 }; 177 178 ··· 1222 1219 ret = i2c_hid_hwreset(client); 1223 1220 if (ret) 1224 1221 return ret; 1222 + 1223 + /* RAYDIUM device (2386:3118) need to re-send report descr cmd 1224 + * after resume, after this it will be back normal. 1225 + * otherwise it issues too many incomplete reports. 1226 + */ 1227 + if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { 1228 + ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); 1229 + if (ret) 1230 + return ret; 1231 + } 1225 1232 1226 1233 if (hid->driver && hid->driver->reset_resume) { 1227 1234 ret = hid->driver->reset_resume(hid);
+46 -30
drivers/hid/wacom_wac.c
··· 689 689 return tool_type; 690 690 } 691 691 692 + static void wacom_exit_report(struct wacom_wac *wacom) 693 + { 694 + struct input_dev *input = wacom->pen_input; 695 + struct wacom_features *features = &wacom->features; 696 + unsigned char *data = wacom->data; 697 + int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; 698 + 699 + /* 700 + * Reset all states otherwise we lose the initial states 701 + * when in-prox next time 702 + */ 703 + input_report_abs(input, ABS_X, 0); 704 + input_report_abs(input, ABS_Y, 0); 705 + input_report_abs(input, ABS_DISTANCE, 0); 706 + input_report_abs(input, ABS_TILT_X, 0); 707 + input_report_abs(input, ABS_TILT_Y, 0); 708 + if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { 709 + input_report_key(input, BTN_LEFT, 0); 710 + input_report_key(input, BTN_MIDDLE, 0); 711 + input_report_key(input, BTN_RIGHT, 0); 712 + input_report_key(input, BTN_SIDE, 0); 713 + input_report_key(input, BTN_EXTRA, 0); 714 + input_report_abs(input, ABS_THROTTLE, 0); 715 + input_report_abs(input, ABS_RZ, 0); 716 + } else { 717 + input_report_abs(input, ABS_PRESSURE, 0); 718 + input_report_key(input, BTN_STYLUS, 0); 719 + input_report_key(input, BTN_STYLUS2, 0); 720 + input_report_key(input, BTN_TOUCH, 0); 721 + input_report_abs(input, ABS_WHEEL, 0); 722 + if (features->type >= INTUOS3S) 723 + input_report_abs(input, ABS_Z, 0); 724 + } 725 + input_report_key(input, wacom->tool[idx], 0); 726 + input_report_abs(input, ABS_MISC, 0); /* reset tool id */ 727 + input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); 728 + wacom->id[idx] = 0; 729 + } 730 + 692 731 static int wacom_intuos_inout(struct wacom_wac *wacom) 693 732 { 694 733 struct wacom_features *features = &wacom->features; ··· 780 741 if (!wacom->id[idx]) 781 742 return 1; 782 743 783 - /* 784 - * Reset all states otherwise we lose the initial states 785 - * when in-prox next time 786 - */ 787 - input_report_abs(input, ABS_X, 0); 788 - input_report_abs(input, ABS_Y, 0); 789 - 
input_report_abs(input, ABS_DISTANCE, 0); 790 - input_report_abs(input, ABS_TILT_X, 0); 791 - input_report_abs(input, ABS_TILT_Y, 0); 792 - if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { 793 - input_report_key(input, BTN_LEFT, 0); 794 - input_report_key(input, BTN_MIDDLE, 0); 795 - input_report_key(input, BTN_RIGHT, 0); 796 - input_report_key(input, BTN_SIDE, 0); 797 - input_report_key(input, BTN_EXTRA, 0); 798 - input_report_abs(input, ABS_THROTTLE, 0); 799 - input_report_abs(input, ABS_RZ, 0); 800 - } else { 801 - input_report_abs(input, ABS_PRESSURE, 0); 802 - input_report_key(input, BTN_STYLUS, 0); 803 - input_report_key(input, BTN_STYLUS2, 0); 804 - input_report_key(input, BTN_TOUCH, 0); 805 - input_report_abs(input, ABS_WHEEL, 0); 806 - if (features->type >= INTUOS3S) 807 - input_report_abs(input, ABS_Z, 0); 808 - } 809 - input_report_key(input, wacom->tool[idx], 0); 810 - input_report_abs(input, ABS_MISC, 0); /* reset tool id */ 811 - input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); 812 - wacom->id[idx] = 0; 744 + wacom_exit_report(wacom); 813 745 return 2; 814 746 } 815 747 ··· 1245 1235 if (!valid) 1246 1236 continue; 1247 1237 1238 + if (!prox) { 1239 + wacom->shared->stylus_in_proximity = false; 1240 + wacom_exit_report(wacom); 1241 + input_sync(pen_input); 1242 + return; 1243 + } 1248 1244 if (range) { 1249 1245 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); 1250 1246 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+5 -3
drivers/isdn/mISDN/dsp_hwec.c
··· 68 68 goto _do; 69 69 70 70 { 71 - char _dup[len + 1]; 72 71 char *dup, *tok, *name, *val; 73 72 int tmp; 74 73 75 - strcpy(_dup, arg); 76 - dup = _dup; 74 + dup = kstrdup(arg, GFP_ATOMIC); 75 + if (!dup) 76 + return; 77 77 78 78 while ((tok = strsep(&dup, ","))) { 79 79 if (!strlen(tok)) ··· 89 89 deftaps = tmp; 90 90 } 91 91 } 92 + 93 + kfree(dup); 92 94 } 93 95 94 96 _do:
+11 -3
drivers/isdn/mISDN/l1oip_core.c
··· 279 279 u16 timebase, u8 *buf, int len) 280 280 { 281 281 u8 *p; 282 - u8 frame[len + 32]; 282 + u8 frame[MAX_DFRAME_LEN_L1 + 32]; 283 283 struct socket *socket = NULL; 284 284 285 285 if (debug & DEBUG_L1OIP_MSG) ··· 902 902 p = skb->data; 903 903 l = skb->len; 904 904 while (l) { 905 - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 905 + /* 906 + * This is technically bounded by L1OIP_MAX_PERFRAME but 907 + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME 908 + */ 909 + ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; 906 910 l1oip_socket_send(hc, 0, dch->slot, 0, 907 911 hc->chan[dch->slot].tx_counter++, p, ll); 908 912 p += ll; ··· 1144 1140 p = skb->data; 1145 1141 l = skb->len; 1146 1142 while (l) { 1147 - ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME; 1143 + /* 1144 + * This is technically bounded by L1OIP_MAX_PERFRAME but 1145 + * MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME 1146 + */ 1147 + ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; 1148 1148 l1oip_socket_send(hc, hc->codec, bch->slot, 0, 1149 1149 hc->chan[bch->slot].tx_counter, p, ll); 1150 1150 hc->chan[bch->slot].tx_counter += ll;
+4 -2
drivers/md/md.c
··· 9256 9256 check_sb_changes(mddev, rdev); 9257 9257 9258 9258 /* Read all rdev's to update recovery_offset */ 9259 - rdev_for_each_rcu(rdev, mddev) 9260 - read_rdev(mddev, rdev); 9259 + rdev_for_each_rcu(rdev, mddev) { 9260 + if (!test_bit(Faulty, &rdev->flags)) 9261 + read_rdev(mddev, rdev); 9262 + } 9261 9263 } 9262 9264 EXPORT_SYMBOL(md_reload_sb); 9263 9265
+20 -5
drivers/md/raid1.c
··· 854 854 * there is no normal IO happeing. It must arrange to call 855 855 * lower_barrier when the particular background IO completes. 856 856 */ 857 - static void raise_barrier(struct r1conf *conf, sector_t sector_nr) 857 + static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr) 858 858 { 859 859 int idx = sector_to_idx(sector_nr); 860 860 ··· 885 885 * max resync count which allowed on current I/O barrier bucket. 886 886 */ 887 887 wait_event_lock_irq(conf->wait_barrier, 888 - !conf->array_frozen && 888 + (!conf->array_frozen && 889 889 !atomic_read(&conf->nr_pending[idx]) && 890 - atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH, 890 + atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) || 891 + test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), 891 892 conf->resync_lock); 893 + 894 + if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 895 + atomic_dec(&conf->barrier[idx]); 896 + spin_unlock_irq(&conf->resync_lock); 897 + wake_up(&conf->wait_barrier); 898 + return -EINTR; 899 + } 892 900 893 901 atomic_inc(&conf->nr_sync_pending); 894 902 spin_unlock_irq(&conf->resync_lock); 903 + 904 + return 0; 895 905 } 896 906 897 907 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) ··· 1101 1091 behind_bio->bi_iter.bi_size = size; 1102 1092 goto skip_copy; 1103 1093 } 1094 + 1095 + behind_bio->bi_write_hint = bio->bi_write_hint; 1104 1096 1105 1097 while (i < vcnt && size) { 1106 1098 struct page *page; ··· 2674 2662 2675 2663 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2676 2664 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2677 - r1_bio = raid1_alloc_init_r1buf(conf); 2678 2665 2679 - raise_barrier(conf, sector_nr); 2666 + 2667 + if (raise_barrier(conf, sector_nr)) 2668 + return 0; 2669 + 2670 + r1_bio = raid1_alloc_init_r1buf(conf); 2680 2671 2681 2672 rcu_read_lock(); 2682 2673 /*
+33 -6
drivers/mmc/host/renesas_sdhi_internal_dmac.c
··· 9 9 * published by the Free Software Foundation. 10 10 */ 11 11 12 + #include <linux/bitops.h> 12 13 #include <linux/device.h> 13 14 #include <linux/dma-mapping.h> 14 15 #include <linux/io-64-nonatomic-hi-lo.h> ··· 62 61 * - Since this SDHI DMAC register set has 16 but 32-bit width, we 63 62 * need a custom accessor. 64 63 */ 64 + 65 + static unsigned long global_flags; 66 + /* 67 + * Workaround for avoiding to use RX DMAC by multiple channels. 68 + * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use 69 + * RX DMAC simultaneously, sometimes hundreds of bytes data are not 70 + * stored into the system memory even if the DMAC interrupt happened. 71 + * So, this driver then uses one RX DMAC channel only. 72 + */ 73 + #define SDHI_INTERNAL_DMAC_ONE_RX_ONLY 0 74 + #define SDHI_INTERNAL_DMAC_RX_IN_USE 1 65 75 66 76 /* Definitions for sampling clocks */ 67 77 static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { ··· 138 126 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, 139 127 RST_RESERVED_BITS | val); 140 128 129 + if (host->data && host->data->flags & MMC_DATA_READ) 130 + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); 131 + 141 132 renesas_sdhi_internal_dmac_enable_dma(host, true); 142 133 } 143 134 ··· 170 155 if (data->flags & MMC_DATA_READ) { 171 156 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 172 157 dir = DMA_FROM_DEVICE; 158 + if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && 159 + test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) 160 + goto force_pio; 173 161 } else { 174 162 dtran_mode |= DTRAN_MODE_CH_NUM_CH0; 175 163 dir = DMA_TO_DEVICE; ··· 226 208 renesas_sdhi_internal_dmac_enable_dma(host, false); 227 209 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir); 228 210 211 + if (dir == DMA_FROM_DEVICE) 212 + clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); 213 + 229 214 tmio_mmc_do_data_irq(host); 230 215 out: 231 216 spin_unlock_irq(&host->lock); ··· 272 251 * implementation as others 
may use a different implementation. 273 252 */ 274 253 static const struct soc_device_attribute gen3_soc_whitelist[] = { 275 - { .soc_id = "r8a7795", .revision = "ES1.*" }, 276 - { .soc_id = "r8a7795", .revision = "ES2.0" }, 277 - { .soc_id = "r8a7796", .revision = "ES1.0" }, 278 - { .soc_id = "r8a77995", .revision = "ES1.0" }, 279 - { /* sentinel */ } 254 + { .soc_id = "r8a7795", .revision = "ES1.*", 255 + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, 256 + { .soc_id = "r8a7795", .revision = "ES2.0" }, 257 + { .soc_id = "r8a7796", .revision = "ES1.0", 258 + .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, 259 + { .soc_id = "r8a77995", .revision = "ES1.0" }, 260 + { /* sentinel */ } 280 261 }; 281 262 282 263 static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) 283 264 { 284 - if (!soc_device_match(gen3_soc_whitelist)) 265 + const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist); 266 + 267 + if (!soc) 285 268 return -ENODEV; 269 + 270 + global_flags |= (unsigned long)soc->data; 286 271 287 272 return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops); 288 273 }
+23 -2
drivers/mmc/host/sdhci-pci-core.c
··· 1312 1312 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); 1313 1313 } 1314 1314 1315 - static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) 1315 + static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) 1316 1316 { 1317 1317 struct sdhci_pci_slot *slot = sdhci_priv(host); 1318 1318 struct pci_dev *pdev = slot->chip->pdev; ··· 1351 1351 return 0; 1352 1352 } 1353 1353 1354 + static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) 1355 + { 1356 + struct sdhci_host *host = mmc_priv(mmc); 1357 + 1358 + /* AMD requires custom HS200 tuning */ 1359 + if (host->timing == MMC_TIMING_MMC_HS200) 1360 + return amd_execute_tuning_hs200(host, opcode); 1361 + 1362 + /* Otherwise perform standard SDHCI tuning */ 1363 + return sdhci_execute_tuning(mmc, opcode); 1364 + } 1365 + 1366 + static int amd_probe_slot(struct sdhci_pci_slot *slot) 1367 + { 1368 + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; 1369 + 1370 + ops->execute_tuning = amd_execute_tuning; 1371 + 1372 + return 0; 1373 + } 1374 + 1354 1375 static int amd_probe(struct sdhci_pci_chip *chip) 1355 1376 { 1356 1377 struct pci_dev *smbus_dev; ··· 1406 1385 .set_bus_width = sdhci_set_bus_width, 1407 1386 .reset = sdhci_reset, 1408 1387 .set_uhs_signaling = sdhci_set_uhs_signaling, 1409 - .platform_execute_tuning = amd_execute_tuning, 1410 1388 }; 1411 1389 1412 1390 static const struct sdhci_pci_fixes sdhci_amd = { 1413 1391 .probe = amd_probe, 1414 1392 .ops = &amd_sdhci_pci_ops, 1393 + .probe_slot = amd_probe_slot, 1415 1394 }; 1416 1395 1417 1396 static const struct pci_device_id pci_ids[] = {
+10 -2
drivers/net/dsa/mv88e6xxx/hwtstamp.c
··· 285 285 struct sk_buff_head *rxq) 286 286 { 287 287 u16 buf[4] = { 0 }, status, seq_id; 288 - u64 ns, timelo, timehi; 289 288 struct skb_shared_hwtstamps *shwt; 289 + struct sk_buff_head received; 290 + u64 ns, timelo, timehi; 291 + unsigned long flags; 290 292 int err; 293 + 294 + /* The latched timestamp belongs to one of the received frames. */ 295 + __skb_queue_head_init(&received); 296 + spin_lock_irqsave(&rxq->lock, flags); 297 + skb_queue_splice_tail_init(rxq, &received); 298 + spin_unlock_irqrestore(&rxq->lock, flags); 291 299 292 300 mutex_lock(&chip->reg_lock); 293 301 err = mv88e6xxx_port_ptp_read(chip, ps->port_id, ··· 319 311 /* Since the device can only handle one time stamp at a time, 320 312 * we purge any extra frames from the queue. 321 313 */ 322 - for ( ; skb; skb = skb_dequeue(rxq)) { 314 + for ( ; skb; skb = __skb_dequeue(&received)) { 323 315 if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) { 324 316 ns = timehi << 16 | timelo; 325 317
+27 -22
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 1927 1927 return retval; 1928 1928 } 1929 1929 1930 - static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) 1930 + static void bnxt_get_pkgver(struct net_device *dev) 1931 1931 { 1932 + struct bnxt *bp = netdev_priv(dev); 1932 1933 u16 index = 0; 1933 - u32 datalen; 1934 + char *pkgver; 1935 + u32 pkglen; 1936 + u8 *pkgbuf; 1937 + int len; 1934 1938 1935 1939 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, 1936 1940 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 1937 - &index, NULL, &datalen) != 0) 1938 - return NULL; 1941 + &index, NULL, &pkglen) != 0) 1942 + return; 1939 1943 1940 - memset(buf, 0, buflen); 1941 - if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) 1942 - return NULL; 1944 + pkgbuf = kzalloc(pkglen, GFP_KERNEL); 1945 + if (!pkgbuf) { 1946 + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", 1947 + pkglen); 1948 + return; 1949 + } 1943 1950 1944 - return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, 1945 - datalen); 1951 + if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) 1952 + goto err; 1953 + 1954 + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, 1955 + pkglen); 1956 + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { 1957 + len = strlen(bp->fw_ver_str); 1958 + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, 1959 + "/pkg %s", pkgver); 1960 + } 1961 + err: 1962 + kfree(pkgbuf); 1946 1963 } 1947 1964 1948 1965 static int bnxt_get_eeprom(struct net_device *dev, ··· 2632 2615 struct hwrm_selftest_qlist_input req = {0}; 2633 2616 struct bnxt_test_info *test_info; 2634 2617 struct net_device *dev = bp->dev; 2635 - char *pkglog; 2636 2618 int i, rc; 2637 2619 2638 - pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); 2639 - if (pkglog) { 2640 - char *pkgver; 2641 - int len; 2620 + bnxt_get_pkgver(dev); 2642 2621 2643 - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); 2644 - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { 
2645 - len = strlen(bp->fw_ver_str); 2646 - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, 2647 - "/pkg %s", pkgver); 2648 - } 2649 - kfree(pkglog); 2650 - } 2651 2622 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) 2652 2623 return; 2653 2624
-2
drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
··· 59 59 #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) 60 60 #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) 61 61 62 - #define BNX_PKG_LOG_MAX_LENGTH 4096 63 - 64 62 enum bnxnvm_pkglog_field_index { 65 63 BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, 66 64 BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+1 -1
drivers/net/ethernet/hisilicon/hns/hnae.h
··· 87 87 88 88 #define HNAE_AE_REGISTER 0x1 89 89 90 - #define RCB_RING_NAME_LEN 16 90 + #define RCB_RING_NAME_LEN (IFNAMSIZ + 4) 91 91 92 92 #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 93 93 #define HNAE_LOW_LATENCY_COAL_PARAM 80
+54 -31
drivers/net/ethernet/ibm/ibmvnic.c
··· 794 794 { 795 795 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 796 796 unsigned long timeout = msecs_to_jiffies(30000); 797 - struct device *dev = &adapter->vdev->dev; 797 + int retry_count = 0; 798 798 int rc; 799 799 800 800 do { 801 - if (adapter->renegotiate) { 802 - adapter->renegotiate = false; 801 + if (retry_count > IBMVNIC_MAX_QUEUES) { 802 + netdev_warn(netdev, "Login attempts exceeded\n"); 803 + return -1; 804 + } 805 + 806 + adapter->init_done_rc = 0; 807 + reinit_completion(&adapter->init_done); 808 + rc = send_login(adapter); 809 + if (rc) { 810 + netdev_warn(netdev, "Unable to login\n"); 811 + return rc; 812 + } 813 + 814 + if (!wait_for_completion_timeout(&adapter->init_done, 815 + timeout)) { 816 + netdev_warn(netdev, "Login timed out\n"); 817 + return -1; 818 + } 819 + 820 + if (adapter->init_done_rc == PARTIALSUCCESS) { 821 + retry_count++; 803 822 release_sub_crqs(adapter, 1); 804 823 824 + adapter->init_done_rc = 0; 805 825 reinit_completion(&adapter->init_done); 806 826 send_cap_queries(adapter); 807 827 if (!wait_for_completion_timeout(&adapter->init_done, 808 828 timeout)) { 809 - dev_err(dev, "Capabilities query timeout\n"); 829 + netdev_warn(netdev, 830 + "Capabilities query timed out\n"); 810 831 return -1; 811 832 } 833 + 812 834 rc = init_sub_crqs(adapter); 813 835 if (rc) { 814 - dev_err(dev, 815 - "Initialization of SCRQ's failed\n"); 836 + netdev_warn(netdev, 837 + "SCRQ initialization failed\n"); 816 838 return -1; 817 839 } 840 + 818 841 rc = init_sub_crq_irqs(adapter); 819 842 if (rc) { 820 - dev_err(dev, 821 - "Initialization of SCRQ's irqs failed\n"); 843 + netdev_warn(netdev, 844 + "SCRQ irq initialization failed\n"); 822 845 return -1; 823 846 } 824 - } 825 - 826 - reinit_completion(&adapter->init_done); 827 - rc = send_login(adapter); 828 - if (rc) { 829 - dev_err(dev, "Unable to attempt device login\n"); 830 - return rc; 831 - } else if (!wait_for_completion_timeout(&adapter->init_done, 832 - timeout)) { 833 - 
dev_err(dev, "Login timeout\n"); 847 + } else if (adapter->init_done_rc) { 848 + netdev_warn(netdev, "Adapter login failed\n"); 834 849 return -1; 835 850 } 836 - } while (adapter->renegotiate); 851 + } while (adapter->init_done_rc == PARTIALSUCCESS); 837 852 838 853 /* handle pending MAC address changes after successful login */ 839 854 if (adapter->mac_change_pending) { ··· 1049 1034 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); 1050 1035 if (prev_state == VNIC_CLOSED) 1051 1036 enable_irq(adapter->rx_scrq[i]->irq); 1052 - else 1053 - enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1037 + enable_scrq_irq(adapter, adapter->rx_scrq[i]); 1054 1038 } 1055 1039 1056 1040 for (i = 0; i < adapter->req_tx_queues; i++) { 1057 1041 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); 1058 1042 if (prev_state == VNIC_CLOSED) 1059 1043 enable_irq(adapter->tx_scrq[i]->irq); 1060 - else 1061 - enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1044 + enable_scrq_irq(adapter, adapter->tx_scrq[i]); 1062 1045 } 1063 1046 1064 1047 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); ··· 1197 1184 if (adapter->tx_scrq[i]->irq) { 1198 1185 netdev_dbg(netdev, 1199 1186 "Disabling tx_scrq[%d] irq\n", i); 1187 + disable_scrq_irq(adapter, adapter->tx_scrq[i]); 1200 1188 disable_irq(adapter->tx_scrq[i]->irq); 1201 1189 } 1202 1190 } ··· 1207 1193 if (adapter->rx_scrq[i]->irq) { 1208 1194 netdev_dbg(netdev, 1209 1195 "Disabling rx_scrq[%d] irq\n", i); 1196 + disable_scrq_irq(adapter, adapter->rx_scrq[i]); 1210 1197 disable_irq(adapter->rx_scrq[i]->irq); 1211 1198 } 1212 1199 } ··· 1843 1828 for (i = 0; i < adapter->req_rx_queues; i++) 1844 1829 napi_schedule(&adapter->napi[i]); 1845 1830 1846 - if (adapter->reset_reason != VNIC_RESET_FAILOVER) 1831 + if (adapter->reset_reason != VNIC_RESET_FAILOVER && 1832 + adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) 1847 1833 netdev_notify_peers(netdev); 1848 1834 1849 1835 netif_carrier_on(netdev); ··· 2617 2601 { 2618 2602 struct device *dev 
= &adapter->vdev->dev; 2619 2603 unsigned long rc; 2604 + u64 val; 2620 2605 2621 2606 if (scrq->hw_irq > 0x100000000ULL) { 2622 2607 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2623 2608 return 1; 2624 2609 } 2610 + 2611 + val = (0xff000000) | scrq->hw_irq; 2612 + rc = plpar_hcall_norets(H_EOI, val); 2613 + if (rc) 2614 + dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 2615 + val, rc); 2625 2616 2626 2617 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2627 2618 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); ··· 3193 3170 struct vnic_login_client_data { 3194 3171 u8 type; 3195 3172 __be16 len; 3196 - char name; 3173 + char name[]; 3197 3174 } __packed; 3198 3175 3199 3176 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) ··· 3222 3199 vlcd->type = 1; 3223 3200 len = strlen(os_name) + 1; 3224 3201 vlcd->len = cpu_to_be16(len); 3225 - strncpy(&vlcd->name, os_name, len); 3226 - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3202 + strncpy(vlcd->name, os_name, len); 3203 + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3227 3204 3228 3205 /* Type 2 - LPAR name */ 3229 3206 vlcd->type = 2; 3230 3207 len = strlen(utsname()->nodename) + 1; 3231 3208 vlcd->len = cpu_to_be16(len); 3232 - strncpy(&vlcd->name, utsname()->nodename, len); 3233 - vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 3209 + strncpy(vlcd->name, utsname()->nodename, len); 3210 + vlcd = (struct vnic_login_client_data *)(vlcd->name + len); 3234 3211 3235 3212 /* Type 3 - device name */ 3236 3213 vlcd->type = 3; 3237 3214 len = strlen(adapter->netdev->name) + 1; 3238 3215 vlcd->len = cpu_to_be16(len); 3239 - strncpy(&vlcd->name, adapter->netdev->name, len); 3216 + strncpy(vlcd->name, adapter->netdev->name, len); 3240 3217 } 3241 3218 3242 3219 static int send_login(struct ibmvnic_adapter *adapter) ··· 3965 3942 * to resend the login buffer with fewer queues requested. 
3966 3943 */ 3967 3944 if (login_rsp_crq->generic.rc.code) { 3968 - adapter->renegotiate = true; 3945 + adapter->init_done_rc = login_rsp_crq->generic.rc.code; 3969 3946 complete(&adapter->init_done); 3970 3947 return 0; 3971 3948 }
-1
drivers/net/ethernet/ibm/ibmvnic.h
··· 1035 1035 1036 1036 struct ibmvnic_sub_crq_queue **tx_scrq; 1037 1037 struct ibmvnic_sub_crq_queue **rx_scrq; 1038 - bool renegotiate; 1039 1038 1040 1039 /* rx structs */ 1041 1040 struct napi_struct *napi;
+8 -6
drivers/net/ethernet/marvell/mvpp2.c
··· 663 663 #define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 664 664 #define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ 665 665 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) 666 - #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1) 666 + #define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) 667 667 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 668 668 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 669 669 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) ··· 915 915 #define MVPP2_MIB_LATE_COLLISION 0x7c 916 916 917 917 #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) 918 + 919 + #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) 918 920 919 921 /* Definitions */ 920 922 ··· 1431 1429 if (port->priv->hw_version == MVPP21) 1432 1430 return tx_desc->pp21.buf_dma_addr; 1433 1431 else 1434 - return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); 1432 + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; 1435 1433 } 1436 1434 1437 1435 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, ··· 1449 1447 } else { 1450 1448 u64 val = (u64)addr; 1451 1449 1452 - tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); 1450 + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; 1453 1451 tx_desc->pp22.buf_dma_addr_ptp |= val; 1454 1452 tx_desc->pp22.packet_offset = offset; 1455 1453 } ··· 1509 1507 if (port->priv->hw_version == MVPP21) 1510 1508 return rx_desc->pp21.buf_dma_addr; 1511 1509 else 1512 - return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); 1510 + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; 1513 1511 } 1514 1512 1515 1513 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, ··· 1518 1516 if (port->priv->hw_version == MVPP21) 1519 1517 return rx_desc->pp21.buf_cookie; 1520 1518 else 1521 - return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); 1519 + return rx_desc->pp22.buf_cookie_misc & 
MVPP2_DESC_DMA_MASK; 1522 1520 } 1523 1521 1524 1522 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, ··· 8791 8789 } 8792 8790 8793 8791 if (priv->hw_version == MVPP22) { 8794 - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); 8792 + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); 8795 8793 if (err) 8796 8794 goto err_mg_clk; 8797 8795 /* Sadly, the BM pools all share the same register to
+37 -7
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
··· 258 258 case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: 259 259 nfp_tunnel_keep_alive(app, skb); 260 260 break; 261 - case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: 262 - /* Acks from the NFP that the route is added - ignore. */ 263 - break; 264 261 default: 265 262 nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", 266 263 type); ··· 272 275 273 276 void nfp_flower_cmsg_process_rx(struct work_struct *work) 274 277 { 278 + struct sk_buff_head cmsg_joined; 275 279 struct nfp_flower_priv *priv; 276 280 struct sk_buff *skb; 277 281 278 282 priv = container_of(work, struct nfp_flower_priv, cmsg_work); 283 + skb_queue_head_init(&cmsg_joined); 279 284 280 - while ((skb = skb_dequeue(&priv->cmsg_skbs))) 285 + spin_lock_bh(&priv->cmsg_skbs_high.lock); 286 + skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined); 287 + spin_unlock_bh(&priv->cmsg_skbs_high.lock); 288 + 289 + spin_lock_bh(&priv->cmsg_skbs_low.lock); 290 + skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined); 291 + spin_unlock_bh(&priv->cmsg_skbs_low.lock); 292 + 293 + while ((skb = __skb_dequeue(&cmsg_joined))) 281 294 nfp_flower_cmsg_process_one_rx(priv->app, skb); 295 + } 296 + 297 + static void 298 + nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type) 299 + { 300 + struct nfp_flower_priv *priv = app->priv; 301 + struct sk_buff_head *skb_head; 302 + 303 + if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || 304 + type == NFP_FLOWER_CMSG_TYPE_PORT_MOD) 305 + skb_head = &priv->cmsg_skbs_high; 306 + else 307 + skb_head = &priv->cmsg_skbs_low; 308 + 309 + if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) { 310 + nfp_flower_cmsg_warn(app, "Dropping queued control messages\n"); 311 + dev_kfree_skb_any(skb); 312 + return; 313 + } 314 + 315 + skb_queue_tail(skb_head, skb); 316 + schedule_work(&priv->cmsg_work); 282 317 } 283 318 284 319 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) 285 320 { 286 - struct nfp_flower_priv *priv = app->priv; 287 
321 struct nfp_flower_cmsg_hdr *cmsg_hdr; 288 322 289 323 cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); ··· 334 306 nfp_flower_process_mtu_ack(app, skb)) { 335 307 /* Handle MTU acks outside wq to prevent RTNL conflict. */ 336 308 dev_consume_skb_any(skb); 309 + } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { 310 + /* Acks from the NFP that the route is added - ignore. */ 311 + dev_consume_skb_any(skb); 337 312 } else { 338 - skb_queue_tail(&priv->cmsg_skbs, skb); 339 - schedule_work(&priv->cmsg_work); 313 + nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); 340 314 } 341 315 }
+2
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
··· 108 108 #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) 109 109 #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) 110 110 111 + #define NFP_FLOWER_WORKQ_MAX_SKBS 30000 112 + 111 113 #define nfp_flower_cmsg_warn(app, fmt, args...) \ 112 114 do { \ 113 115 if (net_ratelimit()) \
+4 -2
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 519 519 520 520 app->priv = app_priv; 521 521 app_priv->app = app; 522 - skb_queue_head_init(&app_priv->cmsg_skbs); 522 + skb_queue_head_init(&app_priv->cmsg_skbs_high); 523 + skb_queue_head_init(&app_priv->cmsg_skbs_low); 523 524 INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); 524 525 init_waitqueue_head(&app_priv->reify_wait_queue); 525 526 ··· 550 549 { 551 550 struct nfp_flower_priv *app_priv = app->priv; 552 551 553 - skb_queue_purge(&app_priv->cmsg_skbs); 552 + skb_queue_purge(&app_priv->cmsg_skbs_high); 553 + skb_queue_purge(&app_priv->cmsg_skbs_low); 554 554 flush_work(&app_priv->cmsg_work); 555 555 556 556 nfp_flower_metadata_cleanup(app);
+6 -2
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 107 107 * @mask_table: Hash table used to store masks 108 108 * @flow_table: Hash table used to store flower rules 109 109 * @cmsg_work: Workqueue for control messages processing 110 - * @cmsg_skbs: List of skbs for control message processing 110 + * @cmsg_skbs_high: List of higher priority skbs for control message 111 + * processing 112 + * @cmsg_skbs_low: List of lower priority skbs for control message 113 + * processing 111 114 * @nfp_mac_off_list: List of MAC addresses to offload 112 115 * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs 113 116 * @nfp_ipv4_off_list: List of IPv4 addresses to offload ··· 139 136 DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); 140 137 DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); 141 138 struct work_struct cmsg_work; 142 - struct sk_buff_head cmsg_skbs; 139 + struct sk_buff_head cmsg_skbs_high; 140 + struct sk_buff_head cmsg_skbs_low; 143 141 struct list_head nfp_mac_off_list; 144 142 struct list_head nfp_mac_index_list; 145 143 struct list_head nfp_ipv4_off_list;
+4 -1
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
··· 211 211 break; 212 212 213 213 err = msleep_interruptible(timeout_ms); 214 - if (err != 0) 214 + if (err != 0) { 215 + nfp_info(mutex->cpp, 216 + "interrupted waiting for NFP mutex\n"); 215 217 return -ERESTARTSYS; 218 + } 216 219 217 220 if (time_is_before_eq_jiffies(warn_at)) { 218 221 warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;
+1 -2
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
··· 281 281 if ((*reg & mask) == val) 282 282 return 0; 283 283 284 - if (msleep_interruptible(25)) 285 - return -ERESTARTSYS; 284 + msleep(25); 286 285 287 286 if (time_after(start_time, wait_until)) 288 287 return -ETIMEDOUT;
+6 -5
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
··· 350 350 351 351 real_dev = priv->real_dev; 352 352 353 - if (!rmnet_is_real_dev_registered(real_dev)) 354 - return -ENODEV; 355 - 356 353 if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) 357 354 goto nla_put_failure; 358 355 359 - port = rmnet_get_port_rtnl(real_dev); 356 + if (rmnet_is_real_dev_registered(real_dev)) { 357 + port = rmnet_get_port_rtnl(real_dev); 358 + f.flags = port->data_format; 359 + } else { 360 + f.flags = 0; 361 + } 360 362 361 - f.flags = port->data_format; 362 363 f.mask = ~0; 363 364 364 365 if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
+3 -4
drivers/net/ethernet/sfc/ef10.c
··· 4776 4776 goto out_unlock; 4777 4777 } 4778 4778 4779 - if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 4780 - flow_id, filter_idx)) { 4779 + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { 4781 4780 ret = false; 4782 4781 goto out_unlock; 4783 4782 } ··· 5264 5265 ids = vlan->uc; 5265 5266 } 5266 5267 5267 - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5268 + filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; 5268 5269 5269 5270 /* Insert/renew filters */ 5270 5271 for (i = 0; i < addr_count; i++) { ··· 5333 5334 int rc; 5334 5335 u16 *id; 5335 5336 5336 - filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5337 + filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; 5337 5338 5338 5339 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5339 5340
+1 -1
drivers/net/ethernet/sfc/farch.c
··· 2912 2912 if (test_bit(index, table->used_bitmap) && 2913 2913 table->spec[index].priority == EFX_FILTER_PRI_HINT && 2914 2914 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, 2915 - flow_id, index)) { 2915 + flow_id, 0)) { 2916 2916 efx_farch_filter_table_clear_entry(efx, table, index); 2917 2917 ret = true; 2918 2918 }
+25
drivers/net/ethernet/sfc/net_driver.h
··· 733 733 u32 rx_indir_table[128]; 734 734 }; 735 735 736 + #ifdef CONFIG_RFS_ACCEL 737 + /** 738 + * struct efx_async_filter_insertion - Request to asynchronously insert a filter 739 + * @net_dev: Reference to the netdevice 740 + * @spec: The filter to insert 741 + * @work: Workitem for this request 742 + * @rxq_index: Identifies the channel for which this request was made 743 + * @flow_id: Identifies the kernel-side flow for which this request was made 744 + */ 745 + struct efx_async_filter_insertion { 746 + struct net_device *net_dev; 747 + struct efx_filter_spec spec; 748 + struct work_struct work; 749 + u16 rxq_index; 750 + u32 flow_id; 751 + }; 752 + 753 + /* Maximum number of ARFS workitems that may be in flight on an efx_nic */ 754 + #define EFX_RPS_MAX_IN_FLIGHT 8 755 + #endif /* CONFIG_RFS_ACCEL */ 756 + 736 757 /** 737 758 * struct efx_nic - an Efx NIC 738 759 * @name: Device name (net device name or bus id before net device registered) ··· 871 850 * @rps_expire_channel: Next channel to check for expiry 872 851 * @rps_expire_index: Next index to check for expiry in 873 852 * @rps_expire_channel's @rps_flow_id 853 + * @rps_slot_map: bitmap of in-flight entries in @rps_slot 854 + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() 874 855 * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 875 856 * @rxq_flush_pending: Count of number of receive queues that need to be flushed. 876 857 * Decremented when the efx_flush_rx_queue() is called. ··· 1027 1004 struct mutex rps_mutex; 1028 1005 unsigned int rps_expire_channel; 1029 1006 unsigned int rps_expire_index; 1007 + unsigned long rps_slot_map; 1008 + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; 1030 1009 #endif 1031 1010 1032 1011 atomic_t active_queues;
+31 -29
drivers/net/ethernet/sfc/rx.c
··· 827 827 828 828 #ifdef CONFIG_RFS_ACCEL 829 829 830 - /** 831 - * struct efx_async_filter_insertion - Request to asynchronously insert a filter 832 - * @net_dev: Reference to the netdevice 833 - * @spec: The filter to insert 834 - * @work: Workitem for this request 835 - * @rxq_index: Identifies the channel for which this request was made 836 - * @flow_id: Identifies the kernel-side flow for which this request was made 837 - */ 838 - struct efx_async_filter_insertion { 839 - struct net_device *net_dev; 840 - struct efx_filter_spec spec; 841 - struct work_struct work; 842 - u16 rxq_index; 843 - u32 flow_id; 844 - }; 845 - 846 830 static void efx_filter_rfs_work(struct work_struct *data) 847 831 { 848 832 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, 849 833 work); 850 834 struct efx_nic *efx = netdev_priv(req->net_dev); 851 835 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); 836 + int slot_idx = req - efx->rps_slot; 852 837 int rc; 853 838 854 - rc = efx->type->filter_insert(efx, &req->spec, false); 839 + rc = efx->type->filter_insert(efx, &req->spec, true); 855 840 if (rc >= 0) { 856 841 /* Remember this so we can check whether to expire the filter 857 842 * later. 
··· 863 878 } 864 879 865 880 /* Release references */ 881 + clear_bit(slot_idx, &efx->rps_slot_map); 866 882 dev_put(req->net_dev); 867 - kfree(req); 868 883 } 869 884 870 885 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, ··· 873 888 struct efx_nic *efx = netdev_priv(net_dev); 874 889 struct efx_async_filter_insertion *req; 875 890 struct flow_keys fk; 891 + int slot_idx; 892 + int rc; 876 893 877 - if (flow_id == RPS_FLOW_ID_INVALID) 878 - return -EINVAL; 894 + /* find a free slot */ 895 + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) 896 + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) 897 + break; 898 + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) 899 + return -EBUSY; 879 900 880 - if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) 881 - return -EPROTONOSUPPORT; 901 + if (flow_id == RPS_FLOW_ID_INVALID) { 902 + rc = -EINVAL; 903 + goto out_clear; 904 + } 882 905 883 - if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) 884 - return -EPROTONOSUPPORT; 885 - if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) 886 - return -EPROTONOSUPPORT; 906 + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { 907 + rc = -EPROTONOSUPPORT; 908 + goto out_clear; 909 + } 887 910 888 - req = kmalloc(sizeof(*req), GFP_ATOMIC); 889 - if (!req) 890 - return -ENOMEM; 911 + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { 912 + rc = -EPROTONOSUPPORT; 913 + goto out_clear; 914 + } 915 + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { 916 + rc = -EPROTONOSUPPORT; 917 + goto out_clear; 918 + } 891 919 920 + req = efx->rps_slot + slot_idx; 892 921 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, 893 922 efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, 894 923 rxq_index); ··· 932 933 req->flow_id = flow_id; 933 934 schedule_work(&req->work); 934 935 return 0; 936 + out_clear: 937 + clear_bit(slot_idx, &efx->rps_slot_map); 938 + return rc; 935 939 } 936 940 937 941 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
··· 347 347 #define MTL_RX_OVERFLOW_INT BIT(16) 348 348 349 349 /* Default operating mode of the MAC */ 350 - #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ 350 + #define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ 351 351 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) 352 352 353 353 /* To dump the core regs excluding the Address Registers */
-7
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
··· 31 31 32 32 value |= GMAC_CORE_INIT; 33 33 34 - /* Clear ACS bit because Ethernet switch tagging formats such as 35 - * Broadcom tags can look like invalid LLC/SNAP packets and cause the 36 - * hardware to truncate packets on reception. 37 - */ 38 - if (netdev_uses_dsa(dev)) 39 - value &= ~GMAC_CONFIG_ACS; 40 - 41 34 if (mtu > 1500) 42 35 value |= GMAC_CONFIG_2K; 43 36 if (mtu > 2000)
+6 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3495 3495 3496 3496 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3497 3497 * Type frames (LLC/LLC-SNAP) 3498 + * 3499 + * llc_snap is never checked in GMAC >= 4, so this ACS 3500 + * feature is always disabled and packets need to be 3501 + * stripped manually. 3498 3502 */ 3499 - if (unlikely(status != llc_snap)) 3503 + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || 3504 + unlikely(status != llc_snap)) 3500 3505 frame_len -= ETH_FCS_LEN; 3501 3506 3502 3507 if (netif_msg_rx_status(priv)) {
+2 -3
drivers/net/macsec.c
··· 3277 3277 3278 3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3279 3279 if (err < 0) 3280 - goto put_dev; 3280 + goto unregister; 3281 3281 3282 3282 /* need to be already registered so that ->init has run and 3283 3283 * the MAC addr is set ··· 3316 3316 macsec_del_dev(macsec); 3317 3317 unlink: 3318 3318 netdev_upper_dev_unlink(real_dev, dev); 3319 - put_dev: 3320 - dev_put(real_dev); 3319 + unregister: 3321 3320 unregister_netdevice(dev); 3322 3321 return err; 3323 3322 }
+177 -1
drivers/net/phy/microchip.c
··· 20 20 #include <linux/ethtool.h> 21 21 #include <linux/phy.h> 22 22 #include <linux/microchipphy.h> 23 + #include <linux/delay.h> 23 24 24 25 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 25 26 #define DRIVER_DESC "Microchip LAN88XX PHY driver" ··· 30 29 int chip_rev; 31 30 __u32 wolopts; 32 31 }; 32 + 33 + static int lan88xx_read_page(struct phy_device *phydev) 34 + { 35 + return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS); 36 + } 37 + 38 + static int lan88xx_write_page(struct phy_device *phydev, int page) 39 + { 40 + return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page); 41 + } 33 42 34 43 static int lan88xx_phy_config_intr(struct phy_device *phydev) 35 44 { ··· 75 64 genphy_suspend(phydev); 76 65 77 66 return 0; 67 + } 68 + 69 + static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr, 70 + u32 data) 71 + { 72 + int val, save_page, ret = 0; 73 + u16 buf; 74 + 75 + /* Save current page */ 76 + save_page = phy_save_page(phydev); 77 + if (save_page < 0) { 78 + pr_warn("Failed to get current page\n"); 79 + goto err; 80 + } 81 + 82 + /* Switch to TR page */ 83 + lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR); 84 + 85 + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA, 86 + (data & 0xFFFF)); 87 + if (ret < 0) { 88 + pr_warn("Failed to write TR low data\n"); 89 + goto err; 90 + } 91 + 92 + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA, 93 + (data & 0x00FF0000) >> 16); 94 + if (ret < 0) { 95 + pr_warn("Failed to write TR high data\n"); 96 + goto err; 97 + } 98 + 99 + /* Config control bits [15:13] of register */ 100 + buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */ 101 + buf |= 0x8000; /* Set [15] to Packet transmit */ 102 + 103 + ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf); 104 + if (ret < 0) { 105 + pr_warn("Failed to write data in reg\n"); 106 + goto err; 107 + } 108 + 109 + usleep_range(1000, 2000);/* Wait for Data to be written */ 110 + val = __phy_read(phydev, 
LAN88XX_EXT_PAGE_TR_CR); 111 + if (!(val & 0x8000)) 112 + pr_warn("TR Register[0x%X] configuration failed\n", regaddr); 113 + err: 114 + return phy_restore_page(phydev, save_page, ret); 115 + } 116 + 117 + static void lan88xx_config_TR_regs(struct phy_device *phydev) 118 + { 119 + int err; 120 + 121 + /* Get access to Channel 0x1, Node 0xF , Register 0x01. 122 + * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf, 123 + * MrvlTrFix1000Kp, MasterEnableTR bits. 124 + */ 125 + err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A); 126 + if (err < 0) 127 + pr_warn("Failed to Set Register[0x0F82]\n"); 128 + 129 + /* Get access to Channel b'10, Node b'1101, Register 0x06. 130 + * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv, 131 + * SSTrKp1000Mas bits. 132 + */ 133 + err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F); 134 + if (err < 0) 135 + pr_warn("Failed to Set Register[0x168C]\n"); 136 + 137 + /* Get access to Channel b'10, Node b'1111, Register 0x11. 138 + * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh 139 + * bits 140 + */ 141 + err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620); 142 + if (err < 0) 143 + pr_warn("Failed to Set Register[0x17A2]\n"); 144 + 145 + /* Get access to Channel b'10, Node b'1101, Register 0x10. 146 + * Write 24-bit value 0xEEFFDD to register. Setting 147 + * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000, 148 + * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits. 149 + */ 150 + err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD); 151 + if (err < 0) 152 + pr_warn("Failed to Set Register[0x16A0]\n"); 153 + 154 + /* Get access to Channel b'10, Node b'1101, Register 0x13. 155 + * Write 24-bit value 0x071448 to register. Setting 156 + * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits. 
157 + */ 158 + err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448); 159 + if (err < 0) 160 + pr_warn("Failed to Set Register[0x16A6]\n"); 161 + 162 + /* Get access to Channel b'10, Node b'1101, Register 0x12. 163 + * Write 24-bit value 0x13132F to register. Setting 164 + * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits. 165 + */ 166 + err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F); 167 + if (err < 0) 168 + pr_warn("Failed to Set Register[0x16A4]\n"); 169 + 170 + /* Get access to Channel b'10, Node b'1101, Register 0x14. 171 + * Write 24-bit value 0x0 to register. Setting eee_3level_delay, 172 + * eee_TrKf_freeze_delay bits. 173 + */ 174 + err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0); 175 + if (err < 0) 176 + pr_warn("Failed to Set Register[0x16A8]\n"); 177 + 178 + /* Get access to Channel b'01, Node b'1111, Register 0x34. 179 + * Write 24-bit value 0x91B06C to register. Setting 180 + * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000, 181 + * FastMseSearchUpdGain1000 bits. 182 + */ 183 + err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C); 184 + if (err < 0) 185 + pr_warn("Failed to Set Register[0x0FE8]\n"); 186 + 187 + /* Get access to Channel b'01, Node b'1111, Register 0x3E. 188 + * Write 24-bit value 0xC0A028 to register. Setting 189 + * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000, 190 + * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits. 191 + */ 192 + err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028); 193 + if (err < 0) 194 + pr_warn("Failed to Set Register[0x0FFC]\n"); 195 + 196 + /* Get access to Channel b'01, Node b'1111, Register 0x35. 197 + * Write 24-bit value 0x041600 to register. Setting 198 + * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000, 199 + * FastMsePhChangeDelay1000 bits. 200 + */ 201 + err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600); 202 + if (err < 0) 203 + pr_warn("Failed to Set Register[0x0FEA]\n"); 204 + 205 + /* Get access to Channel b'10, Node b'1101, Register 0x03. 
206 + * Write 24-bit value 0x000004 to register. Setting TrFreeze bits. 207 + */ 208 + err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004); 209 + if (err < 0) 210 + pr_warn("Failed to Set Register[0x1686]\n"); 78 211 } 79 212 80 213 static int lan88xx_probe(struct phy_device *phydev) ··· 287 132 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0); 288 133 } 289 134 135 + static int lan88xx_config_init(struct phy_device *phydev) 136 + { 137 + int val; 138 + 139 + genphy_config_init(phydev); 140 + /*Zerodetect delay enable */ 141 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, 142 + PHY_ARDENNES_MMD_DEV_3_PHY_CFG); 143 + val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_; 144 + 145 + phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG, 146 + val); 147 + 148 + /* Config DSP registers */ 149 + lan88xx_config_TR_regs(phydev); 150 + 151 + return 0; 152 + } 153 + 290 154 static int lan88xx_config_aneg(struct phy_device *phydev) 291 155 { 292 156 lan88xx_set_mdix(phydev); ··· 325 151 .probe = lan88xx_probe, 326 152 .remove = lan88xx_remove, 327 153 328 - .config_init = genphy_config_init, 154 + .config_init = lan88xx_config_init, 329 155 .config_aneg = lan88xx_config_aneg, 330 156 331 157 .ack_interrupt = lan88xx_phy_ack_interrupt, ··· 334 160 .suspend = lan88xx_suspend, 335 161 .resume = genphy_resume, 336 162 .set_wol = lan88xx_set_wol, 163 + .read_page = lan88xx_read_page, 164 + .write_page = lan88xx_write_page, 337 165 } }; 338 166 339 167 module_phy_driver(microchip_phy_driver);
+19
drivers/net/team/team.c
··· 261 261 } 262 262 } 263 263 264 + static bool __team_option_inst_tmp_find(const struct list_head *opts, 265 + const struct team_option_inst *needle) 266 + { 267 + struct team_option_inst *opt_inst; 268 + 269 + list_for_each_entry(opt_inst, opts, tmp_list) 270 + if (opt_inst == needle) 271 + return true; 272 + return false; 273 + } 274 + 264 275 static int __team_options_register(struct team *team, 265 276 const struct team_option *option, 266 277 size_t option_count) ··· 2579 2568 if (err) 2580 2569 goto team_put; 2581 2570 opt_inst->changed = true; 2571 + 2572 + /* dumb/evil user-space can send us duplicate opt, 2573 + * keep only the last one 2574 + */ 2575 + if (__team_option_inst_tmp_find(&opt_inst_list, 2576 + opt_inst)) 2577 + continue; 2578 + 2582 2579 list_add(&opt_inst->tmp_list, &opt_inst_list); 2583 2580 } 2584 2581 if (!opt_found) {
+1 -6
drivers/net/tun.c
··· 1102 1102 goto drop; 1103 1103 1104 1104 len = run_ebpf_filter(tun, skb, len); 1105 - 1106 - /* Trim extra bytes since we may insert vlan proto & TCI 1107 - * in tun_put_user(). 1108 - */ 1109 - len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; 1110 - if (len <= 0 || pskb_trim(skb, len)) 1105 + if (len == 0 || pskb_trim(skb, len)) 1111 1106 goto drop; 1112 1107 1113 1108 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+1
drivers/net/usb/qmi_wwan.c
··· 1107 1107 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ 1108 1108 {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ 1109 1109 {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ 1110 + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ 1110 1111 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 1111 1112 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ 1112 1113 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
+48 -31
drivers/net/virtio_net.c
··· 147 147 struct xdp_rxq_info xdp_rxq; 148 148 }; 149 149 150 + /* Control VQ buffers: protected by the rtnl lock */ 151 + struct control_buf { 152 + struct virtio_net_ctrl_hdr hdr; 153 + virtio_net_ctrl_ack status; 154 + struct virtio_net_ctrl_mq mq; 155 + u8 promisc; 156 + u8 allmulti; 157 + __virtio16 vid; 158 + __virtio64 offloads; 159 + }; 160 + 150 161 struct virtnet_info { 151 162 struct virtio_device *vdev; 152 163 struct virtqueue *cvq; ··· 203 192 struct hlist_node node; 204 193 struct hlist_node node_dead; 205 194 206 - /* Control VQ buffers: protected by the rtnl lock */ 207 - struct virtio_net_ctrl_hdr ctrl_hdr; 208 - virtio_net_ctrl_ack ctrl_status; 209 - struct virtio_net_ctrl_mq ctrl_mq; 210 - u8 ctrl_promisc; 211 - u8 ctrl_allmulti; 212 - u16 ctrl_vid; 213 - u64 ctrl_offloads; 195 + struct control_buf *ctrl; 214 196 215 197 /* Ethtool settings */ 216 198 u8 duplex; ··· 1273 1269 { 1274 1270 struct receive_queue *rq = 1275 1271 container_of(napi, struct receive_queue, napi); 1276 - unsigned int received; 1272 + struct virtnet_info *vi = rq->vq->vdev->priv; 1273 + struct send_queue *sq; 1274 + unsigned int received, qp; 1277 1275 bool xdp_xmit = false; 1278 1276 1279 1277 virtnet_poll_cleantx(rq); ··· 1286 1280 if (received < budget) 1287 1281 virtqueue_napi_complete(napi, rq->vq, received); 1288 1282 1289 - if (xdp_xmit) 1283 + if (xdp_xmit) { 1284 + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + 1285 + smp_processor_id(); 1286 + sq = &vi->sq[qp]; 1287 + virtqueue_kick(sq->vq); 1290 1288 xdp_do_flush_map(); 1289 + } 1291 1290 1292 1291 return received; 1293 1292 } ··· 1465 1454 /* Caller should know better */ 1466 1455 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 1467 1456 1468 - vi->ctrl_status = ~0; 1469 - vi->ctrl_hdr.class = class; 1470 - vi->ctrl_hdr.cmd = cmd; 1457 + vi->ctrl->status = ~0; 1458 + vi->ctrl->hdr.class = class; 1459 + vi->ctrl->hdr.cmd = cmd; 1471 1460 /* Add header */ 1472 - sg_init_one(&hdr, &vi->ctrl_hdr, 
sizeof(vi->ctrl_hdr)); 1461 + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); 1473 1462 sgs[out_num++] = &hdr; 1474 1463 1475 1464 if (out) 1476 1465 sgs[out_num++] = out; 1477 1466 1478 1467 /* Add return status. */ 1479 - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); 1468 + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); 1480 1469 sgs[out_num] = &stat; 1481 1470 1482 1471 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1483 1472 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1484 1473 1485 1474 if (unlikely(!virtqueue_kick(vi->cvq))) 1486 - return vi->ctrl_status == VIRTIO_NET_OK; 1475 + return vi->ctrl->status == VIRTIO_NET_OK; 1487 1476 1488 1477 /* Spin for a response, the kick causes an ioport write, trapping 1489 1478 * into the hypervisor, so the request should be handled immediately. ··· 1492 1481 !virtqueue_is_broken(vi->cvq)) 1493 1482 cpu_relax(); 1494 1483 1495 - return vi->ctrl_status == VIRTIO_NET_OK; 1484 + return vi->ctrl->status == VIRTIO_NET_OK; 1496 1485 } 1497 1486 1498 1487 static int virtnet_set_mac_address(struct net_device *dev, void *p) ··· 1604 1593 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) 1605 1594 return 0; 1606 1595 1607 - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1608 - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); 1596 + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); 1597 + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); 1609 1598 1610 1599 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, 1611 1600 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { ··· 1664 1653 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1665 1654 return; 1666 1655 1667 - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); 1668 - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1656 + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); 1657 + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1669 1658 
1670 - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); 1659 + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); 1671 1660 1672 1661 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1673 1662 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1674 1663 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1675 - vi->ctrl_promisc ? "en" : "dis"); 1664 + vi->ctrl->promisc ? "en" : "dis"); 1676 1665 1677 - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); 1666 + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); 1678 1667 1679 1668 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1680 1669 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1681 1670 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1682 - vi->ctrl_allmulti ? "en" : "dis"); 1671 + vi->ctrl->allmulti ? "en" : "dis"); 1683 1672 1684 1673 uc_count = netdev_uc_count(dev); 1685 1674 mc_count = netdev_mc_count(dev); ··· 1725 1714 struct virtnet_info *vi = netdev_priv(dev); 1726 1715 struct scatterlist sg; 1727 1716 1728 - vi->ctrl_vid = vid; 1729 - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1717 + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 1718 + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 1730 1719 1731 1720 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1732 1721 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) ··· 1740 1729 struct virtnet_info *vi = netdev_priv(dev); 1741 1730 struct scatterlist sg; 1742 1731 1743 - vi->ctrl_vid = vid; 1744 - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); 1732 + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); 1733 + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); 1745 1734 1746 1735 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 1747 1736 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) ··· 2137 2126 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) 2138 2127 { 2139 2128 struct scatterlist sg; 2140 - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); 2129 + vi->ctrl->offloads = 
cpu_to_virtio64(vi->vdev, offloads); 2141 2130 2142 - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); 2131 + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); 2143 2132 2144 2133 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, 2145 2134 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { ··· 2362 2351 2363 2352 kfree(vi->rq); 2364 2353 kfree(vi->sq); 2354 + kfree(vi->ctrl); 2365 2355 } 2366 2356 2367 2357 static void _free_receive_bufs(struct virtnet_info *vi) ··· 2555 2543 { 2556 2544 int i; 2557 2545 2546 + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); 2547 + if (!vi->ctrl) 2548 + goto err_ctrl; 2558 2549 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); 2559 2550 if (!vi->sq) 2560 2551 goto err_sq; ··· 2586 2571 err_rq: 2587 2572 kfree(vi->sq); 2588 2573 err_sq: 2574 + kfree(vi->ctrl); 2575 + err_ctrl: 2589 2576 return -ENOMEM; 2590 2577 } 2591 2578
+13 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 1218 1218 union { 1219 1219 void *ptr; 1220 1220 struct ethhdr *eth; 1221 + struct vlan_ethhdr *veth; 1221 1222 struct iphdr *ipv4; 1222 1223 struct ipv6hdr *ipv6; 1223 1224 struct tcphdr *tcp; ··· 1229 1228 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) 1230 1229 return 0; 1231 1230 1231 + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || 1232 + skb->protocol == cpu_to_be16(ETH_P_8021AD)) 1233 + hlen = sizeof(struct vlan_ethhdr); 1234 + else 1235 + hlen = sizeof(struct ethhdr); 1236 + 1232 1237 hdr.eth = eth_hdr(skb); 1233 1238 if (gdesc->rcd.v4) { 1234 - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); 1235 - hdr.ptr += sizeof(struct ethhdr); 1239 + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && 1240 + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); 1241 + hdr.ptr += hlen; 1236 1242 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); 1237 1243 hlen = hdr.ipv4->ihl << 2; 1238 1244 hdr.ptr += hdr.ipv4->ihl << 2; 1239 1245 } else if (gdesc->rcd.v6) { 1240 - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); 1241 - hdr.ptr += sizeof(struct ethhdr); 1246 + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && 1247 + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); 1248 + hdr.ptr += hlen; 1242 1249 /* Use an estimated value, since we also need to handle 1243 1250 * TSO case. 1244 1251 */
+2 -2
drivers/net/vmxnet3/vmxnet3_int.h
··· 69 69 /* 70 70 * Version numbers 71 71 */ 72 - #define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" 72 + #define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" 73 73 74 74 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75 - #define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 75 + #define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 76 76 77 77 #if defined(CONFIG_PCI_MSI) 78 78 /* RSS only makes sense if MSI-X is supported. */
+1 -2
drivers/nvdimm/Kconfig
··· 103 103 Select Y if unsure 104 104 105 105 config OF_PMEM 106 - # FIXME: make tristate once OF_NUMA dependency removed 107 - bool "Device-tree support for persistent memory regions" 106 + tristate "Device-tree support for persistent memory regions" 108 107 depends on OF 109 108 default LIBNVDIMM 110 109 help
+12 -10
drivers/nvdimm/dimm_devs.c
··· 88 88 int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) 89 89 { 90 90 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); 91 + int rc = validate_dimm(ndd), cmd_rc = 0; 91 92 struct nd_cmd_get_config_data_hdr *cmd; 92 93 struct nvdimm_bus_descriptor *nd_desc; 93 - int rc = validate_dimm(ndd); 94 94 u32 max_cmd_size, config_size; 95 95 size_t offset; 96 96 ··· 124 124 cmd->in_offset = offset; 125 125 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), 126 126 ND_CMD_GET_CONFIG_DATA, cmd, 127 - cmd->in_length + sizeof(*cmd), NULL); 128 - if (rc || cmd->status) { 129 - rc = -ENXIO; 127 + cmd->in_length + sizeof(*cmd), &cmd_rc); 128 + if (rc < 0) 129 + break; 130 + if (cmd_rc < 0) { 131 + rc = cmd_rc; 130 132 break; 131 133 } 132 134 memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); ··· 142 140 int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, 143 141 void *buf, size_t len) 144 142 { 145 - int rc = validate_dimm(ndd); 146 143 size_t max_cmd_size, buf_offset; 147 144 struct nd_cmd_set_config_hdr *cmd; 145 + int rc = validate_dimm(ndd), cmd_rc = 0; 148 146 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); 149 147 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; 150 148 ··· 166 164 for (buf_offset = 0; len; len -= cmd->in_length, 167 165 buf_offset += cmd->in_length) { 168 166 size_t cmd_size; 169 - u32 *status; 170 167 171 168 cmd->in_offset = offset + buf_offset; 172 169 cmd->in_length = min(max_cmd_size, len); ··· 173 172 174 173 /* status is output in the last 4-bytes of the command buffer */ 175 174 cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); 176 - status = ((void *) cmd) + cmd_size - sizeof(u32); 177 175 178 176 rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), 179 - ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); 180 - if (rc || *status) { 181 - rc = rc ? 
rc : -ENXIO; 177 + ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); 178 + if (rc < 0) 179 + break; 180 + if (cmd_rc < 0) { 181 + rc = cmd_rc; 182 182 break; 183 183 } 184 184 }
+1 -1
drivers/nvdimm/of_pmem.c
··· 67 67 */ 68 68 memset(&ndr_desc, 0, sizeof(ndr_desc)); 69 69 ndr_desc.attr_groups = region_attr_groups; 70 - ndr_desc.numa_node = of_node_to_nid(np); 70 + ndr_desc.numa_node = dev_to_node(&pdev->dev); 71 71 ndr_desc.res = &pdev->resource[i]; 72 72 ndr_desc.of_node = np; 73 73 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
+9 -10
drivers/rapidio/devices/rio_mport_cdev.c
··· 740 740 tx->callback = dma_xfer_callback; 741 741 tx->callback_param = req; 742 742 743 - req->dmach = chan; 744 - req->sync = sync; 745 743 req->status = DMA_IN_PROGRESS; 746 - init_completion(&req->req_comp); 747 744 kref_get(&req->refcount); 748 745 749 746 cookie = dmaengine_submit(tx); ··· 828 831 if (!req) 829 832 return -ENOMEM; 830 833 831 - kref_init(&req->refcount); 832 - 833 834 ret = get_dma_channel(priv); 834 835 if (ret) { 835 836 kfree(req); 836 837 return ret; 837 838 } 839 + chan = priv->dmach; 840 + 841 + kref_init(&req->refcount); 842 + init_completion(&req->req_comp); 843 + req->dir = dir; 844 + req->filp = filp; 845 + req->priv = priv; 846 + req->dmach = chan; 847 + req->sync = sync; 838 848 839 849 /* 840 850 * If parameter loc_addr != NULL, we are transferring data from/to ··· 928 924 map->virt_addr + (baddr - map->phys_addr) + 929 925 xfer->offset, xfer->length); 930 926 } 931 - 932 - req->dir = dir; 933 - req->filp = filp; 934 - req->priv = priv; 935 - chan = priv->dmach; 936 927 937 928 nents = dma_map_sg(chan->device->dev, 938 929 req->sgt.sgl, req->sgt.nents, dir);
-1
drivers/s390/block/dasd_diag.c
··· 27 27 #include <asm/io.h> 28 28 #include <asm/irq.h> 29 29 #include <asm/vtoc.h> 30 - #include <asm/diag.h> 31 30 32 31 #include "dasd_int.h" 33 32 #include "dasd_diag.h"
+1 -1
drivers/s390/char/sclp_early_core.c
··· 18 18 * Used to keep track of the size of the event masks. Qemu until version 2.11 19 19 * only supports 4 and needs a workaround. 20 20 */ 21 - bool sclp_mask_compat_mode; 21 + bool sclp_mask_compat_mode __section(.data); 22 22 23 23 void sclp_early_wait_irq(void) 24 24 {
-1
drivers/s390/net/qeth_l2_main.c
··· 21 21 #include <linux/list.h> 22 22 #include <linux/hash.h> 23 23 #include <linux/hashtable.h> 24 - #include <linux/string.h> 25 24 #include <asm/setup.h> 26 25 #include "qeth_core.h" 27 26 #include "qeth_l2.h"
+1 -1
drivers/s390/net/smsgiucv.c
··· 176 176 177 177 static void __exit smsg_exit(void) 178 178 { 179 - cpcmd("SET SMSG IUCV", NULL, 0, NULL); 179 + cpcmd("SET SMSG OFF", NULL, 0, NULL); 180 180 device_unregister(smsg_dev); 181 181 iucv_unregister(&smsg_handler, 1); 182 182 driver_unregister(&smsg_driver);
+7
drivers/watchdog/aspeed_wdt.c
··· 55 55 #define WDT_CTRL_WDT_INTR BIT(2) 56 56 #define WDT_CTRL_RESET_SYSTEM BIT(1) 57 57 #define WDT_CTRL_ENABLE BIT(0) 58 + #define WDT_TIMEOUT_STATUS 0x10 59 + #define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) 58 60 59 61 /* 60 62 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if ··· 194 192 struct device_node *np; 195 193 const char *reset_type; 196 194 u32 duration; 195 + u32 status; 197 196 int ret; 198 197 199 198 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); ··· 309 306 */ 310 307 writel(duration - 1, wdt->base + WDT_RESET_WIDTH); 311 308 } 309 + 310 + status = readl(wdt->base + WDT_TIMEOUT_STATUS); 311 + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) 312 + wdt->wdd.bootstatus = WDIOF_CARDRESET; 312 313 313 314 ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); 314 315 if (ret) {
+4 -2
drivers/watchdog/renesas_wdt.c
··· 121 121 } 122 122 123 123 static const struct watchdog_info rwdt_ident = { 124 - .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, 124 + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | 125 + WDIOF_CARDRESET, 125 126 .identity = "Renesas WDT Watchdog", 126 127 }; 127 128 ··· 198 197 return PTR_ERR(clk); 199 198 200 199 pm_runtime_enable(&pdev->dev); 201 - 202 200 pm_runtime_get_sync(&pdev->dev); 203 201 priv->clk_rate = clk_get_rate(clk); 202 + priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & 203 + RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; 204 204 pm_runtime_put(&pdev->dev); 205 205 206 206 if (!priv->clk_rate) {
+1 -1
drivers/watchdog/sch311x_wdt.c
··· 299 299 if (sch311x_wdt_set_heartbeat(new_timeout)) 300 300 return -EINVAL; 301 301 sch311x_wdt_keepalive(); 302 - /* Fall */ 302 + /* Fall through */ 303 303 case WDIOC_GETTIMEOUT: 304 304 return put_user(timeout, p); 305 305 default:
+1 -1
drivers/watchdog/w83977f_wdt.c
··· 427 427 return -EINVAL; 428 428 429 429 wdt_keepalive(); 430 - /* Fall */ 430 + /* Fall through */ 431 431 432 432 case WDIOC_GETTIMEOUT: 433 433 return put_user(timeout, uarg.i);
+1 -1
drivers/watchdog/wafer5823wdt.c
··· 178 178 timeout = new_timeout; 179 179 wafwdt_stop(); 180 180 wafwdt_start(); 181 - /* Fall */ 181 + /* Fall through */ 182 182 case WDIOC_GETTIMEOUT: 183 183 return put_user(timeout, p); 184 184
+1 -1
drivers/xen/xen-pciback/conf_space_quirks.c
··· 95 95 struct xen_pcibk_config_quirk *quirk; 96 96 int ret = 0; 97 97 98 - quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC); 98 + quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); 99 99 if (!quirk) { 100 100 ret = -ENOMEM; 101 101 goto out;
+4 -4
drivers/xen/xen-pciback/pci_stub.c
··· 71 71 72 72 dev_dbg(&dev->dev, "pcistub_device_alloc\n"); 73 73 74 - psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC); 74 + psdev = kzalloc(sizeof(*psdev), GFP_KERNEL); 75 75 if (!psdev) 76 76 return NULL; 77 77 ··· 364 364 * here and then to call kfree(pci_get_drvdata(psdev->dev)). 365 365 */ 366 366 dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]") 367 - + strlen(pci_name(dev)) + 1, GFP_ATOMIC); 367 + + strlen(pci_name(dev)) + 1, GFP_KERNEL); 368 368 if (!dev_data) { 369 369 err = -ENOMEM; 370 370 goto out; ··· 577 577 } 578 578 579 579 if (!match) { 580 - pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC); 580 + pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); 581 581 if (!pci_dev_id) { 582 582 err = -ENOMEM; 583 583 goto out; ··· 1149 1149 } 1150 1150 dev = psdev->dev; 1151 1151 1152 - field = kzalloc(sizeof(*field), GFP_ATOMIC); 1152 + field = kzalloc(sizeof(*field), GFP_KERNEL); 1153 1153 if (!field) { 1154 1154 err = -ENOMEM; 1155 1155 goto out;
+2 -1
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 403 403 { 404 404 struct { 405 405 struct xsd_sockmsg hdr; 406 - const char body[16]; 406 + char body[16]; 407 407 } msg; 408 408 int rc; 409 409 ··· 412 412 msg.hdr.len = strlen(reply) + 1; 413 413 if (msg.hdr.len > sizeof(msg.body)) 414 414 return -E2BIG; 415 + memcpy(&msg.body, reply, msg.hdr.len); 415 416 416 417 mutex_lock(&u->reply_mutex); 417 418 rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+8 -1
fs/afs/server.c
··· 428 428 } 429 429 write_sequnlock(&net->fs_lock); 430 430 431 - if (deleted) 431 + if (deleted) { 432 + write_seqlock(&net->fs_addr_lock); 433 + if (!hlist_unhashed(&server->addr4_link)) 434 + hlist_del_rcu(&server->addr4_link); 435 + if (!hlist_unhashed(&server->addr6_link)) 436 + hlist_del_rcu(&server->addr6_link); 437 + write_sequnlock(&net->fs_addr_lock); 432 438 afs_destroy_server(net, server); 439 + } 433 440 } 434 441 } 435 442
+1 -1
fs/autofs4/root.c
··· 749 749 750 750 autofs4_del_active(dentry); 751 751 752 - inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); 752 + inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); 753 753 if (!inode) 754 754 return -ENOMEM; 755 755 d_add(dentry, inode);
+4 -4
fs/binfmt_elf.c
··· 377 377 } else 378 378 map_addr = vm_mmap(filep, addr, size, prot, type, off); 379 379 380 - if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) 381 - pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", 382 - task_pid_nr(current), current->comm, 383 - (void *)addr); 380 + if ((type & MAP_FIXED_NOREPLACE) && 381 + PTR_ERR((void *)map_addr) == -EEXIST) 382 + pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", 383 + task_pid_nr(current), current->comm, (void *)addr); 384 384 385 385 return(map_addr); 386 386 }
+25
fs/btrfs/ctree.h
··· 459 459 unsigned short full; 460 460 unsigned short type; 461 461 unsigned short failfast; 462 + 463 + /* 464 + * Qgroup equivalent for @size @reserved 465 + * 466 + * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care 467 + * about things like csum size nor how many tree blocks it will need to 468 + * reserve. 469 + * 470 + * Qgroup cares more about net change of the extent usage. 471 + * 472 + * So for one newly inserted file extent, in worst case it will cause 473 + * leaf split and level increase, nodesize for each file extent is 474 + * already too much. 475 + * 476 + * In short, qgroup_size/reserved is the upper limit of possible needed 477 + * qgroup metadata reservation. 478 + */ 479 + u64 qgroup_rsv_size; 480 + u64 qgroup_rsv_reserved; 462 481 }; 463 482 464 483 /* ··· 732 713 * (device replace, resize, device add/delete, balance) 733 714 */ 734 715 #define BTRFS_FS_EXCL_OP 16 716 + 717 + /* 718 + * To info transaction_kthread we need an immediate commit so it doesn't 719 + * need to wait for commit_interval 720 + */ 721 + #define BTRFS_FS_NEED_ASYNC_COMMIT 17 735 722 736 723 struct btrfs_fs_info { 737 724 u8 fsid[BTRFS_FSID_SIZE];
+16 -4
fs/btrfs/delayed-inode.c
··· 556 556 dst_rsv = &fs_info->delayed_block_rsv; 557 557 558 558 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 559 + 560 + /* 561 + * Here we migrate space rsv from transaction rsv, since have already 562 + * reserved space when starting a transaction. So no need to reserve 563 + * qgroup space here. 564 + */ 559 565 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); 560 566 if (!ret) { 561 567 trace_btrfs_space_reservation(fs_info, "delayed_item", ··· 583 577 return; 584 578 585 579 rsv = &fs_info->delayed_block_rsv; 586 - btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved); 580 + /* 581 + * Check btrfs_delayed_item_reserve_metadata() to see why we don't need 582 + * to release/reserve qgroup space. 583 + */ 587 584 trace_btrfs_space_reservation(fs_info, "delayed_item", 588 585 item->key.objectid, item->bytes_reserved, 589 586 0); ··· 611 602 612 603 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 613 604 614 - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); 615 - if (ret < 0) 616 - return ret; 617 605 /* 618 606 * btrfs_dirty_inode will update the inode under btrfs_join_transaction 619 607 * which doesn't reserve space for speed. This is a problem since we ··· 622 616 */ 623 617 if (!src_rsv || (!trans->bytes_reserved && 624 618 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { 619 + ret = btrfs_qgroup_reserve_meta_prealloc(root, 620 + fs_info->nodesize, true); 621 + if (ret < 0) 622 + return ret; 625 623 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, 626 624 BTRFS_RESERVE_NO_FLUSH); 627 625 /* ··· 644 634 "delayed_inode", 645 635 btrfs_ino(inode), 646 636 num_bytes, 1); 637 + } else { 638 + btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize); 647 639 } 648 640 return ret; 649 641 }
+14 -5
fs/btrfs/delayed-ref.c
··· 540 540 struct btrfs_delayed_ref_head *head_ref, 541 541 struct btrfs_qgroup_extent_record *qrecord, 542 542 u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, 543 - int action, int is_data, int *qrecord_inserted_ret, 543 + int action, int is_data, int is_system, 544 + int *qrecord_inserted_ret, 544 545 int *old_ref_mod, int *new_ref_mod) 546 + 545 547 { 546 548 struct btrfs_delayed_ref_head *existing; 547 549 struct btrfs_delayed_ref_root *delayed_refs; ··· 587 585 head_ref->ref_mod = count_mod; 588 586 head_ref->must_insert_reserved = must_insert_reserved; 589 587 head_ref->is_data = is_data; 588 + head_ref->is_system = is_system; 590 589 head_ref->ref_tree = RB_ROOT; 591 590 INIT_LIST_HEAD(&head_ref->ref_add_list); 592 591 RB_CLEAR_NODE(&head_ref->href_node); ··· 775 772 struct btrfs_delayed_ref_root *delayed_refs; 776 773 struct btrfs_qgroup_extent_record *record = NULL; 777 774 int qrecord_inserted; 775 + int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID); 778 776 779 777 BUG_ON(extent_op && extent_op->is_data); 780 778 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); ··· 804 800 */ 805 801 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, 806 802 bytenr, num_bytes, 0, 0, action, 0, 807 - &qrecord_inserted, old_ref_mod, 808 - new_ref_mod); 803 + is_system, &qrecord_inserted, 804 + old_ref_mod, new_ref_mod); 809 805 810 806 add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, 811 807 num_bytes, parent, ref_root, level, action); ··· 872 868 */ 873 869 head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, 874 870 bytenr, num_bytes, ref_root, reserved, 875 - action, 1, &qrecord_inserted, 871 + action, 1, 0, &qrecord_inserted, 876 872 old_ref_mod, new_ref_mod); 877 873 878 874 add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, ··· 902 898 delayed_refs = &trans->transaction->delayed_refs; 903 899 spin_lock(&delayed_refs->lock); 904 900 901 + /* 902 + * extent_ops just modify the 
flags of an extent and they don't result 903 + * in ref count changes, hence it's safe to pass false/0 for is_system 904 + * argument 905 + */ 905 906 add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, 906 907 num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, 907 - extent_op->is_data, NULL, NULL, NULL); 908 + extent_op->is_data, 0, NULL, NULL, NULL); 908 909 909 910 spin_unlock(&delayed_refs->lock); 910 911 return 0;
+1
fs/btrfs/delayed-ref.h
··· 127 127 */ 128 128 unsigned int must_insert_reserved:1; 129 129 unsigned int is_data:1; 130 + unsigned int is_system:1; 130 131 unsigned int processing:1; 131 132 }; 132 133
+1
fs/btrfs/disk-io.c
··· 1824 1824 1825 1825 now = get_seconds(); 1826 1826 if (cur->state < TRANS_STATE_BLOCKED && 1827 + !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && 1827 1828 (now < cur->start_time || 1828 1829 now - cur->start_time < fs_info->commit_interval)) { 1829 1830 spin_unlock(&fs_info->trans_lock);
+57 -16
fs/btrfs/extent-tree.c
··· 2601 2601 trace_run_delayed_ref_head(fs_info, head, 0); 2602 2602 2603 2603 if (head->total_ref_mod < 0) { 2604 - struct btrfs_block_group_cache *cache; 2604 + struct btrfs_space_info *space_info; 2605 + u64 flags; 2605 2606 2606 - cache = btrfs_lookup_block_group(fs_info, head->bytenr); 2607 - ASSERT(cache); 2608 - percpu_counter_add(&cache->space_info->total_bytes_pinned, 2607 + if (head->is_data) 2608 + flags = BTRFS_BLOCK_GROUP_DATA; 2609 + else if (head->is_system) 2610 + flags = BTRFS_BLOCK_GROUP_SYSTEM; 2611 + else 2612 + flags = BTRFS_BLOCK_GROUP_METADATA; 2613 + space_info = __find_space_info(fs_info, flags); 2614 + ASSERT(space_info); 2615 + percpu_counter_add(&space_info->total_bytes_pinned, 2609 2616 -head->num_bytes); 2610 - btrfs_put_block_group(cache); 2611 2617 2612 2618 if (head->is_data) { 2613 2619 spin_lock(&delayed_refs->lock); ··· 5565 5559 5566 5560 static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, 5567 5561 struct btrfs_block_rsv *block_rsv, 5568 - struct btrfs_block_rsv *dest, u64 num_bytes) 5562 + struct btrfs_block_rsv *dest, u64 num_bytes, 5563 + u64 *qgroup_to_release_ret) 5569 5564 { 5570 5565 struct btrfs_space_info *space_info = block_rsv->space_info; 5566 + u64 qgroup_to_release = 0; 5571 5567 u64 ret; 5572 5568 5573 5569 spin_lock(&block_rsv->lock); 5574 - if (num_bytes == (u64)-1) 5570 + if (num_bytes == (u64)-1) { 5575 5571 num_bytes = block_rsv->size; 5572 + qgroup_to_release = block_rsv->qgroup_rsv_size; 5573 + } 5576 5574 block_rsv->size -= num_bytes; 5577 5575 if (block_rsv->reserved >= block_rsv->size) { 5578 5576 num_bytes = block_rsv->reserved - block_rsv->size; ··· 5584 5574 block_rsv->full = 1; 5585 5575 } else { 5586 5576 num_bytes = 0; 5577 + } 5578 + if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { 5579 + qgroup_to_release = block_rsv->qgroup_rsv_reserved - 5580 + block_rsv->qgroup_rsv_size; 5581 + block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; 5582 + } else { 5583 
+ qgroup_to_release = 0; 5587 5584 } 5588 5585 spin_unlock(&block_rsv->lock); 5589 5586 ··· 5614 5597 space_info_add_old_bytes(fs_info, space_info, 5615 5598 num_bytes); 5616 5599 } 5600 + if (qgroup_to_release_ret) 5601 + *qgroup_to_release_ret = qgroup_to_release; 5617 5602 return ret; 5618 5603 } 5619 5604 ··· 5757 5738 struct btrfs_root *root = inode->root; 5758 5739 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 5759 5740 u64 num_bytes = 0; 5741 + u64 qgroup_num_bytes = 0; 5760 5742 int ret = -ENOSPC; 5761 5743 5762 5744 spin_lock(&block_rsv->lock); 5763 5745 if (block_rsv->reserved < block_rsv->size) 5764 5746 num_bytes = block_rsv->size - block_rsv->reserved; 5747 + if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size) 5748 + qgroup_num_bytes = block_rsv->qgroup_rsv_size - 5749 + block_rsv->qgroup_rsv_reserved; 5765 5750 spin_unlock(&block_rsv->lock); 5766 5751 5767 5752 if (num_bytes == 0) 5768 5753 return 0; 5769 5754 5770 - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); 5755 + ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true); 5771 5756 if (ret) 5772 5757 return ret; 5773 5758 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); ··· 5779 5756 block_rsv_add_bytes(block_rsv, num_bytes, 0); 5780 5757 trace_btrfs_space_reservation(root->fs_info, "delalloc", 5781 5758 btrfs_ino(inode), num_bytes, 1); 5782 - } 5759 + 5760 + /* Don't forget to increase qgroup_rsv_reserved */ 5761 + spin_lock(&block_rsv->lock); 5762 + block_rsv->qgroup_rsv_reserved += qgroup_num_bytes; 5763 + spin_unlock(&block_rsv->lock); 5764 + } else 5765 + btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); 5783 5766 return ret; 5784 5767 } 5785 5768 ··· 5806 5777 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; 5807 5778 struct btrfs_block_rsv *block_rsv = &inode->block_rsv; 5808 5779 u64 released = 0; 5780 + u64 qgroup_to_release = 0; 5809 5781 5810 5782 /* 5811 5783 * Since we statically set the 
block_rsv->size we just want to say we 5812 5784 * are releasing 0 bytes, and then we'll just get the reservation over 5813 5785 * the size free'd. 5814 5786 */ 5815 - released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0); 5787 + released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0, 5788 + &qgroup_to_release); 5816 5789 if (released > 0) 5817 5790 trace_btrfs_space_reservation(fs_info, "delalloc", 5818 5791 btrfs_ino(inode), released, 0); 5819 5792 if (qgroup_free) 5820 - btrfs_qgroup_free_meta_prealloc(inode->root, released); 5793 + btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release); 5821 5794 else 5822 - btrfs_qgroup_convert_reserved_meta(inode->root, released); 5795 + btrfs_qgroup_convert_reserved_meta(inode->root, 5796 + qgroup_to_release); 5823 5797 } 5824 5798 5825 5799 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, ··· 5834 5802 if (global_rsv == block_rsv || 5835 5803 block_rsv->space_info != global_rsv->space_info) 5836 5804 global_rsv = NULL; 5837 - block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes); 5805 + block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL); 5838 5806 } 5839 5807 5840 5808 static void update_global_block_rsv(struct btrfs_fs_info *fs_info) ··· 5914 5882 static void release_global_block_rsv(struct btrfs_fs_info *fs_info) 5915 5883 { 5916 5884 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, 5917 - (u64)-1); 5885 + (u64)-1, NULL); 5918 5886 WARN_ON(fs_info->trans_block_rsv.size > 0); 5919 5887 WARN_ON(fs_info->trans_block_rsv.reserved > 0); 5920 5888 WARN_ON(fs_info->chunk_block_rsv.size > 0); ··· 5938 5906 WARN_ON_ONCE(!list_empty(&trans->new_bgs)); 5939 5907 5940 5908 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, 5941 - trans->chunk_bytes_reserved); 5909 + trans->chunk_bytes_reserved, NULL); 5942 5910 trans->chunk_bytes_reserved = 0; 5943 5911 } 5944 5912 ··· 6043 6011 { 6044 6012 struct btrfs_block_rsv 
*block_rsv = &inode->block_rsv; 6045 6013 u64 reserve_size = 0; 6014 + u64 qgroup_rsv_size = 0; 6046 6015 u64 csum_leaves; 6047 6016 unsigned outstanding_extents; 6048 6017 ··· 6056 6023 inode->csum_bytes); 6057 6024 reserve_size += btrfs_calc_trans_metadata_size(fs_info, 6058 6025 csum_leaves); 6026 + /* 6027 + * For qgroup rsv, the calculation is very simple: 6028 + * account one nodesize for each outstanding extent 6029 + * 6030 + * This is overestimating in most cases. 6031 + */ 6032 + qgroup_rsv_size = outstanding_extents * fs_info->nodesize; 6059 6033 6060 6034 spin_lock(&block_rsv->lock); 6061 6035 block_rsv->size = reserve_size; 6036 + block_rsv->qgroup_rsv_size = qgroup_rsv_size; 6062 6037 spin_unlock(&block_rsv->lock); 6063 6038 } 6064 6039 ··· 8444 8403 struct btrfs_block_rsv *block_rsv, u32 blocksize) 8445 8404 { 8446 8405 block_rsv_add_bytes(block_rsv, blocksize, 0); 8447 - block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); 8406 + block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL); 8448 8407 } 8449 8408 8450 8409 /*
+1 -1
fs/btrfs/file.c
··· 1748 1748 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 1749 1749 lockstart, lockend, &cached_state); 1750 1750 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, 1751 - (ret != 0)); 1751 + true); 1752 1752 if (ret) { 1753 1753 btrfs_drop_pages(pages, num_pages); 1754 1754 break;
+12 -8
fs/btrfs/inode.c
··· 31 31 #include <linux/uio.h> 32 32 #include <linux/magic.h> 33 33 #include <linux/iversion.h> 34 + #include <asm/unaligned.h> 34 35 #include "ctree.h" 35 36 #include "disk-io.h" 36 37 #include "transaction.h" ··· 5906 5905 struct dir_entry *entry = addr; 5907 5906 char *name = (char *)(entry + 1); 5908 5907 5909 - ctx->pos = entry->offset; 5910 - if (!dir_emit(ctx, name, entry->name_len, entry->ino, 5911 - entry->type)) 5908 + ctx->pos = get_unaligned(&entry->offset); 5909 + if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), 5910 + get_unaligned(&entry->ino), 5911 + get_unaligned(&entry->type))) 5912 5912 return 1; 5913 - addr += sizeof(struct dir_entry) + entry->name_len; 5913 + addr += sizeof(struct dir_entry) + 5914 + get_unaligned(&entry->name_len); 5914 5915 ctx->pos++; 5915 5916 } 5916 5917 return 0; ··· 6002 5999 } 6003 6000 6004 6001 entry = addr; 6005 - entry->name_len = name_len; 6002 + put_unaligned(name_len, &entry->name_len); 6006 6003 name_ptr = (char *)(entry + 1); 6007 6004 read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), 6008 6005 name_len); 6009 - entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; 6006 + put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)], 6007 + &entry->type); 6010 6008 btrfs_dir_item_key_to_cpu(leaf, di, &location); 6011 - entry->ino = location.objectid; 6012 - entry->offset = found_key.offset; 6009 + put_unaligned(location.objectid, &entry->ino); 6010 + put_unaligned(found_key.offset, &entry->offset); 6013 6011 entries++; 6014 6012 addr += sizeof(struct dir_entry) + name_len; 6015 6013 total_len += sizeof(struct dir_entry) + name_len;
+15 -10
fs/btrfs/print-tree.c
··· 189 189 fs_info = l->fs_info; 190 190 nr = btrfs_header_nritems(l); 191 191 192 - btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d", 193 - btrfs_header_bytenr(l), nr, 194 - btrfs_leaf_free_space(fs_info, l)); 192 + btrfs_info(fs_info, 193 + "leaf %llu gen %llu total ptrs %d free space %d owner %llu", 194 + btrfs_header_bytenr(l), btrfs_header_generation(l), nr, 195 + btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l)); 195 196 for (i = 0 ; i < nr ; i++) { 196 197 item = btrfs_item_nr(i); 197 198 btrfs_item_key_to_cpu(l, &key, i); ··· 326 325 } 327 326 } 328 327 329 - void btrfs_print_tree(struct extent_buffer *c) 328 + void btrfs_print_tree(struct extent_buffer *c, bool follow) 330 329 { 331 330 struct btrfs_fs_info *fs_info; 332 331 int i; u32 nr; ··· 343 342 return; 344 343 } 345 344 btrfs_info(fs_info, 346 - "node %llu level %d total ptrs %d free spc %u", 347 - btrfs_header_bytenr(c), level, nr, 348 - (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr); 345 + "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu", 346 + btrfs_header_bytenr(c), level, btrfs_header_generation(c), 347 + nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr, 348 + btrfs_header_owner(c)); 349 349 for (i = 0; i < nr; i++) { 350 350 btrfs_node_key_to_cpu(c, &key, i); 351 - pr_info("\tkey %d (%llu %u %llu) block %llu\n", 351 + pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n", 352 352 i, key.objectid, key.type, key.offset, 353 - btrfs_node_blockptr(c, i)); 353 + btrfs_node_blockptr(c, i), 354 + btrfs_node_ptr_generation(c, i)); 354 355 } 356 + if (!follow) 357 + return; 355 358 for (i = 0; i < nr; i++) { 356 359 struct btrfs_key first_key; 357 360 struct extent_buffer *next; ··· 377 372 if (btrfs_header_level(next) != 378 373 level - 1) 379 374 BUG(); 380 - btrfs_print_tree(next); 375 + btrfs_print_tree(next, follow); 381 376 free_extent_buffer(next); 382 377 } 383 378 }
+1 -1
fs/btrfs/print-tree.h
··· 7 7 #define BTRFS_PRINT_TREE_H 8 8 9 9 void btrfs_print_leaf(struct extent_buffer *l); 10 - void btrfs_print_tree(struct extent_buffer *c); 10 + void btrfs_print_tree(struct extent_buffer *c, bool follow); 11 11 12 12 #endif
+41 -2
fs/btrfs/qgroup.c
··· 11 11 #include <linux/slab.h> 12 12 #include <linux/workqueue.h> 13 13 #include <linux/btrfs.h> 14 + #include <linux/sizes.h> 14 15 15 16 #include "ctree.h" 16 17 #include "transaction.h" ··· 2376 2375 return ret; 2377 2376 } 2378 2377 2379 - static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) 2378 + /* 2379 + * Two limits to commit transaction in advance. 2380 + * 2381 + * For RATIO, it will be 1/RATIO of the remaining limit 2382 + * (excluding data and prealloc meta) as threshold. 2383 + * For SIZE, it will be in byte unit as threshold. 2384 + */ 2385 + #define QGROUP_PERTRANS_RATIO 32 2386 + #define QGROUP_PERTRANS_SIZE SZ_32M 2387 + static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, 2388 + const struct btrfs_qgroup *qg, u64 num_bytes) 2380 2389 { 2390 + u64 limit; 2391 + u64 threshold; 2392 + 2381 2393 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 2382 2394 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) 2383 2395 return false; ··· 2398 2384 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 2399 2385 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) 2400 2386 return false; 2387 + 2388 + /* 2389 + * Even if we passed the check, it's better to check if reservation 2390 + * for meta_pertrans is pushing us near limit. 
2391 + * If there is too much pertrans reservation or it's near the limit, 2392 + * let's try commit transaction to free some, using transaction_kthread 2393 + */ 2394 + if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | 2395 + BTRFS_QGROUP_LIMIT_MAX_EXCL))) { 2396 + if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) 2397 + limit = qg->max_excl; 2398 + else 2399 + limit = qg->max_rfer; 2400 + threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] - 2401 + qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) / 2402 + QGROUP_PERTRANS_RATIO; 2403 + threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE); 2404 + 2405 + /* 2406 + * Use transaction_kthread to commit transaction, so we no 2407 + * longer need to bother nested transaction nor lock context. 2408 + */ 2409 + if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold) 2410 + btrfs_commit_transaction_locksafe(fs_info); 2411 + } 2401 2412 2402 2413 return true; 2403 2414 } ··· 2473 2434 2474 2435 qg = unode_aux_to_qgroup(unode); 2475 2436 2476 - if (enforce && !qgroup_check_limits(qg, num_bytes)) { 2437 + if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { 2477 2438 ret = -EDQUOT; 2478 2439 goto out; 2479 2440 }
+1
fs/btrfs/transaction.c
··· 2267 2267 */ 2268 2268 cur_trans->state = TRANS_STATE_COMPLETED; 2269 2269 wake_up(&cur_trans->commit_wait); 2270 + clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); 2270 2271 2271 2272 spin_lock(&fs_info->trans_lock); 2272 2273 list_del_init(&cur_trans->list);
+14
fs/btrfs/transaction.h
··· 199 199 int btrfs_commit_transaction(struct btrfs_trans_handle *trans); 200 200 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, 201 201 int wait_for_unblock); 202 + 203 + /* 204 + * Try to commit transaction asynchronously, so this is safe to call 205 + * even holding a spinlock. 206 + * 207 + * It's done by informing transaction_kthread to commit transaction without 208 + * waiting for commit interval. 209 + */ 210 + static inline void btrfs_commit_transaction_locksafe( 211 + struct btrfs_fs_info *fs_info) 212 + { 213 + set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); 214 + wake_up_process(fs_info->transaction_kthread); 215 + } 202 216 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); 203 217 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); 204 218 void btrfs_throttle(struct btrfs_fs_info *fs_info);
+7 -3
fs/ceph/inode.c
··· 669 669 CEPH_CAP_FILE_BUFFER| 670 670 CEPH_CAP_AUTH_EXCL| 671 671 CEPH_CAP_XATTR_EXCL)) { 672 - if (timespec_compare(ctime, &inode->i_ctime) > 0) { 672 + if (ci->i_version == 0 || 673 + timespec_compare(ctime, &inode->i_ctime) > 0) { 673 674 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 674 675 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 675 676 ctime->tv_sec, ctime->tv_nsec); 676 677 inode->i_ctime = *ctime; 677 678 } 678 - if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 679 + if (ci->i_version == 0 || 680 + ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 679 681 /* the MDS did a utimes() */ 680 682 dout("mtime %ld.%09ld -> %ld.%09ld " 681 683 "tw %d -> %d\n", ··· 797 795 new_issued = ~issued & le32_to_cpu(info->cap.caps); 798 796 799 797 /* update inode */ 800 - ci->i_version = le64_to_cpu(info->version); 801 798 inode->i_rdev = le32_to_cpu(info->rdev); 802 799 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 803 800 ··· 868 867 ceph_forget_all_cached_acls(inode); 869 868 xattr_blob = NULL; 870 869 } 870 + 871 + /* finally update i_version */ 872 + ci->i_version = le64_to_cpu(info->version); 871 873 872 874 inode->i_mapping->a_ops = &ceph_aops; 873 875
+1 -1
fs/cifs/cifs_debug.h
··· 54 54 pr_debug_ ## ratefunc("%s: " \ 55 55 fmt, __FILE__, ##__VA_ARGS__); \ 56 56 } else if ((type) & VFS) { \ 57 - pr_err_ ## ratefunc("CuIFS VFS: " \ 57 + pr_err_ ## ratefunc("CIFS VFS: " \ 58 58 fmt, ##__VA_ARGS__); \ 59 59 } else if ((type) & NOISY && (NOISY != 0)) { \ 60 60 pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
+5 -4
fs/cifs/dir.c
··· 684 684 goto mknod_out; 685 685 } 686 686 687 + if (!S_ISCHR(mode) && !S_ISBLK(mode)) 688 + goto mknod_out; 689 + 687 690 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) 688 691 goto mknod_out; 689 692 ··· 695 692 696 693 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 697 694 if (buf == NULL) { 698 - kfree(full_path); 699 695 rc = -ENOMEM; 700 - free_xid(xid); 701 - return rc; 696 + goto mknod_out; 702 697 } 703 698 704 699 if (backup_cred(cifs_sb)) ··· 743 742 pdev->minor = cpu_to_le64(MINOR(device_number)); 744 743 rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, 745 744 &bytes_written, iov, 1); 746 - } /* else if (S_ISFIFO) */ 745 + } 747 746 tcon->ses->server->ops->close(xid, tcon, &fid); 748 747 d_drop(direntry); 749 748
+1 -1
fs/cifs/file.c
··· 3462 3462 * If the page is mmap'ed into a process' page tables, then we need to make 3463 3463 * sure that it doesn't change while being written back. 3464 3464 */ 3465 - static int 3465 + static vm_fault_t 3466 3466 cifs_page_mkwrite(struct vm_fault *vmf) 3467 3467 { 3468 3468 struct page *page = vmf->page;
+2 -2
fs/cifs/smb2ops.c
··· 1452 1452 struct cifs_open_parms oparms; 1453 1453 struct cifs_fid fid; 1454 1454 struct kvec err_iov = {NULL, 0}; 1455 - struct smb2_err_rsp *err_buf = NULL; 1455 + struct smb2_err_rsp *err_buf; 1456 1456 struct smb2_symlink_err_rsp *symlink; 1457 1457 unsigned int sub_len; 1458 1458 unsigned int sub_offset; ··· 1476 1476 1477 1477 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); 1478 1478 1479 - if (!rc || !err_buf) { 1479 + if (!rc || !err_iov.iov_base) { 1480 1480 kfree(utf16_path); 1481 1481 return -ENOENT; 1482 1482 }
+7 -1
fs/cifs/smbdirect.c
··· 1028 1028 for (i = 0; i < request->num_sge; i++) { 1029 1029 log_rdma_send(INFO, 1030 1030 "rdma_request sge[%d] addr=%llu length=%u\n", 1031 - i, request->sge[0].addr, request->sge[0].length); 1031 + i, request->sge[i].addr, request->sge[i].length); 1032 1032 ib_dma_sync_single_for_device( 1033 1033 info->id->device, 1034 1034 request->sge[i].addr, ··· 2139 2139 goto done; 2140 2140 } 2141 2141 2142 + cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen); 2143 + for (i = 0; i < rqst->rq_nvec-1; i++) 2144 + dump_smb(iov[i].iov_base, iov[i].iov_len); 2145 + 2142 2146 remaining_data_length = buflen; 2143 2147 2144 2148 log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " ··· 2198 2194 goto done; 2199 2195 } 2200 2196 i++; 2197 + if (i == rqst->rq_nvec) 2198 + break; 2201 2199 } 2202 2200 start = i; 2203 2201 buflen = 0;
+28 -13
fs/ecryptfs/crypto.c
··· 1997 1997 return rc; 1998 1998 } 1999 1999 2000 + static bool is_dot_dotdot(const char *name, size_t name_size) 2001 + { 2002 + if (name_size == 1 && name[0] == '.') 2003 + return true; 2004 + else if (name_size == 2 && name[0] == '.' && name[1] == '.') 2005 + return true; 2006 + 2007 + return false; 2008 + } 2009 + 2000 2010 /** 2001 2011 * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext 2002 2012 * @plaintext_name: The plaintext name ··· 2031 2021 size_t packet_size; 2032 2022 int rc = 0; 2033 2023 2034 - if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) 2035 - && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) 2036 - && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) 2037 - && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, 2038 - ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { 2039 - const char *orig_name = name; 2040 - size_t orig_name_size = name_size; 2024 + if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && 2025 + !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) { 2026 + if (is_dot_dotdot(name, name_size)) { 2027 + rc = ecryptfs_copy_filename(plaintext_name, 2028 + plaintext_name_size, 2029 + name, name_size); 2030 + goto out; 2031 + } 2032 + 2033 + if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE || 2034 + strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, 2035 + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) { 2036 + rc = -EINVAL; 2037 + goto out; 2038 + } 2041 2039 2042 2040 name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; 2043 2041 name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; ··· 2065 2047 decoded_name, 2066 2048 decoded_name_size); 2067 2049 if (rc) { 2068 - printk(KERN_INFO "%s: Could not parse tag 70 packet " 2069 - "from filename; copying through filename " 2070 - "as-is\n", __func__); 2071 - rc = ecryptfs_copy_filename(plaintext_name, 2072 - plaintext_name_size, 2073 - 
orig_name, orig_name_size); 2050 + ecryptfs_printk(KERN_DEBUG, 2051 + "%s: Could not parse tag 70 packet from filename\n", 2052 + __func__); 2074 2053 goto out_free; 2075 2054 } 2076 2055 } else {
+16 -5
fs/ecryptfs/file.c
··· 82 82 buf->sb, lower_name, 83 83 lower_namelen); 84 84 if (rc) { 85 - printk(KERN_ERR "%s: Error attempting to decode and decrypt " 86 - "filename [%s]; rc = [%d]\n", __func__, lower_name, 87 - rc); 88 - goto out; 85 + if (rc != -EINVAL) { 86 + ecryptfs_printk(KERN_DEBUG, 87 + "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n", 88 + __func__, lower_name, rc); 89 + return rc; 90 + } 91 + 92 + /* Mask -EINVAL errors as these are most likely due a plaintext 93 + * filename present in the lower filesystem despite filename 94 + * encryption being enabled. One unavoidable example would be 95 + * the "lost+found" dentry in the root directory of an Ext4 96 + * filesystem. 97 + */ 98 + return 0; 89 99 } 100 + 90 101 buf->caller->pos = buf->ctx.pos; 91 102 rc = !dir_emit(buf->caller, name, name_size, ino, d_type); 92 103 kfree(name); 93 104 if (!rc) 94 105 buf->entries_written++; 95 - out: 106 + 96 107 return rc; 97 108 } 98 109
+1 -2
fs/ecryptfs/inode.c
··· 395 395 396 396 mount_crypt_stat = &ecryptfs_superblock_to_private( 397 397 ecryptfs_dentry->d_sb)->mount_crypt_stat; 398 - if (mount_crypt_stat 399 - && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { 398 + if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) { 400 399 rc = ecryptfs_encrypt_and_encode_filename( 401 400 &encrypted_and_encoded_name, &len, 402 401 mount_crypt_stat, name, len);
+1 -1
fs/ecryptfs/keystore.c
··· 1880 1880 candidate_auth_tok = &auth_tok_list_item->auth_tok; 1881 1881 if (unlikely(ecryptfs_verbosity > 0)) { 1882 1882 ecryptfs_printk(KERN_DEBUG, 1883 - "Considering cadidate auth tok:\n"); 1883 + "Considering candidate auth tok:\n"); 1884 1884 ecryptfs_dump_auth_tok(candidate_auth_tok); 1885 1885 } 1886 1886 rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig,
+2 -2
fs/ext2/file.c
··· 88 88 * The default page_lock and i_size verification done by non-DAX fault paths 89 89 * is sufficient because ext2 doesn't support hole punching. 90 90 */ 91 - static int ext2_dax_fault(struct vm_fault *vmf) 91 + static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) 92 92 { 93 93 struct inode *inode = file_inode(vmf->vma->vm_file); 94 94 struct ext2_inode_info *ei = EXT2_I(inode); 95 - int ret; 95 + vm_fault_t ret; 96 96 97 97 if (vmf->flags & FAULT_FLAG_WRITE) { 98 98 sb_start_pagefault(inode->i_sb);
+4 -3
fs/fs-writeback.c
··· 745 745 */ 746 746 if (inode && inode_to_wb_is_valid(inode)) { 747 747 struct bdi_writeback *wb; 748 - bool locked, congested; 748 + struct wb_lock_cookie lock_cookie = {}; 749 + bool congested; 749 750 750 - wb = unlocked_inode_to_wb_begin(inode, &locked); 751 + wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); 751 752 congested = wb_congested(wb, cong_bits); 752 - unlocked_inode_to_wb_end(inode, locked); 753 + unlocked_inode_to_wb_end(inode, &lock_cookie); 753 754 return congested; 754 755 } 755 756
+16 -3
fs/isofs/compress.c
··· 20 20 #include <linux/init.h> 21 21 #include <linux/bio.h> 22 22 23 + #include <linux/slab.h> 23 24 #include <linux/vmalloc.h> 24 25 #include <linux/zlib.h> 25 26 ··· 60 59 >> bufshift; 61 60 int haveblocks; 62 61 blkcnt_t blocknum; 63 - struct buffer_head *bhs[needblocks + 1]; 62 + struct buffer_head **bhs; 64 63 int curbh, curpage; 65 64 66 65 if (block_size > deflateBound(1UL << zisofs_block_shift)) { ··· 81 80 82 81 /* Because zlib is not thread-safe, do all the I/O at the top. */ 83 82 blocknum = block_start >> bufshift; 84 - memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *)); 83 + bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL); 84 + if (!bhs) { 85 + *errp = -ENOMEM; 86 + return 0; 87 + } 85 88 haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks); 86 89 ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs); 87 90 ··· 195 190 b_eio: 196 191 for (i = 0; i < haveblocks; i++) 197 192 brelse(bhs[i]); 193 + kfree(bhs); 198 194 return stream.total_out; 199 195 } 200 196 ··· 311 305 unsigned int zisofs_pages_per_cblock = 312 306 PAGE_SHIFT <= zisofs_block_shift ? 313 307 (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; 314 - struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; 308 + struct page **pages; 315 309 pgoff_t index = page->index, end_index; 316 310 317 311 end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 335 329 } else { 336 330 full_page = 0; 337 331 pcount = 1; 332 + } 333 + pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), 334 + sizeof(*pages), GFP_KERNEL); 335 + if (!pages) { 336 + unlock_page(page); 337 + return -ENOMEM; 338 338 } 339 339 pages[full_page] = page; 340 340 ··· 369 357 } 370 358 371 359 /* At this point, err contains 0 or -EIO depending on the "critical" page */ 360 + kfree(pages); 372 361 return err; 373 362 } 374 363
+3
fs/isofs/inode.c
··· 394 394 break; 395 395 #ifdef CONFIG_JOLIET 396 396 case Opt_iocharset: 397 + kfree(popt->iocharset); 397 398 popt->iocharset = match_strdup(&args[0]); 399 + if (!popt->iocharset) 400 + return 0; 398 401 break; 399 402 #endif 400 403 case Opt_map_a:
+1 -1
fs/jffs2/super.c
··· 342 342 static void jffs2_kill_sb(struct super_block *sb) 343 343 { 344 344 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 345 - if (!sb_rdonly(sb)) 345 + if (c && !sb_rdonly(sb)) 346 346 jffs2_stop_garbage_collect_thread(c); 347 347 kill_mtd_super(sb); 348 348 kfree(c);
+3 -2
fs/namespace.c
··· 1089 1089 goto out_free; 1090 1090 } 1091 1091 1092 - mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED); 1092 + mnt->mnt.mnt_flags = old->mnt.mnt_flags; 1093 + mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); 1093 1094 /* Don't allow unprivileged users to change mount flags */ 1094 1095 if (flag & CL_UNPRIVILEGED) { 1095 1096 mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; ··· 2815 2814 mnt_flags |= MNT_NODIRATIME; 2816 2815 if (flags & MS_STRICTATIME) 2817 2816 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); 2818 - if (flags & SB_RDONLY) 2817 + if (flags & MS_RDONLY) 2819 2818 mnt_flags |= MNT_READONLY; 2820 2819 2821 2820 /* The default atime for remount is preservation */
+15 -19
fs/notify/fanotify/fanotify.c
··· 92 92 u32 event_mask, 93 93 const void *data, int data_type) 94 94 { 95 - __u32 marks_mask, marks_ignored_mask; 95 + __u32 marks_mask = 0, marks_ignored_mask = 0; 96 96 const struct path *path = data; 97 97 98 98 pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" ··· 108 108 !d_can_lookup(path->dentry)) 109 109 return false; 110 110 111 - if (inode_mark && vfsmnt_mark) { 112 - marks_mask = (vfsmnt_mark->mask | inode_mark->mask); 113 - marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask); 114 - } else if (inode_mark) { 115 - /* 116 - * if the event is for a child and this inode doesn't care about 117 - * events on the child, don't send it! 118 - */ 119 - if ((event_mask & FS_EVENT_ON_CHILD) && 120 - !(inode_mark->mask & FS_EVENT_ON_CHILD)) 121 - return false; 122 - marks_mask = inode_mark->mask; 123 - marks_ignored_mask = inode_mark->ignored_mask; 124 - } else if (vfsmnt_mark) { 125 - marks_mask = vfsmnt_mark->mask; 126 - marks_ignored_mask = vfsmnt_mark->ignored_mask; 127 - } else { 128 - BUG(); 111 + /* 112 + * if the event is for a child and this inode doesn't care about 113 + * events on the child, don't send it! 114 + */ 115 + if (inode_mark && 116 + (!(event_mask & FS_EVENT_ON_CHILD) || 117 + (inode_mark->mask & FS_EVENT_ON_CHILD))) { 118 + marks_mask |= inode_mark->mask; 119 + marks_ignored_mask |= inode_mark->ignored_mask; 120 + } 121 + 122 + if (vfsmnt_mark) { 123 + marks_mask |= vfsmnt_mark->mask; 124 + marks_ignored_mask |= vfsmnt_mark->ignored_mask; 129 125 } 130 126 131 127 if (d_is_dir(path->dentry) &&
+11 -14
fs/notify/fsnotify.c
··· 192 192 struct fsnotify_iter_info *iter_info) 193 193 { 194 194 struct fsnotify_group *group = NULL; 195 - __u32 inode_test_mask = 0; 196 - __u32 vfsmount_test_mask = 0; 195 + __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); 196 + __u32 marks_mask = 0; 197 + __u32 marks_ignored_mask = 0; 197 198 198 199 if (unlikely(!inode_mark && !vfsmount_mark)) { 199 200 BUG(); ··· 214 213 /* does the inode mark tell us to do something? */ 215 214 if (inode_mark) { 216 215 group = inode_mark->group; 217 - inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); 218 - inode_test_mask &= inode_mark->mask; 219 - inode_test_mask &= ~inode_mark->ignored_mask; 216 + marks_mask |= inode_mark->mask; 217 + marks_ignored_mask |= inode_mark->ignored_mask; 220 218 } 221 219 222 220 /* does the vfsmount_mark tell us to do something? */ 223 221 if (vfsmount_mark) { 224 - vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); 225 222 group = vfsmount_mark->group; 226 - vfsmount_test_mask &= vfsmount_mark->mask; 227 - vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; 228 - if (inode_mark) 229 - vfsmount_test_mask &= ~inode_mark->ignored_mask; 223 + marks_mask |= vfsmount_mark->mask; 224 + marks_ignored_mask |= vfsmount_mark->ignored_mask; 230 225 } 231 226 232 227 pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" 233 - " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" 228 + " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" 234 229 " data=%p data_is=%d cookie=%d\n", 235 - __func__, group, to_tell, mask, inode_mark, 236 - inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, 230 + __func__, group, to_tell, mask, inode_mark, vfsmount_mark, 231 + marks_mask, marks_ignored_mask, data, 237 232 data_is, cookie); 238 233 239 - if (!inode_test_mask && !vfsmount_test_mask) 234 + if (!(test_mask & marks_mask & ~marks_ignored_mask)) 240 235 return 0; 241 236 242 237 return group->ops->handle_event(group, to_tell, inode_mark,
+5
fs/orangefs/super.c
··· 579 579 /* provided sb cleanup */ 580 580 kill_anon_super(sb); 581 581 582 + if (!ORANGEFS_SB(sb)) { 583 + mutex_lock(&orangefs_request_mutex); 584 + mutex_unlock(&orangefs_request_mutex); 585 + return; 586 + } 582 587 /* 583 588 * issue the unmount to userspace to tell it to remove the 584 589 * dynamic mount info it has for this superblock
+6
fs/proc/base.c
··· 1693 1693 kuid_t uid; 1694 1694 kgid_t gid; 1695 1695 1696 + if (unlikely(task->flags & PF_KTHREAD)) { 1697 + *ruid = GLOBAL_ROOT_UID; 1698 + *rgid = GLOBAL_ROOT_GID; 1699 + return; 1700 + } 1701 + 1696 1702 /* Default to the tasks effective ownership */ 1697 1703 rcu_read_lock(); 1698 1704 cred = __task_cred(task);
+1 -1
fs/proc/loadavg.c
··· 24 24 LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), 25 25 LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), 26 26 nr_running(), nr_threads, 27 - idr_get_cursor(&task_active_pid_ns(current)->idr)); 27 + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); 28 28 return 0; 29 29 } 30 30
+5 -1
fs/proc/task_mmu.c
··· 1310 1310 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 1311 1311 else if (is_swap_pmd(pmd)) { 1312 1312 swp_entry_t entry = pmd_to_swp_entry(pmd); 1313 + unsigned long offset = swp_offset(entry); 1313 1314 1315 + offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; 1314 1316 frame = swp_type(entry) | 1315 - (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 1317 + (offset << MAX_SWAPFILES_SHIFT); 1316 1318 flags |= PM_SWAP; 1317 1319 if (pmd_swp_soft_dirty(pmd)) 1318 1320 flags |= PM_SOFT_DIRTY; ··· 1334 1332 break; 1335 1333 if (pm->show_pfn && (flags & PM_PRESENT)) 1336 1334 frame++; 1335 + else if (flags & PM_SWAP) 1336 + frame += (1 << MAX_SWAPFILES_SHIFT); 1337 1337 } 1338 1338 spin_unlock(ptl); 1339 1339 return err;
+1 -1
fs/quota/dquot.c
··· 2966 2966 NULL); 2967 2967 2968 2968 order = 0; 2969 - dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); 2969 + dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); 2970 2970 if (!dquot_hash) 2971 2971 panic("Cannot create dquot hash table"); 2972 2972
+4 -5
fs/super.c
··· 167 167 security_sb_free(s); 168 168 put_user_ns(s->s_user_ns); 169 169 kfree(s->s_subtype); 170 + free_prealloced_shrinker(&s->s_shrink); 170 171 /* no delays needed */ 171 172 destroy_super_work(&s->destroy_work); 172 173 } ··· 253 252 s->s_shrink.count_objects = super_cache_count; 254 253 s->s_shrink.batch = 1024; 255 254 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; 255 + if (prealloc_shrinker(&s->s_shrink)) 256 + goto fail; 256 257 return s; 257 258 258 259 fail: ··· 521 518 hlist_add_head(&s->s_instances, &type->fs_supers); 522 519 spin_unlock(&sb_lock); 523 520 get_filesystem(type); 524 - err = register_shrinker(&s->s_shrink); 525 - if (err) { 526 - deactivate_locked_super(s); 527 - s = ERR_PTR(err); 528 - } 521 + register_shrinker_prepared(&s->s_shrink); 529 522 return s; 530 523 } 531 524
+6
fs/udf/unicode.c
··· 28 28 29 29 #include "udf_sb.h" 30 30 31 + #define SURROGATE_MASK 0xfffff800 32 + #define SURROGATE_PAIR 0x0000d800 33 + 31 34 static int udf_uni2char_utf8(wchar_t uni, 32 35 unsigned char *out, 33 36 int boundlen) ··· 39 36 40 37 if (boundlen <= 0) 41 38 return -ENAMETOOLONG; 39 + 40 + if ((uni & SURROGATE_MASK) == SURROGATE_PAIR) 41 + return -EINVAL; 42 42 43 43 if (uni < 0x80) { 44 44 out[u_len++] = (unsigned char)uni;
+1 -1
include/drm/drm_hdcp.h
··· 19 19 #define DRM_HDCP_RI_LEN 2 20 20 #define DRM_HDCP_V_PRIME_PART_LEN 4 21 21 #define DRM_HDCP_V_PRIME_NUM_PARTS 5 22 - #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) 22 + #define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) 23 23 #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) 24 24 #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) 25 25
+5
include/linux/backing-dev-defs.h
··· 223 223 set_wb_congested(bdi->wb.congested, sync); 224 224 } 225 225 226 + struct wb_lock_cookie { 227 + bool locked; 228 + unsigned long flags; 229 + }; 230 + 226 231 #ifdef CONFIG_CGROUP_WRITEBACK 227 232 228 233 /**
+16 -14
include/linux/backing-dev.h
··· 347 347 /** 348 348 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction 349 349 * @inode: target inode 350 - * @lockedp: temp bool output param, to be passed to the end function 350 + * @cookie: output param, to be passed to the end function 351 351 * 352 352 * The caller wants to access the wb associated with @inode but isn't 353 353 * holding inode->i_lock, the i_pages lock or wb->list_lock. This ··· 355 355 * association doesn't change until the transaction is finished with 356 356 * unlocked_inode_to_wb_end(). 357 357 * 358 - * The caller must call unlocked_inode_to_wb_end() with *@lockdep 359 - * afterwards and can't sleep during transaction. IRQ may or may not be 360 - * disabled on return. 358 + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and 359 + * can't sleep during the transaction. IRQs may or may not be disabled on 360 + * return. 361 361 */ 362 362 static inline struct bdi_writeback * 363 - unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 363 + unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) 364 364 { 365 365 rcu_read_lock(); 366 366 ··· 368 368 * Paired with store_release in inode_switch_wb_work_fn() and 369 369 * ensures that we see the new wb if we see cleared I_WB_SWITCH. 
370 370 */ 371 - *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; 371 + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; 372 372 373 - if (unlikely(*lockedp)) 374 - xa_lock_irq(&inode->i_mapping->i_pages); 373 + if (unlikely(cookie->locked)) 374 + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); 375 375 376 376 /* 377 377 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages ··· 383 383 /** 384 384 * unlocked_inode_to_wb_end - end inode wb access transaction 385 385 * @inode: target inode 386 - * @locked: *@lockedp from unlocked_inode_to_wb_begin() 386 + * @cookie: @cookie from unlocked_inode_to_wb_begin() 387 387 */ 388 - static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 388 + static inline void unlocked_inode_to_wb_end(struct inode *inode, 389 + struct wb_lock_cookie *cookie) 389 390 { 390 - if (unlikely(locked)) 391 - xa_unlock_irq(&inode->i_mapping->i_pages); 391 + if (unlikely(cookie->locked)) 392 + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); 392 393 393 394 rcu_read_unlock(); 394 395 } ··· 436 435 } 437 436 438 437 static inline struct bdi_writeback * 439 - unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) 438 + unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) 440 439 { 441 440 return inode_to_wb(inode); 442 441 } 443 442 444 - static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) 443 + static inline void unlocked_inode_to_wb_end(struct inode *inode, 444 + struct wb_lock_cookie *cookie) 445 445 { 446 446 } 447 447
+3
include/linux/compiler-clang.h
··· 25 25 #define __SANITIZE_ADDRESS__ 26 26 #endif 27 27 28 + #undef __no_sanitize_address 29 + #define __no_sanitize_address __attribute__((no_sanitize("address"))) 30 + 28 31 /* Clang doesn't have a way to turn it off per-function, yet. */ 29 32 #ifdef __noretpoline 30 33 #undef __noretpoline
+1 -12
include/linux/coresight-pmu.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #ifndef _LINUX_CORESIGHT_PMU_H
+1 -1
include/linux/fsnotify_backend.h
··· 248 248 /* Group this mark is for. Set on mark creation, stable until last ref 249 249 * is dropped */ 250 250 struct fsnotify_group *group; 251 - /* List of marks by group->i_fsnotify_marks. Also reused for queueing 251 + /* List of marks by group->marks_list. Also reused for queueing 252 252 * mark into destroy_list when it's waiting for the end of SRCU period 253 253 * before it can be freed. [group->mark_mutex] */ 254 254 struct list_head g_list;
+8 -1
include/linux/hid.h
··· 516 516 HID_TYPE_USBNONE 517 517 }; 518 518 519 + enum hid_battery_status { 520 + HID_BATTERY_UNKNOWN = 0, 521 + HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */ 522 + HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */ 523 + }; 524 + 519 525 struct hid_driver; 520 526 struct hid_ll_driver; 521 527 ··· 564 558 __s32 battery_max; 565 559 __s32 battery_report_type; 566 560 __s32 battery_report_id; 567 - bool battery_reported; 561 + enum hid_battery_status battery_status; 562 + bool battery_avoid_query; 568 563 #endif 569 564 570 565 unsigned int status; /* see STAT flags above */
+5 -2
include/linux/if_vlan.h
··· 663 663 * Returns true if the skb is tagged with multiple vlan headers, regardless 664 664 * of whether it is hardware accelerated or not. 665 665 */ 666 - static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) 666 + static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) 667 667 { 668 668 __be16 protocol = skb->protocol; 669 669 ··· 671 671 struct vlan_ethhdr *veh; 672 672 673 673 if (likely(!eth_type_vlan(protocol))) 674 + return false; 675 + 676 + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) 674 677 return false; 675 678 676 679 veh = (struct vlan_ethhdr *)skb->data; ··· 693 690 * 694 691 * Returns features without unsafe ones if the skb has multiple tags. 695 692 */ 696 - static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, 693 + static inline netdev_features_t vlan_features_check(struct sk_buff *skb, 697 694 netdev_features_t features) 698 695 { 699 696 if (skb_vlan_tagged_multi(skb)) {
+13 -6
include/linux/livepatch.h
··· 186 186 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE); 187 187 } 188 188 189 + typedef int (*klp_shadow_ctor_t)(void *obj, 190 + void *shadow_data, 191 + void *ctor_data); 192 + typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data); 193 + 189 194 void *klp_shadow_get(void *obj, unsigned long id); 190 - void *klp_shadow_alloc(void *obj, unsigned long id, void *data, 191 - size_t size, gfp_t gfp_flags); 192 - void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 193 - size_t size, gfp_t gfp_flags); 194 - void klp_shadow_free(void *obj, unsigned long id); 195 - void klp_shadow_free_all(unsigned long id); 195 + void *klp_shadow_alloc(void *obj, unsigned long id, 196 + size_t size, gfp_t gfp_flags, 197 + klp_shadow_ctor_t ctor, void *ctor_data); 198 + void *klp_shadow_get_or_alloc(void *obj, unsigned long id, 199 + size_t size, gfp_t gfp_flags, 200 + klp_shadow_ctor_t ctor, void *ctor_data); 201 + void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor); 202 + void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); 196 203 197 204 #else /* !CONFIG_LIVEPATCH */ 198 205
+8
include/linux/microchipphy.h
··· 70 70 #define LAN88XX_MMD3_CHIP_ID (32877) 71 71 #define LAN88XX_MMD3_CHIP_REV (32878) 72 72 73 + /* DSP registers */ 74 + #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A) 75 + #define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000) 76 + #define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5) 77 + #define LAN88XX_EXT_PAGE_TR_CR 16 78 + #define LAN88XX_EXT_PAGE_TR_LOW_DATA 17 79 + #define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18 80 + 73 81 #endif /* _MICROCHIPPHY_H */
+5 -2
include/linux/shrinker.h
··· 75 75 #define SHRINKER_NUMA_AWARE (1 << 0) 76 76 #define SHRINKER_MEMCG_AWARE (1 << 1) 77 77 78 - extern int register_shrinker(struct shrinker *); 79 - extern void unregister_shrinker(struct shrinker *); 78 + extern int prealloc_shrinker(struct shrinker *shrinker); 79 + extern void register_shrinker_prepared(struct shrinker *shrinker); 80 + extern int register_shrinker(struct shrinker *shrinker); 81 + extern void unregister_shrinker(struct shrinker *shrinker); 82 + extern void free_prealloced_shrinker(struct shrinker *shrinker); 80 83 #endif
+2 -2
include/linux/textsearch.h
··· 62 62 int flags; 63 63 64 64 /** 65 - * get_next_block - fetch next block of data 65 + * @get_next_block: fetch next block of data 66 66 * @consumed: number of bytes consumed by the caller 67 67 * @dst: destination buffer 68 68 * @conf: search configuration ··· 79 79 struct ts_state *state); 80 80 81 81 /** 82 - * finish - finalize/clean a series of get_next_block() calls 82 + * @finish: finalize/clean a series of get_next_block() calls 83 83 * @conf: search configuration 84 84 * @state: search state 85 85 *
+1 -5
include/linux/thread_info.h
··· 43 43 #define THREAD_ALIGN THREAD_SIZE 44 44 #endif 45 45 46 - #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) 47 - # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) 48 - #else 49 - # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) 50 - #endif 46 + #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) 51 47 52 48 /* 53 49 * flag set/clear/test wrappers
-3
include/linux/timekeeping32.h
··· 9 9 extern void do_gettimeofday(struct timeval *tv); 10 10 unsigned long get_seconds(void); 11 11 12 - /* does not take xtime_lock */ 13 - struct timespec __current_kernel_time(void); 14 - 15 12 static inline struct timespec current_kernel_time(void) 16 13 { 17 14 struct timespec64 now = current_kernel_time64();
-2
include/linux/timer.h
··· 8 8 #include <linux/debugobjects.h> 9 9 #include <linux/stringify.h> 10 10 11 - struct tvec_base; 12 - 13 11 struct timer_list { 14 12 /* 15 13 * All fields that change during normal runtime grouped to the
+5 -2
include/sound/control.h
··· 23 23 */ 24 24 25 25 #include <linux/wait.h> 26 + #include <linux/nospec.h> 26 27 #include <sound/asound.h> 27 28 28 29 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) ··· 149 148 150 149 static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 151 150 { 152 - return id->numid - kctl->id.numid; 151 + unsigned int ioff = id->numid - kctl->id.numid; 152 + return array_index_nospec(ioff, kctl->count); 153 153 } 154 154 155 155 static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 156 156 { 157 - return id->index - kctl->id.index; 157 + unsigned int ioff = id->index - kctl->id.index; 158 + return array_index_nospec(ioff, kctl->count); 158 159 } 159 160 160 161 static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+15 -3
include/uapi/linux/perf_event.h
··· 650 650 #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 651 651 #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) 652 652 /* 653 - * Indicates that the content of PERF_SAMPLE_IP points to 654 - * the actual instruction that triggered the event. See also 655 - * perf_event_attr::precise_ip. 653 + * These PERF_RECORD_MISC_* flags below are safely reused 654 + * for the following events: 655 + * 656 + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events 657 + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events 658 + * 659 + * 660 + * PERF_RECORD_MISC_EXACT_IP: 661 + * Indicates that the content of PERF_SAMPLE_IP points to 662 + * the actual instruction that triggered the event. See also 663 + * perf_event_attr::precise_ip. 664 + * 665 + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: 666 + * Indicates that thread was preempted in TASK_RUNNING state. 656 667 */ 657 668 #define PERF_RECORD_MISC_EXACT_IP (1 << 14) 669 + #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) 658 670 /* 659 671 * Reserve the last bit to indicate some extended misc field 660 672 */
+3
include/uapi/linux/random.h
··· 35 35 /* Clear the entropy pool and associated counters. (Superuser only.) */ 36 36 #define RNDCLEARPOOL _IO( 'R', 0x06 ) 37 37 38 + /* Reseed CRNG. (Superuser only.) */ 39 + #define RNDRESEEDCRNG _IO( 'R', 0x07 ) 40 + 38 41 struct rand_pool_info { 39 42 int entropy_count; 40 43 int buf_size;
+307 -17
include/xen/interface/io/sndif.h
··· 38 38 39 39 /* 40 40 ****************************************************************************** 41 + * Protocol version 42 + ****************************************************************************** 43 + */ 44 + #define XENSND_PROTOCOL_VERSION 2 45 + 46 + /* 47 + ****************************************************************************** 41 48 * Feature and Parameter Negotiation 42 49 ****************************************************************************** 43 50 * ··· 113 106 * 114 107 * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386" 115 108 * /local/domain/1/device/vsnd/0/0/0/event-channel = "15" 109 + * /local/domain/1/device/vsnd/0/0/0/evt-ring-ref = "1386" 110 + * /local/domain/1/device/vsnd/0/0/0/evt-event-channel = "215" 116 111 * 117 112 *------------------------------ Stream 1, capture ---------------------------- 118 113 * ··· 124 115 * 125 116 * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384" 126 117 * /local/domain/1/device/vsnd/0/0/1/event-channel = "13" 118 + * /local/domain/1/device/vsnd/0/0/1/evt-ring-ref = "1384" 119 + * /local/domain/1/device/vsnd/0/0/1/evt-event-channel = "213" 127 120 * 128 121 *------------------------------- PCM device 1 -------------------------------- 129 122 * ··· 139 128 * 140 129 * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387" 141 130 * /local/domain/1/device/vsnd/0/1/0/event-channel = "151" 131 + * /local/domain/1/device/vsnd/0/1/0/evt-ring-ref = "1387" 132 + * /local/domain/1/device/vsnd/0/1/0/evt-event-channel = "351" 142 133 * 143 134 *------------------------------- PCM device 2 -------------------------------- 144 135 * ··· 153 140 * 154 141 * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389" 155 142 * /local/domain/1/device/vsnd/0/2/0/event-channel = "152" 143 + * /local/domain/1/device/vsnd/0/2/0/evt-ring-ref = "1389" 144 + * /local/domain/1/device/vsnd/0/2/0/evt-event-channel = "452" 156 145 * 157 146 ****************************************************************************** 
158 147 * Backend XenBus Nodes ··· 295 280 * in the ring buffer. 296 281 * 297 282 * ring-ref 283 + * Values: <uint32_t> 284 + * 285 + * The Xen grant reference granting permission for the backend to map 286 + * a sole page in a single page sized ring buffer. 287 + * 288 + *--------------------- Stream Event Transport Parameters --------------------- 289 + * 290 + * This communication path is used to deliver asynchronous events from backend 291 + * to frontend, set up per stream. 292 + * 293 + * evt-event-channel 294 + * Values: <uint32_t> 295 + * 296 + * The identifier of the Xen event channel used to signal activity 297 + * in the ring buffer. 298 + * 299 + * evt-ring-ref 298 300 * Values: <uint32_t> 299 301 * 300 302 * The Xen grant reference granting permission for the backend to map ··· 464 432 #define XENSND_OP_GET_VOLUME 5 465 433 #define XENSND_OP_MUTE 6 466 434 #define XENSND_OP_UNMUTE 7 435 + #define XENSND_OP_TRIGGER 8 436 + #define XENSND_OP_HW_PARAM_QUERY 9 437 + 438 + #define XENSND_OP_TRIGGER_START 0 439 + #define XENSND_OP_TRIGGER_PAUSE 1 440 + #define XENSND_OP_TRIGGER_STOP 2 441 + #define XENSND_OP_TRIGGER_RESUME 3 442 + 443 + /* 444 + ****************************************************************************** 445 + * EVENT CODES 446 + ****************************************************************************** 447 + */ 448 + #define XENSND_EVT_CUR_POS 0 467 449 468 450 /* 469 451 ****************************************************************************** ··· 494 448 #define XENSND_FIELD_VCARD_LONG_NAME "long-name" 495 449 #define XENSND_FIELD_RING_REF "ring-ref" 496 450 #define XENSND_FIELD_EVT_CHNL "event-channel" 451 + #define XENSND_FIELD_EVT_RING_REF "evt-ring-ref" 452 + #define XENSND_FIELD_EVT_EVT_CHNL "evt-event-channel" 497 453 #define XENSND_FIELD_DEVICE_NAME "name" 498 454 #define XENSND_FIELD_TYPE "type" 499 455 #define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id" ··· 574 526 * 575 527 *---------------------------------- Requests 
--------------------------------- 576 528 * 577 - * All request packets have the same length (32 octets) 529 + * All request packets have the same length (64 octets) 578 530 * All request packets have common header: 579 531 * 0 1 2 3 octet 580 532 * +----------------+----------------+----------------+----------------+ ··· 607 559 * +----------------+----------------+----------------+----------------+ 608 560 * | gref_directory | 24 609 561 * +----------------+----------------+----------------+----------------+ 610 - * | reserved | 28 562 + * | period_sz | 28 563 + * +----------------+----------------+----------------+----------------+ 564 + * | reserved | 32 611 565 * +----------------+----------------+----------------+----------------+ 612 566 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 613 567 * +----------------+----------------+----------------+----------------+ 614 - * | reserved | 32 568 + * | reserved | 64 615 569 * +----------------+----------------+----------------+----------------+ 616 570 * 617 571 * pcm_rate - uint32_t, stream data rate, Hz ··· 621 571 * pcm_channels - uint8_t, number of channels of this stream, 622 572 * [channels-min; channels-max] 623 573 * buffer_sz - uint32_t, buffer size to be allocated, octets 574 + * period_sz - uint32_t, event period size, octets 575 + * This is the requested value of the period at which frontend would 576 + * like to receive XENSND_EVT_CUR_POS notifications from the backend when 577 + * stream position advances during playback/capture. 578 + * It shows how many octets are expected to be played/captured before 579 + * sending such an event. 580 + * If set to 0 no XENSND_EVT_CUR_POS events are sent by the backend. 581 + * 624 582 * gref_directory - grant_ref_t, a reference to the first shared page 625 583 * describing shared buffer references. At least one page exists. 
If shared 626 584 * buffer size (buffer_sz) exceeds what can be addressed by this single page, ··· 643 585 uint16_t reserved; 644 586 uint32_t buffer_sz; 645 587 grant_ref_t gref_directory; 588 + uint32_t period_sz; 646 589 }; 647 590 648 591 /* ··· 691 632 * +----------------+----------------+----------------+----------------+ 692 633 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 693 634 * +----------------+----------------+----------------+----------------+ 694 - * | reserved | 32 635 + * | reserved | 64 695 636 * +----------------+----------------+----------------+----------------+ 696 637 * 697 638 * Request read/write - used for read (for capture) or write (for playback): ··· 709 650 * +----------------+----------------+----------------+----------------+ 710 651 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 711 652 * +----------------+----------------+----------------+----------------+ 712 - * | reserved | 32 653 + * | reserved | 64 713 654 * +----------------+----------------+----------------+----------------+ 714 655 * 715 656 * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write ··· 732 673 * +----------------+----------------+----------------+----------------+ 733 674 * | length | 16 734 675 * +----------------+----------------+----------------+----------------+ 676 + * | reserved | 20 677 + * +----------------+----------------+----------------+----------------+ 735 678 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 736 679 * +----------------+----------------+----------------+----------------+ 737 - * | reserved | 32 680 + * | reserved | 64 738 681 * +----------------+----------------+----------------+----------------+ 739 682 * 740 683 * operation - XENSND_OP_SET_VOLUME for volume set ··· 774 713 * +----------------+----------------+----------------+----------------+ 775 714 * | length | 16 776 715 * 
+----------------+----------------+----------------+----------------+ 716 + * | reserved | 20 717 + * +----------------+----------------+----------------+----------------+ 777 718 * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 778 719 * +----------------+----------------+----------------+----------------+ 779 - * | reserved | 32 720 + * | reserved | 64 780 721 * +----------------+----------------+----------------+----------------+ 781 722 * 782 723 * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute ··· 806 743 * 807 744 * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME, 808 745 * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE. 746 + * 747 + * Request stream running state change - trigger PCM stream running state 748 + * to start, stop, pause or resume: 749 + * 750 + * 0 1 2 3 octet 751 + * +----------------+----------------+----------------+----------------+ 752 + * | id | _OP_TRIGGER | reserved | 4 753 + * +----------------+----------------+----------------+----------------+ 754 + * | reserved | 8 755 + * +----------------+----------------+----------------+----------------+ 756 + * | type | reserved | 12 757 + * +----------------+----------------+----------------+----------------+ 758 + * | reserved | 16 759 + * +----------------+----------------+----------------+----------------+ 760 + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 761 + * +----------------+----------------+----------------+----------------+ 762 + * | reserved | 64 763 + * +----------------+----------------+----------------+----------------+ 764 + * 765 + * type - uint8_t, XENSND_OP_TRIGGER_XXX value 809 766 */ 767 + 768 + struct xensnd_trigger_req { 769 + uint8_t type; 770 + }; 771 + 772 + /* 773 + * Request stream parameter ranges: request intervals and 774 + * masks of supported ranges for stream configuration values. 
775 + * 776 + * Sound device configuration for a particular stream is a limited subset 777 + * of the multidimensional configuration available on XenStore, e.g. 778 + * once the frame rate has been selected there is a limited supported range 779 + * for sample rates becomes available (which might be the same set configured 780 + * on XenStore or less). For example, selecting 96kHz sample rate may limit 781 + * number of channels available for such configuration from 4 to 2, etc. 782 + * Thus, each call to XENSND_OP_HW_PARAM_QUERY may reduce configuration 783 + * space making it possible to iteratively get the final stream configuration, 784 + * used in XENSND_OP_OPEN request. 785 + * 786 + * See response format for this request. 787 + * 788 + * 0 1 2 3 octet 789 + * +----------------+----------------+----------------+----------------+ 790 + * | id | _HW_PARAM_QUERY| reserved | 4 791 + * +----------------+----------------+----------------+----------------+ 792 + * | reserved | 8 793 + * +----------------+----------------+----------------+----------------+ 794 + * | formats mask low 32-bit | 12 795 + * +----------------+----------------+----------------+----------------+ 796 + * | formats mask high 32-bit | 16 797 + * +----------------+----------------+----------------+----------------+ 798 + * | min rate | 20 799 + * +----------------+----------------+----------------+----------------+ 800 + * | max rate | 24 801 + * +----------------+----------------+----------------+----------------+ 802 + * | min channels | 28 803 + * +----------------+----------------+----------------+----------------+ 804 + * | max channels | 32 805 + * +----------------+----------------+----------------+----------------+ 806 + * | min buffer frames | 36 807 + * +----------------+----------------+----------------+----------------+ 808 + * | max buffer frames | 40 809 + * +----------------+----------------+----------------+----------------+ 810 + * | min period frames | 44 811 + * 
+----------------+----------------+----------------+----------------+ 812 + * | max period frames | 48 813 + * +----------------+----------------+----------------+----------------+ 814 + * | reserved | 52 815 + * +----------------+----------------+----------------+----------------+ 816 + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 817 + * +----------------+----------------+----------------+----------------+ 818 + * | reserved | 64 819 + * +----------------+----------------+----------------+----------------+ 820 + * 821 + * formats - uint64_t, bit mask representing values of the parameter 822 + * made as bitwise OR of (1 << XENSND_PCM_FORMAT_XXX) values 823 + * 824 + * For interval parameters: 825 + * min - uint32_t, minimum value of the parameter 826 + * max - uint32_t, maximum value of the parameter 827 + * 828 + * Frame is defined as a product of the number of channels by the 829 + * number of octets per one sample. 830 + */ 831 + 832 + struct xensnd_query_hw_param { 833 + uint64_t formats; 834 + struct { 835 + uint32_t min; 836 + uint32_t max; 837 + } rates; 838 + struct { 839 + uint32_t min; 840 + uint32_t max; 841 + } channels; 842 + struct { 843 + uint32_t min; 844 + uint32_t max; 845 + } buffer; 846 + struct { 847 + uint32_t min; 848 + uint32_t max; 849 + } period; 850 + }; 810 851 811 852 /* 812 853 *---------------------------------- Responses -------------------------------- 813 854 * 814 - * All response packets have the same length (32 octets) 855 + * All response packets have the same length (64 octets) 815 856 * 816 - * Response for all requests: 857 + * All response packets have common header: 817 858 * 0 1 2 3 octet 818 859 * +----------------+----------------+----------------+----------------+ 819 860 * | id | operation | reserved | 4 820 861 * +----------------+----------------+----------------+----------------+ 821 862 * | status | 8 822 863 * +----------------+----------------+----------------+----------------+ 823 - 
* | reserved | 12 824 - * +----------------+----------------+----------------+----------------+ 825 - * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 826 - * +----------------+----------------+----------------+----------------+ 827 - * | reserved | 32 828 - * +----------------+----------------+----------------+----------------+ 829 864 * 830 865 * id - uint16_t, copied from the request 831 866 * operation - uint8_t, XENSND_OP_* - copied from request 832 867 * status - int32_t, response status, zero on success and -XEN_EXX on failure 868 + * 869 + * 870 + * HW parameter query response - response for XENSND_OP_HW_PARAM_QUERY: 871 + * 0 1 2 3 octet 872 + * +----------------+----------------+----------------+----------------+ 873 + * | id | operation | reserved | 4 874 + * +----------------+----------------+----------------+----------------+ 875 + * | status | 8 876 + * +----------------+----------------+----------------+----------------+ 877 + * | formats mask low 32-bit | 12 878 + * +----------------+----------------+----------------+----------------+ 879 + * | formats mask high 32-bit | 16 880 + * +----------------+----------------+----------------+----------------+ 881 + * | min rate | 20 882 + * +----------------+----------------+----------------+----------------+ 883 + * | max rate | 24 884 + * +----------------+----------------+----------------+----------------+ 885 + * | min channels | 28 886 + * +----------------+----------------+----------------+----------------+ 887 + * | max channels | 32 888 + * +----------------+----------------+----------------+----------------+ 889 + * | min buffer frames | 36 890 + * +----------------+----------------+----------------+----------------+ 891 + * | max buffer frames | 40 892 + * +----------------+----------------+----------------+----------------+ 893 + * | min period frames | 44 894 + * +----------------+----------------+----------------+----------------+ 895 + * | max period frames | 48 896 + * 
+----------------+----------------+----------------+----------------+ 897 + * | reserved | 52 898 + * +----------------+----------------+----------------+----------------+ 899 + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 900 + * +----------------+----------------+----------------+----------------+ 901 + * | reserved | 64 902 + * +----------------+----------------+----------------+----------------+ 903 + * 904 + * Meaning of the values in this response is the same as for 905 + * XENSND_OP_HW_PARAM_QUERY request. 833 906 */ 907 + 908 + /* 909 + *----------------------------------- Events ---------------------------------- 910 + * 911 + * Events are sent via shared page allocated by the front and propagated by 912 + * evt-event-channel/evt-ring-ref XenStore entries 913 + * All event packets have the same length (64 octets) 914 + * All event packets have common header: 915 + * 0 1 2 3 octet 916 + * +----------------+----------------+----------------+----------------+ 917 + * | id | type | reserved | 4 918 + * +----------------+----------------+----------------+----------------+ 919 + * | reserved | 8 920 + * +----------------+----------------+----------------+----------------+ 921 + * 922 + * id - uint16_t, event id, may be used by front 923 + * type - uint8_t, type of the event 924 + * 925 + * 926 + * Current stream position - event from back to front when stream's 927 + * playback/capture position has advanced: 928 + * 0 1 2 3 octet 929 + * +----------------+----------------+----------------+----------------+ 930 + * | id | _EVT_CUR_POS | reserved | 4 931 + * +----------------+----------------+----------------+----------------+ 932 + * | reserved | 8 933 + * +----------------+----------------+----------------+----------------+ 934 + * | position low 32-bit | 12 935 + * +----------------+----------------+----------------+----------------+ 936 + * | position high 32-bit | 16 937 + * 
+----------------+----------------+----------------+----------------+ 938 + * | reserved | 20 939 + * +----------------+----------------+----------------+----------------+ 940 + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| 941 + * +----------------+----------------+----------------+----------------+ 942 + * | reserved | 64 943 + * +----------------+----------------+----------------+----------------+ 944 + * 945 + * position - current value of stream's playback/capture position, octets 946 + * 947 + */ 948 + 949 + struct xensnd_cur_pos_evt { 950 + uint64_t position; 951 + }; 834 952 835 953 struct xensnd_req { 836 954 uint16_t id; ··· 1020 776 union { 1021 777 struct xensnd_open_req open; 1022 778 struct xensnd_rw_req rw; 1023 - uint8_t reserved[24]; 779 + struct xensnd_trigger_req trigger; 780 + struct xensnd_query_hw_param hw_param; 781 + uint8_t reserved[56]; 1024 782 } op; 1025 783 }; 1026 784 ··· 1031 785 uint8_t operation; 1032 786 uint8_t reserved; 1033 787 int32_t status; 1034 - uint8_t reserved1[24]; 788 + union { 789 + struct xensnd_query_hw_param hw_param; 790 + uint8_t reserved1[56]; 791 + } resp; 792 + }; 793 + 794 + struct xensnd_evt { 795 + uint16_t id; 796 + uint8_t type; 797 + uint8_t reserved[5]; 798 + union { 799 + struct xensnd_cur_pos_evt cur_pos; 800 + uint8_t reserved[56]; 801 + } op; 1035 802 }; 1036 803 1037 804 DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp); 805 + 806 + /* 807 + ****************************************************************************** 808 + * Back to front events delivery 809 + ****************************************************************************** 810 + * In order to deliver asynchronous events from back to front a shared page is 811 + * allocated by front and its granted reference propagated to back via 812 + * XenStore entries (evt-ring-ref/evt-event-channel). 
813 + * This page has a common header used by both front and back to synchronize 814 + * access and control event's ring buffer, while back being a producer of the 815 + * events and front being a consumer. The rest of the page after the header 816 + * is used for event packets. 817 + * 818 + * Upon reception of an event(s) front may confirm its reception 819 + * for either each event, group of events or none. 820 + */ 821 + 822 + struct xensnd_event_page { 823 + uint32_t in_cons; 824 + uint32_t in_prod; 825 + uint8_t reserved[56]; 826 + }; 827 + 828 + #define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE 829 + #define XENSND_IN_RING_OFFS (sizeof(struct xensnd_event_page)) 830 + #define XENSND_IN_RING_SIZE (XENSND_EVENT_PAGE_SIZE - XENSND_IN_RING_OFFS) 831 + #define XENSND_IN_RING_LEN (XENSND_IN_RING_SIZE / sizeof(struct xensnd_evt)) 832 + #define XENSND_IN_RING(page) \ 833 + ((struct xensnd_evt *)((char *)(page) + XENSND_IN_RING_OFFS)) 834 + #define XENSND_IN_RING_REF(page, idx) \ 835 + (XENSND_IN_RING((page))[(idx) % XENSND_IN_RING_LEN]) 1038 836 1039 837 #endif /* __XEN_PUBLIC_IO_SNDIF_H__ */
+11 -14
kernel/events/callchain.c
··· 119 119 goto exit; 120 120 } 121 121 122 - if (count > 1) { 123 - /* If the allocation failed, give up */ 124 - if (!callchain_cpus_entries) 125 - err = -ENOMEM; 126 - /* 127 - * If requesting per event more than the global cap, 128 - * return a different error to help userspace figure 129 - * this out. 130 - * 131 - * And also do it here so that we have &callchain_mutex held. 132 - */ 133 - if (event_max_stack > sysctl_perf_event_max_stack) 134 - err = -EOVERFLOW; 122 + /* 123 + * If requesting per event more than the global cap, 124 + * return a different error to help userspace figure 125 + * this out. 126 + * 127 + * And also do it here so that we have &callchain_mutex held. 128 + */ 129 + if (event_max_stack > sysctl_perf_event_max_stack) { 130 + err = -EOVERFLOW; 135 131 goto exit; 136 132 } 137 133 138 - err = alloc_callchain_buffers(); 134 + if (count == 1) 135 + err = alloc_callchain_buffers(); 139 136 exit: 140 137 if (err) 141 138 atomic_dec(&nr_callchain_events);
+6 -2
kernel/events/core.c
··· 7587 7587 }, 7588 7588 }; 7589 7589 7590 + if (!sched_in && task->state == TASK_RUNNING) 7591 + switch_event.event_id.header.misc |= 7592 + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; 7593 + 7590 7594 perf_iterate_sb(perf_event_switch_output, 7591 7595 &switch_event, 7592 7596 NULL); ··· 10209 10205 * __u16 sample size limit. 10210 10206 */ 10211 10207 if (attr->sample_stack_user >= USHRT_MAX) 10212 - ret = -EINVAL; 10208 + return -EINVAL; 10213 10209 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) 10214 - ret = -EINVAL; 10210 + return -EINVAL; 10215 10211 } 10216 10212 10217 10213 if (!attr->sample_max_stack)
+1 -2
kernel/fork.c
··· 216 216 if (!s) 217 217 continue; 218 218 219 - #ifdef CONFIG_DEBUG_KMEMLEAK 220 219 /* Clear stale pointers from reused stack. */ 221 220 memset(s->addr, 0, THREAD_SIZE); 222 - #endif 221 + 223 222 tsk->stack_vm_area = s; 224 223 return s->addr; 225 224 }
+71 -37
kernel/livepatch/shadow.c
··· 113 113 } 114 114 EXPORT_SYMBOL_GPL(klp_shadow_get); 115 115 116 - static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 117 - size_t size, gfp_t gfp_flags, bool warn_on_exist) 116 + static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, 117 + size_t size, gfp_t gfp_flags, 118 + klp_shadow_ctor_t ctor, void *ctor_data, 119 + bool warn_on_exist) 118 120 { 119 121 struct klp_shadow *new_shadow; 120 122 void *shadow_data; ··· 127 125 if (shadow_data) 128 126 goto exists; 129 127 130 - /* Allocate a new shadow variable for use inside the lock below */ 128 + /* 129 + * Allocate a new shadow variable. Fill it with zeroes by default. 130 + * More complex setting can be done by @ctor function. But it is 131 + * called only when the buffer is really used (under klp_shadow_lock). 132 + */ 131 133 new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); 132 134 if (!new_shadow) 133 135 return NULL; 134 - 135 - new_shadow->obj = obj; 136 - new_shadow->id = id; 137 - 138 - /* Initialize the shadow variable if data provided */ 139 - if (data) 140 - memcpy(new_shadow->data, data, size); 141 136 142 137 /* Look for <obj, id> again under the lock */ 143 138 spin_lock_irqsave(&klp_shadow_lock, flags); ··· 147 148 spin_unlock_irqrestore(&klp_shadow_lock, flags); 148 149 kfree(new_shadow); 149 150 goto exists; 151 + } 152 + 153 + new_shadow->obj = obj; 154 + new_shadow->id = id; 155 + 156 + if (ctor) { 157 + int err; 158 + 159 + err = ctor(obj, new_shadow->data, ctor_data); 160 + if (err) { 161 + spin_unlock_irqrestore(&klp_shadow_lock, flags); 162 + kfree(new_shadow); 163 + pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n", 164 + obj, id, err); 165 + return NULL; 166 + } 150 167 } 151 168 152 169 /* No <obj, id> found, so attach the newly allocated one */ ··· 185 170 * klp_shadow_alloc() - allocate and add a new shadow variable 186 171 * @obj: pointer to parent object 187 172 * @id: data identifier 188 - * @data: pointer to 
data to attach to parent 189 173 * @size: size of attached data 190 174 * @gfp_flags: GFP mask for allocation 175 + * @ctor: custom constructor to initialize the shadow data (optional) 176 + * @ctor_data: pointer to any data needed by @ctor (optional) 191 177 * 192 - * Allocates @size bytes for new shadow variable data using @gfp_flags 193 - * and copies @size bytes from @data into the new shadow variable's own 194 - * data space. If @data is NULL, @size bytes are still allocated, but 195 - * no copy is performed. The new shadow variable is then added to the 196 - * global hashtable. 178 + * Allocates @size bytes for new shadow variable data using @gfp_flags. 179 + * The data are zeroed by default. They are further initialized by @ctor 180 + * function if it is not NULL. The new shadow variable is then added 181 + * to the global hashtable. 197 182 * 198 - * If an existing <obj, id> shadow variable can be found, this routine 199 - * will issue a WARN, exit early and return NULL. 183 + * If an existing <obj, id> shadow variable can be found, this routine will 184 + * issue a WARN, exit early and return NULL. 185 + * 186 + * This function guarantees that the constructor function is called only when 187 + * the variable did not exist before. The cost is that @ctor is called 188 + * in atomic context under a spin lock. 200 189 * 201 190 * Return: the shadow variable data element, NULL on duplicate or 202 191 * failure. 
203 192 */ 204 - void *klp_shadow_alloc(void *obj, unsigned long id, void *data, 205 - size_t size, gfp_t gfp_flags) 193 + void *klp_shadow_alloc(void *obj, unsigned long id, 194 + size_t size, gfp_t gfp_flags, 195 + klp_shadow_ctor_t ctor, void *ctor_data) 206 196 { 207 - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); 197 + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, 198 + ctor, ctor_data, true); 208 199 } 209 200 EXPORT_SYMBOL_GPL(klp_shadow_alloc); 210 201 ··· 218 197 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable 219 198 * @obj: pointer to parent object 220 199 * @id: data identifier 221 - * @data: pointer to data to attach to parent 222 200 * @size: size of attached data 223 201 * @gfp_flags: GFP mask for allocation 202 + * @ctor: custom constructor to initialize the shadow data (optional) 203 + * @ctor_data: pointer to any data needed by @ctor (optional) 224 204 * 225 205 * Returns a pointer to existing shadow data if an <obj, id> shadow 226 206 * variable is already present. Otherwise, it creates a new shadow 227 207 * variable like klp_shadow_alloc(). 228 208 * 229 - * This function guarantees that only one shadow variable exists with 230 - * the given @id for the given @obj. It also guarantees that the shadow 231 - * variable will be initialized by the given @data only when it did not 232 - * exist before. 209 + * This function guarantees that only one shadow variable exists with the given 210 + * @id for the given @obj. It also guarantees that the constructor function 211 + * will be called only when the variable did not exist before. The cost is 212 + * that @ctor is called in atomic context under a spin lock. 233 213 * 234 214 * Return: the shadow variable data element, NULL on failure. 
235 215 */ 236 - void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, 237 - size_t size, gfp_t gfp_flags) 216 + void *klp_shadow_get_or_alloc(void *obj, unsigned long id, 217 + size_t size, gfp_t gfp_flags, 218 + klp_shadow_ctor_t ctor, void *ctor_data) 238 219 { 239 - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); 220 + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, 221 + ctor, ctor_data, false); 240 222 } 241 223 EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); 224 + 225 + static void klp_shadow_free_struct(struct klp_shadow *shadow, 226 + klp_shadow_dtor_t dtor) 227 + { 228 + hash_del_rcu(&shadow->node); 229 + if (dtor) 230 + dtor(shadow->obj, shadow->data); 231 + kfree_rcu(shadow, rcu_head); 232 + } 242 233 243 234 /** 244 235 * klp_shadow_free() - detach and free a <obj, id> shadow variable 245 236 * @obj: pointer to parent object 246 237 * @id: data identifier 238 + * @dtor: custom callback that can be used to unregister the variable 239 + * and/or free data that the shadow variable points to (optional) 247 240 * 248 241 * This function releases the memory for this <obj, id> shadow variable 249 242 * instance, callers should stop referencing it accordingly. 
250 243 */ 251 - void klp_shadow_free(void *obj, unsigned long id) 244 + void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) 252 245 { 253 246 struct klp_shadow *shadow; 254 247 unsigned long flags; ··· 274 239 (unsigned long)obj) { 275 240 276 241 if (klp_shadow_match(shadow, obj, id)) { 277 - hash_del_rcu(&shadow->node); 278 - kfree_rcu(shadow, rcu_head); 242 + klp_shadow_free_struct(shadow, dtor); 279 243 break; 280 244 } 281 245 } ··· 286 252 /** 287 253 * klp_shadow_free_all() - detach and free all <*, id> shadow variables 288 254 * @id: data identifier 255 + * @dtor: custom callback that can be used to unregister the variable 256 + * and/or free data that the shadow variable points to (optional) 289 257 * 290 258 * This function releases the memory for all <*, id> shadow variable 291 259 * instances, callers should stop referencing them accordingly. 292 260 */ 293 - void klp_shadow_free_all(unsigned long id) 261 + void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) 294 262 { 295 263 struct klp_shadow *shadow; 296 264 unsigned long flags; ··· 302 266 303 267 /* Delete all <*, id> from hash */ 304 268 hash_for_each(klp_shadow_hash, i, shadow, node) { 305 - if (klp_shadow_match(shadow, shadow->obj, id)) { 306 - hash_del_rcu(&shadow->node); 307 - kfree_rcu(shadow, rcu_head); 308 - } 269 + if (klp_shadow_match(shadow, shadow->obj, id)) 270 + klp_shadow_free_struct(shadow, dtor); 309 271 } 310 272 311 273 spin_unlock_irqrestore(&klp_shadow_lock, flags);
+3 -1
kernel/time/posix-cpu-timers.c
··· 1205 1205 u64 *newval, u64 *oldval) 1206 1206 { 1207 1207 u64 now; 1208 + int ret; 1208 1209 1209 1210 WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); 1211 + ret = cpu_timer_sample_group(clock_idx, tsk, &now); 1210 1212 1211 - if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) { 1213 + if (oldval && ret != -EINVAL) { 1212 1214 /* 1213 1215 * We are setting itimer. The *oldval is absolute and we update 1214 1216 * it to be relative, *newval argument is relative and we update
+5 -6
kernel/time/tick-oneshot.c
··· 82 82 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || 83 83 !tick_device_is_functional(dev)) { 84 84 85 - printk(KERN_INFO "Clockevents: " 86 - "could not switch to one-shot mode:"); 85 + pr_info("Clockevents: could not switch to one-shot mode:"); 87 86 if (!dev) { 88 - printk(" no tick device\n"); 87 + pr_cont(" no tick device\n"); 89 88 } else { 90 89 if (!tick_device_is_functional(dev)) 91 - printk(" %s is not functional.\n", dev->name); 90 + pr_cont(" %s is not functional.\n", dev->name); 92 91 else 93 - printk(" %s does not support one-shot mode.\n", 94 - dev->name); 92 + pr_cont(" %s does not support one-shot mode.\n", 93 + dev->name); 95 94 } 96 95 return -EINVAL; 97 96 }
-7
kernel/time/timekeeping.c
··· 2139 2139 } 2140 2140 EXPORT_SYMBOL(get_seconds); 2141 2141 2142 - struct timespec __current_kernel_time(void) 2143 - { 2144 - struct timekeeper *tk = &tk_core.timekeeper; 2145 - 2146 - return timespec64_to_timespec(tk_xtime(tk)); 2147 - } 2148 - 2149 2142 struct timespec64 current_kernel_time64(void) 2150 2143 { 2151 2144 struct timekeeper *tk = &tk_core.timekeeper;
-2
kernel/trace/trace_kprobe.c
··· 512 512 if (ret == 0) 513 513 tk->tp.flags |= TP_FLAG_REGISTERED; 514 514 else { 515 - pr_warn("Could not insert probe at %s+%lu: %d\n", 516 - trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); 517 515 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { 518 516 pr_warn("This probe might be able to register after target module is loaded. Continue.\n"); 519 517 ret = 0;
+23 -17
lib/textsearch.c
··· 10 10 * Pablo Neira Ayuso <pablo@netfilter.org> 11 11 * 12 12 * ========================================================================== 13 - * 13 + */ 14 + 15 + /** 16 + * DOC: ts_intro 14 17 * INTRODUCTION 15 18 * 16 19 * The textsearch infrastructure provides text searching facilities for ··· 22 19 * 23 20 * ARCHITECTURE 24 21 * 25 - * User 22 + * .. code-block:: none 23 + * 24 + * User 26 25 * +----------------+ 27 26 * | finish()|<--------------(6)-----------------+ 28 27 * |get_next_block()|<--------------(5)---------------+ | ··· 38 33 * | (3)|----->| find()/next() |-----------+ | 39 34 * | (7)|----->| destroy() |----------------------+ 40 35 * +----------------+ +---------------+ 41 - * 42 - * (1) User configures a search by calling _prepare() specifying the 43 - * search parameters such as the pattern and algorithm name. 36 + * 37 + * (1) User configures a search by calling textsearch_prepare() specifying 38 + * the search parameters such as the pattern and algorithm name. 44 39 * (2) Core requests the algorithm to allocate and initialize a search 45 40 * configuration according to the specified parameters. 46 - * (3) User starts the search(es) by calling _find() or _next() to 47 - * fetch subsequent occurrences. A state variable is provided 48 - * to the algorithm to store persistent variables. 41 + * (3) User starts the search(es) by calling textsearch_find() or 42 + * textsearch_next() to fetch subsequent occurrences. A state variable 43 + * is provided to the algorithm to store persistent variables. 49 44 * (4) Core eventually resets the search offset and forwards the find() 50 45 * request to the algorithm. 51 46 * (5) Algorithm calls get_next_block() provided by the user continuously 52 47 * to fetch the data to be searched in block by block. 53 48 * (6) Algorithm invokes finish() after the last call to get_next_block 54 49 * to clean up any leftovers from get_next_block. 
(Optional) 55 - * (7) User destroys the configuration by calling _destroy(). 50 + * (7) User destroys the configuration by calling textsearch_destroy(). 56 51 * (8) Core notifies the algorithm to destroy algorithm specific 57 52 * allocations. (Optional) 58 53 * ··· 67 62 * amount of times and even in parallel as long as a separate struct 68 63 * ts_state variable is provided to every instance. 69 64 * 70 - * The actual search is performed by either calling textsearch_find_- 71 - * continuous() for linear data or by providing an own get_next_block() 72 - * implementation and calling textsearch_find(). Both functions return 65 + * The actual search is performed by either calling 66 + * textsearch_find_continuous() for linear data or by providing 67 + * an own get_next_block() implementation and 68 + * calling textsearch_find(). Both functions return 73 69 * the position of the first occurrence of the pattern or UINT_MAX if 74 70 * no match was found. Subsequent occurrences can be found by calling 75 71 * textsearch_next() regardless of the linearity of the data. ··· 78 72 * Once you're done using a configuration it must be given back via 79 73 * textsearch_destroy. 
80 74 * 81 - * EXAMPLE 75 + * EXAMPLE:: 82 76 * 83 77 * int pos; 84 78 * struct ts_config *conf; ··· 93 87 * goto errout; 94 88 * } 95 89 * 96 - * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); 90 + * pos = textsearch_find_continuous(conf, \&state, example, strlen(example)); 97 91 * if (pos != UINT_MAX) 98 - * panic("Oh my god, dancing chickens at %d\n", pos); 92 + * panic("Oh my god, dancing chickens at \%d\n", pos); 99 93 * 100 94 * textsearch_destroy(conf); 101 - * ========================================================================== 102 95 */ 96 + /* ========================================================================== */ 103 97 104 98 #include <linux/module.h> 105 99 #include <linux/types.h> ··· 231 225 * 232 226 * Returns the position of first occurrence of the pattern or 233 227 * %UINT_MAX if no occurrence was found. 234 - */ 228 + */ 235 229 unsigned int textsearch_find_continuous(struct ts_config *conf, 236 230 struct ts_state *state, 237 231 const void *data, unsigned int len)
+4 -5
mm/filemap.c
··· 786 786 VM_BUG_ON_PAGE(!PageLocked(new), new); 787 787 VM_BUG_ON_PAGE(new->mapping, new); 788 788 789 - error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 789 + error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); 790 790 if (!error) { 791 791 struct address_space *mapping = old->mapping; 792 792 void (*freepage)(struct page *); ··· 842 842 return error; 843 843 } 844 844 845 - error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); 845 + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); 846 846 if (error) { 847 847 if (!huge) 848 848 mem_cgroup_cancel_charge(page, memcg, false); ··· 1585 1585 if (fgp_flags & FGP_ACCESSED) 1586 1586 __SetPageReferenced(page); 1587 1587 1588 - err = add_to_page_cache_lru(page, mapping, offset, 1589 - gfp_mask & GFP_RECLAIM_MASK); 1588 + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); 1590 1589 if (unlikely(err)) { 1591 1590 put_page(page); 1592 1591 page = NULL; ··· 2386 2387 if (!page) 2387 2388 return -ENOMEM; 2388 2389 2389 - ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); 2390 + ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); 2390 2391 if (ret == 0) 2391 2392 ret = mapping->a_ops->readpage(file, page); 2392 2393 else if (ret == -EEXIST)
+4 -1
mm/huge_memory.c
··· 2925 2925 pmde = maybe_pmd_mkwrite(pmde, vma); 2926 2926 2927 2927 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); 2928 - page_add_anon_rmap(new, vma, mmun_start, true); 2928 + if (PageAnon(new)) 2929 + page_add_anon_rmap(new, vma, mmun_start, true); 2930 + else 2931 + page_add_file_rmap(new, true); 2929 2932 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 2930 2933 if (vma->vm_flags & VM_LOCKED) 2931 2934 mlock_vma_page(new);
+1 -1
mm/memcontrol.c
··· 2192 2192 { 2193 2193 struct memcg_kmem_cache_create_work *cw; 2194 2194 2195 - cw = kmalloc(sizeof(*cw), GFP_NOWAIT); 2195 + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); 2196 2196 if (!cw) 2197 2197 return; 2198 2198
+19 -3
mm/migrate.c
··· 472 472 pslot = radix_tree_lookup_slot(&mapping->i_pages, 473 473 page_index(page)); 474 474 475 - expected_count += 1 + page_has_private(page); 475 + expected_count += hpage_nr_pages(page) + page_has_private(page); 476 476 if (page_count(page) != expected_count || 477 477 radix_tree_deref_slot_protected(pslot, 478 478 &mapping->i_pages.xa_lock) != page) { ··· 505 505 */ 506 506 newpage->index = page->index; 507 507 newpage->mapping = page->mapping; 508 - get_page(newpage); /* add cache reference */ 508 + page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ 509 509 if (PageSwapBacked(page)) { 510 510 __SetPageSwapBacked(newpage); 511 511 if (PageSwapCache(page)) { ··· 524 524 } 525 525 526 526 radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); 527 + if (PageTransHuge(page)) { 528 + int i; 529 + int index = page_index(page); 530 + 531 + for (i = 0; i < HPAGE_PMD_NR; i++) { 532 + pslot = radix_tree_lookup_slot(&mapping->i_pages, 533 + index + i); 534 + radix_tree_replace_slot(&mapping->i_pages, pslot, 535 + newpage + i); 536 + } 537 + } else { 538 + radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); 539 + } 527 540 528 541 /* 529 542 * Drop cache reference from old page by unfreezing 530 543 * to one less reference. 531 544 * We know this isn't the last reference. 532 545 */ 533 - page_ref_unfreeze(page, expected_count - 1); 546 + page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); 534 547 535 548 xa_unlock(&mapping->i_pages); 536 549 /* Leave irq disabled to prevent preemption while updating stats */ ··· 1635 1622 current_node = NUMA_NO_NODE; 1636 1623 } 1637 1624 out_flush: 1625 + if (list_empty(&pagelist)) 1626 + return err; 1627 + 1638 1628 /* Make sure we do not overwrite the existing error */ 1639 1629 err1 = do_move_pages_to_node(mm, &pagelist, current_node); 1640 1630 if (!err1)
+9 -9
mm/page-writeback.c
··· 2502 2502 if (mapping && mapping_cap_account_dirty(mapping)) { 2503 2503 struct inode *inode = mapping->host; 2504 2504 struct bdi_writeback *wb; 2505 - bool locked; 2505 + struct wb_lock_cookie cookie = {}; 2506 2506 2507 - wb = unlocked_inode_to_wb_begin(inode, &locked); 2507 + wb = unlocked_inode_to_wb_begin(inode, &cookie); 2508 2508 current->nr_dirtied--; 2509 2509 dec_node_page_state(page, NR_DIRTIED); 2510 2510 dec_wb_stat(wb, WB_DIRTIED); 2511 - unlocked_inode_to_wb_end(inode, locked); 2511 + unlocked_inode_to_wb_end(inode, &cookie); 2512 2512 } 2513 2513 } 2514 2514 EXPORT_SYMBOL(account_page_redirty); ··· 2614 2614 if (mapping_cap_account_dirty(mapping)) { 2615 2615 struct inode *inode = mapping->host; 2616 2616 struct bdi_writeback *wb; 2617 - bool locked; 2617 + struct wb_lock_cookie cookie = {}; 2618 2618 2619 2619 lock_page_memcg(page); 2620 - wb = unlocked_inode_to_wb_begin(inode, &locked); 2620 + wb = unlocked_inode_to_wb_begin(inode, &cookie); 2621 2621 2622 2622 if (TestClearPageDirty(page)) 2623 2623 account_page_cleaned(page, mapping, wb); 2624 2624 2625 - unlocked_inode_to_wb_end(inode, locked); 2625 + unlocked_inode_to_wb_end(inode, &cookie); 2626 2626 unlock_page_memcg(page); 2627 2627 } else { 2628 2628 ClearPageDirty(page); ··· 2654 2654 if (mapping && mapping_cap_account_dirty(mapping)) { 2655 2655 struct inode *inode = mapping->host; 2656 2656 struct bdi_writeback *wb; 2657 - bool locked; 2657 + struct wb_lock_cookie cookie = {}; 2658 2658 2659 2659 /* 2660 2660 * Yes, Virginia, this is indeed insane. ··· 2691 2691 * always locked coming in here, so we get the desired 2692 2692 * exclusion. 
2693 2693 */ 2694 - wb = unlocked_inode_to_wb_begin(inode, &locked); 2694 + wb = unlocked_inode_to_wb_begin(inode, &cookie); 2695 2695 if (TestClearPageDirty(page)) { 2696 2696 dec_lruvec_page_state(page, NR_FILE_DIRTY); 2697 2697 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2698 2698 dec_wb_stat(wb, WB_RECLAIMABLE); 2699 2699 ret = 1; 2700 2700 } 2701 - unlocked_inode_to_wb_end(inode, locked); 2701 + unlocked_inode_to_wb_end(inode, &cookie); 2702 2702 return ret; 2703 2703 } 2704 2704 return TestClearPageDirty(page);
-3
mm/rmap.c
··· 1374 1374 if (!pvmw.pte && (flags & TTU_MIGRATION)) { 1375 1375 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); 1376 1376 1377 - if (!PageAnon(page)) 1378 - continue; 1379 - 1380 1377 set_pmd_migration_entry(&pvmw, page); 1381 1378 continue; 1382 1379 }
+20 -1
mm/vmscan.c
··· 303 303 /* 304 304 * Add a shrinker callback to be called from the vm. 305 305 */ 306 - int register_shrinker(struct shrinker *shrinker) 306 + int prealloc_shrinker(struct shrinker *shrinker) 307 307 { 308 308 size_t size = sizeof(*shrinker->nr_deferred); 309 309 ··· 313 313 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); 314 314 if (!shrinker->nr_deferred) 315 315 return -ENOMEM; 316 + return 0; 317 + } 316 318 319 + void free_prealloced_shrinker(struct shrinker *shrinker) 320 + { 321 + kfree(shrinker->nr_deferred); 322 + shrinker->nr_deferred = NULL; 323 + } 324 + 325 + void register_shrinker_prepared(struct shrinker *shrinker) 326 + { 317 327 down_write(&shrinker_rwsem); 318 328 list_add_tail(&shrinker->list, &shrinker_list); 319 329 up_write(&shrinker_rwsem); 330 + } 331 + 332 + int register_shrinker(struct shrinker *shrinker) 333 + { 334 + int err = prealloc_shrinker(shrinker); 335 + 336 + if (err) 337 + return err; 338 + register_shrinker_prepared(shrinker); 320 339 return 0; 321 340 } 322 341 EXPORT_SYMBOL(register_shrinker);
+1 -1
net/caif/chnl_net.c
··· 174 174 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" : 175 175 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" : 176 176 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ? 177 - "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND"); 177 + "REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND"); 178 178 179 179 180 180
+1 -1
net/core/dev.c
··· 2969 2969 } 2970 2970 EXPORT_SYMBOL(passthru_features_check); 2971 2971 2972 - static netdev_features_t dflt_features_check(const struct sk_buff *skb, 2972 + static netdev_features_t dflt_features_check(struct sk_buff *skb, 2973 2973 struct net_device *dev, 2974 2974 netdev_features_t features) 2975 2975 {
+1 -1
net/core/dev_addr_lists.c
··· 839 839 EXPORT_SYMBOL(dev_mc_flush); 840 840 841 841 /** 842 - * dev_mc_flush - Init multicast address list 842 + * dev_mc_init - Init multicast address list 843 843 * @dev: device 844 844 * 845 845 * Init multicast address list.
+26 -14
net/core/neighbour.c
··· 55 55 static void __neigh_notify(struct neighbour *n, int type, int flags, 56 56 u32 pid); 57 57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); 58 - static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); 58 + static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, 59 + struct net_device *dev); 59 60 60 61 #ifdef CONFIG_PROC_FS 61 62 static const struct file_operations neigh_stat_seq_fops; ··· 292 291 { 293 292 write_lock_bh(&tbl->lock); 294 293 neigh_flush_dev(tbl, dev); 295 - pneigh_ifdown(tbl, dev); 296 - write_unlock_bh(&tbl->lock); 294 + pneigh_ifdown_and_unlock(tbl, dev); 297 295 298 296 del_timer_sync(&tbl->proxy_timer); 299 297 pneigh_queue_purge(&tbl->proxy_queue); ··· 681 681 return -ENOENT; 682 682 } 683 683 684 - static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 684 + static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, 685 + struct net_device *dev) 685 686 { 686 - struct pneigh_entry *n, **np; 687 + struct pneigh_entry *n, **np, *freelist = NULL; 687 688 u32 h; 688 689 689 690 for (h = 0; h <= PNEIGH_HASHMASK; h++) { ··· 692 691 while ((n = *np) != NULL) { 693 692 if (!dev || n->dev == dev) { 694 693 *np = n->next; 695 - if (tbl->pdestructor) 696 - tbl->pdestructor(n); 697 - if (n->dev) 698 - dev_put(n->dev); 699 - kfree(n); 694 + n->next = freelist; 695 + freelist = n; 700 696 continue; 701 697 } 702 698 np = &n->next; 703 699 } 700 + } 701 + write_unlock_bh(&tbl->lock); 702 + while ((n = freelist)) { 703 + freelist = n->next; 704 + n->next = NULL; 705 + if (tbl->pdestructor) 706 + tbl->pdestructor(n); 707 + if (n->dev) 708 + dev_put(n->dev); 709 + kfree(n); 704 710 } 705 711 return -ENOENT; 706 712 } ··· 2331 2323 2332 2324 err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); 2333 2325 if (!err) { 2334 - if (tb[NDA_IFINDEX]) 2326 + if (tb[NDA_IFINDEX]) { 2327 + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) 2328 + return -EINVAL; 2335 2329 filter_idx 
= nla_get_u32(tb[NDA_IFINDEX]); 2336 - 2337 - if (tb[NDA_MASTER]) 2330 + } 2331 + if (tb[NDA_MASTER]) { 2332 + if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) 2333 + return -EINVAL; 2338 2334 filter_master_idx = nla_get_u32(tb[NDA_MASTER]); 2339 - 2335 + } 2340 2336 if (filter_idx || filter_master_idx) 2341 2337 flags |= NLM_F_DUMP_FILTERED; 2342 2338 }
+5 -7
net/dns_resolver/dns_key.c
··· 91 91 92 92 next_opt = memchr(opt, '#', end - opt) ?: end; 93 93 opt_len = next_opt - opt; 94 - if (!opt_len) { 95 - printk(KERN_WARNING 96 - "Empty option to dns_resolver key\n"); 94 + if (opt_len <= 0 || opt_len > 128) { 95 + pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", 96 + opt_len); 97 97 return -EINVAL; 98 98 } 99 99 ··· 127 127 } 128 128 129 129 bad_option_value: 130 - printk(KERN_WARNING 131 - "Option '%*.*s' to dns_resolver key:" 132 - " bad/missing value\n", 133 - opt_nlen, opt_nlen, opt); 130 + pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", 131 + opt_nlen, opt_nlen, opt); 134 132 return -EINVAL; 135 133 } while (opt = next_opt + 1, opt < end); 136 134 }
+5 -3
net/ipv4/ip_output.c
··· 1109 1109 struct ip_options_rcu *opt; 1110 1110 struct rtable *rt; 1111 1111 1112 + rt = *rtp; 1113 + if (unlikely(!rt)) 1114 + return -EFAULT; 1115 + 1112 1116 /* 1113 1117 * setup for corking. 1114 1118 */ ··· 1128 1124 cork->flags |= IPCORK_OPT; 1129 1125 cork->addr = ipc->addr; 1130 1126 } 1131 - rt = *rtp; 1132 - if (unlikely(!rt)) 1133 - return -EFAULT; 1127 + 1134 1128 /* 1135 1129 * We steal reference to this route, caller should not release it 1136 1130 */
+5 -3
net/ipv4/tcp.c
··· 2368 2368 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2369 2369 sk_mem_reclaim(sk); 2370 2370 tcp_clear_all_retrans_hints(tcp_sk(sk)); 2371 + tcp_sk(sk)->packets_out = 0; 2371 2372 } 2372 2373 2373 2374 int tcp_disconnect(struct sock *sk, int flags) ··· 2418 2417 icsk->icsk_backoff = 0; 2419 2418 tp->snd_cwnd = 2; 2420 2419 icsk->icsk_probes_out = 0; 2421 - tp->packets_out = 0; 2422 2420 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2423 2421 tp->snd_cwnd_cnt = 0; 2424 2422 tp->window_clamp = 0; ··· 2813 2813 #ifdef CONFIG_TCP_MD5SIG 2814 2814 case TCP_MD5SIG: 2815 2815 case TCP_MD5SIG_EXT: 2816 - /* Read the IP->Key mappings from userspace */ 2817 - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 2816 + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 2817 + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 2818 + else 2819 + err = -EINVAL; 2818 2820 break; 2819 2821 #endif 2820 2822 case TCP_USER_TIMEOUT:
+20 -20
net/l2tp/l2tp_core.c
··· 183 183 } 184 184 EXPORT_SYMBOL_GPL(l2tp_tunnel_get); 185 185 186 + struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) 187 + { 188 + const struct l2tp_net *pn = l2tp_pernet(net); 189 + struct l2tp_tunnel *tunnel; 190 + int count = 0; 191 + 192 + rcu_read_lock_bh(); 193 + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 194 + if (++count > nth) { 195 + l2tp_tunnel_inc_refcount(tunnel); 196 + rcu_read_unlock_bh(); 197 + return tunnel; 198 + } 199 + } 200 + rcu_read_unlock_bh(); 201 + 202 + return NULL; 203 + } 204 + EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); 205 + 186 206 /* Lookup a session. A new reference is held on the returned session. */ 187 207 struct l2tp_session *l2tp_session_get(const struct net *net, 188 208 struct l2tp_tunnel *tunnel, ··· 354 334 return err; 355 335 } 356 336 EXPORT_SYMBOL_GPL(l2tp_session_register); 357 - 358 - struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) 359 - { 360 - struct l2tp_net *pn = l2tp_pernet(net); 361 - struct l2tp_tunnel *tunnel; 362 - int count = 0; 363 - 364 - rcu_read_lock_bh(); 365 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 366 - if (++count > nth) { 367 - rcu_read_unlock_bh(); 368 - return tunnel; 369 - } 370 - } 371 - 372 - rcu_read_unlock_bh(); 373 - 374 - return NULL; 375 - } 376 - EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); 377 337 378 338 /***************************************************************************** 379 339 * Receive data handling
+2 -1
net/l2tp/l2tp_core.h
··· 212 212 } 213 213 214 214 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); 215 + struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); 216 + 215 217 void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 216 218 217 219 struct l2tp_session *l2tp_session_get(const struct net *net, ··· 222 220 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); 223 221 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, 224 222 const char *ifname); 225 - struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); 226 223 227 224 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, 228 225 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+13 -2
net/l2tp/l2tp_debugfs.c
··· 47 47 48 48 static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) 49 49 { 50 - pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); 50 + /* Drop reference taken during previous invocation */ 51 + if (pd->tunnel) 52 + l2tp_tunnel_dec_refcount(pd->tunnel); 53 + 54 + pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx); 51 55 pd->tunnel_idx++; 52 56 } 53 57 ··· 100 96 101 97 static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) 102 98 { 103 - /* nothing to do */ 99 + struct l2tp_dfs_seq_data *pd = v; 100 + 101 + if (!pd || pd == SEQ_START_TOKEN) 102 + return; 103 + 104 + /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ 105 + if (pd->tunnel) 106 + l2tp_tunnel_dec_refcount(pd->tunnel); 104 107 } 105 108 106 109 static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
+8 -3
net/l2tp/l2tp_netlink.c
··· 487 487 struct net *net = sock_net(skb->sk); 488 488 489 489 for (;;) { 490 - tunnel = l2tp_tunnel_find_nth(net, ti); 490 + tunnel = l2tp_tunnel_get_nth(net, ti); 491 491 if (tunnel == NULL) 492 492 goto out; 493 493 494 494 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, 495 495 cb->nlh->nlmsg_seq, NLM_F_MULTI, 496 - tunnel, L2TP_CMD_TUNNEL_GET) < 0) 496 + tunnel, L2TP_CMD_TUNNEL_GET) < 0) { 497 + l2tp_tunnel_dec_refcount(tunnel); 497 498 goto out; 499 + } 500 + l2tp_tunnel_dec_refcount(tunnel); 498 501 499 502 ti++; 500 503 } ··· 851 848 852 849 for (;;) { 853 850 if (tunnel == NULL) { 854 - tunnel = l2tp_tunnel_find_nth(net, ti); 851 + tunnel = l2tp_tunnel_get_nth(net, ti); 855 852 if (tunnel == NULL) 856 853 goto out; 857 854 } ··· 859 856 session = l2tp_session_get_nth(tunnel, si); 860 857 if (session == NULL) { 861 858 ti++; 859 + l2tp_tunnel_dec_refcount(tunnel); 862 860 tunnel = NULL; 863 861 si = 0; 864 862 continue; ··· 869 865 cb->nlh->nlmsg_seq, NLM_F_MULTI, 870 866 session, L2TP_CMD_SESSION_GET) < 0) { 871 867 l2tp_session_dec_refcount(session); 868 + l2tp_tunnel_dec_refcount(tunnel); 872 869 break; 873 870 } 874 871 l2tp_session_dec_refcount(session);
+17 -7
net/l2tp/l2tp_ppp.c
··· 1551 1551 1552 1552 static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) 1553 1553 { 1554 + /* Drop reference taken during previous invocation */ 1555 + if (pd->tunnel) 1556 + l2tp_tunnel_dec_refcount(pd->tunnel); 1557 + 1554 1558 for (;;) { 1555 - pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); 1559 + pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx); 1556 1560 pd->tunnel_idx++; 1557 1561 1558 - if (pd->tunnel == NULL) 1559 - break; 1562 + /* Only accept L2TPv2 tunnels */ 1563 + if (!pd->tunnel || pd->tunnel->version == 2) 1564 + return; 1560 1565 1561 - /* Ignore L2TPv3 tunnels */ 1562 - if (pd->tunnel->version < 3) 1563 - break; 1566 + l2tp_tunnel_dec_refcount(pd->tunnel); 1564 1567 } 1565 1568 } 1566 1569 ··· 1612 1609 1613 1610 static void pppol2tp_seq_stop(struct seq_file *p, void *v) 1614 1611 { 1615 - /* nothing to do */ 1612 + struct pppol2tp_seq_data *pd = v; 1613 + 1614 + if (!pd || pd == SEQ_START_TOKEN) 1615 + return; 1616 + 1617 + /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ 1618 + if (pd->tunnel) 1619 + l2tp_tunnel_dec_refcount(pd->tunnel); 1616 1620 } 1617 1621 1618 1622 static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+7
net/llc/af_llc.c
··· 189 189 { 190 190 struct sock *sk = sock->sk; 191 191 struct llc_sock *llc; 192 + struct llc_sap *sap; 192 193 193 194 if (unlikely(sk == NULL)) 194 195 goto out; ··· 200 199 llc->laddr.lsap, llc->daddr.lsap); 201 200 if (!llc_send_disc(sk)) 202 201 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 202 + sap = llc->sap; 203 + /* Hold this for release_sock(), so that llc_backlog_rcv() could still 204 + * use it. 205 + */ 206 + llc_sap_hold(sap); 203 207 if (!sock_flag(sk, SOCK_ZAPPED)) 204 208 llc_sap_remove_socket(llc->sap, sk); 205 209 release_sock(sk); 210 + llc_sap_put(sap); 206 211 if (llc->dev) 207 212 dev_put(llc->dev); 208 213 sock_put(sk);
+14 -9
net/packet/af_packet.c
··· 3008 3008 3009 3009 packet_flush_mclist(sk); 3010 3010 3011 + lock_sock(sk); 3011 3012 if (po->rx_ring.pg_vec) { 3012 3013 memset(&req_u, 0, sizeof(req_u)); 3013 3014 packet_set_ring(sk, &req_u, 1, 0); ··· 3018 3017 memset(&req_u, 0, sizeof(req_u)); 3019 3018 packet_set_ring(sk, &req_u, 1, 1); 3020 3019 } 3020 + release_sock(sk); 3021 3021 3022 3022 f = fanout_release(sk); 3023 3023 ··· 3645 3643 union tpacket_req_u req_u; 3646 3644 int len; 3647 3645 3646 + lock_sock(sk); 3648 3647 switch (po->tp_version) { 3649 3648 case TPACKET_V1: 3650 3649 case TPACKET_V2: ··· 3656 3653 len = sizeof(req_u.req3); 3657 3654 break; 3658 3655 } 3659 - if (optlen < len) 3660 - return -EINVAL; 3661 - if (copy_from_user(&req_u.req, optval, len)) 3662 - return -EFAULT; 3663 - return packet_set_ring(sk, &req_u, 0, 3664 - optname == PACKET_TX_RING); 3656 + if (optlen < len) { 3657 + ret = -EINVAL; 3658 + } else { 3659 + if (copy_from_user(&req_u.req, optval, len)) 3660 + ret = -EFAULT; 3661 + else 3662 + ret = packet_set_ring(sk, &req_u, 0, 3663 + optname == PACKET_TX_RING); 3664 + } 3665 + release_sock(sk); 3666 + return ret; 3665 3667 } 3666 3668 case PACKET_COPY_THRESH: 3667 3669 { ··· 4216 4208 /* Added to avoid minimal code churn */ 4217 4209 struct tpacket_req *req = &req_u->req; 4218 4210 4219 - lock_sock(sk); 4220 - 4221 4211 rb = tx_ring ? &po->tx_ring : &po->rx_ring; 4222 4212 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 4223 4213 ··· 4353 4347 if (pg_vec) 4354 4348 free_pg_vec(pg_vec, order, req->tp_block_nr); 4355 4349 out: 4356 - release_sock(sk); 4357 4350 return err; 4358 4351 } 4359 4352
+1
net/qrtr/qrtr.c
··· 1135 1135 1136 1136 MODULE_DESCRIPTION("Qualcomm IPC-router driver"); 1137 1137 MODULE_LICENSE("GPL v2"); 1138 + MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
+37 -37
net/sctp/ipv6.c
··· 556 556 addr->v6.sin6_scope_id = 0; 557 557 } 558 558 559 + static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, 560 + const union sctp_addr *addr2) 561 + { 562 + if (addr1->sa.sa_family != addr2->sa.sa_family) { 563 + if (addr1->sa.sa_family == AF_INET && 564 + addr2->sa.sa_family == AF_INET6 && 565 + ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && 566 + addr2->v6.sin6_addr.s6_addr32[3] == 567 + addr1->v4.sin_addr.s_addr) 568 + return 1; 569 + 570 + if (addr2->sa.sa_family == AF_INET && 571 + addr1->sa.sa_family == AF_INET6 && 572 + ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && 573 + addr1->v6.sin6_addr.s6_addr32[3] == 574 + addr2->v4.sin_addr.s_addr) 575 + return 1; 576 + 577 + return 0; 578 + } 579 + 580 + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) 581 + return 0; 582 + 583 + /* If this is a linklocal address, compare the scope_id. */ 584 + if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && 585 + addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && 586 + addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) 587 + return 0; 588 + 589 + return 1; 590 + } 591 + 559 592 /* Compare addresses exactly. 560 593 * v4-mapped-v6 is also in consideration. 
561 594 */ 562 595 static int sctp_v6_cmp_addr(const union sctp_addr *addr1, 563 596 const union sctp_addr *addr2) 564 597 { 565 - if (addr1->sa.sa_family != addr2->sa.sa_family) { 566 - if (addr1->sa.sa_family == AF_INET && 567 - addr2->sa.sa_family == AF_INET6 && 568 - ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { 569 - if (addr2->v6.sin6_port == addr1->v4.sin_port && 570 - addr2->v6.sin6_addr.s6_addr32[3] == 571 - addr1->v4.sin_addr.s_addr) 572 - return 1; 573 - } 574 - if (addr2->sa.sa_family == AF_INET && 575 - addr1->sa.sa_family == AF_INET6 && 576 - ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { 577 - if (addr1->v6.sin6_port == addr2->v4.sin_port && 578 - addr1->v6.sin6_addr.s6_addr32[3] == 579 - addr2->v4.sin_addr.s_addr) 580 - return 1; 581 - } 582 - return 0; 583 - } 584 - if (addr1->v6.sin6_port != addr2->v6.sin6_port) 585 - return 0; 586 - if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) 587 - return 0; 588 - /* If this is a linklocal address, compare the scope_id. */ 589 - if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { 590 - if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && 591 - (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { 592 - return 0; 593 - } 594 - } 595 - 596 - return 1; 598 + return __sctp_v6_cmp_addr(addr1, addr2) && 599 + addr1->v6.sin6_port == addr2->v6.sin6_port; 597 600 } 598 601 599 602 /* Initialize addr struct to INADDR_ANY. 
*/ ··· 878 875 const union sctp_addr *addr2, 879 876 struct sctp_sock *opt) 880 877 { 881 - struct sctp_af *af1, *af2; 882 878 struct sock *sk = sctp_opt2sk(opt); 879 + struct sctp_af *af1, *af2; 883 880 884 881 af1 = sctp_get_af_specific(addr1->sa.sa_family); 885 882 af2 = sctp_get_af_specific(addr2->sa.sa_family); ··· 895 892 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) 896 893 return 1; 897 894 898 - if (addr1->sa.sa_family != addr2->sa.sa_family) 899 - return 0; 900 - 901 - return af1->cmp_addr(addr1, addr2); 895 + return __sctp_v6_cmp_addr(addr1, addr2); 902 896 } 903 897 904 898 /* Verify that the provided sockaddr looks bindable. Common verification,
+4 -6
net/smc/af_smc.c
··· 1259 1259 rc = smc_close_shutdown_write(smc); 1260 1260 break; 1261 1261 case SHUT_RD: 1262 - if (sk->sk_state == SMC_LISTEN) 1263 - rc = smc_close_active(smc); 1264 - else 1265 - rc = 0; 1266 - /* nothing more to do because peer is not involved */ 1262 + rc = 0; 1263 + /* nothing more to do because peer is not involved */ 1267 1264 break; 1268 1265 } 1269 - rc1 = kernel_sock_shutdown(smc->clcsock, how); 1266 + if (smc->clcsock) 1267 + rc1 = kernel_sock_shutdown(smc->clcsock, how); 1270 1268 /* map sock_shutdown_cmd constants to sk_shutdown value range */ 1271 1269 sk->sk_shutdown |= how + 1; 1272 1270
+3 -4
net/strparser/strparser.c
··· 296 296 strp_start_timer(strp, timeo); 297 297 } 298 298 299 + stm->accum_len += cand_len; 299 300 strp->need_bytes = stm->strp.full_len - 300 301 stm->accum_len; 301 - stm->accum_len += cand_len; 302 302 stm->early_eaten = cand_len; 303 303 STRP_STATS_ADD(strp->stats.bytes, cand_len); 304 304 desc->count = 0; /* Stop reading socket */ ··· 321 321 /* Hurray, we have a new message! */ 322 322 cancel_delayed_work(&strp->msg_timer_work); 323 323 strp->skb_head = NULL; 324 + strp->need_bytes = 0; 324 325 STRP_STATS_INCR(strp->stats.msgs); 325 326 326 327 /* Give skb to upper layer */ ··· 411 410 return; 412 411 413 412 if (strp->need_bytes) { 414 - if (strp_peek_len(strp) >= strp->need_bytes) 415 - strp->need_bytes = 0; 416 - else 413 + if (strp_peek_len(strp) < strp->need_bytes) 417 414 return; 418 415 } 419 416
+1
net/sunrpc/rpc_pipe.c
··· 1375 1375 struct dentry *clnt_dir = pipe_dentry->d_parent; 1376 1376 struct dentry *gssd_dir = clnt_dir->d_parent; 1377 1377 1378 + dget(pipe_dentry); 1378 1379 __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry); 1379 1380 __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1); 1380 1381 __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
+1 -1
net/tipc/monitor.c
··· 777 777 778 778 ret = tipc_bearer_get_name(net, bearer_name, bearer_id); 779 779 if (ret || !mon) 780 - return -EINVAL; 780 + return 0; 781 781 782 782 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 783 783 NLM_F_MULTI, TIPC_NL_MON_GET);
+21 -13
net/tipc/name_table.c
··· 241 241 static struct publication *tipc_service_remove_publ(struct net *net, 242 242 struct tipc_service *sc, 243 243 u32 lower, u32 upper, 244 - u32 node, u32 key) 244 + u32 node, u32 key, 245 + struct service_range **rng) 245 246 { 246 247 struct tipc_subscription *sub, *tmp; 247 248 struct service_range *sr; ··· 276 275 277 276 list_del(&p->all_publ); 278 277 list_del(&p->local_publ); 279 - 280 - /* Remove service range item if this was its last publication */ 281 - if (list_empty(&sr->all_publ)) { 278 + if (list_empty(&sr->all_publ)) 282 279 last = true; 283 - rb_erase(&sr->tree_node, &sc->ranges); 284 - kfree(sr); 285 - } 286 280 287 281 /* Notify any waiting subscriptions */ 288 282 list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { 289 283 tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN, 290 284 p->port, p->node, p->scope, last); 291 285 } 286 + *rng = sr; 292 287 return p; 293 288 } 294 289 ··· 376 379 u32 node, u32 key) 377 380 { 378 381 struct tipc_service *sc = tipc_service_find(net, type); 382 + struct service_range *sr = NULL; 379 383 struct publication *p = NULL; 380 384 381 385 if (!sc) 382 386 return NULL; 383 387 384 388 spin_lock_bh(&sc->lock); 385 - p = tipc_service_remove_publ(net, sc, lower, upper, node, key); 389 + p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr); 390 + 391 + /* Remove service range item if this was its last publication */ 392 + if (sr && list_empty(&sr->all_publ)) { 393 + rb_erase(&sr->tree_node, &sc->ranges); 394 + kfree(sr); 395 + } 386 396 387 397 /* Delete service item if this no more publications and subscriptions */ 388 398 if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) { ··· 669 665 /** 670 666 * tipc_nametbl_subscribe - add a subscription object to the name table 671 667 */ 672 - void tipc_nametbl_subscribe(struct tipc_subscription *sub) 668 + bool tipc_nametbl_subscribe(struct tipc_subscription *sub) 673 669 { 674 670 struct name_table *nt 
= tipc_name_table(sub->net); 675 671 struct tipc_net *tn = tipc_net(sub->net); 676 672 struct tipc_subscr *s = &sub->evt.s; 677 673 u32 type = tipc_sub_read(s, seq.type); 678 674 struct tipc_service *sc; 675 + bool res = true; 679 676 680 677 spin_lock_bh(&tn->nametbl_lock); 681 678 sc = tipc_service_find(sub->net, type); ··· 690 685 pr_warn("Failed to subscribe for {%u,%u,%u}\n", type, 691 686 tipc_sub_read(s, seq.lower), 692 687 tipc_sub_read(s, seq.upper)); 688 + res = false; 693 689 } 694 690 spin_unlock_bh(&tn->nametbl_lock); 691 + return res; 695 692 } 696 693 697 694 /** ··· 751 744 static void tipc_service_delete(struct net *net, struct tipc_service *sc) 752 745 { 753 746 struct service_range *sr, *tmpr; 754 - struct publication *p, *tmpb; 747 + struct publication *p, *tmp; 755 748 756 749 spin_lock_bh(&sc->lock); 757 750 rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { 758 - list_for_each_entry_safe(p, tmpb, 759 - &sr->all_publ, all_publ) { 751 + list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) { 760 752 tipc_service_remove_publ(net, sc, p->lower, p->upper, 761 - p->node, p->key); 753 + p->node, p->key, &sr); 762 754 kfree_rcu(p, rcu); 763 755 } 756 + rb_erase(&sr->tree_node, &sc->ranges); 757 + kfree(sr); 764 758 } 765 759 hlist_del_init_rcu(&sc->service_list); 766 760 spin_unlock_bh(&sc->lock);
+1 -1
net/tipc/name_table.h
··· 126 126 struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, 127 127 u32 lower, u32 upper, 128 128 u32 node, u32 key); 129 - void tipc_nametbl_subscribe(struct tipc_subscription *s); 129 + bool tipc_nametbl_subscribe(struct tipc_subscription *s); 130 130 void tipc_nametbl_unsubscribe(struct tipc_subscription *s); 131 131 int tipc_nametbl_init(struct net *net); 132 132 void tipc_nametbl_stop(struct net *net);
+2
net/tipc/net.c
··· 252 252 u64 *w0 = (u64 *)&node_id[0]; 253 253 u64 *w1 = (u64 *)&node_id[8]; 254 254 255 + if (!attrs[TIPC_NLA_NET_NODEID_W1]) 256 + return -EINVAL; 255 257 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]); 256 258 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]); 257 259 tipc_net_init(net, node_id, 0);
+4 -1
net/tipc/netlink.c
··· 79 79 80 80 const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { 81 81 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, 82 - [TIPC_NLA_NET_ID] = { .type = NLA_U32 } 82 + [TIPC_NLA_NET_ID] = { .type = NLA_U32 }, 83 + [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }, 84 + [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 }, 85 + [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 }, 83 86 }; 84 87 85 88 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+4 -7
net/tipc/node.c
··· 2232 2232 struct net *net = sock_net(skb->sk); 2233 2233 u32 prev_bearer = cb->args[0]; 2234 2234 struct tipc_nl_msg msg; 2235 + int bearer_id; 2235 2236 int err; 2236 - int i; 2237 2237 2238 2238 if (prev_bearer == MAX_BEARERS) 2239 2239 return 0; ··· 2243 2243 msg.seq = cb->nlh->nlmsg_seq; 2244 2244 2245 2245 rtnl_lock(); 2246 - for (i = prev_bearer; i < MAX_BEARERS; i++) { 2247 - prev_bearer = i; 2246 + for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2248 2247 err = __tipc_nl_add_monitor(net, &msg, prev_bearer); 2249 2248 if (err) 2250 - goto out; 2249 + break; 2251 2250 } 2252 - 2253 - out: 2254 2251 rtnl_unlock(); 2255 - cb->args[0] = prev_bearer; 2252 + cb->args[0] = bearer_id; 2256 2253 2257 2254 return skb->len; 2258 2255 }
+3 -1
net/tipc/socket.c
··· 1278 1278 struct tipc_msg *hdr = &tsk->phdr; 1279 1279 struct tipc_name_seq *seq; 1280 1280 struct sk_buff_head pkts; 1281 - u32 dnode, dport; 1281 + u32 dport, dnode = 0; 1282 1282 u32 type, inst; 1283 1283 int mtu, rc; 1284 1284 ··· 1348 1348 msg_set_destnode(hdr, dnode); 1349 1349 msg_set_destport(hdr, dest->addr.id.ref); 1350 1350 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 1351 + } else { 1352 + return -EINVAL; 1351 1353 } 1352 1354 1353 1355 /* Block or return if destination link is congested */
+4 -1
net/tipc/subscr.c
··· 153 153 memcpy(&sub->evt.s, s, sizeof(*s)); 154 154 spin_lock_init(&sub->lock); 155 155 kref_init(&sub->kref); 156 - tipc_nametbl_subscribe(sub); 156 + if (!tipc_nametbl_subscribe(sub)) { 157 + kfree(sub); 158 + return NULL; 159 + } 157 160 timer_setup(&sub->timer, tipc_sub_timeout, 0); 158 161 timeout = tipc_sub_read(&sub->evt.s, timeout); 159 162 if (timeout != TIPC_WAIT_FOREVER)
+9 -1
net/tls/tls_sw.c
··· 41 41 #include <net/strparser.h> 42 42 #include <net/tls.h> 43 43 44 + #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE 45 + 44 46 static int tls_do_decryption(struct sock *sk, 45 47 struct scatterlist *sgin, 46 48 struct scatterlist *sgout, ··· 675 673 { 676 674 struct tls_context *tls_ctx = tls_get_ctx(sk); 677 675 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 678 - char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size]; 676 + char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; 679 677 struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; 680 678 struct scatterlist *sgin = &sgin_arr[0]; 681 679 struct strp_msg *rxm = strp_msg(skb); ··· 1092 1090 break; 1093 1091 } 1094 1092 default: 1093 + rc = -EINVAL; 1094 + goto free_priv; 1095 + } 1096 + 1097 + /* Sanity-check the IV size for stack allocations. */ 1098 + if (iv_size > MAX_IV_SIZE) { 1095 1099 rc = -EINVAL; 1096 1100 goto free_priv; 1097 1101 }
+6
net/vmw_vsock/af_vsock.c
··· 2018 2018 } 2019 2019 EXPORT_SYMBOL_GPL(vsock_core_get_transport); 2020 2020 2021 + static void __exit vsock_exit(void) 2022 + { 2023 + /* Do nothing. This function makes this module removable. */ 2024 + } 2025 + 2021 2026 module_init(vsock_init_tables); 2027 + module_exit(vsock_exit); 2022 2028 2023 2029 MODULE_AUTHOR("VMware, Inc."); 2024 2030 MODULE_DESCRIPTION("VMware Virtual Socket Family");
+32 -11
samples/livepatch/livepatch-shadow-fix1.c
··· 56 56 unsigned long jiffies_expire; 57 57 }; 58 58 59 + /* 60 + * The constructor makes more sense together with klp_shadow_get_or_alloc(). 61 + * In this example, it would be safe to assign the pointer also to the shadow 62 + * variable returned by klp_shadow_alloc(). But we wanted to show the more 63 + * complicated use of the API. 64 + */ 65 + static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data) 66 + { 67 + void **shadow_leak = shadow_data; 68 + void *leak = ctor_data; 69 + 70 + *shadow_leak = leak; 71 + return 0; 72 + } 73 + 59 74 struct dummy *livepatch_fix1_dummy_alloc(void) 60 75 { 61 76 struct dummy *d; ··· 89 74 * pointer to handle resource release. 90 75 */ 91 76 leak = kzalloc(sizeof(int), GFP_KERNEL); 92 - klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL); 77 + klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL, 78 + shadow_leak_ctor, leak); 93 79 94 80 pr_info("%s: dummy @ %p, expires @ %lx\n", 95 81 __func__, d, d->jiffies_expire); ··· 98 82 return d; 99 83 } 100 84 85 + static void livepatch_fix1_dummy_leak_dtor(void *obj, void *shadow_data) 86 + { 87 + void *d = obj; 88 + void **shadow_leak = shadow_data; 89 + 90 + kfree(*shadow_leak); 91 + pr_info("%s: dummy @ %p, prevented leak @ %p\n", 92 + __func__, d, *shadow_leak); 93 + } 94 + 101 95 void livepatch_fix1_dummy_free(struct dummy *d) 102 96 { 103 - void **shadow_leak, *leak; 97 + void **shadow_leak; 104 98 105 99 /* 106 100 * Patch: fetch the saved SV_LEAK shadow variable, detach and ··· 119 93 * was loaded.) 
120 94 */ 121 95 shadow_leak = klp_shadow_get(d, SV_LEAK); 122 - if (shadow_leak) { 123 - leak = *shadow_leak; 124 - klp_shadow_free(d, SV_LEAK); 125 - kfree(leak); 126 - pr_info("%s: dummy @ %p, prevented leak @ %p\n", 127 - __func__, d, leak); 128 - } else { 96 + if (shadow_leak) 97 + klp_shadow_free(d, SV_LEAK, livepatch_fix1_dummy_leak_dtor); 98 + else 129 99 pr_info("%s: dummy @ %p leaked!\n", __func__, d); 130 - } 131 100 132 101 kfree(d); 133 102 } ··· 168 147 static void livepatch_shadow_fix1_exit(void) 169 148 { 170 149 /* Cleanup any existing SV_LEAK shadow variables */ 171 - klp_shadow_free_all(SV_LEAK); 150 + klp_shadow_free_all(SV_LEAK, livepatch_fix1_dummy_leak_dtor); 172 151 173 152 WARN_ON(klp_unregister_patch(&patch)); 174 153 }
+18 -15
samples/livepatch/livepatch-shadow-fix2.c
··· 53 53 bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies) 54 54 { 55 55 int *shadow_count; 56 - int count; 57 56 58 57 /* 59 58 * Patch: handle in-flight dummy structures, if they do not 60 59 * already have a SV_COUNTER shadow variable, then attach a 61 60 * new one. 62 61 */ 63 - count = 0; 64 62 shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER, 65 - &count, sizeof(count), 66 - GFP_NOWAIT); 63 + sizeof(*shadow_count), GFP_NOWAIT, 64 + NULL, NULL); 67 65 if (shadow_count) 68 66 *shadow_count += 1; 69 67 70 68 return time_after(jiffies, d->jiffies_expire); 71 69 } 72 70 71 + static void livepatch_fix2_dummy_leak_dtor(void *obj, void *shadow_data) 72 + { 73 + void *d = obj; 74 + void **shadow_leak = shadow_data; 75 + 76 + kfree(*shadow_leak); 77 + pr_info("%s: dummy @ %p, prevented leak @ %p\n", 78 + __func__, d, *shadow_leak); 79 + } 80 + 73 81 void livepatch_fix2_dummy_free(struct dummy *d) 74 82 { 75 - void **shadow_leak, *leak; 83 + void **shadow_leak; 76 84 int *shadow_count; 77 85 78 86 /* Patch: copy the memory leak patch from the fix1 module. 
*/ 79 87 shadow_leak = klp_shadow_get(d, SV_LEAK); 80 - if (shadow_leak) { 81 - leak = *shadow_leak; 82 - klp_shadow_free(d, SV_LEAK); 83 - kfree(leak); 84 - pr_info("%s: dummy @ %p, prevented leak @ %p\n", 85 - __func__, d, leak); 86 - } else { 88 + if (shadow_leak) 89 + klp_shadow_free(d, SV_LEAK, livepatch_fix2_dummy_leak_dtor); 90 + else 87 91 pr_info("%s: dummy @ %p leaked!\n", __func__, d); 88 - } 89 92 90 93 /* 91 94 * Patch: fetch the SV_COUNTER shadow variable and display ··· 98 95 if (shadow_count) { 99 96 pr_info("%s: dummy @ %p, check counter = %d\n", 100 97 __func__, d, *shadow_count); 101 - klp_shadow_free(d, SV_COUNTER); 98 + klp_shadow_free(d, SV_COUNTER, NULL); 102 99 } 103 100 104 101 kfree(d); ··· 145 142 static void livepatch_shadow_fix2_exit(void) 146 143 { 147 144 /* Cleanup any existing SV_COUNTER shadow variables */ 148 - klp_shadow_free_all(SV_COUNTER); 145 + klp_shadow_free_all(SV_COUNTER, NULL); 149 146 150 147 WARN_ON(klp_unregister_patch(&patch)); 151 148 }
+1 -1
sound/core/control.c
··· 1492 1492 int op_flag) 1493 1493 { 1494 1494 struct snd_ctl_tlv header; 1495 - unsigned int *container; 1495 + unsigned int __user *container; 1496 1496 unsigned int container_size; 1497 1497 struct snd_kcontrol *kctl; 1498 1498 struct snd_ctl_elem_id id;
+4 -3
sound/core/pcm_compat.c
··· 27 27 s32 __user *src) 28 28 { 29 29 snd_pcm_sframes_t delay; 30 + int err; 30 31 31 - delay = snd_pcm_delay(substream); 32 - if (delay < 0) 33 - return delay; 32 + err = snd_pcm_delay(substream, &delay); 33 + if (err) 34 + return err; 34 35 if (put_user(delay, src)) 35 36 return -EFAULT; 36 37 return 0;
+15 -15
sound/core/pcm_native.c
··· 2654 2654 return err; 2655 2655 } 2656 2656 2657 - static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) 2657 + static int snd_pcm_delay(struct snd_pcm_substream *substream, 2658 + snd_pcm_sframes_t *delay) 2658 2659 { 2659 2660 int err; 2660 2661 snd_pcm_sframes_t n = 0; ··· 2665 2664 if (!err) 2666 2665 n = snd_pcm_calc_delay(substream); 2667 2666 snd_pcm_stream_unlock_irq(substream); 2668 - return err < 0 ? err : n; 2667 + if (!err) 2668 + *delay = n; 2669 + return err; 2669 2670 } 2670 2671 2671 2672 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, ··· 2710 2707 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2711 2708 sync_ptr.s.status.tstamp = status->tstamp; 2712 2709 sync_ptr.s.status.suspended_state = status->suspended_state; 2710 + sync_ptr.s.status.audio_tstamp = status->audio_tstamp; 2713 2711 snd_pcm_stream_unlock_irq(substream); 2714 2712 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2715 2713 return -EFAULT; ··· 2870 2866 return snd_pcm_hwsync(substream); 2871 2867 case SNDRV_PCM_IOCTL_DELAY: 2872 2868 { 2873 - snd_pcm_sframes_t delay = snd_pcm_delay(substream); 2869 + snd_pcm_sframes_t delay; 2874 2870 snd_pcm_sframes_t __user *res = arg; 2871 + int err; 2875 2872 2876 - if (delay < 0) 2877 - return delay; 2873 + err = snd_pcm_delay(substream, &delay); 2874 + if (err) 2875 + return err; 2878 2876 if (put_user(delay, res)) 2879 2877 return -EFAULT; 2880 2878 return 0; ··· 2964 2958 case SNDRV_PCM_IOCTL_DROP: 2965 2959 return snd_pcm_drop(substream); 2966 2960 case SNDRV_PCM_IOCTL_DELAY: 2967 - { 2968 - result = snd_pcm_delay(substream); 2969 - if (result < 0) 2970 - return result; 2971 - *frames = result; 2972 - return 0; 2973 - } 2961 + return snd_pcm_delay(substream, frames); 2974 2962 default: 2975 2963 return -EINVAL; 2976 2964 } ··· 3148 3148 /* 3149 3149 * mmap status record 3150 3150 */ 3151 - static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3151 + static vm_fault_t 
snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3152 3152 { 3153 3153 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3154 3154 struct snd_pcm_runtime *runtime; ··· 3184 3184 /* 3185 3185 * mmap control record 3186 3186 */ 3187 - static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3187 + static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3188 3188 { 3189 3189 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3190 3190 struct snd_pcm_runtime *runtime; ··· 3273 3273 /* 3274 3274 * fault callback for mmapping a RAM page 3275 3275 */ 3276 - static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3276 + static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3277 3277 { 3278 3278 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3279 3279 struct snd_pcm_runtime *runtime;
+12 -6
sound/core/rawmidi_compat.c
··· 36 36 struct snd_rawmidi_params params; 37 37 unsigned int val; 38 38 39 - if (rfile->output == NULL) 40 - return -EINVAL; 41 39 if (get_user(params.stream, &src->stream) || 42 40 get_user(params.buffer_size, &src->buffer_size) || 43 41 get_user(params.avail_min, &src->avail_min) || ··· 44 46 params.no_active_sensing = val; 45 47 switch (params.stream) { 46 48 case SNDRV_RAWMIDI_STREAM_OUTPUT: 49 + if (!rfile->output) 50 + return -EINVAL; 47 51 return snd_rawmidi_output_params(rfile->output, &params); 48 52 case SNDRV_RAWMIDI_STREAM_INPUT: 53 + if (!rfile->input) 54 + return -EINVAL; 49 55 return snd_rawmidi_input_params(rfile->input, &params); 50 56 } 51 57 return -EINVAL; ··· 69 67 int err; 70 68 struct snd_rawmidi_status status; 71 69 72 - if (rfile->output == NULL) 73 - return -EINVAL; 74 70 if (get_user(status.stream, &src->stream)) 75 71 return -EFAULT; 76 72 77 73 switch (status.stream) { 78 74 case SNDRV_RAWMIDI_STREAM_OUTPUT: 75 + if (!rfile->output) 76 + return -EINVAL; 79 77 err = snd_rawmidi_output_status(rfile->output, &status); 80 78 break; 81 79 case SNDRV_RAWMIDI_STREAM_INPUT: 80 + if (!rfile->input) 81 + return -EINVAL; 82 82 err = snd_rawmidi_input_status(rfile->input, &status); 83 83 break; 84 84 default: ··· 116 112 int err; 117 113 struct snd_rawmidi_status status; 118 114 119 - if (rfile->output == NULL) 120 - return -EINVAL; 121 115 if (get_user(status.stream, &src->stream)) 122 116 return -EFAULT; 123 117 124 118 switch (status.stream) { 125 119 case SNDRV_RAWMIDI_STREAM_OUTPUT: 120 + if (!rfile->output) 121 + return -EINVAL; 126 122 err = snd_rawmidi_output_status(rfile->output, &status); 127 123 break; 128 124 case SNDRV_RAWMIDI_STREAM_INPUT: 125 + if (!rfile->input) 126 + return -EINVAL; 129 127 err = snd_rawmidi_input_status(rfile->input, &status); 130 128 break; 131 129 default:
+9 -6
sound/core/seq/oss/seq_oss_event.c
··· 26 26 #include <sound/seq_oss_legacy.h> 27 27 #include "seq_oss_readq.h" 28 28 #include "seq_oss_writeq.h" 29 + #include <linux/nospec.h> 29 30 30 31 31 32 /* ··· 288 287 { 289 288 struct seq_oss_synthinfo *info; 290 289 291 - if (!snd_seq_oss_synth_is_valid(dp, dev)) 290 + info = snd_seq_oss_synth_info(dp, dev); 291 + if (!info) 292 292 return -ENXIO; 293 293 294 - info = &dp->synths[dev]; 295 294 switch (info->arg.event_passing) { 296 295 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 297 296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { ··· 299 298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 300 299 } 301 300 301 + ch = array_index_nospec(ch, info->nr_voices); 302 302 if (note == 255 && info->ch[ch].note >= 0) { 303 303 /* volume control */ 304 304 int type; ··· 349 347 { 350 348 struct seq_oss_synthinfo *info; 351 349 352 - if (!snd_seq_oss_synth_is_valid(dp, dev)) 350 + info = snd_seq_oss_synth_info(dp, dev); 351 + if (!info) 353 352 return -ENXIO; 354 353 355 - info = &dp->synths[dev]; 356 354 switch (info->arg.event_passing) { 357 355 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 358 356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { ··· 360 358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 361 359 } 362 360 361 + ch = array_index_nospec(ch, info->nr_voices); 363 362 if (info->ch[ch].note >= 0) { 364 363 note = info->ch[ch].note; 365 364 info->ch[ch].vel = 0; ··· 384 381 static int 385 382 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) 386 383 { 387 - if (! snd_seq_oss_synth_is_valid(dp, dev)) 384 + if (!snd_seq_oss_synth_info(dp, dev)) 388 385 return -ENXIO; 389 386 390 387 ev->type = type; ··· 402 399 static int 403 400 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) 404 401 { 405 - if (! 
snd_seq_oss_synth_is_valid(dp, dev)) 402 + if (!snd_seq_oss_synth_info(dp, dev)) 406 403 return -ENXIO; 407 404 408 405 ev->type = type;
+2
sound/core/seq/oss/seq_oss_midi.c
··· 29 29 #include "../seq_lock.h" 30 30 #include <linux/init.h> 31 31 #include <linux/slab.h> 32 + #include <linux/nospec.h> 32 33 33 34 34 35 /* ··· 316 315 { 317 316 if (dev < 0 || dev >= dp->max_mididev) 318 317 return NULL; 318 + dev = array_index_nospec(dev, dp->max_mididev); 319 319 return get_mdev(dev); 320 320 } 321 321
+49 -36
sound/core/seq/oss/seq_oss_synth.c
··· 26 26 #include <linux/init.h> 27 27 #include <linux/module.h> 28 28 #include <linux/slab.h> 29 + #include <linux/nospec.h> 29 30 30 31 /* 31 32 * constants ··· 340 339 dp->max_synthdev = 0; 341 340 } 342 341 343 - /* 344 - * check if the specified device is MIDI mapped device 345 - */ 346 - static int 347 - is_midi_dev(struct seq_oss_devinfo *dp, int dev) 342 + static struct seq_oss_synthinfo * 343 + get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev) 348 344 { 349 345 if (dev < 0 || dev >= dp->max_synthdev) 350 - return 0; 351 - if (dp->synths[dev].is_midi) 352 - return 1; 353 - return 0; 346 + return NULL; 347 + dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS); 348 + return &dp->synths[dev]; 354 349 } 355 350 356 351 /* ··· 356 359 get_synthdev(struct seq_oss_devinfo *dp, int dev) 357 360 { 358 361 struct seq_oss_synth *rec; 359 - if (dev < 0 || dev >= dp->max_synthdev) 362 + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); 363 + 364 + if (!info) 360 365 return NULL; 361 - if (! dp->synths[dev].opened) 366 + if (!info->opened) 362 367 return NULL; 363 - if (dp->synths[dev].is_midi) 364 - return &midi_synth_dev; 365 - if ((rec = get_sdev(dev)) == NULL) 366 - return NULL; 368 + if (info->is_midi) { 369 + rec = &midi_synth_dev; 370 + snd_use_lock_use(&rec->use_lock); 371 + } else { 372 + rec = get_sdev(dev); 373 + if (!rec) 374 + return NULL; 375 + } 367 376 if (! rec->opened) { 368 377 snd_use_lock_free(&rec->use_lock); 369 378 return NULL; ··· 405 402 struct seq_oss_synth *rec; 406 403 struct seq_oss_synthinfo *info; 407 404 408 - if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) 409 - return; 410 - info = &dp->synths[dev]; 411 - if (! 
info->opened) 405 + info = get_synthinfo_nospec(dp, dev); 406 + if (!info || !info->opened) 412 407 return; 413 408 if (info->sysex) 414 409 info->sysex->len = 0; /* reset sysex */ ··· 455 454 const char __user *buf, int p, int c) 456 455 { 457 456 struct seq_oss_synth *rec; 457 + struct seq_oss_synthinfo *info; 458 458 int rc; 459 459 460 - if (dev < 0 || dev >= dp->max_synthdev) 460 + info = get_synthinfo_nospec(dp, dev); 461 + if (!info) 461 462 return -ENXIO; 462 463 463 - if (is_midi_dev(dp, dev)) 464 + if (info->is_midi) 464 465 return 0; 465 466 if ((rec = get_synthdev(dp, dev)) == NULL) 466 467 return -ENXIO; ··· 470 467 if (rec->oper.load_patch == NULL) 471 468 rc = -ENXIO; 472 469 else 473 - rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 470 + rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c); 474 471 snd_use_lock_free(&rec->use_lock); 475 472 return rc; 476 473 } 477 474 478 475 /* 479 - * check if the device is valid synth device 476 + * check if the device is valid synth device and return the synth info 480 477 */ 481 - int 482 - snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) 478 + struct seq_oss_synthinfo * 479 + snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev) 483 480 { 484 481 struct seq_oss_synth *rec; 482 + 485 483 rec = get_synthdev(dp, dev); 486 484 if (rec) { 487 485 snd_use_lock_free(&rec->use_lock); 488 - return 1; 486 + return get_synthinfo_nospec(dp, dev); 489 487 } 490 - return 0; 488 + return NULL; 491 489 } 492 490 493 491 ··· 503 499 int i, send; 504 500 unsigned char *dest; 505 501 struct seq_oss_synth_sysex *sysex; 502 + struct seq_oss_synthinfo *info; 506 503 507 - if (! 
snd_seq_oss_synth_is_valid(dp, dev)) 504 + info = snd_seq_oss_synth_info(dp, dev); 505 + if (!info) 508 506 return -ENXIO; 509 507 510 - sysex = dp->synths[dev].sysex; 508 + sysex = info->sysex; 511 509 if (sysex == NULL) { 512 510 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); 513 511 if (sysex == NULL) 514 512 return -ENOMEM; 515 - dp->synths[dev].sysex = sysex; 513 + info->sysex = sysex; 516 514 } 517 515 518 516 send = 0; ··· 559 553 int 560 554 snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) 561 555 { 562 - if (! snd_seq_oss_synth_is_valid(dp, dev)) 556 + struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev); 557 + 558 + if (!info) 563 559 return -EINVAL; 564 - snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, 565 - dp->synths[dev].arg.addr.port); 560 + snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client, 561 + info->arg.addr.port); 566 562 return 0; 567 563 } 568 564 ··· 576 568 snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) 577 569 { 578 570 struct seq_oss_synth *rec; 571 + struct seq_oss_synthinfo *info; 579 572 int rc; 580 573 581 - if (is_midi_dev(dp, dev)) 574 + info = get_synthinfo_nospec(dp, dev); 575 + if (!info || info->is_midi) 582 576 return -ENXIO; 583 577 if ((rec = get_synthdev(dp, dev)) == NULL) 584 578 return -ENXIO; 585 579 if (rec->oper.ioctl == NULL) 586 580 rc = -ENXIO; 587 581 else 588 - rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); 582 + rc = rec->oper.ioctl(&info->arg, cmd, addr); 589 583 snd_use_lock_free(&rec->use_lock); 590 584 return rc; 591 585 } ··· 599 589 int 600 590 snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) 601 591 { 602 - if (! 
snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) 592 + struct seq_oss_synthinfo *info; 593 + 594 + info = snd_seq_oss_synth_info(dp, dev); 595 + if (!info || info->is_midi) 603 596 return -ENXIO; 604 597 ev->type = SNDRV_SEQ_EVENT_OSS; 605 598 memcpy(ev->data.raw8.d, data, 8);
+2 -1
sound/core/seq/oss/seq_oss_synth.h
··· 37 37 void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); 38 38 int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, 39 39 const char __user *buf, int p, int c); 40 - int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); 40 + struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, 41 + int dev); 41 42 int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, 42 43 struct snd_seq_event *ev); 43 44 int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
+5 -2
sound/drivers/opl3/opl3_synth.c
··· 21 21 22 22 #include <linux/slab.h> 23 23 #include <linux/export.h> 24 + #include <linux/nospec.h> 24 25 #include <sound/opl3.h> 25 26 #include <sound/asound_fm.h> 26 27 ··· 449 448 { 450 449 unsigned short reg_side; 451 450 unsigned char op_offset; 452 - unsigned char voice_offset; 451 + unsigned char voice_offset, voice_op; 453 452 454 453 unsigned short opl3_reg; 455 454 unsigned char reg_val; ··· 474 473 voice_offset = voice->voice - MAX_OPL2_VOICES; 475 474 } 476 475 /* Get register offset of operator */ 477 - op_offset = snd_opl3_regmap[voice_offset][voice->op]; 476 + voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES); 477 + voice_op = array_index_nospec(voice->op, 4); 478 + op_offset = snd_opl3_regmap[voice_offset][voice_op]; 478 479 479 480 reg_val = 0x00; 480 481 /* Set amplitude modulation (tremolo) effect */
+1 -1
sound/firewire/dice/dice.c
··· 14 14 #define OUI_WEISS 0x001c6a 15 15 #define OUI_LOUD 0x000ff2 16 16 #define OUI_FOCUSRITE 0x00130e 17 - #define OUI_TCELECTRONIC 0x001486 17 + #define OUI_TCELECTRONIC 0x000166 18 18 19 19 #define DICE_CATEGORY_ID 0x04 20 20 #define WEISS_CATEGORY_ID 0x00
+9 -4
sound/pci/asihpi/hpimsginit.c
··· 23 23 24 24 #include "hpi_internal.h" 25 25 #include "hpimsginit.h" 26 + #include <linux/nospec.h> 26 27 27 28 /* The actual message size for each object type */ 28 29 static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; ··· 40 39 { 41 40 u16 size; 42 41 43 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 42 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { 43 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); 44 44 size = msg_size[object]; 45 - else 45 + } else { 46 46 size = sizeof(*phm); 47 + } 47 48 48 49 memset(phm, 0, size); 49 50 phm->size = size; ··· 69 66 { 70 67 u16 size; 71 68 72 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 69 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { 70 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); 73 71 size = res_size[object]; 74 - else 72 + } else { 75 73 size = sizeof(*phr); 74 + } 76 75 77 76 memset(phr, 0, sizeof(*phr)); 78 77 phr->size = size;
+3 -1
sound/pci/asihpi/hpioctl.c
··· 33 33 #include <linux/stringify.h> 34 34 #include <linux/module.h> 35 35 #include <linux/vmalloc.h> 36 + #include <linux/nospec.h> 36 37 37 38 #ifdef MODULE_FIRMWARE 38 39 MODULE_FIRMWARE("asihpi/dsp5000.bin"); ··· 187 186 struct hpi_adapter *pa = NULL; 188 187 189 188 if (hm->h.adapter_index < ARRAY_SIZE(adapters)) 190 - pa = &adapters[hm->h.adapter_index]; 189 + pa = &adapters[array_index_nospec(hm->h.adapter_index, 190 + ARRAY_SIZE(adapters))]; 191 191 192 192 if (!pa || !pa->adapter || !pa->adapter->type) { 193 193 hpi_init_response(&hr->r0, hm->h.object,
+11 -1
sound/pci/hda/hda_hwdep.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/compat.h> 24 + #include <linux/nospec.h> 24 25 #include <sound/core.h> 25 26 #include "hda_codec.h" 26 27 #include "hda_local.h" ··· 52 51 53 52 if (get_user(verb, &arg->verb)) 54 53 return -EFAULT; 55 - res = get_wcaps(codec, verb >> 24); 54 + /* open-code get_wcaps(verb>>24) with nospec */ 55 + verb >>= 24; 56 + if (verb < codec->core.start_nid || 57 + verb >= codec->core.start_nid + codec->core.num_nodes) { 58 + res = 0; 59 + } else { 60 + verb -= codec->core.start_nid; 61 + verb = array_index_nospec(verb, codec->core.num_nodes); 62 + res = codec->wcaps[verb]; 63 + } 56 64 if (put_user(res, &arg->res)) 57 65 return -EFAULT; 58 66 return 0;
+2 -1
sound/pci/hda/hda_intel.c
··· 1647 1647 */ 1648 1648 u8 val; 1649 1649 pci_read_config_byte(chip->pci, 0x42, &val); 1650 - if (!(val & 0x80) && chip->pci->revision == 0x30) 1650 + if (!(val & 0x80) && (chip->pci->revision == 0x30 || 1651 + chip->pci->revision == 0x20)) 1651 1652 snoop = false; 1652 1653 } 1653 1654
+8 -1
sound/pci/hda/patch_hdmi.c
··· 1383 1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx); 1384 1384 else 1385 1385 return; 1386 + if (!pcm->pcm) 1387 + return; 1386 1388 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use)) 1387 1389 return; 1388 1390 ··· 2153 2151 int dev, err; 2154 2152 int pin_idx, pcm_idx; 2155 2153 2156 - 2157 2154 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) { 2155 + if (!get_pcm_rec(spec, pcm_idx)->pcm) { 2156 + /* no PCM: mark this for skipping permanently */ 2157 + set_bit(pcm_idx, &spec->pcm_bitmap); 2158 + continue; 2159 + } 2160 + 2158 2161 err = generic_hdmi_build_jack(codec, pcm_idx); 2159 2162 if (err < 0) 2160 2163 return err;
+8
sound/pci/hda/patch_realtek.c
··· 331 331 /* fallthrough */ 332 332 case 0x10ec0215: 333 333 case 0x10ec0233: 334 + case 0x10ec0235: 334 335 case 0x10ec0236: 335 336 case 0x10ec0255: 336 337 case 0x10ec0256: ··· 6371 6370 { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ 6372 6371 { } 6373 6372 }, 6373 + .chained = true, 6374 + .chain_id = ALC269_FIXUP_HEADSET_MIC 6374 6375 }, 6375 6376 }; 6376 6377 ··· 6576 6573 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6577 6574 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6578 6575 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6576 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6577 + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6579 6578 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6580 6579 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6581 6580 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), ··· 7162 7157 case 0x10ec0298: 7163 7158 spec->codec_variant = ALC269_TYPE_ALC298; 7164 7159 break; 7160 + case 0x10ec0235: 7165 7161 case 0x10ec0255: 7166 7162 spec->codec_variant = ALC269_TYPE_ALC255; 7163 + spec->shutup = alc256_shutup; 7164 + spec->init_hook = alc256_init; 7167 7165 break; 7168 7166 case 0x10ec0236: 7169 7167 case 0x10ec0256:
+14 -10
sound/pci/rme9652/hdspm.c
··· 137 137 #include <linux/pci.h> 138 138 #include <linux/math64.h> 139 139 #include <linux/io.h> 140 + #include <linux/nospec.h> 140 141 141 142 #include <sound/core.h> 142 143 #include <sound/control.h> ··· 5699 5698 struct snd_pcm_channel_info *info) 5700 5699 { 5701 5700 struct hdspm *hdspm = snd_pcm_substream_chip(substream); 5701 + unsigned int channel = info->channel; 5702 5702 5703 5703 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 5704 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { 5704 + if (snd_BUG_ON(channel >= hdspm->max_channels_out)) { 5705 5705 dev_info(hdspm->card->dev, 5706 5706 "snd_hdspm_channel_info: output channel out of range (%d)\n", 5707 - info->channel); 5707 + channel); 5708 5708 return -EINVAL; 5709 5709 } 5710 5710 5711 - if (hdspm->channel_map_out[info->channel] < 0) { 5711 + channel = array_index_nospec(channel, hdspm->max_channels_out); 5712 + if (hdspm->channel_map_out[channel] < 0) { 5712 5713 dev_info(hdspm->card->dev, 5713 5714 "snd_hdspm_channel_info: output channel %d mapped out\n", 5714 - info->channel); 5715 + channel); 5715 5716 return -EINVAL; 5716 5717 } 5717 5718 5718 - info->offset = hdspm->channel_map_out[info->channel] * 5719 + info->offset = hdspm->channel_map_out[channel] * 5719 5720 HDSPM_CHANNEL_BUFFER_BYTES; 5720 5721 } else { 5721 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { 5722 + if (snd_BUG_ON(channel >= hdspm->max_channels_in)) { 5722 5723 dev_info(hdspm->card->dev, 5723 5724 "snd_hdspm_channel_info: input channel out of range (%d)\n", 5724 - info->channel); 5725 + channel); 5725 5726 return -EINVAL; 5726 5727 } 5727 5728 5728 - if (hdspm->channel_map_in[info->channel] < 0) { 5729 + channel = array_index_nospec(channel, hdspm->max_channels_in); 5730 + if (hdspm->channel_map_in[channel] < 0) { 5729 5731 dev_info(hdspm->card->dev, 5730 5732 "snd_hdspm_channel_info: input channel %d mapped out\n", 5731 - info->channel); 5733 + channel); 5732 5734 return -EINVAL; 5733 5735 
} 5734 5736 5735 - info->offset = hdspm->channel_map_in[info->channel] * 5737 + info->offset = hdspm->channel_map_in[channel] * 5736 5738 HDSPM_CHANNEL_BUFFER_BYTES; 5737 5739 } 5738 5740
+4 -2
sound/pci/rme9652/rme9652.c
··· 26 26 #include <linux/pci.h> 27 27 #include <linux/module.h> 28 28 #include <linux/io.h> 29 + #include <linux/nospec.h> 29 30 30 31 #include <sound/core.h> 31 32 #include <sound/control.h> ··· 2072 2071 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) 2073 2072 return -EINVAL; 2074 2073 2075 - if ((chn = rme9652->channel_map[info->channel]) < 0) { 2074 + chn = rme9652->channel_map[array_index_nospec(info->channel, 2075 + RME9652_NCHANNELS)]; 2076 + if (chn < 0) 2076 2077 return -EINVAL; 2077 - } 2078 2078 2079 2079 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; 2080 2080 info->first = 0;
+1 -1
sound/soc/amd/acp-da7219-max98357a.c
··· 43 43 #define DUAL_CHANNEL 2 44 44 45 45 static struct snd_soc_jack cz_jack; 46 - struct clk *da7219_dai_clk; 46 + static struct clk *da7219_dai_clk; 47 47 48 48 static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) 49 49 {
+20 -6
sound/soc/codecs/adau17x1.c
··· 502 502 } 503 503 504 504 if (adau->sigmadsp) { 505 - ret = adau17x1_setup_firmware(adau, params_rate(params)); 505 + ret = adau17x1_setup_firmware(component, params_rate(params)); 506 506 if (ret < 0) 507 507 return ret; 508 508 } ··· 835 835 } 836 836 EXPORT_SYMBOL_GPL(adau17x1_volatile_register); 837 837 838 - int adau17x1_setup_firmware(struct adau *adau, unsigned int rate) 838 + int adau17x1_setup_firmware(struct snd_soc_component *component, 839 + unsigned int rate) 839 840 { 840 841 int ret; 841 - int dspsr; 842 + int dspsr, dsp_run; 843 + struct adau *adau = snd_soc_component_get_drvdata(component); 844 + struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); 845 + 846 + snd_soc_dapm_mutex_lock(dapm); 842 847 843 848 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); 844 849 if (ret) 845 - return ret; 850 + goto err; 851 + 852 + ret = regmap_read(adau->regmap, ADAU17X1_DSP_RUN, &dsp_run); 853 + if (ret) 854 + goto err; 846 855 847 856 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); 848 857 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); 858 + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, 0); 849 859 850 860 ret = sigmadsp_setup(adau->sigmadsp, rate); 851 861 if (ret) { 852 862 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); 853 - return ret; 863 + goto err; 854 864 } 855 865 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); 866 + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, dsp_run); 856 867 857 - return 0; 868 + err: 869 + snd_soc_dapm_mutex_unlock(dapm); 870 + 871 + return ret; 858 872 } 859 873 EXPORT_SYMBOL_GPL(adau17x1_setup_firmware); 860 874
+2 -1
sound/soc/codecs/adau17x1.h
··· 68 68 69 69 extern const struct snd_soc_dai_ops adau17x1_dai_ops; 70 70 71 - int adau17x1_setup_firmware(struct adau *adau, unsigned int rate); 71 + int adau17x1_setup_firmware(struct snd_soc_component *component, 72 + unsigned int rate); 72 73 bool adau17x1_has_dsp(struct adau *adau); 73 74 74 75 #define ADAU17X1_CLOCK_CONTROL 0x4000
+6 -3
sound/soc/codecs/msm8916-wcd-analog.c
··· 1187 1187 return irq; 1188 1188 } 1189 1189 1190 - ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, 1190 + ret = devm_request_threaded_irq(dev, irq, NULL, 1191 + pm8916_mbhc_switch_irq_handler, 1191 1192 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 1192 1193 IRQF_ONESHOT, 1193 1194 "mbhc switch irq", priv); ··· 1202 1201 return irq; 1203 1202 } 1204 1203 1205 - ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, 1204 + ret = devm_request_threaded_irq(dev, irq, NULL, 1205 + mbhc_btn_press_irq_handler, 1206 1206 IRQF_TRIGGER_RISING | 1207 1207 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1208 1208 "mbhc btn press irq", priv); ··· 1216 1214 return irq; 1217 1215 } 1218 1216 1219 - ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, 1217 + ret = devm_request_threaded_irq(dev, irq, NULL, 1218 + mbhc_btn_release_irq_handler, 1220 1219 IRQF_TRIGGER_RISING | 1221 1220 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1222 1221 "mbhc btn release irq", priv);
+3
sound/soc/codecs/rt5514.c
··· 89 89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, 90 90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, 91 91 {RT5514_DELAY_BUF_CTRL3, 0x00000000}, 92 + {RT5514_ASRC_IN_CTRL1, 0x00000003}, 92 93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, 93 94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, 94 95 {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, ··· 182 181 case RT5514_PLL3_CALIB_CTRL5: 183 182 case RT5514_DELAY_BUF_CTRL1: 184 183 case RT5514_DELAY_BUF_CTRL3: 184 + case RT5514_ASRC_IN_CTRL1: 185 185 case RT5514_DOWNFILTER0_CTRL1: 186 186 case RT5514_DOWNFILTER0_CTRL2: 187 187 case RT5514_DOWNFILTER0_CTRL3: ··· 240 238 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: 241 239 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: 242 240 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: 241 + case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1: 243 242 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: 244 243 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: 245 244 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
+7
sound/soc/fsl/fsl_esai.c
··· 144 144 145 145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; 146 146 147 + /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */ 148 + if (ratio <= 256) { 149 + pm = ratio; 150 + fp = 1; 151 + goto out; 152 + } 153 + 147 154 /* Set the max fluctuation -- 0.1% of the max devisor */ 148 155 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; 149 156
+11 -3
sound/soc/fsl/fsl_ssi.c
··· 217 217 * @dai_fmt: DAI configuration this device is currently used with 218 218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX) 219 219 * @i2s_net: I2S and Network mode configurations of SCR register 220 + * (this is the initial settings based on the DAI format) 220 221 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK 221 222 * @use_dma: DMA is used or FIQ with stream filter 222 223 * @use_dual_fifo: DMA with support for dual FIFO mode ··· 830 829 } 831 830 832 831 if (!fsl_ssi_is_ac97(ssi)) { 832 + /* 833 + * Keep the ssi->i2s_net intact while having a local variable 834 + * to override settings for special use cases. Otherwise, the 835 + * ssi->i2s_net will lose the settings for regular use cases. 836 + */ 837 + u8 i2s_net = ssi->i2s_net; 838 + 833 839 /* Normal + Network mode to send 16-bit data in 32-bit frames */ 834 840 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16) 835 - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 841 + i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 836 842 837 843 /* Use Normal mode to send mono data at 1st slot of 2 slots */ 838 844 if (channels == 1) 839 - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL; 845 + i2s_net = SSI_SCR_I2S_MODE_NORMAL; 840 846 841 847 regmap_update_bits(regs, REG_SSI_SCR, 842 - SSI_SCR_I2S_NET_MASK, ssi->i2s_net); 848 + SSI_SCR_I2S_NET_MASK, i2s_net); 843 849 } 844 850 845 851 /* In synchronous mode, the SSI uses STCCR for capture */
+13 -9
sound/soc/intel/Kconfig
··· 72 72 for Baytrail Chromebooks but this option is now deprecated and is 73 73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead. 74 74 75 + config SND_SST_ATOM_HIFI2_PLATFORM 76 + tristate 77 + select SND_SOC_COMPRESS 78 + 75 79 config SND_SST_ATOM_HIFI2_PLATFORM_PCI 76 - tristate "PCI HiFi2 (Medfield, Merrifield) Platforms" 80 + tristate "PCI HiFi2 (Merrifield) Platforms" 77 81 depends on X86 && PCI 78 82 select SND_SST_IPC_PCI 79 - select SND_SOC_COMPRESS 83 + select SND_SST_ATOM_HIFI2_PLATFORM 80 84 help 81 - If you have a Intel Medfield or Merrifield/Edison platform, then 85 + If you have a Intel Merrifield/Edison platform, then 82 86 enable this option by saying Y or m. Distros will typically not 83 - enable this option: Medfield devices are not available to 84 - developers and while Merrifield/Edison can run a mainline kernel with 85 - limited functionality it will require a firmware file which 86 - is not in the standard firmware tree 87 + enable this option: while Merrifield/Edison can run a mainline 88 + kernel with limited functionality it will require a firmware file 89 + which is not in the standard firmware tree 87 90 88 - config SND_SST_ATOM_HIFI2_PLATFORM 91 + config SND_SST_ATOM_HIFI2_PLATFORM_ACPI 89 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 93 + default ACPI 90 94 depends on X86 && ACPI 91 95 select SND_SST_IPC_ACPI 92 - select SND_SOC_COMPRESS 96 + select SND_SST_ATOM_HIFI2_PLATFORM 93 97 select SND_SOC_ACPI_INTEL_MATCH 94 98 select IOSF_MBI 95 99 help
+11 -3
sound/soc/omap/omap-dmic.c
··· 281 281 static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, 282 282 unsigned int freq) 283 283 { 284 - struct clk *parent_clk; 284 + struct clk *parent_clk, *mux; 285 285 char *parent_clk_name; 286 286 int ret = 0; 287 287 ··· 329 329 return -ENODEV; 330 330 } 331 331 332 + mux = clk_get_parent(dmic->fclk); 333 + if (IS_ERR(mux)) { 334 + dev_err(dmic->dev, "can't get fck mux parent\n"); 335 + clk_put(parent_clk); 336 + return -ENODEV; 337 + } 338 + 332 339 mutex_lock(&dmic->mutex); 333 340 if (dmic->active) { 334 341 /* disable clock while reparenting */ 335 342 pm_runtime_put_sync(dmic->dev); 336 - ret = clk_set_parent(dmic->fclk, parent_clk); 343 + ret = clk_set_parent(mux, parent_clk); 337 344 pm_runtime_get_sync(dmic->dev); 338 345 } else { 339 - ret = clk_set_parent(dmic->fclk, parent_clk); 346 + ret = clk_set_parent(mux, parent_clk); 340 347 } 341 348 mutex_unlock(&dmic->mutex); 342 349 ··· 356 349 dmic->fclk_freq = freq; 357 350 358 351 err_busy: 352 + clk_put(mux); 359 353 clk_put(parent_clk); 360 354 361 355 return ret;
+2 -2
sound/soc/sh/rcar/core.c
··· 1536 1536 return ret; 1537 1537 } 1538 1538 1539 - static int rsnd_suspend(struct device *dev) 1539 + static int __maybe_unused rsnd_suspend(struct device *dev) 1540 1540 { 1541 1541 struct rsnd_priv *priv = dev_get_drvdata(dev); 1542 1542 ··· 1545 1545 return 0; 1546 1546 } 1547 1547 1548 - static int rsnd_resume(struct device *dev) 1548 + static int __maybe_unused rsnd_resume(struct device *dev) 1549 1549 { 1550 1550 struct rsnd_priv *priv = dev_get_drvdata(dev); 1551 1551
+9 -5
sound/soc/soc-topology.c
··· 513 513 */ 514 514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { 515 515 /* enumerated widget mixer */ 516 - for (i = 0; i < w->num_kcontrols; i++) { 516 + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { 517 517 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 518 518 struct soc_enum *se = 519 519 (struct soc_enum *)kcontrol->private_value; ··· 530 530 } 531 531 } else { 532 532 /* volume mixer or bytes controls */ 533 - for (i = 0; i < w->num_kcontrols; i++) { 533 + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { 534 534 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 535 535 536 536 if (dobj->widget.kcontrol_type ··· 1325 1325 ec->hdr.name); 1326 1326 1327 1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL); 1328 - if (kc[i].name == NULL) 1328 + if (kc[i].name == NULL) { 1329 + kfree(se); 1329 1330 goto err_se; 1331 + } 1330 1332 kc[i].private_value = (long)se; 1331 1333 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1332 1334 kc[i].access = ec->hdr.access; ··· 1444 1442 be->hdr.name, be->hdr.access); 1445 1443 1446 1444 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL); 1447 - if (kc[i].name == NULL) 1445 + if (kc[i].name == NULL) { 1446 + kfree(sbe); 1448 1447 goto err; 1448 + } 1449 1449 kc[i].private_value = (long)sbe; 1450 1450 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1451 1451 kc[i].access = be->hdr.access; ··· 2580 2576 2581 2577 /* match index */ 2582 2578 if (dobj->index != index && 2583 - dobj->index != SND_SOC_TPLG_INDEX_ALL) 2579 + index != SND_SOC_TPLG_INDEX_ALL) 2584 2580 continue; 2585 2581 2586 2582 switch (dobj->type) {
+4 -3
sound/usb/mixer.c
··· 1776 1776 build_feature_ctl(state, _ftr, ch_bits, control, 1777 1777 &iterm, unitid, ch_read_only); 1778 1778 if (uac_v2v3_control_is_readable(master_bits, control)) 1779 - build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 1779 + build_feature_ctl(state, _ftr, 0, control, 1780 + &iterm, unitid, 1780 1781 !uac_v2v3_control_is_writeable(master_bits, 1781 1782 control)); 1782 1783 } ··· 1860 1859 check_input_term(state, d->bTerminalID, &iterm); 1861 1860 if (state->mixer->protocol == UAC_VERSION_2) { 1862 1861 /* Check for jack detection. */ 1863 - if (uac_v2v3_control_is_readable(d->bmControls, 1862 + if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls), 1864 1863 UAC2_TE_CONNECTOR)) { 1865 1864 build_connector_control(state, &iterm, true); 1866 1865 } ··· 2562 2561 if (err < 0 && err != -EINVAL) 2563 2562 return err; 2564 2563 2565 - if (uac_v2v3_control_is_readable(desc->bmControls, 2564 + if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), 2566 2565 UAC2_TE_CONNECTOR)) { 2567 2566 build_connector_control(&state, &state.oterm, 2568 2567 false);
+3
sound/usb/mixer_maps.c
··· 353 353 /* 354 354 * Dell usb dock with ALC4020 codec had a firmware problem where it got 355 355 * screwed up when zero volume is passed; just skip it as a workaround 356 + * 357 + * Also the extension unit gives an access error, so skip it as well. 356 358 */ 357 359 static const struct usbmix_name_map dell_alc4020_map[] = { 360 + { 4, NULL }, /* extension unit */ 358 361 { 16, NULL }, 359 362 { 19, NULL }, 360 363 { 0 }
+1 -1
sound/usb/stream.c
··· 349 349 * TODO: this conversion is not complete, update it 350 350 * after adding UAC3 values to asound.h 351 351 */ 352 - switch (is->bChPurpose) { 352 + switch (is->bChRelationship) { 353 353 case UAC3_CH_MONO: 354 354 map = SNDRV_CHMAP_MONO; 355 355 break;
+1 -1
sound/usb/usx2y/us122l.c
··· 139 139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); 140 140 } 141 141 142 - static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 142 + static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 143 143 { 144 144 unsigned long offset; 145 145 struct page *page;
+1 -1
sound/usb/usx2y/usX2Yhwdep.c
··· 31 31 #include "usbusx2y.h" 32 32 #include "usX2Yhwdep.h" 33 33 34 - static int snd_us428ctls_vm_fault(struct vm_fault *vmf) 34 + static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf) 35 35 { 36 36 unsigned long offset; 37 37 struct page * page;
+1 -1
sound/usb/usx2y/usx2yhwdeppcm.c
··· 652 652 } 653 653 654 654 655 - static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 655 + static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 656 656 { 657 657 unsigned long offset; 658 658 void *vaddr;
+9
tools/arch/arm/include/uapi/asm/kvm.h
··· 135 135 #define KVM_REG_ARM_CRM_SHIFT 7 136 136 #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 137 137 #define KVM_REG_ARM_32_CRN_SHIFT 11 138 + /* 139 + * For KVM currently all guest registers are nonsecure, but we reserve a bit 140 + * in the encoding to distinguish secure from nonsecure for AArch32 system 141 + * registers that are banked by security. This is 1 for the secure banked 142 + * register, and 0 for the nonsecure banked register or if the register is 143 + * not banked by security. 144 + */ 145 + #define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 146 + #define KVM_REG_ARM_SECURE_SHIFT 28 138 147 139 148 #define ARM_CP15_REG_SHIFT_MASK(x,n) \ 140 149 (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+1 -7
tools/arch/x86/include/asm/required-features.h
··· 53 53 # define NEED_MOVBE 0 54 54 #endif 55 55 56 - #ifdef CONFIG_X86_5LEVEL 57 - # define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) 58 - #else 59 - # define NEED_LA57 0 60 - #endif 61 - 62 56 #ifdef CONFIG_X86_64 63 57 #ifdef CONFIG_PARAVIRT 64 58 /* Paravirtualized systems may not have PSE or PGE available */ ··· 98 104 #define REQUIRED_MASK13 0 99 105 #define REQUIRED_MASK14 0 100 106 #define REQUIRED_MASK15 0 101 - #define REQUIRED_MASK16 (NEED_LA57) 107 + #define REQUIRED_MASK16 0 102 108 #define REQUIRED_MASK17 0 103 109 #define REQUIRED_MASK18 0 104 110 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+18 -1
tools/arch/x86/include/uapi/asm/kvm.h
··· 354 354 __u64 padding[16]; 355 355 }; 356 356 357 - /* definition of registers in kvm_run */ 357 + #define KVM_SYNC_X86_REGS (1UL << 0) 358 + #define KVM_SYNC_X86_SREGS (1UL << 1) 359 + #define KVM_SYNC_X86_EVENTS (1UL << 2) 360 + 361 + #define KVM_SYNC_X86_VALID_FIELDS \ 362 + (KVM_SYNC_X86_REGS| \ 363 + KVM_SYNC_X86_SREGS| \ 364 + KVM_SYNC_X86_EVENTS) 365 + 366 + /* kvm_sync_regs struct included by kvm_run struct */ 358 367 struct kvm_sync_regs { 368 + /* Members of this structure are potentially malicious. 369 + * Care must be taken by code reading, esp. interpreting, 370 + * data fields from them inside KVM to prevent TOCTOU and 371 + * double-fetch types of vulnerabilities. 372 + */ 373 + struct kvm_regs regs; 374 + struct kvm_sregs sregs; 375 + struct kvm_vcpu_events events; 359 376 }; 360 377 361 378 #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+14 -4
tools/include/linux/compiler.h
··· 151 151 * required ordering. 152 152 */ 153 153 154 - #define READ_ONCE(x) \ 155 - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 154 + #define READ_ONCE(x) \ 155 + ({ \ 156 + union { typeof(x) __val; char __c[1]; } __u = \ 157 + { .__c = { 0 } }; \ 158 + __read_once_size(&(x), __u.__c, sizeof(x)); \ 159 + __u.__val; \ 160 + }) 156 161 157 - #define WRITE_ONCE(x, val) \ 158 - ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 162 + #define WRITE_ONCE(x, val) \ 163 + ({ \ 164 + union { typeof(x) __val; char __c[1]; } __u = \ 165 + { .__val = (val) }; \ 166 + __write_once_size(&(x), __u.__c, sizeof(x)); \ 167 + __u.__val; \ 168 + }) 159 169 160 170 161 171 #ifndef __fallthrough
+1 -12
tools/include/linux/coresight-pmu.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #ifndef _LINUX_CORESIGHT_PMU_H
+3
tools/include/uapi/asm-generic/mman-common.h
··· 27 27 # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ 28 28 #endif 29 29 30 + /* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ 31 + #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ 32 + 30 33 /* 31 34 * Flags for mlock 32 35 */
+1
tools/include/uapi/linux/bpf.h
··· 864 864 /* BPF_FUNC_skb_set_tunnel_key flags. */ 865 865 #define BPF_F_ZERO_CSUM_TX (1ULL << 1) 866 866 #define BPF_F_DONT_FRAGMENT (1ULL << 2) 867 + #define BPF_F_SEQ_NUMBER (1ULL << 3) 867 868 868 869 /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and 869 870 * BPF_FUNC_perf_event_read_value flags.
+39
tools/include/uapi/linux/if_link.h
··· 941 941 IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ 942 942 }; 943 943 944 + /* tun section */ 945 + 946 + enum { 947 + IFLA_TUN_UNSPEC, 948 + IFLA_TUN_OWNER, 949 + IFLA_TUN_GROUP, 950 + IFLA_TUN_TYPE, 951 + IFLA_TUN_PI, 952 + IFLA_TUN_VNET_HDR, 953 + IFLA_TUN_PERSIST, 954 + IFLA_TUN_MULTI_QUEUE, 955 + IFLA_TUN_NUM_QUEUES, 956 + IFLA_TUN_NUM_DISABLED_QUEUES, 957 + __IFLA_TUN_MAX, 958 + }; 959 + 960 + #define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) 961 + 962 + /* rmnet section */ 963 + 964 + #define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) 965 + #define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) 966 + #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) 967 + #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) 968 + 969 + enum { 970 + IFLA_RMNET_UNSPEC, 971 + IFLA_RMNET_MUX_ID, 972 + IFLA_RMNET_FLAGS, 973 + __IFLA_RMNET_MAX, 974 + }; 975 + 976 + #define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) 977 + 978 + struct ifla_rmnet_flags { 979 + __u32 flags; 980 + __u32 mask; 981 + }; 982 + 944 983 #endif /* _UAPI_LINUX_IF_LINK_H */
+20 -1
tools/include/uapi/linux/kvm.h
··· 396 396 char padding[256]; 397 397 }; 398 398 399 + /* 2048 is the size of the char array used to bound/pad the size 400 + * of the union that holds sync regs. 401 + */ 402 + #define SYNC_REGS_SIZE_BYTES 2048 399 403 /* 400 404 * shared registers between kvm and userspace. 401 405 * kvm_valid_regs specifies the register classes set by the host ··· 411 407 __u64 kvm_dirty_regs; 412 408 union { 413 409 struct kvm_sync_regs regs; 414 - char padding[2048]; 410 + char padding[SYNC_REGS_SIZE_BYTES]; 415 411 } s; 416 412 }; 417 413 ··· 940 936 #define KVM_CAP_PPC_GET_CPU_CHAR 151 941 937 #define KVM_CAP_S390_BPB 152 942 938 #define KVM_CAP_GET_MSR_FEATURES 153 939 + #define KVM_CAP_HYPERV_EVENTFD 154 943 940 944 941 #ifdef KVM_CAP_IRQ_ROUTING 945 942 ··· 1380 1375 #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) 1381 1376 #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) 1382 1377 1378 + /* Available with KVM_CAP_HYPERV_EVENTFD */ 1379 + #define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) 1380 + 1381 + 1383 1382 /* Secure Encrypted Virtualization command */ 1384 1383 enum sev_cmd_id { 1385 1384 /* Guest initialization commands */ ··· 1523 1514 #define KVM_ARM_DEV_EL1_VTIMER (1 << 0) 1524 1515 #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) 1525 1516 #define KVM_ARM_DEV_PMU (1 << 2) 1517 + 1518 + struct kvm_hyperv_eventfd { 1519 + __u32 conn_id; 1520 + __s32 fd; 1521 + __u32 flags; 1522 + __u32 padding[3]; 1523 + }; 1524 + 1525 + #define KVM_HYPERV_CONN_ID_MASK 0x00ffffff 1526 + #define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) 1526 1527 1527 1528 #endif /* __LINUX_KVM_H */
+15 -3
tools/include/uapi/linux/perf_event.h
··· 650 650 #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 651 651 #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) 652 652 /* 653 - * Indicates that the content of PERF_SAMPLE_IP points to 654 - * the actual instruction that triggered the event. See also 655 - * perf_event_attr::precise_ip. 653 + * These PERF_RECORD_MISC_* flags below are safely reused 654 + * for the following events: 655 + * 656 + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events 657 + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events 658 + * 659 + * 660 + * PERF_RECORD_MISC_EXACT_IP: 661 + * Indicates that the content of PERF_SAMPLE_IP points to 662 + * the actual instruction that triggered the event. See also 663 + * perf_event_attr::precise_ip. 664 + * 665 + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: 666 + * Indicates that thread was preempted in TASK_RUNNING state. 656 667 */ 657 668 #define PERF_RECORD_MISC_EXACT_IP (1 << 14) 669 + #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) 658 670 /* 659 671 * Reserve the last bit to indicate some extended misc field 660 672 */
+1
tools/include/uapi/sound/asound.h
··· 242 242 #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ 243 243 #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ 244 244 #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE 245 + #define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8 245 246 246 247 #ifdef SNDRV_LITTLE_ENDIAN 247 248 #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
+3 -3
tools/lib/subcmd/parse-options.c
··· 433 433 434 434 if (ambiguous_option) { 435 435 fprintf(stderr, 436 - " Error: Ambiguous option: %s (could be --%s%s or --%s%s)", 436 + " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n", 437 437 arg, 438 438 (ambiguous_flags & OPT_UNSET) ? "no-" : "", 439 439 ambiguous_option->long_name, ··· 458 458 return; 459 459 460 460 if (strstarts(arg, "no-")) { 461 - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); 461 + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); 462 462 exit(129); 463 463 } 464 464 ··· 466 466 if (!options->long_name) 467 467 continue; 468 468 if (strstarts(options->long_name, arg)) { 469 - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); 469 + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); 470 470 exit(129); 471 471 } 472 472 }
+2 -2
tools/objtool/Makefile
··· 31 31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ 32 32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include 33 33 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed 34 - CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) 35 - LDFLAGS += -lelf $(LIBSUBCMD) 34 + CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) 35 + LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) 36 36 37 37 # Allow old libelf to be used: 38 38 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
+5
tools/perf/Documentation/perf-config.txt
··· 334 334 335 335 99.93 │ mov %eax,%eax 336 336 337 + annotate.offset_level:: 338 + Default is '1', meaning just jump targets will have offsets show right beside 339 + the instruction. When set to '2' 'call' instructions will also have its offsets 340 + shown, 3 or higher will show offsets for all instructions. 341 + 337 342 hist.*:: 338 343 hist.percentage:: 339 344 This option control the way to calculate overhead of filtered entries -
+3
tools/perf/Documentation/perf-mem.txt
··· 67 67 --phys-data:: 68 68 Record/Report sample physical addresses 69 69 70 + In addition, for report all perf report options are valid, and for record 71 + all perf record options. 72 + 70 73 SEE ALSO 71 74 -------- 72 75 linkperf:perf-record[1], linkperf:perf-report[1]
+2 -2
tools/perf/Documentation/perf-sched.txt
··· 104 104 kallsyms pathname 105 105 106 106 -g:: 107 - --no-call-graph:: 108 - Do not display call chains if present. 107 + --call-graph:: 108 + Display call chains if present (default on). 109 109 110 110 --max-stack:: 111 111 Maximum number of functions to display in backtrace, default 5.
+9 -8
tools/perf/Documentation/perf-script.txt
··· 228 228 For sample events it's possible to display misc field with -F +misc option, 229 229 following letters are displayed for each bit: 230 230 231 - PERF_RECORD_MISC_KERNEL K 232 - PERF_RECORD_MISC_USER U 233 - PERF_RECORD_MISC_HYPERVISOR H 234 - PERF_RECORD_MISC_GUEST_KERNEL G 235 - PERF_RECORD_MISC_GUEST_USER g 236 - PERF_RECORD_MISC_MMAP_DATA* M 237 - PERF_RECORD_MISC_COMM_EXEC E 238 - PERF_RECORD_MISC_SWITCH_OUT S 231 + PERF_RECORD_MISC_KERNEL K 232 + PERF_RECORD_MISC_USER U 233 + PERF_RECORD_MISC_HYPERVISOR H 234 + PERF_RECORD_MISC_GUEST_KERNEL G 235 + PERF_RECORD_MISC_GUEST_USER g 236 + PERF_RECORD_MISC_MMAP_DATA* M 237 + PERF_RECORD_MISC_COMM_EXEC E 238 + PERF_RECORD_MISC_SWITCH_OUT S 239 + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp 239 240 240 241 $ perf script -F +misc ... 241 242 sched-messaging 1414 K 28690.636582: 4590 cycles ...
+1 -1
tools/perf/Documentation/perf-stat.txt
··· 153 153 154 154 -I msecs:: 155 155 --interval-print msecs:: 156 - Print count deltas every N milliseconds (minimum: 10ms) 156 + Print count deltas every N milliseconds (minimum: 1ms) 157 157 The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. 158 158 example: 'perf stat -I 1000 -e cycles -a sleep 5' 159 159
+2 -2
tools/perf/Makefile.config
··· 68 68 endif 69 69 70 70 ifneq ($(NO_SYSCALL_TABLE),1) 71 - CFLAGS += -DHAVE_SYSCALL_TABLE 71 + CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT 72 72 endif 73 73 74 74 # So far there's only x86 and arm libdw unwind support merged in perf. ··· 847 847 ifeq ($(feature-jvmti), 1) 848 848 $(call detected_var,JDIR) 849 849 else 850 - $(warning No openjdk development package found, please install JDK package) 850 + $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel) 851 851 NO_JVMTI := 1 852 852 endif 853 853 endif
+12
tools/perf/arch/arm/include/arch-tests.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef ARCH_TESTS_H 3 + #define ARCH_TESTS_H 4 + 5 + #ifdef HAVE_DWARF_UNWIND_SUPPORT 6 + struct thread; 7 + struct perf_sample; 8 + #endif 9 + 10 + extern struct test arch_tests[]; 11 + 12 + #endif
+2
tools/perf/arch/arm/tests/Build
··· 1 1 libperf-y += regs_load.o 2 2 libperf-y += dwarf-unwind.o 3 + 4 + libperf-y += arch-tests.o
+16
tools/perf/arch/arm/tests/arch-tests.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <string.h> 3 + #include "tests/tests.h" 4 + #include "arch-tests.h" 5 + 6 + struct test arch_tests[] = { 7 + #ifdef HAVE_DWARF_UNWIND_SUPPORT 8 + { 9 + .desc = "DWARF unwind", 10 + .func = test__dwarf_unwind, 11 + }, 12 + #endif 13 + { 14 + .func = NULL, 15 + }, 16 + };
+1 -12
tools/perf/arch/arm/util/auxtrace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #include <stdbool.h>
+1 -12
tools/perf/arch/arm/util/cs-etm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #include <api/fs/fs.h>
+1 -12
tools/perf/arch/arm/util/cs-etm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #ifndef INCLUDE__PERF_CS_ETM_H__
+1 -12
tools/perf/arch/arm/util/pmu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #include <string.h>
+1 -1
tools/perf/arch/x86/Makefile
··· 21 21 $(header): $(sys)/syscall_64.tbl $(systbl) 22 22 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ 23 23 (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ 24 - || echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true 24 + || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true 25 25 $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ 26 26 27 27 clean::
+66 -1
tools/perf/arch/x86/annotate/instructions.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 static struct ins x86__instructions[] = { 3 + { .name = "adc", .ops = &mov_ops, }, 4 + { .name = "adcb", .ops = &mov_ops, }, 5 + { .name = "adcl", .ops = &mov_ops, }, 3 6 { .name = "add", .ops = &mov_ops, }, 4 7 { .name = "addl", .ops = &mov_ops, }, 5 8 { .name = "addq", .ops = &mov_ops, }, 9 + { .name = "addsd", .ops = &mov_ops, }, 6 10 { .name = "addw", .ops = &mov_ops, }, 7 11 { .name = "and", .ops = &mov_ops, }, 12 + { .name = "andb", .ops = &mov_ops, }, 13 + { .name = "andl", .ops = &mov_ops, }, 14 + { .name = "andpd", .ops = &mov_ops, }, 15 + { .name = "andps", .ops = &mov_ops, }, 16 + { .name = "andq", .ops = &mov_ops, }, 17 + { .name = "andw", .ops = &mov_ops, }, 18 + { .name = "bsr", .ops = &mov_ops, }, 19 + { .name = "bt", .ops = &mov_ops, }, 20 + { .name = "btr", .ops = &mov_ops, }, 8 21 { .name = "bts", .ops = &mov_ops, }, 22 + { .name = "btsq", .ops = &mov_ops, }, 9 23 { .name = "call", .ops = &call_ops, }, 10 24 { .name = "callq", .ops = &call_ops, }, 25 + { .name = "cmovbe", .ops = &mov_ops, }, 26 + { .name = "cmove", .ops = &mov_ops, }, 27 + { .name = "cmovae", .ops = &mov_ops, }, 11 28 { .name = "cmp", .ops = &mov_ops, }, 12 29 { .name = "cmpb", .ops = &mov_ops, }, 13 30 { .name = "cmpl", .ops = &mov_ops, }, 14 31 { .name = "cmpq", .ops = &mov_ops, }, 15 32 { .name = "cmpw", .ops = &mov_ops, }, 16 33 { .name = "cmpxch", .ops = &mov_ops, }, 34 + { .name = "cmpxchg", .ops = &mov_ops, }, 35 + { .name = "cs", .ops = &mov_ops, }, 17 36 { .name = "dec", .ops = &dec_ops, }, 18 37 { .name = "decl", .ops = &dec_ops, }, 38 + { .name = "divsd", .ops = &mov_ops, }, 39 + { .name = "divss", .ops = &mov_ops, }, 40 + { .name = "gs", .ops = &mov_ops, }, 19 41 { .name = "imul", .ops = &mov_ops, }, 20 42 { .name = "inc", .ops = &dec_ops, }, 21 43 { .name = "incl", .ops = &dec_ops, }, ··· 79 57 { .name = "lea", .ops = &mov_ops, }, 80 58 { .name = "lock", .ops = &lock_ops, }, 81 59 { .name = "mov", .ops = &mov_ops, }, 60 
+ { .name = "movapd", .ops = &mov_ops, }, 61 + { .name = "movaps", .ops = &mov_ops, }, 82 62 { .name = "movb", .ops = &mov_ops, }, 83 63 { .name = "movdqa", .ops = &mov_ops, }, 64 + { .name = "movdqu", .ops = &mov_ops, }, 84 65 { .name = "movl", .ops = &mov_ops, }, 85 66 { .name = "movq", .ops = &mov_ops, }, 67 + { .name = "movsd", .ops = &mov_ops, }, 86 68 { .name = "movslq", .ops = &mov_ops, }, 69 + { .name = "movss", .ops = &mov_ops, }, 70 + { .name = "movupd", .ops = &mov_ops, }, 71 + { .name = "movups", .ops = &mov_ops, }, 72 + { .name = "movw", .ops = &mov_ops, }, 87 73 { .name = "movzbl", .ops = &mov_ops, }, 88 74 { .name = "movzwl", .ops = &mov_ops, }, 75 + { .name = "mulsd", .ops = &mov_ops, }, 76 + { .name = "mulss", .ops = &mov_ops, }, 89 77 { .name = "nop", .ops = &nop_ops, }, 90 78 { .name = "nopl", .ops = &nop_ops, }, 91 79 { .name = "nopw", .ops = &nop_ops, }, 92 80 { .name = "or", .ops = &mov_ops, }, 81 + { .name = "orb", .ops = &mov_ops, }, 93 82 { .name = "orl", .ops = &mov_ops, }, 83 + { .name = "orps", .ops = &mov_ops, }, 84 + { .name = "orq", .ops = &mov_ops, }, 85 + { .name = "pand", .ops = &mov_ops, }, 86 + { .name = "paddq", .ops = &mov_ops, }, 87 + { .name = "pcmpeqb", .ops = &mov_ops, }, 88 + { .name = "por", .ops = &mov_ops, }, 89 + { .name = "rclb", .ops = &mov_ops, }, 90 + { .name = "rcll", .ops = &mov_ops, }, 91 + { .name = "retq", .ops = &ret_ops, }, 92 + { .name = "sbb", .ops = &mov_ops, }, 93 + { .name = "sbbl", .ops = &mov_ops, }, 94 + { .name = "sete", .ops = &mov_ops, }, 95 + { .name = "sub", .ops = &mov_ops, }, 96 + { .name = "subl", .ops = &mov_ops, }, 97 + { .name = "subq", .ops = &mov_ops, }, 98 + { .name = "subsd", .ops = &mov_ops, }, 99 + { .name = "subw", .ops = &mov_ops, }, 94 100 { .name = "test", .ops = &mov_ops, }, 95 101 { .name = "testb", .ops = &mov_ops, }, 96 102 { .name = "testl", .ops = &mov_ops, }, 103 + { .name = "ucomisd", .ops = &mov_ops, }, 104 + { .name = "ucomiss", .ops = &mov_ops, }, 105 + { .name = 
"vaddsd", .ops = &mov_ops, }, 106 + { .name = "vandpd", .ops = &mov_ops, }, 107 + { .name = "vmovdqa", .ops = &mov_ops, }, 108 + { .name = "vmovq", .ops = &mov_ops, }, 109 + { .name = "vmovsd", .ops = &mov_ops, }, 110 + { .name = "vmulsd", .ops = &mov_ops, }, 111 + { .name = "vorpd", .ops = &mov_ops, }, 112 + { .name = "vsubsd", .ops = &mov_ops, }, 113 + { .name = "vucomisd", .ops = &mov_ops, }, 97 114 { .name = "xadd", .ops = &mov_ops, }, 98 115 { .name = "xbeginl", .ops = &jump_ops, }, 99 116 { .name = "xbeginq", .ops = &jump_ops, }, 100 - { .name = "retq", .ops = &ret_ops, }, 117 + { .name = "xchg", .ops = &mov_ops, }, 118 + { .name = "xor", .ops = &mov_ops, }, 119 + { .name = "xorb", .ops = &mov_ops, }, 120 + { .name = "xorpd", .ops = &mov_ops, }, 121 + { .name = "xorps", .ops = &mov_ops, }, 101 122 }; 102 123 103 124 static bool x86__ins_is_fused(struct arch *arch, const char *ins1,
+358 -354
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 4 4 # The format is: 5 5 # <number> <abi> <name> <entry point> 6 6 # 7 + # The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls 8 + # 7 9 # The abi is "common", "64" or "x32" for this file. 8 10 # 9 - 0 common read sys_read 10 - 1 common write sys_write 11 - 2 common open sys_open 12 - 3 common close sys_close 13 - 4 common stat sys_newstat 14 - 5 common fstat sys_newfstat 15 - 6 common lstat sys_newlstat 16 - 7 common poll sys_poll 17 - 8 common lseek sys_lseek 18 - 9 common mmap sys_mmap 19 - 10 common mprotect sys_mprotect 20 - 11 common munmap sys_munmap 21 - 12 common brk sys_brk 22 - 13 64 rt_sigaction sys_rt_sigaction 23 - 14 common rt_sigprocmask sys_rt_sigprocmask 24 - 15 64 rt_sigreturn sys_rt_sigreturn/ptregs 25 - 16 64 ioctl sys_ioctl 26 - 17 common pread64 sys_pread64 27 - 18 common pwrite64 sys_pwrite64 28 - 19 64 readv sys_readv 29 - 20 64 writev sys_writev 30 - 21 common access sys_access 31 - 22 common pipe sys_pipe 32 - 23 common select sys_select 33 - 24 common sched_yield sys_sched_yield 34 - 25 common mremap sys_mremap 35 - 26 common msync sys_msync 36 - 27 common mincore sys_mincore 37 - 28 common madvise sys_madvise 38 - 29 common shmget sys_shmget 39 - 30 common shmat sys_shmat 40 - 31 common shmctl sys_shmctl 41 - 32 common dup sys_dup 42 - 33 common dup2 sys_dup2 43 - 34 common pause sys_pause 44 - 35 common nanosleep sys_nanosleep 45 - 36 common getitimer sys_getitimer 46 - 37 common alarm sys_alarm 47 - 38 common setitimer sys_setitimer 48 - 39 common getpid sys_getpid 49 - 40 common sendfile sys_sendfile64 50 - 41 common socket sys_socket 51 - 42 common connect sys_connect 52 - 43 common accept sys_accept 53 - 44 common sendto sys_sendto 54 - 45 64 recvfrom sys_recvfrom 55 - 46 64 sendmsg sys_sendmsg 56 - 47 64 recvmsg sys_recvmsg 57 - 48 common shutdown sys_shutdown 58 - 49 common bind sys_bind 59 - 50 common listen sys_listen 60 - 51 common getsockname sys_getsockname 61 - 52 common getpeername sys_getpeername 
62 - 53 common socketpair sys_socketpair 63 - 54 64 setsockopt sys_setsockopt 64 - 55 64 getsockopt sys_getsockopt 65 - 56 common clone sys_clone/ptregs 66 - 57 common fork sys_fork/ptregs 67 - 58 common vfork sys_vfork/ptregs 68 - 59 64 execve sys_execve/ptregs 69 - 60 common exit sys_exit 70 - 61 common wait4 sys_wait4 71 - 62 common kill sys_kill 72 - 63 common uname sys_newuname 73 - 64 common semget sys_semget 74 - 65 common semop sys_semop 75 - 66 common semctl sys_semctl 76 - 67 common shmdt sys_shmdt 77 - 68 common msgget sys_msgget 78 - 69 common msgsnd sys_msgsnd 79 - 70 common msgrcv sys_msgrcv 80 - 71 common msgctl sys_msgctl 81 - 72 common fcntl sys_fcntl 82 - 73 common flock sys_flock 83 - 74 common fsync sys_fsync 84 - 75 common fdatasync sys_fdatasync 85 - 76 common truncate sys_truncate 86 - 77 common ftruncate sys_ftruncate 87 - 78 common getdents sys_getdents 88 - 79 common getcwd sys_getcwd 89 - 80 common chdir sys_chdir 90 - 81 common fchdir sys_fchdir 91 - 82 common rename sys_rename 92 - 83 common mkdir sys_mkdir 93 - 84 common rmdir sys_rmdir 94 - 85 common creat sys_creat 95 - 86 common link sys_link 96 - 87 common unlink sys_unlink 97 - 88 common symlink sys_symlink 98 - 89 common readlink sys_readlink 99 - 90 common chmod sys_chmod 100 - 91 common fchmod sys_fchmod 101 - 92 common chown sys_chown 102 - 93 common fchown sys_fchown 103 - 94 common lchown sys_lchown 104 - 95 common umask sys_umask 105 - 96 common gettimeofday sys_gettimeofday 106 - 97 common getrlimit sys_getrlimit 107 - 98 common getrusage sys_getrusage 108 - 99 common sysinfo sys_sysinfo 109 - 100 common times sys_times 110 - 101 64 ptrace sys_ptrace 111 - 102 common getuid sys_getuid 112 - 103 common syslog sys_syslog 113 - 104 common getgid sys_getgid 114 - 105 common setuid sys_setuid 115 - 106 common setgid sys_setgid 116 - 107 common geteuid sys_geteuid 117 - 108 common getegid sys_getegid 118 - 109 common setpgid sys_setpgid 119 - 110 common getppid sys_getppid 120 - 
111 common getpgrp sys_getpgrp 121 - 112 common setsid sys_setsid 122 - 113 common setreuid sys_setreuid 123 - 114 common setregid sys_setregid 124 - 115 common getgroups sys_getgroups 125 - 116 common setgroups sys_setgroups 126 - 117 common setresuid sys_setresuid 127 - 118 common getresuid sys_getresuid 128 - 119 common setresgid sys_setresgid 129 - 120 common getresgid sys_getresgid 130 - 121 common getpgid sys_getpgid 131 - 122 common setfsuid sys_setfsuid 132 - 123 common setfsgid sys_setfsgid 133 - 124 common getsid sys_getsid 134 - 125 common capget sys_capget 135 - 126 common capset sys_capset 136 - 127 64 rt_sigpending sys_rt_sigpending 137 - 128 64 rt_sigtimedwait sys_rt_sigtimedwait 138 - 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo 139 - 130 common rt_sigsuspend sys_rt_sigsuspend 140 - 131 64 sigaltstack sys_sigaltstack 141 - 132 common utime sys_utime 142 - 133 common mknod sys_mknod 11 + 0 common read __x64_sys_read 12 + 1 common write __x64_sys_write 13 + 2 common open __x64_sys_open 14 + 3 common close __x64_sys_close 15 + 4 common stat __x64_sys_newstat 16 + 5 common fstat __x64_sys_newfstat 17 + 6 common lstat __x64_sys_newlstat 18 + 7 common poll __x64_sys_poll 19 + 8 common lseek __x64_sys_lseek 20 + 9 common mmap __x64_sys_mmap 21 + 10 common mprotect __x64_sys_mprotect 22 + 11 common munmap __x64_sys_munmap 23 + 12 common brk __x64_sys_brk 24 + 13 64 rt_sigaction __x64_sys_rt_sigaction 25 + 14 common rt_sigprocmask __x64_sys_rt_sigprocmask 26 + 15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs 27 + 16 64 ioctl __x64_sys_ioctl 28 + 17 common pread64 __x64_sys_pread64 29 + 18 common pwrite64 __x64_sys_pwrite64 30 + 19 64 readv __x64_sys_readv 31 + 20 64 writev __x64_sys_writev 32 + 21 common access __x64_sys_access 33 + 22 common pipe __x64_sys_pipe 34 + 23 common select __x64_sys_select 35 + 24 common sched_yield __x64_sys_sched_yield 36 + 25 common mremap __x64_sys_mremap 37 + 26 common msync __x64_sys_msync 38 + 27 common mincore 
__x64_sys_mincore 39 + 28 common madvise __x64_sys_madvise 40 + 29 common shmget __x64_sys_shmget 41 + 30 common shmat __x64_sys_shmat 42 + 31 common shmctl __x64_sys_shmctl 43 + 32 common dup __x64_sys_dup 44 + 33 common dup2 __x64_sys_dup2 45 + 34 common pause __x64_sys_pause 46 + 35 common nanosleep __x64_sys_nanosleep 47 + 36 common getitimer __x64_sys_getitimer 48 + 37 common alarm __x64_sys_alarm 49 + 38 common setitimer __x64_sys_setitimer 50 + 39 common getpid __x64_sys_getpid 51 + 40 common sendfile __x64_sys_sendfile64 52 + 41 common socket __x64_sys_socket 53 + 42 common connect __x64_sys_connect 54 + 43 common accept __x64_sys_accept 55 + 44 common sendto __x64_sys_sendto 56 + 45 64 recvfrom __x64_sys_recvfrom 57 + 46 64 sendmsg __x64_sys_sendmsg 58 + 47 64 recvmsg __x64_sys_recvmsg 59 + 48 common shutdown __x64_sys_shutdown 60 + 49 common bind __x64_sys_bind 61 + 50 common listen __x64_sys_listen 62 + 51 common getsockname __x64_sys_getsockname 63 + 52 common getpeername __x64_sys_getpeername 64 + 53 common socketpair __x64_sys_socketpair 65 + 54 64 setsockopt __x64_sys_setsockopt 66 + 55 64 getsockopt __x64_sys_getsockopt 67 + 56 common clone __x64_sys_clone/ptregs 68 + 57 common fork __x64_sys_fork/ptregs 69 + 58 common vfork __x64_sys_vfork/ptregs 70 + 59 64 execve __x64_sys_execve/ptregs 71 + 60 common exit __x64_sys_exit 72 + 61 common wait4 __x64_sys_wait4 73 + 62 common kill __x64_sys_kill 74 + 63 common uname __x64_sys_newuname 75 + 64 common semget __x64_sys_semget 76 + 65 common semop __x64_sys_semop 77 + 66 common semctl __x64_sys_semctl 78 + 67 common shmdt __x64_sys_shmdt 79 + 68 common msgget __x64_sys_msgget 80 + 69 common msgsnd __x64_sys_msgsnd 81 + 70 common msgrcv __x64_sys_msgrcv 82 + 71 common msgctl __x64_sys_msgctl 83 + 72 common fcntl __x64_sys_fcntl 84 + 73 common flock __x64_sys_flock 85 + 74 common fsync __x64_sys_fsync 86 + 75 common fdatasync __x64_sys_fdatasync 87 + 76 common truncate __x64_sys_truncate 88 + 77 common 
ftruncate __x64_sys_ftruncate 89 + 78 common getdents __x64_sys_getdents 90 + 79 common getcwd __x64_sys_getcwd 91 + 80 common chdir __x64_sys_chdir 92 + 81 common fchdir __x64_sys_fchdir 93 + 82 common rename __x64_sys_rename 94 + 83 common mkdir __x64_sys_mkdir 95 + 84 common rmdir __x64_sys_rmdir 96 + 85 common creat __x64_sys_creat 97 + 86 common link __x64_sys_link 98 + 87 common unlink __x64_sys_unlink 99 + 88 common symlink __x64_sys_symlink 100 + 89 common readlink __x64_sys_readlink 101 + 90 common chmod __x64_sys_chmod 102 + 91 common fchmod __x64_sys_fchmod 103 + 92 common chown __x64_sys_chown 104 + 93 common fchown __x64_sys_fchown 105 + 94 common lchown __x64_sys_lchown 106 + 95 common umask __x64_sys_umask 107 + 96 common gettimeofday __x64_sys_gettimeofday 108 + 97 common getrlimit __x64_sys_getrlimit 109 + 98 common getrusage __x64_sys_getrusage 110 + 99 common sysinfo __x64_sys_sysinfo 111 + 100 common times __x64_sys_times 112 + 101 64 ptrace __x64_sys_ptrace 113 + 102 common getuid __x64_sys_getuid 114 + 103 common syslog __x64_sys_syslog 115 + 104 common getgid __x64_sys_getgid 116 + 105 common setuid __x64_sys_setuid 117 + 106 common setgid __x64_sys_setgid 118 + 107 common geteuid __x64_sys_geteuid 119 + 108 common getegid __x64_sys_getegid 120 + 109 common setpgid __x64_sys_setpgid 121 + 110 common getppid __x64_sys_getppid 122 + 111 common getpgrp __x64_sys_getpgrp 123 + 112 common setsid __x64_sys_setsid 124 + 113 common setreuid __x64_sys_setreuid 125 + 114 common setregid __x64_sys_setregid 126 + 115 common getgroups __x64_sys_getgroups 127 + 116 common setgroups __x64_sys_setgroups 128 + 117 common setresuid __x64_sys_setresuid 129 + 118 common getresuid __x64_sys_getresuid 130 + 119 common setresgid __x64_sys_setresgid 131 + 120 common getresgid __x64_sys_getresgid 132 + 121 common getpgid __x64_sys_getpgid 133 + 122 common setfsuid __x64_sys_setfsuid 134 + 123 common setfsgid __x64_sys_setfsgid 135 + 124 common getsid __x64_sys_getsid 
136 + 125 common capget __x64_sys_capget 137 + 126 common capset __x64_sys_capset 138 + 127 64 rt_sigpending __x64_sys_rt_sigpending 139 + 128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait 140 + 129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo 141 + 130 common rt_sigsuspend __x64_sys_rt_sigsuspend 142 + 131 64 sigaltstack __x64_sys_sigaltstack 143 + 132 common utime __x64_sys_utime 144 + 133 common mknod __x64_sys_mknod 143 145 134 64 uselib 144 - 135 common personality sys_personality 145 - 136 common ustat sys_ustat 146 - 137 common statfs sys_statfs 147 - 138 common fstatfs sys_fstatfs 148 - 139 common sysfs sys_sysfs 149 - 140 common getpriority sys_getpriority 150 - 141 common setpriority sys_setpriority 151 - 142 common sched_setparam sys_sched_setparam 152 - 143 common sched_getparam sys_sched_getparam 153 - 144 common sched_setscheduler sys_sched_setscheduler 154 - 145 common sched_getscheduler sys_sched_getscheduler 155 - 146 common sched_get_priority_max sys_sched_get_priority_max 156 - 147 common sched_get_priority_min sys_sched_get_priority_min 157 - 148 common sched_rr_get_interval sys_sched_rr_get_interval 158 - 149 common mlock sys_mlock 159 - 150 common munlock sys_munlock 160 - 151 common mlockall sys_mlockall 161 - 152 common munlockall sys_munlockall 162 - 153 common vhangup sys_vhangup 163 - 154 common modify_ldt sys_modify_ldt 164 - 155 common pivot_root sys_pivot_root 165 - 156 64 _sysctl sys_sysctl 166 - 157 common prctl sys_prctl 167 - 158 common arch_prctl sys_arch_prctl 168 - 159 common adjtimex sys_adjtimex 169 - 160 common setrlimit sys_setrlimit 170 - 161 common chroot sys_chroot 171 - 162 common sync sys_sync 172 - 163 common acct sys_acct 173 - 164 common settimeofday sys_settimeofday 174 - 165 common mount sys_mount 175 - 166 common umount2 sys_umount 176 - 167 common swapon sys_swapon 177 - 168 common swapoff sys_swapoff 178 - 169 common reboot sys_reboot 179 - 170 common sethostname sys_sethostname 180 - 171 common setdomainname 
sys_setdomainname 181 - 172 common iopl sys_iopl/ptregs 182 - 173 common ioperm sys_ioperm 146 + 135 common personality __x64_sys_personality 147 + 136 common ustat __x64_sys_ustat 148 + 137 common statfs __x64_sys_statfs 149 + 138 common fstatfs __x64_sys_fstatfs 150 + 139 common sysfs __x64_sys_sysfs 151 + 140 common getpriority __x64_sys_getpriority 152 + 141 common setpriority __x64_sys_setpriority 153 + 142 common sched_setparam __x64_sys_sched_setparam 154 + 143 common sched_getparam __x64_sys_sched_getparam 155 + 144 common sched_setscheduler __x64_sys_sched_setscheduler 156 + 145 common sched_getscheduler __x64_sys_sched_getscheduler 157 + 146 common sched_get_priority_max __x64_sys_sched_get_priority_max 158 + 147 common sched_get_priority_min __x64_sys_sched_get_priority_min 159 + 148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval 160 + 149 common mlock __x64_sys_mlock 161 + 150 common munlock __x64_sys_munlock 162 + 151 common mlockall __x64_sys_mlockall 163 + 152 common munlockall __x64_sys_munlockall 164 + 153 common vhangup __x64_sys_vhangup 165 + 154 common modify_ldt __x64_sys_modify_ldt 166 + 155 common pivot_root __x64_sys_pivot_root 167 + 156 64 _sysctl __x64_sys_sysctl 168 + 157 common prctl __x64_sys_prctl 169 + 158 common arch_prctl __x64_sys_arch_prctl 170 + 159 common adjtimex __x64_sys_adjtimex 171 + 160 common setrlimit __x64_sys_setrlimit 172 + 161 common chroot __x64_sys_chroot 173 + 162 common sync __x64_sys_sync 174 + 163 common acct __x64_sys_acct 175 + 164 common settimeofday __x64_sys_settimeofday 176 + 165 common mount __x64_sys_mount 177 + 166 common umount2 __x64_sys_umount 178 + 167 common swapon __x64_sys_swapon 179 + 168 common swapoff __x64_sys_swapoff 180 + 169 common reboot __x64_sys_reboot 181 + 170 common sethostname __x64_sys_sethostname 182 + 171 common setdomainname __x64_sys_setdomainname 183 + 172 common iopl __x64_sys_iopl/ptregs 184 + 173 common ioperm __x64_sys_ioperm 183 185 174 64 create_module 184 
- 175 common init_module sys_init_module 185 - 176 common delete_module sys_delete_module 186 + 175 common init_module __x64_sys_init_module 187 + 176 common delete_module __x64_sys_delete_module 186 188 177 64 get_kernel_syms 187 189 178 64 query_module 188 - 179 common quotactl sys_quotactl 190 + 179 common quotactl __x64_sys_quotactl 189 191 180 64 nfsservctl 190 192 181 common getpmsg 191 193 182 common putpmsg 192 194 183 common afs_syscall 193 195 184 common tuxcall 194 196 185 common security 195 - 186 common gettid sys_gettid 196 - 187 common readahead sys_readahead 197 - 188 common setxattr sys_setxattr 198 - 189 common lsetxattr sys_lsetxattr 199 - 190 common fsetxattr sys_fsetxattr 200 - 191 common getxattr sys_getxattr 201 - 192 common lgetxattr sys_lgetxattr 202 - 193 common fgetxattr sys_fgetxattr 203 - 194 common listxattr sys_listxattr 204 - 195 common llistxattr sys_llistxattr 205 - 196 common flistxattr sys_flistxattr 206 - 197 common removexattr sys_removexattr 207 - 198 common lremovexattr sys_lremovexattr 208 - 199 common fremovexattr sys_fremovexattr 209 - 200 common tkill sys_tkill 210 - 201 common time sys_time 211 - 202 common futex sys_futex 212 - 203 common sched_setaffinity sys_sched_setaffinity 213 - 204 common sched_getaffinity sys_sched_getaffinity 197 + 186 common gettid __x64_sys_gettid 198 + 187 common readahead __x64_sys_readahead 199 + 188 common setxattr __x64_sys_setxattr 200 + 189 common lsetxattr __x64_sys_lsetxattr 201 + 190 common fsetxattr __x64_sys_fsetxattr 202 + 191 common getxattr __x64_sys_getxattr 203 + 192 common lgetxattr __x64_sys_lgetxattr 204 + 193 common fgetxattr __x64_sys_fgetxattr 205 + 194 common listxattr __x64_sys_listxattr 206 + 195 common llistxattr __x64_sys_llistxattr 207 + 196 common flistxattr __x64_sys_flistxattr 208 + 197 common removexattr __x64_sys_removexattr 209 + 198 common lremovexattr __x64_sys_lremovexattr 210 + 199 common fremovexattr __x64_sys_fremovexattr 211 + 200 common tkill 
__x64_sys_tkill 212 + 201 common time __x64_sys_time 213 + 202 common futex __x64_sys_futex 214 + 203 common sched_setaffinity __x64_sys_sched_setaffinity 215 + 204 common sched_getaffinity __x64_sys_sched_getaffinity 214 216 205 64 set_thread_area 215 - 206 64 io_setup sys_io_setup 216 - 207 common io_destroy sys_io_destroy 217 - 208 common io_getevents sys_io_getevents 218 - 209 64 io_submit sys_io_submit 219 - 210 common io_cancel sys_io_cancel 217 + 206 64 io_setup __x64_sys_io_setup 218 + 207 common io_destroy __x64_sys_io_destroy 219 + 208 common io_getevents __x64_sys_io_getevents 220 + 209 64 io_submit __x64_sys_io_submit 221 + 210 common io_cancel __x64_sys_io_cancel 220 222 211 64 get_thread_area 221 - 212 common lookup_dcookie sys_lookup_dcookie 222 - 213 common epoll_create sys_epoll_create 223 + 212 common lookup_dcookie __x64_sys_lookup_dcookie 224 + 213 common epoll_create __x64_sys_epoll_create 223 225 214 64 epoll_ctl_old 224 226 215 64 epoll_wait_old 225 - 216 common remap_file_pages sys_remap_file_pages 226 - 217 common getdents64 sys_getdents64 227 - 218 common set_tid_address sys_set_tid_address 228 - 219 common restart_syscall sys_restart_syscall 229 - 220 common semtimedop sys_semtimedop 230 - 221 common fadvise64 sys_fadvise64 231 - 222 64 timer_create sys_timer_create 232 - 223 common timer_settime sys_timer_settime 233 - 224 common timer_gettime sys_timer_gettime 234 - 225 common timer_getoverrun sys_timer_getoverrun 235 - 226 common timer_delete sys_timer_delete 236 - 227 common clock_settime sys_clock_settime 237 - 228 common clock_gettime sys_clock_gettime 238 - 229 common clock_getres sys_clock_getres 239 - 230 common clock_nanosleep sys_clock_nanosleep 240 - 231 common exit_group sys_exit_group 241 - 232 common epoll_wait sys_epoll_wait 242 - 233 common epoll_ctl sys_epoll_ctl 243 - 234 common tgkill sys_tgkill 244 - 235 common utimes sys_utimes 227 + 216 common remap_file_pages __x64_sys_remap_file_pages 228 + 217 common getdents64 
__x64_sys_getdents64 229 + 218 common set_tid_address __x64_sys_set_tid_address 230 + 219 common restart_syscall __x64_sys_restart_syscall 231 + 220 common semtimedop __x64_sys_semtimedop 232 + 221 common fadvise64 __x64_sys_fadvise64 233 + 222 64 timer_create __x64_sys_timer_create 234 + 223 common timer_settime __x64_sys_timer_settime 235 + 224 common timer_gettime __x64_sys_timer_gettime 236 + 225 common timer_getoverrun __x64_sys_timer_getoverrun 237 + 226 common timer_delete __x64_sys_timer_delete 238 + 227 common clock_settime __x64_sys_clock_settime 239 + 228 common clock_gettime __x64_sys_clock_gettime 240 + 229 common clock_getres __x64_sys_clock_getres 241 + 230 common clock_nanosleep __x64_sys_clock_nanosleep 242 + 231 common exit_group __x64_sys_exit_group 243 + 232 common epoll_wait __x64_sys_epoll_wait 244 + 233 common epoll_ctl __x64_sys_epoll_ctl 245 + 234 common tgkill __x64_sys_tgkill 246 + 235 common utimes __x64_sys_utimes 245 247 236 64 vserver 246 - 237 common mbind sys_mbind 247 - 238 common set_mempolicy sys_set_mempolicy 248 - 239 common get_mempolicy sys_get_mempolicy 249 - 240 common mq_open sys_mq_open 250 - 241 common mq_unlink sys_mq_unlink 251 - 242 common mq_timedsend sys_mq_timedsend 252 - 243 common mq_timedreceive sys_mq_timedreceive 253 - 244 64 mq_notify sys_mq_notify 254 - 245 common mq_getsetattr sys_mq_getsetattr 255 - 246 64 kexec_load sys_kexec_load 256 - 247 64 waitid sys_waitid 257 - 248 common add_key sys_add_key 258 - 249 common request_key sys_request_key 259 - 250 common keyctl sys_keyctl 260 - 251 common ioprio_set sys_ioprio_set 261 - 252 common ioprio_get sys_ioprio_get 262 - 253 common inotify_init sys_inotify_init 263 - 254 common inotify_add_watch sys_inotify_add_watch 264 - 255 common inotify_rm_watch sys_inotify_rm_watch 265 - 256 common migrate_pages sys_migrate_pages 266 - 257 common openat sys_openat 267 - 258 common mkdirat sys_mkdirat 268 - 259 common mknodat sys_mknodat 269 - 260 common fchownat 
sys_fchownat 270 - 261 common futimesat sys_futimesat 271 - 262 common newfstatat sys_newfstatat 272 - 263 common unlinkat sys_unlinkat 273 - 264 common renameat sys_renameat 274 - 265 common linkat sys_linkat 275 - 266 common symlinkat sys_symlinkat 276 - 267 common readlinkat sys_readlinkat 277 - 268 common fchmodat sys_fchmodat 278 - 269 common faccessat sys_faccessat 279 - 270 common pselect6 sys_pselect6 280 - 271 common ppoll sys_ppoll 281 - 272 common unshare sys_unshare 282 - 273 64 set_robust_list sys_set_robust_list 283 - 274 64 get_robust_list sys_get_robust_list 284 - 275 common splice sys_splice 285 - 276 common tee sys_tee 286 - 277 common sync_file_range sys_sync_file_range 287 - 278 64 vmsplice sys_vmsplice 288 - 279 64 move_pages sys_move_pages 289 - 280 common utimensat sys_utimensat 290 - 281 common epoll_pwait sys_epoll_pwait 291 - 282 common signalfd sys_signalfd 292 - 283 common timerfd_create sys_timerfd_create 293 - 284 common eventfd sys_eventfd 294 - 285 common fallocate sys_fallocate 295 - 286 common timerfd_settime sys_timerfd_settime 296 - 287 common timerfd_gettime sys_timerfd_gettime 297 - 288 common accept4 sys_accept4 298 - 289 common signalfd4 sys_signalfd4 299 - 290 common eventfd2 sys_eventfd2 300 - 291 common epoll_create1 sys_epoll_create1 301 - 292 common dup3 sys_dup3 302 - 293 common pipe2 sys_pipe2 303 - 294 common inotify_init1 sys_inotify_init1 304 - 295 64 preadv sys_preadv 305 - 296 64 pwritev sys_pwritev 306 - 297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 307 - 298 common perf_event_open sys_perf_event_open 308 - 299 64 recvmmsg sys_recvmmsg 309 - 300 common fanotify_init sys_fanotify_init 310 - 301 common fanotify_mark sys_fanotify_mark 311 - 302 common prlimit64 sys_prlimit64 312 - 303 common name_to_handle_at sys_name_to_handle_at 313 - 304 common open_by_handle_at sys_open_by_handle_at 314 - 305 common clock_adjtime sys_clock_adjtime 315 - 306 common syncfs sys_syncfs 316 - 307 64 sendmmsg sys_sendmmsg 317 - 308 
common setns sys_setns 318 - 309 common getcpu sys_getcpu 319 - 310 64 process_vm_readv sys_process_vm_readv 320 - 311 64 process_vm_writev sys_process_vm_writev 321 - 312 common kcmp sys_kcmp 322 - 313 common finit_module sys_finit_module 323 - 314 common sched_setattr sys_sched_setattr 324 - 315 common sched_getattr sys_sched_getattr 325 - 316 common renameat2 sys_renameat2 326 - 317 common seccomp sys_seccomp 327 - 318 common getrandom sys_getrandom 328 - 319 common memfd_create sys_memfd_create 329 - 320 common kexec_file_load sys_kexec_file_load 330 - 321 common bpf sys_bpf 331 - 322 64 execveat sys_execveat/ptregs 332 - 323 common userfaultfd sys_userfaultfd 333 - 324 common membarrier sys_membarrier 334 - 325 common mlock2 sys_mlock2 335 - 326 common copy_file_range sys_copy_file_range 336 - 327 64 preadv2 sys_preadv2 337 - 328 64 pwritev2 sys_pwritev2 338 - 329 common pkey_mprotect sys_pkey_mprotect 339 - 330 common pkey_alloc sys_pkey_alloc 340 - 331 common pkey_free sys_pkey_free 341 - 332 common statx sys_statx 248 + 237 common mbind __x64_sys_mbind 249 + 238 common set_mempolicy __x64_sys_set_mempolicy 250 + 239 common get_mempolicy __x64_sys_get_mempolicy 251 + 240 common mq_open __x64_sys_mq_open 252 + 241 common mq_unlink __x64_sys_mq_unlink 253 + 242 common mq_timedsend __x64_sys_mq_timedsend 254 + 243 common mq_timedreceive __x64_sys_mq_timedreceive 255 + 244 64 mq_notify __x64_sys_mq_notify 256 + 245 common mq_getsetattr __x64_sys_mq_getsetattr 257 + 246 64 kexec_load __x64_sys_kexec_load 258 + 247 64 waitid __x64_sys_waitid 259 + 248 common add_key __x64_sys_add_key 260 + 249 common request_key __x64_sys_request_key 261 + 250 common keyctl __x64_sys_keyctl 262 + 251 common ioprio_set __x64_sys_ioprio_set 263 + 252 common ioprio_get __x64_sys_ioprio_get 264 + 253 common inotify_init __x64_sys_inotify_init 265 + 254 common inotify_add_watch __x64_sys_inotify_add_watch 266 + 255 common inotify_rm_watch __x64_sys_inotify_rm_watch 267 + 256 common 
migrate_pages __x64_sys_migrate_pages 268 + 257 common openat __x64_sys_openat 269 + 258 common mkdirat __x64_sys_mkdirat 270 + 259 common mknodat __x64_sys_mknodat 271 + 260 common fchownat __x64_sys_fchownat 272 + 261 common futimesat __x64_sys_futimesat 273 + 262 common newfstatat __x64_sys_newfstatat 274 + 263 common unlinkat __x64_sys_unlinkat 275 + 264 common renameat __x64_sys_renameat 276 + 265 common linkat __x64_sys_linkat 277 + 266 common symlinkat __x64_sys_symlinkat 278 + 267 common readlinkat __x64_sys_readlinkat 279 + 268 common fchmodat __x64_sys_fchmodat 280 + 269 common faccessat __x64_sys_faccessat 281 + 270 common pselect6 __x64_sys_pselect6 282 + 271 common ppoll __x64_sys_ppoll 283 + 272 common unshare __x64_sys_unshare 284 + 273 64 set_robust_list __x64_sys_set_robust_list 285 + 274 64 get_robust_list __x64_sys_get_robust_list 286 + 275 common splice __x64_sys_splice 287 + 276 common tee __x64_sys_tee 288 + 277 common sync_file_range __x64_sys_sync_file_range 289 + 278 64 vmsplice __x64_sys_vmsplice 290 + 279 64 move_pages __x64_sys_move_pages 291 + 280 common utimensat __x64_sys_utimensat 292 + 281 common epoll_pwait __x64_sys_epoll_pwait 293 + 282 common signalfd __x64_sys_signalfd 294 + 283 common timerfd_create __x64_sys_timerfd_create 295 + 284 common eventfd __x64_sys_eventfd 296 + 285 common fallocate __x64_sys_fallocate 297 + 286 common timerfd_settime __x64_sys_timerfd_settime 298 + 287 common timerfd_gettime __x64_sys_timerfd_gettime 299 + 288 common accept4 __x64_sys_accept4 300 + 289 common signalfd4 __x64_sys_signalfd4 301 + 290 common eventfd2 __x64_sys_eventfd2 302 + 291 common epoll_create1 __x64_sys_epoll_create1 303 + 292 common dup3 __x64_sys_dup3 304 + 293 common pipe2 __x64_sys_pipe2 305 + 294 common inotify_init1 __x64_sys_inotify_init1 306 + 295 64 preadv __x64_sys_preadv 307 + 296 64 pwritev __x64_sys_pwritev 308 + 297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo 309 + 298 common perf_event_open 
__x64_sys_perf_event_open 310 + 299 64 recvmmsg __x64_sys_recvmmsg 311 + 300 common fanotify_init __x64_sys_fanotify_init 312 + 301 common fanotify_mark __x64_sys_fanotify_mark 313 + 302 common prlimit64 __x64_sys_prlimit64 314 + 303 common name_to_handle_at __x64_sys_name_to_handle_at 315 + 304 common open_by_handle_at __x64_sys_open_by_handle_at 316 + 305 common clock_adjtime __x64_sys_clock_adjtime 317 + 306 common syncfs __x64_sys_syncfs 318 + 307 64 sendmmsg __x64_sys_sendmmsg 319 + 308 common setns __x64_sys_setns 320 + 309 common getcpu __x64_sys_getcpu 321 + 310 64 process_vm_readv __x64_sys_process_vm_readv 322 + 311 64 process_vm_writev __x64_sys_process_vm_writev 323 + 312 common kcmp __x64_sys_kcmp 324 + 313 common finit_module __x64_sys_finit_module 325 + 314 common sched_setattr __x64_sys_sched_setattr 326 + 315 common sched_getattr __x64_sys_sched_getattr 327 + 316 common renameat2 __x64_sys_renameat2 328 + 317 common seccomp __x64_sys_seccomp 329 + 318 common getrandom __x64_sys_getrandom 330 + 319 common memfd_create __x64_sys_memfd_create 331 + 320 common kexec_file_load __x64_sys_kexec_file_load 332 + 321 common bpf __x64_sys_bpf 333 + 322 64 execveat __x64_sys_execveat/ptregs 334 + 323 common userfaultfd __x64_sys_userfaultfd 335 + 324 common membarrier __x64_sys_membarrier 336 + 325 common mlock2 __x64_sys_mlock2 337 + 326 common copy_file_range __x64_sys_copy_file_range 338 + 327 64 preadv2 __x64_sys_preadv2 339 + 328 64 pwritev2 __x64_sys_pwritev2 340 + 329 common pkey_mprotect __x64_sys_pkey_mprotect 341 + 330 common pkey_alloc __x64_sys_pkey_alloc 342 + 331 common pkey_free __x64_sys_pkey_free 343 + 332 common statx __x64_sys_statx 342 344 343 345 # 344 346 # x32-specific system call numbers start at 512 to avoid cache impact 345 - # for native 64-bit operation. 347 + # for native 64-bit operation. The __x32_compat_sys stubs are created 348 + # on-the-fly for compat_sys_*() compatibility system calls if X86_X32 349 + # is defined. 
346 350 # 347 - 512 x32 rt_sigaction compat_sys_rt_sigaction 351 + 512 x32 rt_sigaction __x32_compat_sys_rt_sigaction 348 352 513 x32 rt_sigreturn sys32_x32_rt_sigreturn 349 - 514 x32 ioctl compat_sys_ioctl 350 - 515 x32 readv compat_sys_readv 351 - 516 x32 writev compat_sys_writev 352 - 517 x32 recvfrom compat_sys_recvfrom 353 - 518 x32 sendmsg compat_sys_sendmsg 354 - 519 x32 recvmsg compat_sys_recvmsg 355 - 520 x32 execve compat_sys_execve/ptregs 356 - 521 x32 ptrace compat_sys_ptrace 357 - 522 x32 rt_sigpending compat_sys_rt_sigpending 358 - 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait 359 - 524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo 360 - 525 x32 sigaltstack compat_sys_sigaltstack 361 - 526 x32 timer_create compat_sys_timer_create 362 - 527 x32 mq_notify compat_sys_mq_notify 363 - 528 x32 kexec_load compat_sys_kexec_load 364 - 529 x32 waitid compat_sys_waitid 365 - 530 x32 set_robust_list compat_sys_set_robust_list 366 - 531 x32 get_robust_list compat_sys_get_robust_list 367 - 532 x32 vmsplice compat_sys_vmsplice 368 - 533 x32 move_pages compat_sys_move_pages 369 - 534 x32 preadv compat_sys_preadv64 370 - 535 x32 pwritev compat_sys_pwritev64 371 - 536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 372 - 537 x32 recvmmsg compat_sys_recvmmsg 373 - 538 x32 sendmmsg compat_sys_sendmmsg 374 - 539 x32 process_vm_readv compat_sys_process_vm_readv 375 - 540 x32 process_vm_writev compat_sys_process_vm_writev 376 - 541 x32 setsockopt compat_sys_setsockopt 377 - 542 x32 getsockopt compat_sys_getsockopt 378 - 543 x32 io_setup compat_sys_io_setup 379 - 544 x32 io_submit compat_sys_io_submit 380 - 545 x32 execveat compat_sys_execveat/ptregs 381 - 546 x32 preadv2 compat_sys_preadv64v2 382 - 547 x32 pwritev2 compat_sys_pwritev64v2 353 + 514 x32 ioctl __x32_compat_sys_ioctl 354 + 515 x32 readv __x32_compat_sys_readv 355 + 516 x32 writev __x32_compat_sys_writev 356 + 517 x32 recvfrom __x32_compat_sys_recvfrom 357 + 518 x32 sendmsg __x32_compat_sys_sendmsg 358 
+ 519 x32 recvmsg __x32_compat_sys_recvmsg 359 + 520 x32 execve __x32_compat_sys_execve/ptregs 360 + 521 x32 ptrace __x32_compat_sys_ptrace 361 + 522 x32 rt_sigpending __x32_compat_sys_rt_sigpending 362 + 523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait 363 + 524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo 364 + 525 x32 sigaltstack __x32_compat_sys_sigaltstack 365 + 526 x32 timer_create __x32_compat_sys_timer_create 366 + 527 x32 mq_notify __x32_compat_sys_mq_notify 367 + 528 x32 kexec_load __x32_compat_sys_kexec_load 368 + 529 x32 waitid __x32_compat_sys_waitid 369 + 530 x32 set_robust_list __x32_compat_sys_set_robust_list 370 + 531 x32 get_robust_list __x32_compat_sys_get_robust_list 371 + 532 x32 vmsplice __x32_compat_sys_vmsplice 372 + 533 x32 move_pages __x32_compat_sys_move_pages 373 + 534 x32 preadv __x32_compat_sys_preadv64 374 + 535 x32 pwritev __x32_compat_sys_pwritev64 375 + 536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo 376 + 537 x32 recvmmsg __x32_compat_sys_recvmmsg 377 + 538 x32 sendmmsg __x32_compat_sys_sendmmsg 378 + 539 x32 process_vm_readv __x32_compat_sys_process_vm_readv 379 + 540 x32 process_vm_writev __x32_compat_sys_process_vm_writev 380 + 541 x32 setsockopt __x32_compat_sys_setsockopt 381 + 542 x32 getsockopt __x32_compat_sys_getsockopt 382 + 543 x32 io_setup __x32_compat_sys_io_setup 383 + 544 x32 io_submit __x32_compat_sys_io_submit 384 + 545 x32 execveat __x32_compat_sys_execveat/ptregs 385 + 546 x32 preadv2 __x32_compat_sys_preadv64v2 386 + 547 x32 pwritev2 __x32_compat_sys_pwritev64v2
+1 -1
tools/perf/builtin-help.c
··· 439 439 #ifdef HAVE_LIBELF_SUPPORT 440 440 "probe", 441 441 #endif 442 - #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 442 + #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) 443 443 "trace", 444 444 #endif 445 445 NULL };
+2 -2
tools/perf/builtin-mem.c
··· 83 83 }; 84 84 85 85 argc = parse_options(argc, argv, options, record_mem_usage, 86 - PARSE_OPT_STOP_AT_NON_OPTION); 86 + PARSE_OPT_KEEP_UNKNOWN); 87 87 88 88 rec_argc = argc + 9; /* max number of arguments */ 89 89 rec_argv = calloc(rec_argc + 1, sizeof(char *)); ··· 436 436 } 437 437 438 438 argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, 439 - mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); 439 + mem_usage, PARSE_OPT_KEEP_UNKNOWN); 440 440 441 441 if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) 442 442 usage_with_options(mem_usage, mem_options);
+6 -3
tools/perf/builtin-script.c
··· 657 657 break; 658 658 case PERF_RECORD_SWITCH: 659 659 case PERF_RECORD_SWITCH_CPU_WIDE: 660 - if (has(SWITCH_OUT)) 660 + if (has(SWITCH_OUT)) { 661 661 ret += fprintf(fp, "S"); 662 + if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) 663 + ret += fprintf(fp, "p"); 664 + } 662 665 default: 663 666 break; 664 667 } ··· 2804 2801 for_each_lang(scripts_path, scripts_dir, lang_dirent) { 2805 2802 scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, 2806 2803 lang_dirent->d_name); 2807 - #ifdef NO_LIBPERL 2804 + #ifndef HAVE_LIBPERL_SUPPORT 2808 2805 if (strstr(lang_path, "perl")) 2809 2806 continue; 2810 2807 #endif 2811 - #ifdef NO_LIBPYTHON 2808 + #ifndef HAVE_LIBPYTHON_SUPPORT 2812 2809 if (strstr(lang_path, "python")) 2813 2810 continue; 2814 2811 #endif
+2 -12
tools/perf/builtin-stat.c
··· 1943 1943 OPT_STRING(0, "post", &post_cmd, "command", 1944 1944 "command to run after to the measured command"), 1945 1945 OPT_UINTEGER('I', "interval-print", &stat_config.interval, 1946 - "print counts at regular interval in ms (>= 10)"), 1946 + "print counts at regular interval in ms " 1947 + "(overhead is possible for values <= 100ms)"), 1947 1948 OPT_INTEGER(0, "interval-count", &stat_config.times, 1948 1949 "print counts for fixed number of times"), 1949 1950 OPT_UINTEGER(0, "timeout", &stat_config.timeout, ··· 2922 2921 goto out; 2923 2922 } 2924 2923 } 2925 - } 2926 - 2927 - if (interval && interval < 100) { 2928 - if (interval < 10) { 2929 - pr_err("print interval must be >= 10ms\n"); 2930 - parse_options_usage(stat_usage, stat_options, "I", 1); 2931 - goto out; 2932 - } else 2933 - pr_warning("print interval < 100ms. " 2934 - "The overhead percentage could be high in some cases. " 2935 - "Please proceed with caution.\n"); 2936 2924 } 2937 2925 2938 2926 if (stat_config.times && interval)
+3
tools/perf/builtin-version.c
··· 60 60 STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); 61 61 STATUS(HAVE_GLIBC_SUPPORT, glibc); 62 62 STATUS(HAVE_GTK2_SUPPORT, gtk2); 63 + #ifndef HAVE_SYSCALL_TABLE_SUPPORT 63 64 STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); 65 + #endif 66 + STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table); 64 67 STATUS(HAVE_LIBBFD_SUPPORT, libbfd); 65 68 STATUS(HAVE_LIBELF_SUPPORT, libelf); 66 69 STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
+2 -2
tools/perf/perf.c
··· 73 73 { "lock", cmd_lock, 0 }, 74 74 { "kvm", cmd_kvm, 0 }, 75 75 { "test", cmd_test, 0 }, 76 - #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 76 + #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) 77 77 { "trace", cmd_trace, 0 }, 78 78 #endif 79 79 { "inject", cmd_inject, 0 }, ··· 491 491 argv[0] = cmd; 492 492 } 493 493 if (strstarts(cmd, "trace")) { 494 - #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) 494 + #if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) 495 495 setup_path(); 496 496 argv[0] = "trace"; 497 497 return cmd_trace(argc, argv);
+1 -1
tools/perf/tests/bpf-script-example.c
··· 31 31 .max_entries = 1, 32 32 }; 33 33 34 - SEC("func=SyS_epoll_pwait") 34 + SEC("func=do_epoll_wait") 35 35 int bpf_func__SyS_epoll_pwait(void *ctx) 36 36 { 37 37 int ind =0;
-1
tools/perf/tests/bpf-script-test-kbuild.c
··· 9 9 #define SEC(NAME) __attribute__((section(NAME), used)) 10 10 11 11 #include <uapi/linux/fs.h> 12 - #include <uapi/asm/ptrace.h> 13 12 14 13 SEC("func=vfs_llseek") 15 14 int bpf_func__vfs_llseek(void *ctx)
+1
tools/perf/tests/builtin-test.c
··· 118 118 { 119 119 .desc = "Breakpoint accounting", 120 120 .func = test__bp_accounting, 121 + .is_supported = test__bp_signal_is_supported, 121 122 }, 122 123 { 123 124 .desc = "Number of exit events of a simple workload",
+1 -1
tools/perf/tests/mmap-basic.c
··· 75 75 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 76 76 evsels[i] = perf_evsel__newtp("syscalls", name); 77 77 if (IS_ERR(evsels[i])) { 78 - pr_debug("perf_evsel__new\n"); 78 + pr_debug("perf_evsel__new(%s)\n", name); 79 79 goto out_delete_evlist; 80 80 } 81 81
+3
tools/perf/trace/beauty/mmap.c
··· 54 54 P_MMAP_FLAG(EXECUTABLE); 55 55 P_MMAP_FLAG(FILE); 56 56 P_MMAP_FLAG(FIXED); 57 + #ifdef MAP_FIXED_NOREPLACE 58 + P_MMAP_FLAG(FIXED_NOREPLACE); 59 + #endif 57 60 P_MMAP_FLAG(GROWSDOWN); 58 61 P_MMAP_FLAG(HUGETLB); 59 62 P_MMAP_FLAG(LOCKED);
+5
tools/perf/ui/browsers/annotate.c
··· 692 692 "J Toggle showing number of jump sources on targets\n" 693 693 "n Search next string\n" 694 694 "o Toggle disassembler output/simplified view\n" 695 + "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" 695 696 "s Toggle source code view\n" 696 697 "t Circulate percent, total period, samples view\n" 697 698 "/ Search string\n" ··· 719 718 case 'o': 720 719 notes->options->use_offset = !notes->options->use_offset; 721 720 annotation__update_column_widths(notes); 721 + continue; 722 + case 'O': 723 + if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) 724 + notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; 722 725 continue; 723 726 case 'j': 724 727 notes->options->jump_arrows = !notes->options->jump_arrows;
+1 -1
tools/perf/ui/browsers/hists.c
··· 2714 2714 "h/?/F1 Show this window\n" \ 2715 2715 "UP/DOWN/PGUP\n" \ 2716 2716 "PGDN/SPACE Navigate\n" \ 2717 - "q/ESC/CTRL+C Exit browser\n\n" \ 2717 + "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \ 2718 2718 "For multiple event sessions:\n\n" \ 2719 2719 "TAB/UNTAB Switch events\n\n" \ 2720 2720 "For symbolic views (--sort has sym):\n\n" \
+21 -5
tools/perf/util/annotate.c
··· 46 46 struct annotation_options annotation__default_options = { 47 47 .use_offset = true, 48 48 .jump_arrows = true, 49 + .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, 49 50 }; 50 51 51 52 const char *disassembler_style; ··· 2513 2512 if (!notes->options->use_offset) { 2514 2513 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); 2515 2514 } else { 2516 - if (al->jump_sources) { 2515 + if (al->jump_sources && 2516 + notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { 2517 2517 if (notes->options->show_nr_jumps) { 2518 2518 int prev; 2519 2519 printed = scnprintf(bf, sizeof(bf), "%*d ", ··· 2525 2523 obj__printf(obj, bf); 2526 2524 obj__set_color(obj, prev); 2527 2525 } 2528 - 2526 + print_addr: 2529 2527 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", 2530 2528 notes->widths.target, addr); 2529 + } else if (ins__is_call(&disasm_line(al)->ins) && 2530 + notes->options->offset_level >= ANNOTATION__OFFSET_CALL) { 2531 + goto print_addr; 2532 + } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { 2533 + goto print_addr; 2531 2534 } else { 2532 2535 printed = scnprintf(bf, sizeof(bf), "%-*s ", 2533 2536 notes->widths.addr, " "); ··· 2649 2642 */ 2650 2643 static struct annotation_config { 2651 2644 const char *name; 2652 - bool *value; 2645 + void *value; 2653 2646 } annotation__configs[] = { 2654 2647 ANNOTATION__CFG(hide_src_code), 2655 2648 ANNOTATION__CFG(jump_arrows), 2649 + ANNOTATION__CFG(offset_level), 2656 2650 ANNOTATION__CFG(show_linenr), 2657 2651 ANNOTATION__CFG(show_nr_jumps), 2658 2652 ANNOTATION__CFG(show_nr_samples), ··· 2685 2677 2686 2678 if (cfg == NULL) 2687 2679 pr_debug("%s variable unknown, ignoring...", var); 2688 - else 2689 - *cfg->value = perf_config_bool(name, value); 2680 + else if (strcmp(var, "annotate.offset_level") == 0) { 2681 + perf_config_int(cfg->value, name, value); 2682 + 2683 + if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL) 2684 + *(int *)cfg->value = 
ANNOTATION__MAX_OFFSET_LEVEL; 2685 + else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL) 2686 + *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL; 2687 + } else { 2688 + *(bool *)cfg->value = perf_config_bool(name, value); 2689 + } 2690 2690 return 0; 2691 2691 } 2692 2692
+9
tools/perf/util/annotate.h
··· 70 70 show_nr_jumps, 71 71 show_nr_samples, 72 72 show_total_period; 73 + u8 offset_level; 73 74 }; 75 + 76 + enum { 77 + ANNOTATION__OFFSET_JUMP_TARGETS = 1, 78 + ANNOTATION__OFFSET_CALL, 79 + ANNOTATION__MAX_OFFSET_LEVEL, 80 + }; 81 + 82 + #define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS 74 83 75 84 extern struct annotation_options annotation__default_options; 76 85
+1 -2
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 - * SPDX-License-Identifier: GPL-2.0 3 - * 4 3 * Copyright(C) 2015-2018 Linaro Limited. 5 4 * 6 5 * Author: Tor Jeremiassen <tor@ti.com>
+1 -2
tools/perf/util/cs-etm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 - * SPDX-License-Identifier: GPL-2.0 3 - * 4 3 * Copyright(C) 2015-2018 Linaro Limited. 5 4 * 6 5 * Author: Tor Jeremiassen <tor@ti.com>
+1 -12
tools/perf/util/cs-etm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 1 2 /* 2 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 3 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 - * 5 - * This program is free software; you can redistribute it and/or modify it 6 - * under the terms of the GNU General Public License version 2 as published by 7 - * the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, but WITHOUT 10 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 - * more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along with 15 - * this program. If not, see <http://www.gnu.org/licenses/>. 16 5 */ 17 6 18 7 #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
+3 -1
tools/perf/util/event.c
··· 1421 1421 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) 1422 1422 { 1423 1423 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 1424 - const char *in_out = out ? "OUT" : "IN "; 1424 + const char *in_out = !out ? "IN " : 1425 + !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ? 1426 + "OUT " : "OUT preempt"; 1425 1427 1426 1428 if (event->header.type == PERF_RECORD_SWITCH) 1427 1429 return fprintf(fp, " %s\n", in_out);
+2 -4
tools/perf/util/evsel.c
··· 2870 2870 #if defined(__i386__) || defined(__x86_64__) 2871 2871 if (evsel->attr.type == PERF_TYPE_HARDWARE) 2872 2872 return scnprintf(msg, size, "%s", 2873 - "No hardware sampling interrupt available.\n" 2874 - "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); 2873 + "No hardware sampling interrupt available.\n"); 2875 2874 #endif 2876 2875 break; 2877 2876 case EBUSY: ··· 2893 2894 2894 2895 return scnprintf(msg, size, 2895 2896 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" 2896 - "/bin/dmesg may provide additional information.\n" 2897 - "No CONFIG_PERF_EVENTS=y kernel support configured?", 2897 + "/bin/dmesg | grep -i perf may provide additional information.\n", 2898 2898 err, str_error_r(err, sbuf, sizeof(sbuf)), 2899 2899 perf_evsel__name(evsel)); 2900 2900 }
+1 -1
tools/perf/util/generate-cmdlist.sh
··· 38 38 done 39 39 echo "#endif /* HAVE_LIBELF_SUPPORT */" 40 40 41 - echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" 41 + echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)" 42 42 sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | 43 43 sort | 44 44 while read cmd
+2 -1
tools/perf/util/header.c
··· 1320 1320 1321 1321 dir = opendir(path); 1322 1322 if (!dir) { 1323 - pr_warning("failed: can't open node sysfs data\n"); 1323 + pr_debug2("%s: could't read %s, does this arch have topology information?\n", 1324 + __func__, path); 1324 1325 return -1; 1325 1326 } 1326 1327
+6
tools/perf/util/pmu.c
··· 562 562 if (stat(path, &st) == 0) 563 563 return 1; 564 564 565 + /* Look for cpu sysfs (specific to s390) */ 566 + scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s", 567 + sysfs, name); 568 + if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5)) 569 + return 1; 570 + 565 571 return 0; 566 572 } 567 573
+3 -5
tools/perf/util/symbol.c
··· 2091 2091 2092 2092 int symbol__annotation_init(void) 2093 2093 { 2094 + if (symbol_conf.init_annotation) 2095 + return 0; 2096 + 2094 2097 if (symbol_conf.initialized) { 2095 2098 pr_err("Annotation needs to be init before symbol__init()\n"); 2096 2099 return -1; 2097 - } 2098 - 2099 - if (symbol_conf.init_annotation) { 2100 - pr_warning("Annotation being initialized multiple times\n"); 2101 - return 0; 2102 2100 } 2103 2101 2104 2102 symbol_conf.priv_size += sizeof(struct annotation);
+3 -3
tools/perf/util/syscalltbl.c
··· 17 17 #include <stdlib.h> 18 18 #include <linux/compiler.h> 19 19 20 - #ifdef HAVE_SYSCALL_TABLE 20 + #ifdef HAVE_SYSCALL_TABLE_SUPPORT 21 21 #include <string.h> 22 22 #include "string2.h" 23 23 #include "util.h" ··· 139 139 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); 140 140 } 141 141 142 - #else /* HAVE_SYSCALL_TABLE */ 142 + #else /* HAVE_SYSCALL_TABLE_SUPPORT */ 143 143 144 144 #include <libaudit.h> 145 145 ··· 176 176 { 177 177 return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); 178 178 } 179 - #endif /* HAVE_SYSCALL_TABLE */ 179 + #endif /* HAVE_SYSCALL_TABLE_SUPPORT */
+2 -2
tools/perf/util/trace-event-scripting.c
··· 98 98 } 99 99 } 100 100 101 - #ifdef NO_LIBPYTHON 101 + #ifndef HAVE_LIBPYTHON_SUPPORT 102 102 void setup_python_scripting(void) 103 103 { 104 104 register_python_scripting(&python_scripting_unsupported_ops); ··· 161 161 } 162 162 } 163 163 164 - #ifdef NO_LIBPERL 164 + #ifndef HAVE_LIBPERL_SUPPORT 165 165 void setup_perl_scripting(void) 166 166 { 167 167 register_perl_scripting(&perl_scripting_unsupported_ops);
+65 -19
tools/testing/nvdimm/test/nfit.c
··· 138 138 }; 139 139 140 140 static unsigned long dimm_fail_cmd_flags[NUM_DCR]; 141 + static int dimm_fail_cmd_code[NUM_DCR]; 141 142 142 143 struct nfit_test_fw { 143 144 enum intel_fw_update_state state; ··· 893 892 if (i >= ARRAY_SIZE(handle)) 894 893 return -ENXIO; 895 894 896 - if ((1 << func) & dimm_fail_cmd_flags[i]) 895 + if ((1 << func) & dimm_fail_cmd_flags[i]) { 896 + if (dimm_fail_cmd_code[i]) 897 + return dimm_fail_cmd_code[i]; 897 898 return -EIO; 899 + } 898 900 899 901 return i; 900 902 } ··· 1166 1162 1167 1163 static void put_dimms(void *data) 1168 1164 { 1169 - struct device **dimm_dev = data; 1165 + struct nfit_test *t = data; 1170 1166 int i; 1171 1167 1172 - for (i = 0; i < NUM_DCR; i++) 1173 - if (dimm_dev[i]) 1174 - device_unregister(dimm_dev[i]); 1168 + for (i = 0; i < t->num_dcr; i++) 1169 + if (t->dimm_dev[i]) 1170 + device_unregister(t->dimm_dev[i]); 1175 1171 } 1176 1172 1177 1173 static struct class *nfit_test_dimm; ··· 1180 1176 { 1181 1177 int dimm; 1182 1178 1183 - if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 1184 - || dimm >= NUM_DCR || dimm < 0) 1179 + if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) 1185 1180 return -ENXIO; 1186 1181 return dimm; 1187 1182 } 1188 - 1189 1183 1190 1184 static ssize_t handle_show(struct device *dev, struct device_attribute *attr, 1191 1185 char *buf) ··· 1193 1191 if (dimm < 0) 1194 1192 return dimm; 1195 1193 1196 - return sprintf(buf, "%#x", handle[dimm]); 1194 + return sprintf(buf, "%#x\n", handle[dimm]); 1197 1195 } 1198 1196 DEVICE_ATTR_RO(handle); 1199 1197 ··· 1227 1225 } 1228 1226 static DEVICE_ATTR_RW(fail_cmd); 1229 1227 1228 + static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, 1229 + char *buf) 1230 + { 1231 + int dimm = dimm_name_to_id(dev); 1232 + 1233 + if (dimm < 0) 1234 + return dimm; 1235 + 1236 + return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); 1237 + } 1238 + 1239 + static ssize_t fail_cmd_code_store(struct device *dev, 
struct device_attribute *attr, 1240 + const char *buf, size_t size) 1241 + { 1242 + int dimm = dimm_name_to_id(dev); 1243 + unsigned long val; 1244 + ssize_t rc; 1245 + 1246 + if (dimm < 0) 1247 + return dimm; 1248 + 1249 + rc = kstrtol(buf, 0, &val); 1250 + if (rc) 1251 + return rc; 1252 + 1253 + dimm_fail_cmd_code[dimm] = val; 1254 + return size; 1255 + } 1256 + static DEVICE_ATTR_RW(fail_cmd_code); 1257 + 1230 1258 static struct attribute *nfit_test_dimm_attributes[] = { 1231 1259 &dev_attr_fail_cmd.attr, 1260 + &dev_attr_fail_cmd_code.attr, 1232 1261 &dev_attr_handle.attr, 1233 1262 NULL, 1234 1263 }; ··· 1272 1239 &nfit_test_dimm_attribute_group, 1273 1240 NULL, 1274 1241 }; 1242 + 1243 + static int nfit_test_dimm_init(struct nfit_test *t) 1244 + { 1245 + int i; 1246 + 1247 + if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) 1248 + return -ENOMEM; 1249 + for (i = 0; i < t->num_dcr; i++) { 1250 + t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, 1251 + &t->pdev.dev, 0, NULL, 1252 + nfit_test_dimm_attribute_groups, 1253 + "test_dimm%d", i + t->dcr_idx); 1254 + if (!t->dimm_dev[i]) 1255 + return -ENOMEM; 1256 + } 1257 + return 0; 1258 + } 1275 1259 1276 1260 static void smart_init(struct nfit_test *t) 1277 1261 { ··· 1385 1335 if (!t->_fit) 1386 1336 return -ENOMEM; 1387 1337 1388 - if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) 1338 + if (nfit_test_dimm_init(t)) 1389 1339 return -ENOMEM; 1390 - for (i = 0; i < NUM_DCR; i++) { 1391 - t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, 1392 - &t->pdev.dev, 0, NULL, 1393 - nfit_test_dimm_attribute_groups, 1394 - "test_dimm%d", i); 1395 - if (!t->dimm_dev[i]) 1396 - return -ENOMEM; 1397 - } 1398 - 1399 1340 smart_init(t); 1400 1341 return ars_state_init(&t->pdev.dev, &t->ars_state); 1401 1342 } ··· 1418 1377 if (!t->spa_set[1]) 1419 1378 return -ENOMEM; 1420 1379 1380 + if (nfit_test_dimm_init(t)) 1381 + return -ENOMEM; 1421 1382 smart_init(t); 1422 1383 return 
ars_state_init(&t->pdev.dev, &t->ars_state); 1423 1384 } ··· 2265 2222 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 2266 2223 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); 2267 2224 set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); 2225 + set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); 2226 + set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 2227 + set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); 2268 2228 } 2269 2229 2270 2230 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
+3 -5
tools/testing/selftests/filesystems/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - TEST_PROGS := dnotify_test devpts_pts 3 - all: $(TEST_PROGS) 2 + 3 + TEST_GEN_PROGS := devpts_pts 4 + TEST_GEN_PROGS_EXTENDED := dnotify_test 4 5 5 6 include ../lib.mk 6 - 7 - clean: 8 - rm -fr $(TEST_PROGS)
+3 -2
tools/testing/selftests/kvm/Makefile
··· 4 4 UNAME_M := $(shell uname -m) 5 5 6 6 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c 7 - LIBKVM_x86_64 = lib/x86.c 7 + LIBKVM_x86_64 = lib/x86.c lib/vmx.c 8 8 9 9 TEST_GEN_PROGS_x86_64 = set_sregs_test 10 10 TEST_GEN_PROGS_x86_64 += sync_regs_test 11 + TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test 11 12 12 13 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) 13 14 LIBKVM += $(LIBKVM_$(UNAME_M)) 14 15 15 16 INSTALL_HDR_PATH = $(top_srcdir)/usr 16 17 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 17 - CFLAGS += -O2 -g -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) 18 + CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) 18 19 19 20 # After inclusion, $(OUTPUT) is defined and 20 21 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
+9 -6
tools/testing/selftests/kvm/include/kvm_util.h
··· 112 112 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, 113 113 vm_paddr_t paddr_min, uint32_t memslot); 114 114 115 - void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid); 115 + struct kvm_cpuid2 *kvm_get_supported_cpuid(void); 116 116 void vcpu_set_cpuid( 117 117 struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid); 118 118 119 - struct kvm_cpuid2 *allocate_kvm_cpuid2(void); 120 119 struct kvm_cpuid_entry2 * 121 - find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, 122 - uint32_t index); 120 + kvm_get_supported_cpuid_index(uint32_t function, uint32_t index); 123 121 124 122 static inline struct kvm_cpuid_entry2 * 125 - find_cpuid_entry(struct kvm_cpuid2 *cpuid, uint32_t function) 123 + kvm_get_supported_cpuid_entry(uint32_t function) 126 124 { 127 - return find_cpuid_index_entry(cpuid, function, 0); 125 + return kvm_get_supported_cpuid_index(function, 0); 128 126 } 129 127 130 128 struct kvm_vm *vm_create_default(uint32_t vcpuid, void *guest_code); 131 129 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code); 130 + 131 + typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr, 132 + vm_paddr_t vmxon_paddr, 133 + vm_vaddr_t vmcs_vaddr, 134 + vm_paddr_t vmcs_paddr); 132 135 133 136 struct kvm_userspace_memory_region * 134 137 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
+494
tools/testing/selftests/kvm/include/vmx.h
··· 1 + /* 2 + * tools/testing/selftests/kvm/include/vmx.h 3 + * 4 + * Copyright (C) 2018, Google LLC. 5 + * 6 + * This work is licensed under the terms of the GNU GPL, version 2. 7 + * 8 + */ 9 + 10 + #ifndef SELFTEST_KVM_VMX_H 11 + #define SELFTEST_KVM_VMX_H 12 + 13 + #include <stdint.h> 14 + #include "x86.h" 15 + 16 + #define CPUID_VMX_BIT 5 17 + 18 + #define CPUID_VMX (1 << 5) 19 + 20 + /* 21 + * Definitions of Primary Processor-Based VM-Execution Controls. 22 + */ 23 + #define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 24 + #define CPU_BASED_USE_TSC_OFFSETING 0x00000008 25 + #define CPU_BASED_HLT_EXITING 0x00000080 26 + #define CPU_BASED_INVLPG_EXITING 0x00000200 27 + #define CPU_BASED_MWAIT_EXITING 0x00000400 28 + #define CPU_BASED_RDPMC_EXITING 0x00000800 29 + #define CPU_BASED_RDTSC_EXITING 0x00001000 30 + #define CPU_BASED_CR3_LOAD_EXITING 0x00008000 31 + #define CPU_BASED_CR3_STORE_EXITING 0x00010000 32 + #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 33 + #define CPU_BASED_CR8_STORE_EXITING 0x00100000 34 + #define CPU_BASED_TPR_SHADOW 0x00200000 35 + #define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 36 + #define CPU_BASED_MOV_DR_EXITING 0x00800000 37 + #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 38 + #define CPU_BASED_USE_IO_BITMAPS 0x02000000 39 + #define CPU_BASED_MONITOR_TRAP 0x08000000 40 + #define CPU_BASED_USE_MSR_BITMAPS 0x10000000 41 + #define CPU_BASED_MONITOR_EXITING 0x20000000 42 + #define CPU_BASED_PAUSE_EXITING 0x40000000 43 + #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 44 + 45 + #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 46 + 47 + /* 48 + * Definitions of Secondary Processor-Based VM-Execution Controls. 
49 + */ 50 + #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 51 + #define SECONDARY_EXEC_ENABLE_EPT 0x00000002 52 + #define SECONDARY_EXEC_DESC 0x00000004 53 + #define SECONDARY_EXEC_RDTSCP 0x00000008 54 + #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010 55 + #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 56 + #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 57 + #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 58 + #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100 59 + #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200 60 + #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 61 + #define SECONDARY_EXEC_RDRAND_EXITING 0x00000800 62 + #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 63 + #define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000 64 + #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000 65 + #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000 66 + #define SECONDARY_EXEC_ENABLE_PML 0x00020000 67 + #define SECONDARY_EPT_VE 0x00040000 68 + #define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000 69 + #define SECONDARY_EXEC_TSC_SCALING 0x02000000 70 + 71 + #define PIN_BASED_EXT_INTR_MASK 0x00000001 72 + #define PIN_BASED_NMI_EXITING 0x00000008 73 + #define PIN_BASED_VIRTUAL_NMIS 0x00000020 74 + #define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040 75 + #define PIN_BASED_POSTED_INTR 0x00000080 76 + 77 + #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 78 + 79 + #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 80 + #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 81 + #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 82 + #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 83 + #define VM_EXIT_SAVE_IA32_PAT 0x00040000 84 + #define VM_EXIT_LOAD_IA32_PAT 0x00080000 85 + #define VM_EXIT_SAVE_IA32_EFER 0x00100000 86 + #define VM_EXIT_LOAD_IA32_EFER 0x00200000 87 + #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000 88 + 89 + #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff 90 + 91 + #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 92 + #define 
VM_ENTRY_IA32E_MODE 0x00000200 93 + #define VM_ENTRY_SMM 0x00000400 94 + #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 95 + #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000 96 + #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 97 + #define VM_ENTRY_LOAD_IA32_EFER 0x00008000 98 + 99 + #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff 100 + 101 + #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f 102 + #define VMX_MISC_SAVE_EFER_LMA 0x00000020 103 + 104 + #define EXIT_REASON_FAILED_VMENTRY 0x80000000 105 + #define EXIT_REASON_EXCEPTION_NMI 0 106 + #define EXIT_REASON_EXTERNAL_INTERRUPT 1 107 + #define EXIT_REASON_TRIPLE_FAULT 2 108 + #define EXIT_REASON_PENDING_INTERRUPT 7 109 + #define EXIT_REASON_NMI_WINDOW 8 110 + #define EXIT_REASON_TASK_SWITCH 9 111 + #define EXIT_REASON_CPUID 10 112 + #define EXIT_REASON_HLT 12 113 + #define EXIT_REASON_INVD 13 114 + #define EXIT_REASON_INVLPG 14 115 + #define EXIT_REASON_RDPMC 15 116 + #define EXIT_REASON_RDTSC 16 117 + #define EXIT_REASON_VMCALL 18 118 + #define EXIT_REASON_VMCLEAR 19 119 + #define EXIT_REASON_VMLAUNCH 20 120 + #define EXIT_REASON_VMPTRLD 21 121 + #define EXIT_REASON_VMPTRST 22 122 + #define EXIT_REASON_VMREAD 23 123 + #define EXIT_REASON_VMRESUME 24 124 + #define EXIT_REASON_VMWRITE 25 125 + #define EXIT_REASON_VMOFF 26 126 + #define EXIT_REASON_VMON 27 127 + #define EXIT_REASON_CR_ACCESS 28 128 + #define EXIT_REASON_DR_ACCESS 29 129 + #define EXIT_REASON_IO_INSTRUCTION 30 130 + #define EXIT_REASON_MSR_READ 31 131 + #define EXIT_REASON_MSR_WRITE 32 132 + #define EXIT_REASON_INVALID_STATE 33 133 + #define EXIT_REASON_MWAIT_INSTRUCTION 36 134 + #define EXIT_REASON_MONITOR_INSTRUCTION 39 135 + #define EXIT_REASON_PAUSE_INSTRUCTION 40 136 + #define EXIT_REASON_MCE_DURING_VMENTRY 41 137 + #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 138 + #define EXIT_REASON_APIC_ACCESS 44 139 + #define EXIT_REASON_EOI_INDUCED 45 140 + #define EXIT_REASON_EPT_VIOLATION 48 141 + #define EXIT_REASON_EPT_MISCONFIG 49 142 + 
#define EXIT_REASON_INVEPT 50 143 + #define EXIT_REASON_RDTSCP 51 144 + #define EXIT_REASON_PREEMPTION_TIMER 52 145 + #define EXIT_REASON_INVVPID 53 146 + #define EXIT_REASON_WBINVD 54 147 + #define EXIT_REASON_XSETBV 55 148 + #define EXIT_REASON_APIC_WRITE 56 149 + #define EXIT_REASON_INVPCID 58 150 + #define EXIT_REASON_PML_FULL 62 151 + #define EXIT_REASON_XSAVES 63 152 + #define EXIT_REASON_XRSTORS 64 153 + #define LAST_EXIT_REASON 64 154 + 155 + enum vmcs_field { 156 + VIRTUAL_PROCESSOR_ID = 0x00000000, 157 + POSTED_INTR_NV = 0x00000002, 158 + GUEST_ES_SELECTOR = 0x00000800, 159 + GUEST_CS_SELECTOR = 0x00000802, 160 + GUEST_SS_SELECTOR = 0x00000804, 161 + GUEST_DS_SELECTOR = 0x00000806, 162 + GUEST_FS_SELECTOR = 0x00000808, 163 + GUEST_GS_SELECTOR = 0x0000080a, 164 + GUEST_LDTR_SELECTOR = 0x0000080c, 165 + GUEST_TR_SELECTOR = 0x0000080e, 166 + GUEST_INTR_STATUS = 0x00000810, 167 + GUEST_PML_INDEX = 0x00000812, 168 + HOST_ES_SELECTOR = 0x00000c00, 169 + HOST_CS_SELECTOR = 0x00000c02, 170 + HOST_SS_SELECTOR = 0x00000c04, 171 + HOST_DS_SELECTOR = 0x00000c06, 172 + HOST_FS_SELECTOR = 0x00000c08, 173 + HOST_GS_SELECTOR = 0x00000c0a, 174 + HOST_TR_SELECTOR = 0x00000c0c, 175 + IO_BITMAP_A = 0x00002000, 176 + IO_BITMAP_A_HIGH = 0x00002001, 177 + IO_BITMAP_B = 0x00002002, 178 + IO_BITMAP_B_HIGH = 0x00002003, 179 + MSR_BITMAP = 0x00002004, 180 + MSR_BITMAP_HIGH = 0x00002005, 181 + VM_EXIT_MSR_STORE_ADDR = 0x00002006, 182 + VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, 183 + VM_EXIT_MSR_LOAD_ADDR = 0x00002008, 184 + VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, 185 + VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, 186 + VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, 187 + PML_ADDRESS = 0x0000200e, 188 + PML_ADDRESS_HIGH = 0x0000200f, 189 + TSC_OFFSET = 0x00002010, 190 + TSC_OFFSET_HIGH = 0x00002011, 191 + VIRTUAL_APIC_PAGE_ADDR = 0x00002012, 192 + VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, 193 + APIC_ACCESS_ADDR = 0x00002014, 194 + APIC_ACCESS_ADDR_HIGH = 0x00002015, 195 + POSTED_INTR_DESC_ADDR = 
0x00002016, 196 + POSTED_INTR_DESC_ADDR_HIGH = 0x00002017, 197 + EPT_POINTER = 0x0000201a, 198 + EPT_POINTER_HIGH = 0x0000201b, 199 + EOI_EXIT_BITMAP0 = 0x0000201c, 200 + EOI_EXIT_BITMAP0_HIGH = 0x0000201d, 201 + EOI_EXIT_BITMAP1 = 0x0000201e, 202 + EOI_EXIT_BITMAP1_HIGH = 0x0000201f, 203 + EOI_EXIT_BITMAP2 = 0x00002020, 204 + EOI_EXIT_BITMAP2_HIGH = 0x00002021, 205 + EOI_EXIT_BITMAP3 = 0x00002022, 206 + EOI_EXIT_BITMAP3_HIGH = 0x00002023, 207 + VMREAD_BITMAP = 0x00002026, 208 + VMREAD_BITMAP_HIGH = 0x00002027, 209 + VMWRITE_BITMAP = 0x00002028, 210 + VMWRITE_BITMAP_HIGH = 0x00002029, 211 + XSS_EXIT_BITMAP = 0x0000202C, 212 + XSS_EXIT_BITMAP_HIGH = 0x0000202D, 213 + TSC_MULTIPLIER = 0x00002032, 214 + TSC_MULTIPLIER_HIGH = 0x00002033, 215 + GUEST_PHYSICAL_ADDRESS = 0x00002400, 216 + GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, 217 + VMCS_LINK_POINTER = 0x00002800, 218 + VMCS_LINK_POINTER_HIGH = 0x00002801, 219 + GUEST_IA32_DEBUGCTL = 0x00002802, 220 + GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, 221 + GUEST_IA32_PAT = 0x00002804, 222 + GUEST_IA32_PAT_HIGH = 0x00002805, 223 + GUEST_IA32_EFER = 0x00002806, 224 + GUEST_IA32_EFER_HIGH = 0x00002807, 225 + GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, 226 + GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809, 227 + GUEST_PDPTR0 = 0x0000280a, 228 + GUEST_PDPTR0_HIGH = 0x0000280b, 229 + GUEST_PDPTR1 = 0x0000280c, 230 + GUEST_PDPTR1_HIGH = 0x0000280d, 231 + GUEST_PDPTR2 = 0x0000280e, 232 + GUEST_PDPTR2_HIGH = 0x0000280f, 233 + GUEST_PDPTR3 = 0x00002810, 234 + GUEST_PDPTR3_HIGH = 0x00002811, 235 + GUEST_BNDCFGS = 0x00002812, 236 + GUEST_BNDCFGS_HIGH = 0x00002813, 237 + HOST_IA32_PAT = 0x00002c00, 238 + HOST_IA32_PAT_HIGH = 0x00002c01, 239 + HOST_IA32_EFER = 0x00002c02, 240 + HOST_IA32_EFER_HIGH = 0x00002c03, 241 + HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, 242 + HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05, 243 + PIN_BASED_VM_EXEC_CONTROL = 0x00004000, 244 + CPU_BASED_VM_EXEC_CONTROL = 0x00004002, 245 + EXCEPTION_BITMAP = 0x00004004, 246 + 
PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, 247 + PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, 248 + CR3_TARGET_COUNT = 0x0000400a, 249 + VM_EXIT_CONTROLS = 0x0000400c, 250 + VM_EXIT_MSR_STORE_COUNT = 0x0000400e, 251 + VM_EXIT_MSR_LOAD_COUNT = 0x00004010, 252 + VM_ENTRY_CONTROLS = 0x00004012, 253 + VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, 254 + VM_ENTRY_INTR_INFO_FIELD = 0x00004016, 255 + VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, 256 + VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, 257 + TPR_THRESHOLD = 0x0000401c, 258 + SECONDARY_VM_EXEC_CONTROL = 0x0000401e, 259 + PLE_GAP = 0x00004020, 260 + PLE_WINDOW = 0x00004022, 261 + VM_INSTRUCTION_ERROR = 0x00004400, 262 + VM_EXIT_REASON = 0x00004402, 263 + VM_EXIT_INTR_INFO = 0x00004404, 264 + VM_EXIT_INTR_ERROR_CODE = 0x00004406, 265 + IDT_VECTORING_INFO_FIELD = 0x00004408, 266 + IDT_VECTORING_ERROR_CODE = 0x0000440a, 267 + VM_EXIT_INSTRUCTION_LEN = 0x0000440c, 268 + VMX_INSTRUCTION_INFO = 0x0000440e, 269 + GUEST_ES_LIMIT = 0x00004800, 270 + GUEST_CS_LIMIT = 0x00004802, 271 + GUEST_SS_LIMIT = 0x00004804, 272 + GUEST_DS_LIMIT = 0x00004806, 273 + GUEST_FS_LIMIT = 0x00004808, 274 + GUEST_GS_LIMIT = 0x0000480a, 275 + GUEST_LDTR_LIMIT = 0x0000480c, 276 + GUEST_TR_LIMIT = 0x0000480e, 277 + GUEST_GDTR_LIMIT = 0x00004810, 278 + GUEST_IDTR_LIMIT = 0x00004812, 279 + GUEST_ES_AR_BYTES = 0x00004814, 280 + GUEST_CS_AR_BYTES = 0x00004816, 281 + GUEST_SS_AR_BYTES = 0x00004818, 282 + GUEST_DS_AR_BYTES = 0x0000481a, 283 + GUEST_FS_AR_BYTES = 0x0000481c, 284 + GUEST_GS_AR_BYTES = 0x0000481e, 285 + GUEST_LDTR_AR_BYTES = 0x00004820, 286 + GUEST_TR_AR_BYTES = 0x00004822, 287 + GUEST_INTERRUPTIBILITY_INFO = 0x00004824, 288 + GUEST_ACTIVITY_STATE = 0X00004826, 289 + GUEST_SYSENTER_CS = 0x0000482A, 290 + VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, 291 + HOST_IA32_SYSENTER_CS = 0x00004c00, 292 + CR0_GUEST_HOST_MASK = 0x00006000, 293 + CR4_GUEST_HOST_MASK = 0x00006002, 294 + CR0_READ_SHADOW = 0x00006004, 295 + CR4_READ_SHADOW = 0x00006006, 296 + CR3_TARGET_VALUE0 = 
0x00006008, 297 + CR3_TARGET_VALUE1 = 0x0000600a, 298 + CR3_TARGET_VALUE2 = 0x0000600c, 299 + CR3_TARGET_VALUE3 = 0x0000600e, 300 + EXIT_QUALIFICATION = 0x00006400, 301 + GUEST_LINEAR_ADDRESS = 0x0000640a, 302 + GUEST_CR0 = 0x00006800, 303 + GUEST_CR3 = 0x00006802, 304 + GUEST_CR4 = 0x00006804, 305 + GUEST_ES_BASE = 0x00006806, 306 + GUEST_CS_BASE = 0x00006808, 307 + GUEST_SS_BASE = 0x0000680a, 308 + GUEST_DS_BASE = 0x0000680c, 309 + GUEST_FS_BASE = 0x0000680e, 310 + GUEST_GS_BASE = 0x00006810, 311 + GUEST_LDTR_BASE = 0x00006812, 312 + GUEST_TR_BASE = 0x00006814, 313 + GUEST_GDTR_BASE = 0x00006816, 314 + GUEST_IDTR_BASE = 0x00006818, 315 + GUEST_DR7 = 0x0000681a, 316 + GUEST_RSP = 0x0000681c, 317 + GUEST_RIP = 0x0000681e, 318 + GUEST_RFLAGS = 0x00006820, 319 + GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, 320 + GUEST_SYSENTER_ESP = 0x00006824, 321 + GUEST_SYSENTER_EIP = 0x00006826, 322 + HOST_CR0 = 0x00006c00, 323 + HOST_CR3 = 0x00006c02, 324 + HOST_CR4 = 0x00006c04, 325 + HOST_FS_BASE = 0x00006c06, 326 + HOST_GS_BASE = 0x00006c08, 327 + HOST_TR_BASE = 0x00006c0a, 328 + HOST_GDTR_BASE = 0x00006c0c, 329 + HOST_IDTR_BASE = 0x00006c0e, 330 + HOST_IA32_SYSENTER_ESP = 0x00006c10, 331 + HOST_IA32_SYSENTER_EIP = 0x00006c12, 332 + HOST_RSP = 0x00006c14, 333 + HOST_RIP = 0x00006c16, 334 + }; 335 + 336 + struct vmx_msr_entry { 337 + uint32_t index; 338 + uint32_t reserved; 339 + uint64_t value; 340 + } __attribute__ ((aligned(16))); 341 + 342 + static inline int vmxon(uint64_t phys) 343 + { 344 + uint8_t ret; 345 + 346 + __asm__ __volatile__ ("vmxon %[pa]; setna %[ret]" 347 + : [ret]"=rm"(ret) 348 + : [pa]"m"(phys) 349 + : "cc", "memory"); 350 + 351 + return ret; 352 + } 353 + 354 + static inline void vmxoff(void) 355 + { 356 + __asm__ __volatile__("vmxoff"); 357 + } 358 + 359 + static inline int vmclear(uint64_t vmcs_pa) 360 + { 361 + uint8_t ret; 362 + 363 + __asm__ __volatile__ ("vmclear %[pa]; setna %[ret]" 364 + : [ret]"=rm"(ret) 365 + : [pa]"m"(vmcs_pa) 366 + : "cc", 
"memory"); 367 + 368 + return ret; 369 + } 370 + 371 + static inline int vmptrld(uint64_t vmcs_pa) 372 + { 373 + uint8_t ret; 374 + 375 + __asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]" 376 + : [ret]"=rm"(ret) 377 + : [pa]"m"(vmcs_pa) 378 + : "cc", "memory"); 379 + 380 + return ret; 381 + } 382 + 383 + /* 384 + * No guest state (e.g. GPRs) is established by this vmlaunch. 385 + */ 386 + static inline int vmlaunch(void) 387 + { 388 + int ret; 389 + 390 + __asm__ __volatile__("push %%rbp;" 391 + "push %%rcx;" 392 + "push %%rdx;" 393 + "push %%rsi;" 394 + "push %%rdi;" 395 + "push $0;" 396 + "vmwrite %%rsp, %[host_rsp];" 397 + "lea 1f(%%rip), %%rax;" 398 + "vmwrite %%rax, %[host_rip];" 399 + "vmlaunch;" 400 + "incq (%%rsp);" 401 + "1: pop %%rax;" 402 + "pop %%rdi;" 403 + "pop %%rsi;" 404 + "pop %%rdx;" 405 + "pop %%rcx;" 406 + "pop %%rbp;" 407 + : [ret]"=&a"(ret) 408 + : [host_rsp]"r"((uint64_t)HOST_RSP), 409 + [host_rip]"r"((uint64_t)HOST_RIP) 410 + : "memory", "cc", "rbx", "r8", "r9", "r10", 411 + "r11", "r12", "r13", "r14", "r15"); 412 + return ret; 413 + } 414 + 415 + /* 416 + * No guest state (e.g. GPRs) is established by this vmresume. 
417 + */ 418 + static inline int vmresume(void) 419 + { 420 + int ret; 421 + 422 + __asm__ __volatile__("push %%rbp;" 423 + "push %%rcx;" 424 + "push %%rdx;" 425 + "push %%rsi;" 426 + "push %%rdi;" 427 + "push $0;" 428 + "vmwrite %%rsp, %[host_rsp];" 429 + "lea 1f(%%rip), %%rax;" 430 + "vmwrite %%rax, %[host_rip];" 431 + "vmresume;" 432 + "incq (%%rsp);" 433 + "1: pop %%rax;" 434 + "pop %%rdi;" 435 + "pop %%rsi;" 436 + "pop %%rdx;" 437 + "pop %%rcx;" 438 + "pop %%rbp;" 439 + : [ret]"=&a"(ret) 440 + : [host_rsp]"r"((uint64_t)HOST_RSP), 441 + [host_rip]"r"((uint64_t)HOST_RIP) 442 + : "memory", "cc", "rbx", "r8", "r9", "r10", 443 + "r11", "r12", "r13", "r14", "r15"); 444 + return ret; 445 + } 446 + 447 + static inline int vmread(uint64_t encoding, uint64_t *value) 448 + { 449 + uint64_t tmp; 450 + uint8_t ret; 451 + 452 + __asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]" 453 + : [value]"=rm"(tmp), [ret]"=rm"(ret) 454 + : [encoding]"r"(encoding) 455 + : "cc", "memory"); 456 + 457 + *value = tmp; 458 + return ret; 459 + } 460 + 461 + /* 462 + * A wrapper around vmread that ignores errors and returns zero if the 463 + * vmread instruction fails. 
464 + */ 465 + static inline uint64_t vmreadz(uint64_t encoding) 466 + { 467 + uint64_t value = 0; 468 + vmread(encoding, &value); 469 + return value; 470 + } 471 + 472 + static inline int vmwrite(uint64_t encoding, uint64_t value) 473 + { 474 + uint8_t ret; 475 + 476 + __asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]" 477 + : [ret]"=rm"(ret) 478 + : [value]"rm"(value), [encoding]"r"(encoding) 479 + : "cc", "memory"); 480 + 481 + return ret; 482 + } 483 + 484 + static inline uint32_t vmcs_revision(void) 485 + { 486 + return rdmsr(MSR_IA32_VMX_BASIC); 487 + } 488 + 489 + void prepare_for_vmx_operation(void); 490 + void prepare_vmcs(void *guest_rip, void *guest_rsp); 491 + struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid, 492 + vmx_guest_code_t guest_code); 493 + 494 + #endif /* !SELFTEST_KVM_VMX_H */
+13 -7
tools/testing/selftests/kvm/lib/kvm_util.c
··· 378 378 * complicated. This function uses a reasonable default length for 379 379 * the array and performs the appropriate allocation. 380 380 */ 381 - struct kvm_cpuid2 *allocate_kvm_cpuid2(void) 381 + static struct kvm_cpuid2 *allocate_kvm_cpuid2(void) 382 382 { 383 383 struct kvm_cpuid2 *cpuid; 384 384 int nent = 100; ··· 402 402 * Input Args: None 403 403 * 404 404 * Output Args: 405 - * cpuid - The supported KVM CPUID 406 405 * 407 - * Return: void 406 + * Return: The supported KVM CPUID 408 407 * 409 408 * Get the guest CPUID supported by KVM. 410 409 */ 411 - void kvm_get_supported_cpuid(struct kvm_cpuid2 *cpuid) 410 + struct kvm_cpuid2 *kvm_get_supported_cpuid(void) 412 411 { 412 + static struct kvm_cpuid2 *cpuid; 413 413 int ret; 414 414 int kvm_fd; 415 415 416 + if (cpuid) 417 + return cpuid; 418 + 419 + cpuid = allocate_kvm_cpuid2(); 416 420 kvm_fd = open(KVM_DEV_PATH, O_RDONLY); 417 421 TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", 418 422 KVM_DEV_PATH, kvm_fd, errno); ··· 426 422 ret, errno); 427 423 428 424 close(kvm_fd); 425 + return cpuid; 429 426 } 430 427 431 428 /* Locate a cpuid entry. ··· 440 435 * Return: A pointer to the cpuid entry. Never returns NULL. 
441 436 */ 442 437 struct kvm_cpuid_entry2 * 443 - find_cpuid_index_entry(struct kvm_cpuid2 *cpuid, uint32_t function, 444 - uint32_t index) 438 + kvm_get_supported_cpuid_index(uint32_t function, uint32_t index) 445 439 { 440 + struct kvm_cpuid2 *cpuid; 446 441 struct kvm_cpuid_entry2 *entry = NULL; 447 442 int i; 448 443 444 + cpuid = kvm_get_supported_cpuid(); 449 445 for (i = 0; i < cpuid->nent; i++) { 450 446 if (cpuid->entries[i].function == function && 451 447 cpuid->entries[i].index == index) { ··· 1441 1435 sparsebit_idx_t pg; 1442 1436 1443 1437 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " 1444 - "not divisable by page size.\n" 1438 + "not divisible by page size.\n" 1445 1439 " paddr_min: 0x%lx page_size: 0x%x", 1446 1440 paddr_min, vm->page_size); 1447 1441
+2 -2
tools/testing/selftests/kvm/lib/sparsebit.c
··· 121 121 * avoided by moving the setting of the nodes mask bits into 122 122 * the previous nodes num_after setting. 123 123 * 124 - * + Node starting index is evenly divisable by the number of bits 124 + * + Node starting index is evenly divisible by the number of bits 125 125 * within a nodes mask member. 126 126 * 127 127 * + Nodes never represent a range of bits that wrap around the ··· 1741 1741 1742 1742 /* Validate node index is divisible by the mask size */ 1743 1743 if (nodep->idx % MASK_BITS) { 1744 - fprintf(stderr, "Node index not divisable by " 1744 + fprintf(stderr, "Node index not divisible by " 1745 1745 "mask size,\n" 1746 1746 " nodep: %p nodep->idx: 0x%lx " 1747 1747 "MASK_BITS: %lu\n",
+243
tools/testing/selftests/kvm/lib/vmx.c
··· 1 + /* 2 + * tools/testing/selftests/kvm/lib/vmx.c 3 + * 4 + * Copyright (C) 2018, Google LLC. 5 + * 6 + * This work is licensed under the terms of the GNU GPL, version 2. 7 + */ 8 + 9 + #define _GNU_SOURCE /* for program_invocation_name */ 10 + 11 + #include "test_util.h" 12 + #include "kvm_util.h" 13 + #include "x86.h" 14 + #include "vmx.h" 15 + 16 + /* Create a default VM for VMX tests. 17 + * 18 + * Input Args: 19 + * vcpuid - The id of the single VCPU to add to the VM. 20 + * guest_code - The vCPU's entry point 21 + * 22 + * Output Args: None 23 + * 24 + * Return: 25 + * Pointer to opaque structure that describes the created VM. 26 + */ 27 + struct kvm_vm * 28 + vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code) 29 + { 30 + struct kvm_cpuid2 *cpuid; 31 + struct kvm_vm *vm; 32 + vm_vaddr_t vmxon_vaddr; 33 + vm_paddr_t vmxon_paddr; 34 + vm_vaddr_t vmcs_vaddr; 35 + vm_paddr_t vmcs_paddr; 36 + 37 + vm = vm_create_default(vcpuid, (void *) guest_code); 38 + 39 + /* Enable nesting in CPUID */ 40 + vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid()); 41 + 42 + /* Setup of a region of guest memory for the vmxon region. */ 43 + vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); 44 + vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr); 45 + 46 + /* Setup of a region of guest memory for a vmcs. */ 47 + vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0); 48 + vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr); 49 + 50 + vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr, 51 + vmcs_paddr); 52 + 53 + return vm; 54 + } 55 + 56 + void prepare_for_vmx_operation(void) 57 + { 58 + uint64_t feature_control; 59 + uint64_t required; 60 + unsigned long cr0; 61 + unsigned long cr4; 62 + 63 + /* 64 + * Ensure bits in CR0 and CR4 are valid in VMX operation: 65 + * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. 66 + * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
67 + */ 68 + __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory"); 69 + cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1); 70 + cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0); 71 + __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory"); 72 + 73 + __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory"); 74 + cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1); 75 + cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0); 76 + /* Enable VMX operation */ 77 + cr4 |= X86_CR4_VMXE; 78 + __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory"); 79 + 80 + /* 81 + * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: 82 + * Bit 0: Lock bit. If clear, VMXON causes a #GP. 83 + * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON 84 + * outside of SMX causes a #GP. 85 + */ 86 + required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 87 + required |= FEATURE_CONTROL_LOCKED; 88 + feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 89 + if ((feature_control & required) != required) 90 + wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required); 91 + } 92 + 93 + /* 94 + * Initialize the control fields to the most basic settings possible. 
95 + */ 96 + static inline void init_vmcs_control_fields(void) 97 + { 98 + vmwrite(VIRTUAL_PROCESSOR_ID, 0); 99 + vmwrite(POSTED_INTR_NV, 0); 100 + 101 + vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PINBASED_CTLS)); 102 + vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_PROCBASED_CTLS)); 103 + vmwrite(EXCEPTION_BITMAP, 0); 104 + vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); 105 + vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ 106 + vmwrite(CR3_TARGET_COUNT, 0); 107 + vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) | 108 + VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */ 109 + vmwrite(VM_EXIT_MSR_STORE_COUNT, 0); 110 + vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0); 111 + vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) | 112 + VM_ENTRY_IA32E_MODE); /* 64-bit guest */ 113 + vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0); 114 + vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0); 115 + vmwrite(TPR_THRESHOLD, 0); 116 + vmwrite(SECONDARY_VM_EXEC_CONTROL, 0); 117 + 118 + vmwrite(CR0_GUEST_HOST_MASK, 0); 119 + vmwrite(CR4_GUEST_HOST_MASK, 0); 120 + vmwrite(CR0_READ_SHADOW, get_cr0()); 121 + vmwrite(CR4_READ_SHADOW, get_cr4()); 122 + } 123 + 124 + /* 125 + * Initialize the host state fields based on the current host state, with 126 + * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch 127 + * or vmresume. 
128 + */ 129 + static inline void init_vmcs_host_state(void) 130 + { 131 + uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); 132 + 133 + vmwrite(HOST_ES_SELECTOR, get_es()); 134 + vmwrite(HOST_CS_SELECTOR, get_cs()); 135 + vmwrite(HOST_SS_SELECTOR, get_ss()); 136 + vmwrite(HOST_DS_SELECTOR, get_ds()); 137 + vmwrite(HOST_FS_SELECTOR, get_fs()); 138 + vmwrite(HOST_GS_SELECTOR, get_gs()); 139 + vmwrite(HOST_TR_SELECTOR, get_tr()); 140 + 141 + if (exit_controls & VM_EXIT_LOAD_IA32_PAT) 142 + vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT)); 143 + if (exit_controls & VM_EXIT_LOAD_IA32_EFER) 144 + vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER)); 145 + if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 146 + vmwrite(HOST_IA32_PERF_GLOBAL_CTRL, 147 + rdmsr(MSR_CORE_PERF_GLOBAL_CTRL)); 148 + 149 + vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS)); 150 + 151 + vmwrite(HOST_CR0, get_cr0()); 152 + vmwrite(HOST_CR3, get_cr3()); 153 + vmwrite(HOST_CR4, get_cr4()); 154 + vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE)); 155 + vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE)); 156 + vmwrite(HOST_TR_BASE, 157 + get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr()))); 158 + vmwrite(HOST_GDTR_BASE, get_gdt_base()); 159 + vmwrite(HOST_IDTR_BASE, get_idt_base()); 160 + vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP)); 161 + vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP)); 162 + } 163 + 164 + /* 165 + * Initialize the guest state fields essentially as a clone of 166 + * the host state fields. Some host state fields have fixed 167 + * values, and we set the corresponding guest state fields accordingly. 
168 + */ 169 + static inline void init_vmcs_guest_state(void *rip, void *rsp) 170 + { 171 + vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR)); 172 + vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR)); 173 + vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR)); 174 + vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR)); 175 + vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR)); 176 + vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR)); 177 + vmwrite(GUEST_LDTR_SELECTOR, 0); 178 + vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR)); 179 + vmwrite(GUEST_INTR_STATUS, 0); 180 + vmwrite(GUEST_PML_INDEX, 0); 181 + 182 + vmwrite(VMCS_LINK_POINTER, -1ll); 183 + vmwrite(GUEST_IA32_DEBUGCTL, 0); 184 + vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT)); 185 + vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER)); 186 + vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL, 187 + vmreadz(HOST_IA32_PERF_GLOBAL_CTRL)); 188 + 189 + vmwrite(GUEST_ES_LIMIT, -1); 190 + vmwrite(GUEST_CS_LIMIT, -1); 191 + vmwrite(GUEST_SS_LIMIT, -1); 192 + vmwrite(GUEST_DS_LIMIT, -1); 193 + vmwrite(GUEST_FS_LIMIT, -1); 194 + vmwrite(GUEST_GS_LIMIT, -1); 195 + vmwrite(GUEST_LDTR_LIMIT, -1); 196 + vmwrite(GUEST_TR_LIMIT, 0x67); 197 + vmwrite(GUEST_GDTR_LIMIT, 0xffff); 198 + vmwrite(GUEST_IDTR_LIMIT, 0xffff); 199 + vmwrite(GUEST_ES_AR_BYTES, 200 + vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093); 201 + vmwrite(GUEST_CS_AR_BYTES, 0xa09b); 202 + vmwrite(GUEST_SS_AR_BYTES, 0xc093); 203 + vmwrite(GUEST_DS_AR_BYTES, 204 + vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093); 205 + vmwrite(GUEST_FS_AR_BYTES, 206 + vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093); 207 + vmwrite(GUEST_GS_AR_BYTES, 208 + vmreadz(GUEST_GS_SELECTOR) == 0 ? 
0x10000 : 0xc093); 209 + vmwrite(GUEST_LDTR_AR_BYTES, 0x10000); 210 + vmwrite(GUEST_TR_AR_BYTES, 0x8b); 211 + vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); 212 + vmwrite(GUEST_ACTIVITY_STATE, 0); 213 + vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS)); 214 + vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0); 215 + 216 + vmwrite(GUEST_CR0, vmreadz(HOST_CR0)); 217 + vmwrite(GUEST_CR3, vmreadz(HOST_CR3)); 218 + vmwrite(GUEST_CR4, vmreadz(HOST_CR4)); 219 + vmwrite(GUEST_ES_BASE, 0); 220 + vmwrite(GUEST_CS_BASE, 0); 221 + vmwrite(GUEST_SS_BASE, 0); 222 + vmwrite(GUEST_DS_BASE, 0); 223 + vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE)); 224 + vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE)); 225 + vmwrite(GUEST_LDTR_BASE, 0); 226 + vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE)); 227 + vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); 228 + vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); 229 + vmwrite(GUEST_DR7, 0x400); 230 + vmwrite(GUEST_RSP, (uint64_t)rsp); 231 + vmwrite(GUEST_RIP, (uint64_t)rip); 232 + vmwrite(GUEST_RFLAGS, 2); 233 + vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); 234 + vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); 235 + vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP)); 236 + } 237 + 238 + void prepare_vmcs(void *guest_rip, void *guest_rsp) 239 + { 240 + init_vmcs_control_fields(); 241 + init_vmcs_host_state(); 242 + init_vmcs_guest_state(guest_rip, guest_rsp); 243 + }
+231
tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
··· 1 + /* 2 + * tools/testing/selftests/kvm/vmx_tsc_adjust_test.c 3 + * 4 + * Copyright (C) 2018, Google LLC. 5 + * 6 + * This work is licensed under the terms of the GNU GPL, version 2. 7 + * 8 + * 9 + * IA32_TSC_ADJUST test 10 + * 11 + * According to the SDM, "if an execution of WRMSR to the 12 + * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC, 13 + * the logical processor also adds (or subtracts) value X from the 14 + * IA32_TSC_ADJUST MSR." 15 + * 16 + * Note that when L1 doesn't intercept writes to IA32_TSC, a 17 + * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC 18 + * value. 19 + * 20 + * This test verifies that this unusual case is handled correctly. 21 + */ 22 + 23 + #include "test_util.h" 24 + #include "kvm_util.h" 25 + #include "x86.h" 26 + #include "vmx.h" 27 + 28 + #include <string.h> 29 + #include <sys/ioctl.h> 30 + 31 + #ifndef MSR_IA32_TSC_ADJUST 32 + #define MSR_IA32_TSC_ADJUST 0x3b 33 + #endif 34 + 35 + #define PAGE_SIZE 4096 36 + #define VCPU_ID 5 37 + 38 + #define TSC_ADJUST_VALUE (1ll << 32) 39 + #define TSC_OFFSET_VALUE -(1ll << 48) 40 + 41 + enum { 42 + PORT_ABORT = 0x1000, 43 + PORT_REPORT, 44 + PORT_DONE, 45 + }; 46 + 47 + struct vmx_page { 48 + vm_vaddr_t virt; 49 + vm_paddr_t phys; 50 + }; 51 + 52 + enum { 53 + VMXON_PAGE = 0, 54 + VMCS_PAGE, 55 + MSR_BITMAP_PAGE, 56 + 57 + NUM_VMX_PAGES, 58 + }; 59 + 60 + struct kvm_single_msr { 61 + struct kvm_msrs header; 62 + struct kvm_msr_entry entry; 63 + } __attribute__((packed)); 64 + 65 + /* The virtual machine object. */ 66 + static struct kvm_vm *vm; 67 + 68 + /* Array of vmx_page descriptors that is shared with the guest. 
*/ 69 + struct vmx_page *vmx_pages; 70 + 71 + #define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg)) 72 + static void do_exit_to_l0(uint16_t port, unsigned long arg) 73 + { 74 + __asm__ __volatile__("in %[port], %%al" 75 + : 76 + : [port]"d"(port), "D"(arg) 77 + : "rax"); 78 + } 79 + 80 + 81 + #define GUEST_ASSERT(_condition) do { \ 82 + if (!(_condition)) \ 83 + exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \ 84 + } while (0) 85 + 86 + static void check_ia32_tsc_adjust(int64_t max) 87 + { 88 + int64_t adjust; 89 + 90 + adjust = rdmsr(MSR_IA32_TSC_ADJUST); 91 + exit_to_l0(PORT_REPORT, adjust); 92 + GUEST_ASSERT(adjust <= max); 93 + } 94 + 95 + static void l2_guest_code(void) 96 + { 97 + uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 98 + 99 + wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 100 + check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); 101 + 102 + /* Exit to L1 */ 103 + __asm__ __volatile__("vmcall"); 104 + } 105 + 106 + static void l1_guest_code(struct vmx_page *vmx_pages) 107 + { 108 + #define L2_GUEST_STACK_SIZE 64 109 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 110 + uint32_t control; 111 + uintptr_t save_cr3; 112 + 113 + GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE); 114 + wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE); 115 + check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 116 + 117 + prepare_for_vmx_operation(); 118 + 119 + /* Enter VMX root operation. */ 120 + *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision(); 121 + GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys)); 122 + 123 + /* Load a VMCS. */ 124 + *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision(); 125 + GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys)); 126 + GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys)); 127 + 128 + /* Prepare the VMCS for L2 execution. 
*/ 129 + prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); 130 + control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); 131 + control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING; 132 + vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); 133 + vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys); 134 + vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE); 135 + 136 + /* Jump into L2. First, test failure to load guest CR3. */ 137 + save_cr3 = vmreadz(GUEST_CR3); 138 + vmwrite(GUEST_CR3, -1ull); 139 + GUEST_ASSERT(!vmlaunch()); 140 + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == 141 + (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE)); 142 + check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 143 + vmwrite(GUEST_CR3, save_cr3); 144 + 145 + GUEST_ASSERT(!vmlaunch()); 146 + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 147 + 148 + check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); 149 + 150 + exit_to_l0(PORT_DONE, 0); 151 + } 152 + 153 + static void allocate_vmx_page(struct vmx_page *page) 154 + { 155 + vm_vaddr_t virt; 156 + 157 + virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0); 158 + memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE); 159 + 160 + page->virt = virt; 161 + page->phys = addr_gva2gpa(vm, virt); 162 + } 163 + 164 + static vm_vaddr_t allocate_vmx_pages(void) 165 + { 166 + vm_vaddr_t vmx_pages_vaddr; 167 + int i; 168 + 169 + vmx_pages_vaddr = vm_vaddr_alloc( 170 + vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0); 171 + 172 + vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr); 173 + 174 + for (i = 0; i < NUM_VMX_PAGES; i++) 175 + allocate_vmx_page(&vmx_pages[i]); 176 + 177 + return vmx_pages_vaddr; 178 + } 179 + 180 + void report(int64_t val) 181 + { 182 + printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", 183 + val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); 184 + } 185 + 186 + int main(int argc, char *argv[]) 187 + { 188 + vm_vaddr_t vmx_pages_vaddr; 189 + struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 190 
+ 191 + if (!(entry->ecx & CPUID_VMX)) { 192 + printf("nested VMX not enabled, skipping test"); 193 + return 0; 194 + } 195 + 196 + vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); 197 + 198 + /* Allocate VMX pages and shared descriptors (vmx_pages). */ 199 + vmx_pages_vaddr = allocate_vmx_pages(); 200 + vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr); 201 + 202 + for (;;) { 203 + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); 204 + struct kvm_regs regs; 205 + 206 + vcpu_run(vm, VCPU_ID); 207 + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 208 + "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n", 209 + run->exit_reason, 210 + exit_reason_str(run->exit_reason)); 211 + 212 + vcpu_regs_get(vm, VCPU_ID, &regs); 213 + 214 + switch (run->io.port) { 215 + case PORT_ABORT: 216 + TEST_ASSERT(false, "%s", (const char *) regs.rdi); 217 + /* NOT REACHED */ 218 + case PORT_REPORT: 219 + report(regs.rdi); 220 + break; 221 + case PORT_DONE: 222 + goto done; 223 + default: 224 + TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port); 225 + } 226 + } 227 + 228 + kvm_vm_free(vm); 229 + done: 230 + return 0; 231 + }
+1 -1
tools/testing/selftests/net/Makefile
··· 5 5 CFLAGS += -I../../../../usr/include/ 6 6 7 7 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh 8 - TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh 8 + TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh 9 9 TEST_GEN_FILES = socket 10 10 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy 11 11 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa