Merge branch 'open_state'

+4794 -1387
+6
Documentation/fb/efifb.txt
··· 27 Macbook Pro 17", iMac 20" : 28 video=efifb:i20 29 30 -- 31 Edgar Hucek <gimli@dark-green.com>
··· 27 Macbook Pro 17", iMac 20" : 28 video=efifb:i20 29 30 + Accepted options: 31 + 32 + nowc Don't map the framebuffer write combined. This can be used 33 + to workaround side-effects and slowdowns on other CPU cores 34 + when large amounts of console data are written. 35 + 36 -- 37 Edgar Hucek <gimli@dark-green.com>
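For reference, the new option is passed the same way as the model overrides shown earlier in that file; a minimal example of a kernel command line using it (assuming the usual video= parameter syntax documented above it) would be:

	video=efifb:nowc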
+1 -1
Documentation/gpio/gpio-legacy.txt
··· 459 460 This is done by registering "ranges" of pins, which are essentially 461 cross-reference tables. These are described in 462 - Documentation/pinctrl.txt 463 464 While the pin allocation is totally managed by the pinctrl subsystem, 465 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
··· 459 460 This is done by registering "ranges" of pins, which are essentially 461 cross-reference tables. These are described in 462 + Documentation/driver-api/pinctl.rst 463 464 While the pin allocation is totally managed by the pinctrl subsystem, 465 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
+4 -3
MAINTAINERS
··· 1161 R: Benjamin Herrenschmidt <benh@kernel.crashing.org> 1162 R: Joel Stanley <joel@jms.id.au> 1163 L: linux-i2c@vger.kernel.org 1164 - L: openbmc@lists.ozlabs.org 1165 S: Maintained 1166 F: drivers/irqchip/irq-aspeed-i2c-ic.c 1167 F: drivers/i2c/busses/i2c-aspeed.c ··· 5834 F: drivers/staging/greybus/spilib.c 5835 F: drivers/staging/greybus/spilib.h 5836 5837 - GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS 5838 M: Bryan O'Donoghue <pure.logic@nexus-software.ie> 5839 S: Maintained 5840 F: drivers/staging/greybus/loopback.c ··· 10383 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git 10384 S: Maintained 10385 F: Documentation/devicetree/bindings/pinctrl/ 10386 - F: Documentation/pinctrl.txt 10387 F: drivers/pinctrl/ 10388 F: include/linux/pinctrl/ 10389 ··· 14004 F: include/linux/virtio*.h 14005 F: include/uapi/linux/virtio_*.h 14006 F: drivers/crypto/virtio/ 14007 14008 VIRTIO CRYPTO DRIVER 14009 M: Gonglei <arei.gonglei@huawei.com>
··· 1161 R: Benjamin Herrenschmidt <benh@kernel.crashing.org> 1162 R: Joel Stanley <joel@jms.id.au> 1163 L: linux-i2c@vger.kernel.org 1164 + L: openbmc@lists.ozlabs.org (moderated for non-subscribers) 1165 S: Maintained 1166 F: drivers/irqchip/irq-aspeed-i2c-ic.c 1167 F: drivers/i2c/busses/i2c-aspeed.c ··· 5834 F: drivers/staging/greybus/spilib.c 5835 F: drivers/staging/greybus/spilib.h 5836 5837 + GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS 5838 M: Bryan O'Donoghue <pure.logic@nexus-software.ie> 5839 S: Maintained 5840 F: drivers/staging/greybus/loopback.c ··· 10383 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git 10384 S: Maintained 10385 F: Documentation/devicetree/bindings/pinctrl/ 10386 + F: Documentation/driver-api/pinctl.rst 10387 F: drivers/pinctrl/ 10388 F: include/linux/pinctrl/ 10389 ··· 14004 F: include/linux/virtio*.h 14005 F: include/uapi/linux/virtio_*.h 14006 F: drivers/crypto/virtio/ 14007 + F: mm/balloon_compaction.c 14008 14009 VIRTIO CRYPTO DRIVER 14010 M: Gonglei <arei.gonglei@huawei.com>
+1 -1
Makefile
··· 1 VERSION = 4 2 PATCHLEVEL = 13 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 5 NAME = Fearless Coyote 6 7 # *DOCUMENTATION*
··· 1 VERSION = 4 2 PATCHLEVEL = 13 3 SUBLEVEL = 0 4 + EXTRAVERSION = -rc5 5 NAME = Fearless Coyote 6 7 # *DOCUMENTATION*
+9 -2
arch/arm/include/asm/tlb.h
··· 148 } 149 150 static inline void 151 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 152 { 153 tlb->mm = mm; 154 tlb->fullmm = !(start | (end+1)); ··· 167 } 168 169 static inline void 170 - tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 171 { 172 tlb_flush_mmu(tlb); 173 174 /* keep the page table cache within bounds */
··· 148 } 149 150 static inline void 151 + arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 152 + unsigned long start, unsigned long end) 153 { 154 tlb->mm = mm; 155 tlb->fullmm = !(start | (end+1)); ··· 166 } 167 168 static inline void 169 + arch_tlb_finish_mmu(struct mmu_gather *tlb, 170 + unsigned long start, unsigned long end, bool force) 171 { 172 + if (force) { 173 + tlb->range_start = start; 174 + tlb->range_end = end; 175 + } 176 + 177 tlb_flush_mmu(tlb); 178 179 /* keep the page table cache within bounds */
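The new force parameter lets the generic mmu_gather code demand a TLB flush even when this particular gather batched nothing, by re-seeding range_start/range_end before the flush. A minimal sketch of how a generic wrapper might drive the reworked hooks is below; the wrapper name and the mm_tlb_flush_nested() helper are assumptions based on the surrounding mmu_gather rework, not part of this hunk:

	/* Illustrative sketch only, not taken from this diff. */
	void tlb_finish_mmu(struct mmu_gather *tlb,
			    unsigned long start, unsigned long end)
	{
		/*
		 * If another thread may have raced on the same range (e.g. a
		 * parallel unmap under a shared mmap_sem), force the flush even
		 * though this gather saw no pages, so no stale TLB entry survives.
		 */
		bool force = mm_tlb_flush_nested(tlb->mm);

		arch_tlb_finish_mmu(tlb, start, end, force);
	}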
+6 -2
arch/ia64/include/asm/tlb.h
··· 168 169 170 static inline void 171 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 172 { 173 tlb->mm = mm; 174 tlb->max = ARRAY_SIZE(tlb->local); ··· 186 * collected. 187 */ 188 static inline void 189 - tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 190 { 191 /* 192 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and 193 * tlb->end_addr.
··· 168 169 170 static inline void 171 + arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 172 + unsigned long start, unsigned long end) 173 { 174 tlb->mm = mm; 175 tlb->max = ARRAY_SIZE(tlb->local); ··· 185 * collected. 186 */ 187 static inline void 188 + arch_tlb_finish_mmu(struct mmu_gather *tlb, 189 + unsigned long start, unsigned long end, bool force) 190 { 191 + if (force) 192 + tlb->need_flush = 1; 193 /* 194 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and 195 * tlb->end_addr.
+1 -1
arch/mips/Kconfig
··· 2260 2261 config MIPS_MT_SMP 2262 bool "MIPS MT SMP support (1 TC on each available VPE)" 2263 - depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 2264 select CPU_MIPSR2_IRQ_VI 2265 select CPU_MIPSR2_IRQ_EI 2266 select SYNC_R4K
··· 2260 2261 config MIPS_MT_SMP 2262 bool "MIPS MT SMP support (1 TC on each available VPE)" 2263 + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS 2264 select CPU_MIPSR2_IRQ_VI 2265 select CPU_MIPSR2_IRQ_EI 2266 select SYNC_R4K
+14 -1
arch/mips/Makefile
··· 243 ifdef CONFIG_PHYSICAL_START 244 load-y = $(CONFIG_PHYSICAL_START) 245 endif 246 - entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \ 247 | grep "\bkernel_entry\b" | cut -f1 -d \ ) 248 249 cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic 250 drivers-$(CONFIG_PCI) += arch/mips/pci/
··· 243 ifdef CONFIG_PHYSICAL_START 244 load-y = $(CONFIG_PHYSICAL_START) 245 endif 246 + 247 + entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \ 248 | grep "\bkernel_entry\b" | cut -f1 -d \ ) 249 + ifdef CONFIG_CPU_MICROMIPS 250 + # 251 + # Set the ISA bit, since the kernel_entry symbol in the ELF will have it 252 + # clear which would lead to images containing addresses which bootloaders may 253 + # jump to as MIPS32 code. 254 + # 255 + entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \ 256 + $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \ 257 + $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y))))))))) 258 + else 259 + entry-y = $(entry-noisa-y) 260 + endif 261 262 cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic 263 drivers-$(CONFIG_PCI) += arch/mips/pci/
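The nested patsubst calls rewrite the final hex digit of the nm output from even to odd (0 to 1, 2 to 3, and so on through e to f), which sets bit 0 of the address, the microMIPS ISA bit. As a worked example with a hypothetical entry point: if nm reports kernel_entry at ffffffff80100000, entry-noisa-y is 0xffffffff80100000 and, on a CONFIG_CPU_MICROMIPS build, entry-y becomes 0xffffffff80100001, so a bootloader jumping to the advertised entry address enters the kernel in microMIPS mode instead of interpreting it as MIPS32 code.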
+2
arch/mips/boot/compressed/.gitignore
···
··· 1 + ashldi3.c 2 + bswapsi.c
+1 -1
arch/mips/cavium-octeon/octeon-usb.c
··· 13 #include <linux/mutex.h> 14 #include <linux/delay.h> 15 #include <linux/of_platform.h> 16 17 #include <asm/octeon/octeon.h> 18 - #include <asm/octeon/cvmx-gpio-defs.h> 19 20 /* USB Control Register */ 21 union cvm_usbdrd_uctl_ctl {
··· 13 #include <linux/mutex.h> 14 #include <linux/delay.h> 15 #include <linux/of_platform.h> 16 + #include <linux/io.h> 17 18 #include <asm/octeon/octeon.h> 19 20 /* USB Control Register */ 21 union cvm_usbdrd_uctl_ctl {
+6 -28
arch/mips/dec/int-handler.S
··· 147 * Find irq with highest priority 148 */ 149 # open coded PTR_LA t1, cpu_mask_nr_tbl 150 - #if (_MIPS_SZPTR == 32) 151 # open coded la t1, cpu_mask_nr_tbl 152 lui t1, %hi(cpu_mask_nr_tbl) 153 addiu t1, %lo(cpu_mask_nr_tbl) 154 - 155 - #endif 156 - #if (_MIPS_SZPTR == 64) 157 - # open coded dla t1, cpu_mask_nr_tbl 158 - .set push 159 - .set noat 160 - lui t1, %highest(cpu_mask_nr_tbl) 161 - lui AT, %hi(cpu_mask_nr_tbl) 162 - daddiu t1, t1, %higher(cpu_mask_nr_tbl) 163 - daddiu AT, AT, %lo(cpu_mask_nr_tbl) 164 - dsll t1, 32 165 - daddu t1, t1, AT 166 - .set pop 167 #endif 168 1: lw t2,(t1) 169 nop ··· 203 * Find irq with highest priority 204 */ 205 # open coded PTR_LA t1,asic_mask_nr_tbl 206 - #if (_MIPS_SZPTR == 32) 207 # open coded la t1, asic_mask_nr_tbl 208 lui t1, %hi(asic_mask_nr_tbl) 209 addiu t1, %lo(asic_mask_nr_tbl) 210 - 211 - #endif 212 - #if (_MIPS_SZPTR == 64) 213 - # open coded dla t1, asic_mask_nr_tbl 214 - .set push 215 - .set noat 216 - lui t1, %highest(asic_mask_nr_tbl) 217 - lui AT, %hi(asic_mask_nr_tbl) 218 - daddiu t1, t1, %higher(asic_mask_nr_tbl) 219 - daddiu AT, AT, %lo(asic_mask_nr_tbl) 220 - dsll t1, 32 221 - daddu t1, t1, AT 222 - .set pop 223 #endif 224 2: lw t2,(t1) 225 nop
··· 147 * Find irq with highest priority 148 */ 149 # open coded PTR_LA t1, cpu_mask_nr_tbl 150 + #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 151 # open coded la t1, cpu_mask_nr_tbl 152 lui t1, %hi(cpu_mask_nr_tbl) 153 addiu t1, %lo(cpu_mask_nr_tbl) 154 + #else 155 + #error GCC `-msym32' option required for 64-bit DECstation builds 156 #endif 157 1: lw t2,(t1) 158 nop ··· 214 * Find irq with highest priority 215 */ 216 # open coded PTR_LA t1,asic_mask_nr_tbl 217 + #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 218 # open coded la t1, asic_mask_nr_tbl 219 lui t1, %hi(asic_mask_nr_tbl) 220 addiu t1, %lo(asic_mask_nr_tbl) 221 + #else 222 + #error GCC `-msym32' option required for 64-bit DECstation builds 223 #endif 224 2: lw t2,(t1) 225 nop
+2
arch/mips/include/asm/cache.h
··· 9 #ifndef _ASM_CACHE_H 10 #define _ASM_CACHE_H 11 12 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT 13 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 14
··· 9 #ifndef _ASM_CACHE_H 10 #define _ASM_CACHE_H 11 12 + #include <kmalloc.h> 13 + 14 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT 15 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 16
+3
arch/mips/include/asm/cpu-features.h
··· 428 #ifndef cpu_scache_line_size 429 #define cpu_scache_line_size() cpu_data[0].scache.linesz 430 #endif 431 432 #ifndef cpu_hwrena_impl_bits 433 #define cpu_hwrena_impl_bits 0
··· 428 #ifndef cpu_scache_line_size 429 #define cpu_scache_line_size() cpu_data[0].scache.linesz 430 #endif 431 + #ifndef cpu_tcache_line_size 432 + #define cpu_tcache_line_size() cpu_data[0].tcache.linesz 433 + #endif 434 435 #ifndef cpu_hwrena_impl_bits 436 #define cpu_hwrena_impl_bits 0
+36 -1
arch/mips/include/asm/octeon/cvmx-l2c-defs.h
··· 33 #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) 34 #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull)) 35 #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) 36 #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) 37 #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) 38 #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull)) ··· 70 ((offset) & 1) * 8) 71 #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \ 72 ((offset) & 31) * 8) 73 - #define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) 74 75 76 union cvmx_l2c_cfg { 77 uint64_t u64;
··· 33 #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) 34 #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull)) 35 #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) 36 + #define CVMX_L2C_ERR_TDTX(block_id) \ 37 + (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull) 38 + #define CVMX_L2C_ERR_TTGX(block_id) \ 39 + (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull) 40 #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) 41 #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) 42 #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull)) ··· 66 ((offset) & 1) * 8) 67 #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \ 68 ((offset) & 31) * 8) 69 70 + 71 + union cvmx_l2c_err_tdtx { 72 + uint64_t u64; 73 + struct cvmx_l2c_err_tdtx_s { 74 + __BITFIELD_FIELD(uint64_t dbe:1, 75 + __BITFIELD_FIELD(uint64_t sbe:1, 76 + __BITFIELD_FIELD(uint64_t vdbe:1, 77 + __BITFIELD_FIELD(uint64_t vsbe:1, 78 + __BITFIELD_FIELD(uint64_t syn:10, 79 + __BITFIELD_FIELD(uint64_t reserved_22_49:28, 80 + __BITFIELD_FIELD(uint64_t wayidx:18, 81 + __BITFIELD_FIELD(uint64_t reserved_2_3:2, 82 + __BITFIELD_FIELD(uint64_t type:2, 83 + ;))))))))) 84 + } s; 85 + }; 86 + 87 + union cvmx_l2c_err_ttgx { 88 + uint64_t u64; 89 + struct cvmx_l2c_err_ttgx_s { 90 + __BITFIELD_FIELD(uint64_t dbe:1, 91 + __BITFIELD_FIELD(uint64_t sbe:1, 92 + __BITFIELD_FIELD(uint64_t noway:1, 93 + __BITFIELD_FIELD(uint64_t reserved_56_60:5, 94 + __BITFIELD_FIELD(uint64_t syn:6, 95 + __BITFIELD_FIELD(uint64_t reserved_22_49:28, 96 + __BITFIELD_FIELD(uint64_t wayidx:15, 97 + __BITFIELD_FIELD(uint64_t reserved_2_6:5, 98 + __BITFIELD_FIELD(uint64_t type:2, 99 + ;))))))))) 100 + } s; 101 + }; 102 103 union cvmx_l2c_cfg { 104 uint64_t u64;
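The new unions follow the usual Octeon CSR pattern: read the whole 64-bit register and then pick fields out of the .s view. A brief sketch of how an error handler might poll the new tag-data error register follows; the function itself and the write-back-to-clear behaviour are illustrative assumptions, not part of this diff:

	/* Illustrative only: possible use of the CVMX_L2C_ERR_TDTX definition. */
	static void check_l2c_tad_errors(int tad)
	{
		union cvmx_l2c_err_tdtx err_tdtx;

		err_tdtx.u64 = cvmx_read_csr(CVMX_L2C_ERR_TDTX(tad));
		if (err_tdtx.s.dbe || err_tdtx.s.sbe)
			pr_err("L2C TAD%d: %s-bit error, syndrome 0x%x, wayidx 0x%x\n",
			       tad, err_tdtx.s.dbe ? "double" : "single",
			       err_tdtx.s.syn, err_tdtx.s.wayidx);

		/* Assumed W1C semantics: write the value back to clear error bits. */
		cvmx_write_csr(CVMX_L2C_ERR_TDTX(tad), err_tdtx.u64);
	}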
+60
arch/mips/include/asm/octeon/cvmx-l2d-defs.h
···
··· 1 + /***********************license start*************** 2 + * Author: Cavium Networks 3 + * 4 + * Contact: support@caviumnetworks.com 5 + * This file is part of the OCTEON SDK 6 + * 7 + * Copyright (c) 2003-2017 Cavium, Inc. 8 + * 9 + * This file is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License, Version 2, as 11 + * published by the Free Software Foundation. 12 + * 13 + * This file is distributed in the hope that it will be useful, but 14 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 + * NONINFRINGEMENT. See the GNU General Public License for more 17 + * details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this file; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 + * or visit http://www.gnu.org/licenses/. 23 + * 24 + * This file may also be available under a different license from Cavium. 25 + * Contact Cavium Networks for more information 26 + ***********************license end**************************************/ 27 + 28 + #ifndef __CVMX_L2D_DEFS_H__ 29 + #define __CVMX_L2D_DEFS_H__ 30 + 31 + #define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull)) 32 + #define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) 33 + 34 + 35 + union cvmx_l2d_err { 36 + uint64_t u64; 37 + struct cvmx_l2d_err_s { 38 + __BITFIELD_FIELD(uint64_t reserved_6_63:58, 39 + __BITFIELD_FIELD(uint64_t bmhclsel:1, 40 + __BITFIELD_FIELD(uint64_t ded_err:1, 41 + __BITFIELD_FIELD(uint64_t sec_err:1, 42 + __BITFIELD_FIELD(uint64_t ded_intena:1, 43 + __BITFIELD_FIELD(uint64_t sec_intena:1, 44 + __BITFIELD_FIELD(uint64_t ecc_ena:1, 45 + ;))))))) 46 + } s; 47 + }; 48 + 49 + union cvmx_l2d_fus3 { 50 + uint64_t u64; 51 + struct cvmx_l2d_fus3_s { 52 + __BITFIELD_FIELD(uint64_t reserved_40_63:24, 53 + __BITFIELD_FIELD(uint64_t ema_ctl:3, 54 + __BITFIELD_FIELD(uint64_t reserved_34_36:3, 55 + __BITFIELD_FIELD(uint64_t q3fus:34, 56 + ;)))) 57 + } s; 58 + }; 59 + 60 + #endif
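Both this new header and the cvmx-l2c-defs.h hunk above build their register layouts with the MIPS __BITFIELD_FIELD() helper, which maps the first-listed field to the most significant bits regardless of endianness. Roughly, paraphrasing arch/mips/include/uapi/asm/bitfield.h from memory rather than from this diff:

	#ifdef __MIPSEB__
	#define __BITFIELD_FIELD(field, more)	field; more
	#elif defined(__MIPSEL__)
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif

On big-endian the fields are emitted in source order; on little-endian the nesting is unwound in reverse so the same declaration still matches the hardware register layout.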
+1
arch/mips/include/asm/octeon/cvmx.h
··· 62 #include <asm/octeon/cvmx-iob-defs.h> 63 #include <asm/octeon/cvmx-ipd-defs.h> 64 #include <asm/octeon/cvmx-l2c-defs.h> 65 #include <asm/octeon/cvmx-l2t-defs.h> 66 #include <asm/octeon/cvmx-led-defs.h> 67 #include <asm/octeon/cvmx-mio-defs.h>
··· 62 #include <asm/octeon/cvmx-iob-defs.h> 63 #include <asm/octeon/cvmx-ipd-defs.h> 64 #include <asm/octeon/cvmx-l2c-defs.h> 65 + #include <asm/octeon/cvmx-l2d-defs.h> 66 #include <asm/octeon/cvmx-l2t-defs.h> 67 #include <asm/octeon/cvmx-led-defs.h> 68 #include <asm/octeon/cvmx-mio-defs.h>
+3 -3
arch/mips/kernel/smp.c
··· 376 cpumask_set_cpu(cpu, &cpu_coherent_mask); 377 notify_cpu_starting(cpu); 378 379 - complete(&cpu_running); 380 - synchronise_count_slave(cpu); 381 - 382 set_cpu_online(cpu, true); 383 384 set_cpu_sibling_map(cpu); 385 set_cpu_core_map(cpu); 386 387 calculate_cpu_foreign_map(); 388 389 /* 390 * irq will be enabled in ->smp_finish(), enabling it too early
··· 376 cpumask_set_cpu(cpu, &cpu_coherent_mask); 377 notify_cpu_starting(cpu); 378 379 set_cpu_online(cpu, true); 380 381 set_cpu_sibling_map(cpu); 382 set_cpu_core_map(cpu); 383 384 calculate_cpu_foreign_map(); 385 + 386 + complete(&cpu_running); 387 + synchronise_count_slave(cpu); 388 389 /* 390 * irq will be enabled in ->smp_finish(), enabling it too early
+1 -1
arch/mips/mm/uasm-mips.c
··· 48 49 #include "uasm.c" 50 51 - static const struct insn const insn_table[insn_invalid] = { 52 [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, 53 [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD}, 54 [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
··· 48 49 #include "uasm.c" 50 51 + static const struct insn insn_table[insn_invalid] = { 52 [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, 53 [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD}, 54 [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
+1950
arch/mips/net/ebpf_jit.c
···
··· 1 + /* 2 + * Just-In-Time compiler for eBPF filters on MIPS 3 + * 4 + * Copyright (c) 2017 Cavium, Inc. 5 + * 6 + * Based on code from: 7 + * 8 + * Copyright (c) 2014 Imagination Technologies Ltd. 9 + * Author: Markos Chandras <markos.chandras@imgtec.com> 10 + * 11 + * This program is free software; you can redistribute it and/or modify it 12 + * under the terms of the GNU General Public License as published by the 13 + * Free Software Foundation; version 2 of the License. 14 + */ 15 + 16 + #include <linux/bitops.h> 17 + #include <linux/errno.h> 18 + #include <linux/filter.h> 19 + #include <linux/bpf.h> 20 + #include <linux/slab.h> 21 + #include <asm/bitops.h> 22 + #include <asm/byteorder.h> 23 + #include <asm/cacheflush.h> 24 + #include <asm/cpu-features.h> 25 + #include <asm/uasm.h> 26 + 27 + /* Registers used by JIT */ 28 + #define MIPS_R_ZERO 0 29 + #define MIPS_R_AT 1 30 + #define MIPS_R_V0 2 /* BPF_R0 */ 31 + #define MIPS_R_V1 3 32 + #define MIPS_R_A0 4 /* BPF_R1 */ 33 + #define MIPS_R_A1 5 /* BPF_R2 */ 34 + #define MIPS_R_A2 6 /* BPF_R3 */ 35 + #define MIPS_R_A3 7 /* BPF_R4 */ 36 + #define MIPS_R_A4 8 /* BPF_R5 */ 37 + #define MIPS_R_T4 12 /* BPF_AX */ 38 + #define MIPS_R_T5 13 39 + #define MIPS_R_T6 14 40 + #define MIPS_R_T7 15 41 + #define MIPS_R_S0 16 /* BPF_R6 */ 42 + #define MIPS_R_S1 17 /* BPF_R7 */ 43 + #define MIPS_R_S2 18 /* BPF_R8 */ 44 + #define MIPS_R_S3 19 /* BPF_R9 */ 45 + #define MIPS_R_S4 20 /* BPF_TCC */ 46 + #define MIPS_R_S5 21 47 + #define MIPS_R_S6 22 48 + #define MIPS_R_S7 23 49 + #define MIPS_R_T8 24 50 + #define MIPS_R_T9 25 51 + #define MIPS_R_SP 29 52 + #define MIPS_R_RA 31 53 + 54 + /* eBPF flags */ 55 + #define EBPF_SAVE_S0 BIT(0) 56 + #define EBPF_SAVE_S1 BIT(1) 57 + #define EBPF_SAVE_S2 BIT(2) 58 + #define EBPF_SAVE_S3 BIT(3) 59 + #define EBPF_SAVE_S4 BIT(4) 60 + #define EBPF_SAVE_RA BIT(5) 61 + #define EBPF_SEEN_FP BIT(6) 62 + #define EBPF_SEEN_TC BIT(7) 63 + #define EBPF_TCC_IN_V1 BIT(8) 64 + 65 + /* 66 + * For the mips64 ISA, we need to track the value range or type for 67 + * each JIT register. The BPF machine requires zero extended 32-bit 68 + * values, but the mips64 ISA requires sign extended 32-bit values. 69 + * At each point in the BPF program we track the state of every 70 + * register so that we can zero extend or sign extend as the BPF 71 + * semantics require. 72 + */ 73 + enum reg_val_type { 74 + /* uninitialized */ 75 + REG_UNKNOWN, 76 + /* not known to be 32-bit compatible. */ 77 + REG_64BIT, 78 + /* 32-bit compatible, no truncation needed for 64-bit ops. */ 79 + REG_64BIT_32BIT, 80 + /* 32-bit compatible, need truncation for 64-bit ops. */ 81 + REG_32BIT, 82 + /* 32-bit zero extended. */ 83 + REG_32BIT_ZERO_EX, 84 + /* 32-bit no sign/zero extension needed. */ 85 + REG_32BIT_POS 86 + }; 87 + 88 + /* 89 + * high bit of offsets indicates if long branch conversion done at 90 + * this insn. 91 + */ 92 + #define OFFSETS_B_CONV BIT(31) 93 + 94 + /** 95 + * struct jit_ctx - JIT context 96 + * @skf: The sk_filter 97 + * @stack_size: eBPF stack size 98 + * @tmp_offset: eBPF $sp offset to 8-byte temporary memory 99 + * @idx: Instruction index 100 + * @flags: JIT flags 101 + * @offsets: Instruction offsets 102 + * @target: Memory location for the compiled filter 103 + * @reg_val_types Packed enum reg_val_type for each register. 
104 + */ 105 + struct jit_ctx { 106 + const struct bpf_prog *skf; 107 + int stack_size; 108 + int tmp_offset; 109 + u32 idx; 110 + u32 flags; 111 + u32 *offsets; 112 + u32 *target; 113 + u64 *reg_val_types; 114 + unsigned int long_b_conversion:1; 115 + unsigned int gen_b_offsets:1; 116 + }; 117 + 118 + static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) 119 + { 120 + *rvt &= ~(7ull << (reg * 3)); 121 + *rvt |= ((u64)type << (reg * 3)); 122 + } 123 + 124 + static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx, 125 + int index, int reg) 126 + { 127 + return (ctx->reg_val_types[index] >> (reg * 3)) & 7; 128 + } 129 + 130 + /* Simply emit the instruction if the JIT memory space has been allocated */ 131 + #define emit_instr(ctx, func, ...) \ 132 + do { \ 133 + if ((ctx)->target != NULL) { \ 134 + u32 *p = &(ctx)->target[ctx->idx]; \ 135 + uasm_i_##func(&p, ##__VA_ARGS__); \ 136 + } \ 137 + (ctx)->idx++; \ 138 + } while (0) 139 + 140 + static unsigned int j_target(struct jit_ctx *ctx, int target_idx) 141 + { 142 + unsigned long target_va, base_va; 143 + unsigned int r; 144 + 145 + if (!ctx->target) 146 + return 0; 147 + 148 + base_va = (unsigned long)ctx->target; 149 + target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV); 150 + 151 + if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful)) 152 + return (unsigned int)-1; 153 + r = target_va & 0x0ffffffful; 154 + return r; 155 + } 156 + 157 + /* Compute the immediate value for PC-relative branches. */ 158 + static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) 159 + { 160 + if (!ctx->gen_b_offsets) 161 + return 0; 162 + 163 + /* 164 + * We want a pc-relative branch. tgt is the instruction offset 165 + * we want to jump to. 166 + 167 + * Branch on MIPS: 168 + * I: target_offset <- sign_extend(offset) 169 + * I+1: PC += target_offset (delay slot) 170 + * 171 + * ctx->idx currently points to the branch instruction 172 + * but the offset is added to the delay slot so we need 173 + * to subtract 4. 174 + */ 175 + return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) - 176 + (ctx->idx * 4) - 4; 177 + } 178 + 179 + int bpf_jit_enable __read_mostly; 180 + 181 + enum which_ebpf_reg { 182 + src_reg, 183 + src_reg_no_fp, 184 + dst_reg, 185 + dst_reg_fp_ok 186 + }; 187 + 188 + /* 189 + * For eBPF, the register mapping naturally falls out of the 190 + * requirements of eBPF and the MIPS n64 ABI. We don't maintain a 191 + * separate frame pointer, so BPF_REG_10 relative accesses are 192 + * adjusted to be $sp relative. 193 + */ 194 + int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn, 195 + enum which_ebpf_reg w) 196 + { 197 + int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ? 
198 + insn->src_reg : insn->dst_reg; 199 + 200 + switch (ebpf_reg) { 201 + case BPF_REG_0: 202 + return MIPS_R_V0; 203 + case BPF_REG_1: 204 + return MIPS_R_A0; 205 + case BPF_REG_2: 206 + return MIPS_R_A1; 207 + case BPF_REG_3: 208 + return MIPS_R_A2; 209 + case BPF_REG_4: 210 + return MIPS_R_A3; 211 + case BPF_REG_5: 212 + return MIPS_R_A4; 213 + case BPF_REG_6: 214 + ctx->flags |= EBPF_SAVE_S0; 215 + return MIPS_R_S0; 216 + case BPF_REG_7: 217 + ctx->flags |= EBPF_SAVE_S1; 218 + return MIPS_R_S1; 219 + case BPF_REG_8: 220 + ctx->flags |= EBPF_SAVE_S2; 221 + return MIPS_R_S2; 222 + case BPF_REG_9: 223 + ctx->flags |= EBPF_SAVE_S3; 224 + return MIPS_R_S3; 225 + case BPF_REG_10: 226 + if (w == dst_reg || w == src_reg_no_fp) 227 + goto bad_reg; 228 + ctx->flags |= EBPF_SEEN_FP; 229 + /* 230 + * Needs special handling, return something that 231 + * cannot be clobbered just in case. 232 + */ 233 + return MIPS_R_ZERO; 234 + case BPF_REG_AX: 235 + return MIPS_R_T4; 236 + default: 237 + bad_reg: 238 + WARN(1, "Illegal bpf reg: %d\n", ebpf_reg); 239 + return -EINVAL; 240 + } 241 + } 242 + /* 243 + * eBPF stack frame will be something like: 244 + * 245 + * Entry $sp ------> +--------------------------------+ 246 + * | $ra (optional) | 247 + * +--------------------------------+ 248 + * | $s0 (optional) | 249 + * +--------------------------------+ 250 + * | $s1 (optional) | 251 + * +--------------------------------+ 252 + * | $s2 (optional) | 253 + * +--------------------------------+ 254 + * | $s3 (optional) | 255 + * +--------------------------------+ 256 + * | $s4 (optional) | 257 + * +--------------------------------+ 258 + * | tmp-storage (if $ra saved) | 259 + * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10 260 + * | BPF_REG_10 relative storage | 261 + * | MAX_BPF_STACK (optional) | 262 + * | . | 263 + * | . | 264 + * | . | 265 + * $sp --------> +--------------------------------+ 266 + * 267 + * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized 268 + * area is not allocated. 269 + */ 270 + static int gen_int_prologue(struct jit_ctx *ctx) 271 + { 272 + int stack_adjust = 0; 273 + int store_offset; 274 + int locals_size; 275 + 276 + if (ctx->flags & EBPF_SAVE_RA) 277 + /* 278 + * If RA we are doing a function call and may need 279 + * extra 8-byte tmp area. 280 + */ 281 + stack_adjust += 16; 282 + if (ctx->flags & EBPF_SAVE_S0) 283 + stack_adjust += 8; 284 + if (ctx->flags & EBPF_SAVE_S1) 285 + stack_adjust += 8; 286 + if (ctx->flags & EBPF_SAVE_S2) 287 + stack_adjust += 8; 288 + if (ctx->flags & EBPF_SAVE_S3) 289 + stack_adjust += 8; 290 + if (ctx->flags & EBPF_SAVE_S4) 291 + stack_adjust += 8; 292 + 293 + BUILD_BUG_ON(MAX_BPF_STACK & 7); 294 + locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0; 295 + 296 + stack_adjust += locals_size; 297 + ctx->tmp_offset = locals_size; 298 + 299 + ctx->stack_size = stack_adjust; 300 + 301 + /* 302 + * First instruction initializes the tail call count (TCC). 303 + * On tail call we skip this instruction, and the TCC is 304 + * passed in $v1 from the caller. 
305 + */ 306 + emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT); 307 + if (stack_adjust) 308 + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust); 309 + else 310 + return 0; 311 + 312 + store_offset = stack_adjust - 8; 313 + 314 + if (ctx->flags & EBPF_SAVE_RA) { 315 + emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP); 316 + store_offset -= 8; 317 + } 318 + if (ctx->flags & EBPF_SAVE_S0) { 319 + emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP); 320 + store_offset -= 8; 321 + } 322 + if (ctx->flags & EBPF_SAVE_S1) { 323 + emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP); 324 + store_offset -= 8; 325 + } 326 + if (ctx->flags & EBPF_SAVE_S2) { 327 + emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP); 328 + store_offset -= 8; 329 + } 330 + if (ctx->flags & EBPF_SAVE_S3) { 331 + emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP); 332 + store_offset -= 8; 333 + } 334 + if (ctx->flags & EBPF_SAVE_S4) { 335 + emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP); 336 + store_offset -= 8; 337 + } 338 + 339 + if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1)) 340 + emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO); 341 + 342 + return 0; 343 + } 344 + 345 + static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) 346 + { 347 + const struct bpf_prog *prog = ctx->skf; 348 + int stack_adjust = ctx->stack_size; 349 + int store_offset = stack_adjust - 8; 350 + int r0 = MIPS_R_V0; 351 + 352 + if (dest_reg == MIPS_R_RA && 353 + get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) 354 + /* Don't let zero extended value escape. */ 355 + emit_instr(ctx, sll, r0, r0, 0); 356 + 357 + if (ctx->flags & EBPF_SAVE_RA) { 358 + emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); 359 + store_offset -= 8; 360 + } 361 + if (ctx->flags & EBPF_SAVE_S0) { 362 + emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP); 363 + store_offset -= 8; 364 + } 365 + if (ctx->flags & EBPF_SAVE_S1) { 366 + emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP); 367 + store_offset -= 8; 368 + } 369 + if (ctx->flags & EBPF_SAVE_S2) { 370 + emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP); 371 + store_offset -= 8; 372 + } 373 + if (ctx->flags & EBPF_SAVE_S3) { 374 + emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP); 375 + store_offset -= 8; 376 + } 377 + if (ctx->flags & EBPF_SAVE_S4) { 378 + emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP); 379 + store_offset -= 8; 380 + } 381 + emit_instr(ctx, jr, dest_reg); 382 + 383 + if (stack_adjust) 384 + emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust); 385 + else 386 + emit_instr(ctx, nop); 387 + 388 + return 0; 389 + } 390 + 391 + static void gen_imm_to_reg(const struct bpf_insn *insn, int reg, 392 + struct jit_ctx *ctx) 393 + { 394 + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { 395 + emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm); 396 + } else { 397 + int lower = (s16)(insn->imm & 0xffff); 398 + int upper = insn->imm - lower; 399 + 400 + emit_instr(ctx, lui, reg, upper >> 16); 401 + emit_instr(ctx, addiu, reg, reg, lower); 402 + } 403 + 404 + } 405 + 406 + static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, 407 + int idx) 408 + { 409 + int upper_bound, lower_bound; 410 + int dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 411 + 412 + if (dst < 0) 413 + return dst; 414 + 415 + switch (BPF_OP(insn->code)) { 416 + case BPF_MOV: 417 + case BPF_ADD: 418 + upper_bound = S16_MAX; 419 + lower_bound = S16_MIN; 420 + break; 421 + case 
BPF_SUB: 422 + upper_bound = -(int)S16_MIN; 423 + lower_bound = -(int)S16_MAX; 424 + break; 425 + case BPF_AND: 426 + case BPF_OR: 427 + case BPF_XOR: 428 + upper_bound = 0xffff; 429 + lower_bound = 0; 430 + break; 431 + case BPF_RSH: 432 + case BPF_LSH: 433 + case BPF_ARSH: 434 + /* Shift amounts are truncated, no need for bounds */ 435 + upper_bound = S32_MAX; 436 + lower_bound = S32_MIN; 437 + break; 438 + default: 439 + return -EINVAL; 440 + } 441 + 442 + /* 443 + * Immediate move clobbers the register, so no sign/zero 444 + * extension needed. 445 + */ 446 + if (BPF_CLASS(insn->code) == BPF_ALU64 && 447 + BPF_OP(insn->code) != BPF_MOV && 448 + get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT) 449 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 450 + /* BPF_ALU | BPF_LSH doesn't need separate sign extension */ 451 + if (BPF_CLASS(insn->code) == BPF_ALU && 452 + BPF_OP(insn->code) != BPF_LSH && 453 + BPF_OP(insn->code) != BPF_MOV && 454 + get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT) 455 + emit_instr(ctx, sll, dst, dst, 0); 456 + 457 + if (insn->imm >= lower_bound && insn->imm <= upper_bound) { 458 + /* single insn immediate case */ 459 + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { 460 + case BPF_ALU64 | BPF_MOV: 461 + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm); 462 + break; 463 + case BPF_ALU64 | BPF_AND: 464 + case BPF_ALU | BPF_AND: 465 + emit_instr(ctx, andi, dst, dst, insn->imm); 466 + break; 467 + case BPF_ALU64 | BPF_OR: 468 + case BPF_ALU | BPF_OR: 469 + emit_instr(ctx, ori, dst, dst, insn->imm); 470 + break; 471 + case BPF_ALU64 | BPF_XOR: 472 + case BPF_ALU | BPF_XOR: 473 + emit_instr(ctx, xori, dst, dst, insn->imm); 474 + break; 475 + case BPF_ALU64 | BPF_ADD: 476 + emit_instr(ctx, daddiu, dst, dst, insn->imm); 477 + break; 478 + case BPF_ALU64 | BPF_SUB: 479 + emit_instr(ctx, daddiu, dst, dst, -insn->imm); 480 + break; 481 + case BPF_ALU64 | BPF_RSH: 482 + emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f); 483 + break; 484 + case BPF_ALU | BPF_RSH: 485 + emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f); 486 + break; 487 + case BPF_ALU64 | BPF_LSH: 488 + emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f); 489 + break; 490 + case BPF_ALU | BPF_LSH: 491 + emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f); 492 + break; 493 + case BPF_ALU64 | BPF_ARSH: 494 + emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f); 495 + break; 496 + case BPF_ALU | BPF_ARSH: 497 + emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f); 498 + break; 499 + case BPF_ALU | BPF_MOV: 500 + emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm); 501 + break; 502 + case BPF_ALU | BPF_ADD: 503 + emit_instr(ctx, addiu, dst, dst, insn->imm); 504 + break; 505 + case BPF_ALU | BPF_SUB: 506 + emit_instr(ctx, addiu, dst, dst, -insn->imm); 507 + break; 508 + default: 509 + return -EINVAL; 510 + } 511 + } else { 512 + /* multi insn immediate case */ 513 + if (BPF_OP(insn->code) == BPF_MOV) { 514 + gen_imm_to_reg(insn, dst, ctx); 515 + } else { 516 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 517 + switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) { 518 + case BPF_ALU64 | BPF_AND: 519 + case BPF_ALU | BPF_AND: 520 + emit_instr(ctx, and, dst, dst, MIPS_R_AT); 521 + break; 522 + case BPF_ALU64 | BPF_OR: 523 + case BPF_ALU | BPF_OR: 524 + emit_instr(ctx, or, dst, dst, MIPS_R_AT); 525 + break; 526 + case BPF_ALU64 | BPF_XOR: 527 + case BPF_ALU | BPF_XOR: 528 + emit_instr(ctx, xor, dst, dst, MIPS_R_AT); 529 + break; 530 + case BPF_ALU64 | BPF_ADD: 531 + emit_instr(ctx, daddu, dst, dst, 
MIPS_R_AT); 532 + break; 533 + case BPF_ALU64 | BPF_SUB: 534 + emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT); 535 + break; 536 + case BPF_ALU | BPF_ADD: 537 + emit_instr(ctx, addu, dst, dst, MIPS_R_AT); 538 + break; 539 + case BPF_ALU | BPF_SUB: 540 + emit_instr(ctx, subu, dst, dst, MIPS_R_AT); 541 + break; 542 + default: 543 + return -EINVAL; 544 + } 545 + } 546 + } 547 + 548 + return 0; 549 + } 550 + 551 + static void * __must_check 552 + ool_skb_header_pointer(const struct sk_buff *skb, int offset, 553 + int len, void *buffer) 554 + { 555 + return skb_header_pointer(skb, offset, len, buffer); 556 + } 557 + 558 + static int size_to_len(const struct bpf_insn *insn) 559 + { 560 + switch (BPF_SIZE(insn->code)) { 561 + case BPF_B: 562 + return 1; 563 + case BPF_H: 564 + return 2; 565 + case BPF_W: 566 + return 4; 567 + case BPF_DW: 568 + return 8; 569 + } 570 + return 0; 571 + } 572 + 573 + static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value) 574 + { 575 + if (value >= 0xffffffffffff8000ull || value < 0x8000ull) { 576 + emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value); 577 + } else if (value >= 0xffffffff80000000ull || 578 + (value < 0x80000000 && value > 0xffff)) { 579 + emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16)); 580 + emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff)); 581 + } else { 582 + int i; 583 + bool seen_part = false; 584 + int needed_shift = 0; 585 + 586 + for (i = 0; i < 4; i++) { 587 + u64 part = (value >> (16 * (3 - i))) & 0xffff; 588 + 589 + if (seen_part && needed_shift > 0 && (part || i == 3)) { 590 + emit_instr(ctx, dsll_safe, dst, dst, needed_shift); 591 + needed_shift = 0; 592 + } 593 + if (part) { 594 + if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) { 595 + emit_instr(ctx, lui, dst, (s32)(s16)part); 596 + needed_shift = -16; 597 + } else { 598 + emit_instr(ctx, ori, dst, 599 + seen_part ? dst : MIPS_R_ZERO, 600 + (unsigned int)part); 601 + } 602 + seen_part = true; 603 + } 604 + if (seen_part) 605 + needed_shift += 16; 606 + } 607 + } 608 + } 609 + 610 + static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) 611 + { 612 + int off, b_off; 613 + 614 + ctx->flags |= EBPF_SEEN_TC; 615 + /* 616 + * if (index >= array->map.max_entries) 617 + * goto out; 618 + */ 619 + off = offsetof(struct bpf_array, map.max_entries); 620 + emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1); 621 + emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2); 622 + b_off = b_imm(this_idx + 1, ctx); 623 + emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off); 624 + /* 625 + * if (--TCC < 0) 626 + * goto out; 627 + */ 628 + /* Delay slot */ 629 + emit_instr(ctx, daddiu, MIPS_R_T5, 630 + (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1); 631 + b_off = b_imm(this_idx + 1, ctx); 632 + emit_instr(ctx, bltz, MIPS_R_T5, b_off); 633 + /* 634 + * prog = array->ptrs[index]; 635 + * if (prog == NULL) 636 + * goto out; 637 + */ 638 + /* Delay slot */ 639 + emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3); 640 + emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1); 641 + off = offsetof(struct bpf_array, ptrs); 642 + emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8); 643 + b_off = b_imm(this_idx + 1, ctx); 644 + emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off); 645 + /* Delay slot */ 646 + emit_instr(ctx, nop); 647 + 648 + /* goto *(prog->bpf_func + 4); */ 649 + off = offsetof(struct bpf_prog, bpf_func); 650 + emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT); 651 + /* All systems are go... 
propagate TCC */ 652 + emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO); 653 + /* Skip first instruction (TCC initialization) */ 654 + emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4); 655 + return build_int_epilogue(ctx, MIPS_R_T9); 656 + } 657 + 658 + static bool use_bbit_insns(void) 659 + { 660 + switch (current_cpu_type()) { 661 + case CPU_CAVIUM_OCTEON: 662 + case CPU_CAVIUM_OCTEON_PLUS: 663 + case CPU_CAVIUM_OCTEON2: 664 + case CPU_CAVIUM_OCTEON3: 665 + return true; 666 + default: 667 + return false; 668 + } 669 + } 670 + 671 + static bool is_bad_offset(int b_off) 672 + { 673 + return b_off > 0x1ffff || b_off < -0x20000; 674 + } 675 + 676 + /* Returns the number of insn slots consumed. */ 677 + static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, 678 + int this_idx, int exit_idx) 679 + { 680 + int src, dst, r, td, ts, mem_off, b_off; 681 + bool need_swap, did_move, cmp_eq; 682 + unsigned int target; 683 + u64 t64; 684 + s64 t64s; 685 + 686 + switch (insn->code) { 687 + case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ 688 + case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */ 689 + case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */ 690 + case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */ 691 + case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */ 692 + case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */ 693 + case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */ 694 + case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */ 695 + case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */ 696 + case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */ 697 + case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */ 698 + case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */ 699 + case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */ 700 + case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */ 701 + case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */ 702 + case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */ 703 + case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */ 704 + case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */ 705 + r = gen_imm_insn(insn, ctx, this_idx); 706 + if (r < 0) 707 + return r; 708 + break; 709 + case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */ 710 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 711 + if (dst < 0) 712 + return dst; 713 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 714 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 715 + if (insn->imm == 1) /* Mult by 1 is a nop */ 716 + break; 717 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 718 + emit_instr(ctx, dmultu, MIPS_R_AT, dst); 719 + emit_instr(ctx, mflo, dst); 720 + break; 721 + case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */ 722 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 723 + if (dst < 0) 724 + return dst; 725 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 726 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 727 + emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst); 728 + break; 729 + case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */ 730 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 731 + if (dst < 0) 732 + return dst; 733 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 734 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 735 + /* sign extend */ 736 + emit_instr(ctx, sll, dst, dst, 0); 737 + } 738 + if (insn->imm == 1) /* Mult by 1 is a nop */ 739 + break; 740 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 741 + emit_instr(ctx, multu, dst, MIPS_R_AT); 742 + emit_instr(ctx, mflo, dst); 743 + break; 744 + case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */ 745 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 746 + if (dst < 0) 
747 + return dst; 748 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 749 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 750 + /* sign extend */ 751 + emit_instr(ctx, sll, dst, dst, 0); 752 + } 753 + emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst); 754 + break; 755 + case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */ 756 + case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */ 757 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 758 + if (dst < 0) 759 + return dst; 760 + if (insn->imm == 0) { /* Div by zero */ 761 + b_off = b_imm(exit_idx, ctx); 762 + if (is_bad_offset(b_off)) 763 + return -E2BIG; 764 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 765 + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); 766 + } 767 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 768 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) 769 + /* sign extend */ 770 + emit_instr(ctx, sll, dst, dst, 0); 771 + if (insn->imm == 1) { 772 + /* div by 1 is a nop, mod by 1 is zero */ 773 + if (BPF_OP(insn->code) == BPF_MOD) 774 + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); 775 + break; 776 + } 777 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 778 + emit_instr(ctx, divu, dst, MIPS_R_AT); 779 + if (BPF_OP(insn->code) == BPF_DIV) 780 + emit_instr(ctx, mflo, dst); 781 + else 782 + emit_instr(ctx, mfhi, dst); 783 + break; 784 + case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */ 785 + case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */ 786 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 787 + if (dst < 0) 788 + return dst; 789 + if (insn->imm == 0) { /* Div by zero */ 790 + b_off = b_imm(exit_idx, ctx); 791 + if (is_bad_offset(b_off)) 792 + return -E2BIG; 793 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 794 + emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO); 795 + } 796 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 797 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 798 + 799 + if (insn->imm == 1) { 800 + /* div by 1 is a nop, mod by 1 is zero */ 801 + if (BPF_OP(insn->code) == BPF_MOD) 802 + emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); 803 + break; 804 + } 805 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 806 + emit_instr(ctx, ddivu, dst, MIPS_R_AT); 807 + if (BPF_OP(insn->code) == BPF_DIV) 808 + emit_instr(ctx, mflo, dst); 809 + else 810 + emit_instr(ctx, mfhi, dst); 811 + break; 812 + case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */ 813 + case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */ 814 + case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */ 815 + case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */ 816 + case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */ 817 + case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */ 818 + case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */ 819 + case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */ 820 + case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */ 821 + case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */ 822 + case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */ 823 + case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */ 824 + src = ebpf_to_mips_reg(ctx, insn, src_reg); 825 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 826 + if (src < 0 || dst < 0) 827 + return -EINVAL; 828 + if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT) 829 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 830 + did_move = false; 831 + if (insn->src_reg == BPF_REG_10) { 832 + if (BPF_OP(insn->code) == BPF_MOV) { 833 + emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); 834 + did_move = true; 835 + } else { 836 + emit_instr(ctx, daddiu, MIPS_R_AT, 
MIPS_R_SP, MAX_BPF_STACK); 837 + src = MIPS_R_AT; 838 + } 839 + } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 840 + int tmp_reg = MIPS_R_AT; 841 + 842 + if (BPF_OP(insn->code) == BPF_MOV) { 843 + tmp_reg = dst; 844 + did_move = true; 845 + } 846 + emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO); 847 + emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); 848 + src = MIPS_R_AT; 849 + } 850 + switch (BPF_OP(insn->code)) { 851 + case BPF_MOV: 852 + if (!did_move) 853 + emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); 854 + break; 855 + case BPF_ADD: 856 + emit_instr(ctx, daddu, dst, dst, src); 857 + break; 858 + case BPF_SUB: 859 + emit_instr(ctx, dsubu, dst, dst, src); 860 + break; 861 + case BPF_XOR: 862 + emit_instr(ctx, xor, dst, dst, src); 863 + break; 864 + case BPF_OR: 865 + emit_instr(ctx, or, dst, dst, src); 866 + break; 867 + case BPF_AND: 868 + emit_instr(ctx, and, dst, dst, src); 869 + break; 870 + case BPF_MUL: 871 + emit_instr(ctx, dmultu, dst, src); 872 + emit_instr(ctx, mflo, dst); 873 + break; 874 + case BPF_DIV: 875 + case BPF_MOD: 876 + b_off = b_imm(exit_idx, ctx); 877 + if (is_bad_offset(b_off)) 878 + return -E2BIG; 879 + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); 880 + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); 881 + emit_instr(ctx, ddivu, dst, src); 882 + if (BPF_OP(insn->code) == BPF_DIV) 883 + emit_instr(ctx, mflo, dst); 884 + else 885 + emit_instr(ctx, mfhi, dst); 886 + break; 887 + case BPF_LSH: 888 + emit_instr(ctx, dsllv, dst, dst, src); 889 + break; 890 + case BPF_RSH: 891 + emit_instr(ctx, dsrlv, dst, dst, src); 892 + break; 893 + case BPF_ARSH: 894 + emit_instr(ctx, dsrav, dst, dst, src); 895 + break; 896 + default: 897 + pr_err("ALU64_REG NOT HANDLED\n"); 898 + return -EINVAL; 899 + } 900 + break; 901 + case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */ 902 + case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */ 903 + case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */ 904 + case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */ 905 + case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */ 906 + case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */ 907 + case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */ 908 + case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */ 909 + case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */ 910 + case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */ 911 + case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */ 912 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 913 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 914 + if (src < 0 || dst < 0) 915 + return -EINVAL; 916 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 917 + if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { 918 + /* sign extend */ 919 + emit_instr(ctx, sll, dst, dst, 0); 920 + } 921 + did_move = false; 922 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 923 + if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { 924 + int tmp_reg = MIPS_R_AT; 925 + 926 + if (BPF_OP(insn->code) == BPF_MOV) { 927 + tmp_reg = dst; 928 + did_move = true; 929 + } 930 + /* sign extend */ 931 + emit_instr(ctx, sll, tmp_reg, src, 0); 932 + src = MIPS_R_AT; 933 + } 934 + switch (BPF_OP(insn->code)) { 935 + case BPF_MOV: 936 + if (!did_move) 937 + emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); 938 + break; 939 + case BPF_ADD: 940 + emit_instr(ctx, addu, dst, dst, src); 941 + break; 942 + case BPF_SUB: 943 + emit_instr(ctx, subu, dst, dst, src); 944 + break; 945 + case BPF_XOR: 946 + emit_instr(ctx, xor, dst, dst, src); 947 + break; 948 + case BPF_OR: 949 + emit_instr(ctx, or, dst, dst, src); 950 + break; 951 + case BPF_AND: 
952 + emit_instr(ctx, and, dst, dst, src); 953 + break; 954 + case BPF_MUL: 955 + emit_instr(ctx, mul, dst, dst, src); 956 + break; 957 + case BPF_DIV: 958 + case BPF_MOD: 959 + b_off = b_imm(exit_idx, ctx); 960 + if (is_bad_offset(b_off)) 961 + return -E2BIG; 962 + emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); 963 + emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); 964 + emit_instr(ctx, divu, dst, src); 965 + if (BPF_OP(insn->code) == BPF_DIV) 966 + emit_instr(ctx, mflo, dst); 967 + else 968 + emit_instr(ctx, mfhi, dst); 969 + break; 970 + case BPF_LSH: 971 + emit_instr(ctx, sllv, dst, dst, src); 972 + break; 973 + case BPF_RSH: 974 + emit_instr(ctx, srlv, dst, dst, src); 975 + break; 976 + default: 977 + pr_err("ALU_REG NOT HANDLED\n"); 978 + return -EINVAL; 979 + } 980 + break; 981 + case BPF_JMP | BPF_EXIT: 982 + if (this_idx + 1 < exit_idx) { 983 + b_off = b_imm(exit_idx, ctx); 984 + if (is_bad_offset(b_off)) 985 + return -E2BIG; 986 + emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off); 987 + emit_instr(ctx, nop); 988 + } 989 + break; 990 + case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ 991 + case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ 992 + cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); 993 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 994 + if (dst < 0) 995 + return dst; 996 + if (insn->imm == 0) { 997 + src = MIPS_R_ZERO; 998 + } else { 999 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1000 + src = MIPS_R_AT; 1001 + } 1002 + goto jeq_common; 1003 + case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ 1004 + case BPF_JMP | BPF_JNE | BPF_X: 1005 + case BPF_JMP | BPF_JSGT | BPF_X: 1006 + case BPF_JMP | BPF_JSGE | BPF_X: 1007 + case BPF_JMP | BPF_JGT | BPF_X: 1008 + case BPF_JMP | BPF_JGE | BPF_X: 1009 + case BPF_JMP | BPF_JSET | BPF_X: 1010 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1011 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1012 + if (src < 0 || dst < 0) 1013 + return -EINVAL; 1014 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 1015 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 1016 + if (td == REG_32BIT && ts != REG_32BIT) { 1017 + emit_instr(ctx, sll, MIPS_R_AT, src, 0); 1018 + src = MIPS_R_AT; 1019 + } else if (ts == REG_32BIT && td != REG_32BIT) { 1020 + emit_instr(ctx, sll, MIPS_R_AT, dst, 0); 1021 + dst = MIPS_R_AT; 1022 + } 1023 + if (BPF_OP(insn->code) == BPF_JSET) { 1024 + emit_instr(ctx, and, MIPS_R_AT, dst, src); 1025 + cmp_eq = false; 1026 + dst = MIPS_R_AT; 1027 + src = MIPS_R_ZERO; 1028 + } else if (BPF_OP(insn->code) == BPF_JSGT) { 1029 + emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); 1030 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1031 + b_off = b_imm(exit_idx, ctx); 1032 + if (is_bad_offset(b_off)) 1033 + return -E2BIG; 1034 + emit_instr(ctx, blez, MIPS_R_AT, b_off); 1035 + emit_instr(ctx, nop); 1036 + return 2; /* We consumed the exit. 
*/ 1037 + } 1038 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1039 + if (is_bad_offset(b_off)) 1040 + return -E2BIG; 1041 + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); 1042 + emit_instr(ctx, nop); 1043 + break; 1044 + } else if (BPF_OP(insn->code) == BPF_JSGE) { 1045 + emit_instr(ctx, slt, MIPS_R_AT, dst, src); 1046 + cmp_eq = true; 1047 + dst = MIPS_R_AT; 1048 + src = MIPS_R_ZERO; 1049 + } else if (BPF_OP(insn->code) == BPF_JGT) { 1050 + /* dst or src could be AT */ 1051 + emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); 1052 + emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1053 + /* SP known to be non-zero, movz becomes boolean not */ 1054 + emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); 1055 + emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); 1056 + emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); 1057 + cmp_eq = true; 1058 + dst = MIPS_R_AT; 1059 + src = MIPS_R_ZERO; 1060 + } else if (BPF_OP(insn->code) == BPF_JGE) { 1061 + emit_instr(ctx, sltu, MIPS_R_AT, dst, src); 1062 + cmp_eq = true; 1063 + dst = MIPS_R_AT; 1064 + src = MIPS_R_ZERO; 1065 + } else { /* JNE/JEQ case */ 1066 + cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); 1067 + } 1068 + jeq_common: 1069 + /* 1070 + * If the next insn is EXIT and we are jumping arround 1071 + * only it, invert the sense of the compare and 1072 + * conditionally jump to the exit. Poor man's branch 1073 + * chaining. 1074 + */ 1075 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1076 + b_off = b_imm(exit_idx, ctx); 1077 + if (is_bad_offset(b_off)) { 1078 + target = j_target(ctx, exit_idx); 1079 + if (target == (unsigned int)-1) 1080 + return -E2BIG; 1081 + cmp_eq = !cmp_eq; 1082 + b_off = 4 * 3; 1083 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1084 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1085 + ctx->long_b_conversion = 1; 1086 + } 1087 + } 1088 + 1089 + if (cmp_eq) 1090 + emit_instr(ctx, bne, dst, src, b_off); 1091 + else 1092 + emit_instr(ctx, beq, dst, src, b_off); 1093 + emit_instr(ctx, nop); 1094 + if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { 1095 + emit_instr(ctx, j, target); 1096 + emit_instr(ctx, nop); 1097 + } 1098 + return 2; /* We consumed the exit. */ 1099 + } 1100 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1101 + if (is_bad_offset(b_off)) { 1102 + target = j_target(ctx, this_idx + insn->off + 1); 1103 + if (target == (unsigned int)-1) 1104 + return -E2BIG; 1105 + cmp_eq = !cmp_eq; 1106 + b_off = 4 * 3; 1107 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1108 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1109 + ctx->long_b_conversion = 1; 1110 + } 1111 + } 1112 + 1113 + if (cmp_eq) 1114 + emit_instr(ctx, beq, dst, src, b_off); 1115 + else 1116 + emit_instr(ctx, bne, dst, src, b_off); 1117 + emit_instr(ctx, nop); 1118 + if (ctx->offsets[this_idx] & OFFSETS_B_CONV) { 1119 + emit_instr(ctx, j, target); 1120 + emit_instr(ctx, nop); 1121 + } 1122 + break; 1123 + case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ 1124 + case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ 1125 + cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); 1126 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1127 + if (dst < 0) 1128 + return dst; 1129 + 1130 + if (insn->imm == 0) { 1131 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1132 + b_off = b_imm(exit_idx, ctx); 1133 + if (is_bad_offset(b_off)) 1134 + return -E2BIG; 1135 + if (cmp_eq) 1136 + emit_instr(ctx, bltz, dst, b_off); 1137 + else 1138 + emit_instr(ctx, blez, dst, b_off); 1139 + emit_instr(ctx, nop); 1140 + return 2; /* We consumed the exit. 
*/ 1141 + } 1142 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1143 + if (is_bad_offset(b_off)) 1144 + return -E2BIG; 1145 + if (cmp_eq) 1146 + emit_instr(ctx, bgez, dst, b_off); 1147 + else 1148 + emit_instr(ctx, bgtz, dst, b_off); 1149 + emit_instr(ctx, nop); 1150 + break; 1151 + } 1152 + /* 1153 + * only "LT" compare available, so we must use imm + 1 1154 + * to generate "GT" 1155 + */ 1156 + t64s = insn->imm + (cmp_eq ? 0 : 1); 1157 + if (t64s >= S16_MIN && t64s <= S16_MAX) { 1158 + emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); 1159 + src = MIPS_R_AT; 1160 + dst = MIPS_R_ZERO; 1161 + cmp_eq = true; 1162 + goto jeq_common; 1163 + } 1164 + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); 1165 + emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); 1166 + src = MIPS_R_AT; 1167 + dst = MIPS_R_ZERO; 1168 + cmp_eq = true; 1169 + goto jeq_common; 1170 + 1171 + case BPF_JMP | BPF_JGT | BPF_K: 1172 + case BPF_JMP | BPF_JGE | BPF_K: 1173 + cmp_eq = (BPF_OP(insn->code) == BPF_JGE); 1174 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1175 + if (dst < 0) 1176 + return dst; 1177 + /* 1178 + * only "LT" compare available, so we must use imm + 1 1179 + * to generate "GT" 1180 + */ 1181 + t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); 1182 + if (t64s >= 0 && t64s <= S16_MAX) { 1183 + emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); 1184 + src = MIPS_R_AT; 1185 + dst = MIPS_R_ZERO; 1186 + cmp_eq = true; 1187 + goto jeq_common; 1188 + } 1189 + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); 1190 + emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); 1191 + src = MIPS_R_AT; 1192 + dst = MIPS_R_ZERO; 1193 + cmp_eq = true; 1194 + goto jeq_common; 1195 + 1196 + case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ 1197 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); 1198 + if (dst < 0) 1199 + return dst; 1200 + 1201 + if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { 1202 + if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { 1203 + b_off = b_imm(exit_idx, ctx); 1204 + if (is_bad_offset(b_off)) 1205 + return -E2BIG; 1206 + emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off); 1207 + emit_instr(ctx, nop); 1208 + return 2; /* We consumed the exit. */ 1209 + } 1210 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1211 + if (is_bad_offset(b_off)) 1212 + return -E2BIG; 1213 + emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off); 1214 + emit_instr(ctx, nop); 1215 + break; 1216 + } 1217 + t64 = (u32)insn->imm; 1218 + emit_const_to_reg(ctx, MIPS_R_AT, t64); 1219 + emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT); 1220 + src = MIPS_R_AT; 1221 + dst = MIPS_R_ZERO; 1222 + cmp_eq = false; 1223 + goto jeq_common; 1224 + 1225 + case BPF_JMP | BPF_JA: 1226 + /* 1227 + * Prefer relative branch for easier debugging, but 1228 + * fall back if needed. 
1229 + */ 1230 + b_off = b_imm(this_idx + insn->off + 1, ctx); 1231 + if (is_bad_offset(b_off)) { 1232 + target = j_target(ctx, this_idx + insn->off + 1); 1233 + if (target == (unsigned int)-1) 1234 + return -E2BIG; 1235 + emit_instr(ctx, j, target); 1236 + } else { 1237 + emit_instr(ctx, b, b_off); 1238 + } 1239 + emit_instr(ctx, nop); 1240 + break; 1241 + case BPF_LD | BPF_DW | BPF_IMM: 1242 + if (insn->src_reg != 0) 1243 + return -EINVAL; 1244 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1245 + if (dst < 0) 1246 + return dst; 1247 + t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32); 1248 + emit_const_to_reg(ctx, dst, t64); 1249 + return 2; /* Double slot insn */ 1250 + 1251 + case BPF_JMP | BPF_CALL: 1252 + ctx->flags |= EBPF_SAVE_RA; 1253 + t64s = (s64)insn->imm + (s64)__bpf_call_base; 1254 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s); 1255 + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1256 + /* delay slot */ 1257 + emit_instr(ctx, nop); 1258 + break; 1259 + 1260 + case BPF_JMP | BPF_TAIL_CALL: 1261 + if (emit_bpf_tail_call(ctx, this_idx)) 1262 + return -EINVAL; 1263 + break; 1264 + 1265 + case BPF_LD | BPF_B | BPF_ABS: 1266 + case BPF_LD | BPF_H | BPF_ABS: 1267 + case BPF_LD | BPF_W | BPF_ABS: 1268 + case BPF_LD | BPF_DW | BPF_ABS: 1269 + ctx->flags |= EBPF_SAVE_RA; 1270 + 1271 + gen_imm_to_reg(insn, MIPS_R_A1, ctx); 1272 + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); 1273 + 1274 + if (insn->imm < 0) { 1275 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper); 1276 + } else { 1277 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); 1278 + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); 1279 + } 1280 + goto ld_skb_common; 1281 + 1282 + case BPF_LD | BPF_B | BPF_IND: 1283 + case BPF_LD | BPF_H | BPF_IND: 1284 + case BPF_LD | BPF_W | BPF_IND: 1285 + case BPF_LD | BPF_DW | BPF_IND: 1286 + ctx->flags |= EBPF_SAVE_RA; 1287 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1288 + if (src < 0) 1289 + return src; 1290 + ts = get_reg_val_type(ctx, this_idx, insn->src_reg); 1291 + if (ts == REG_32BIT_ZERO_EX) { 1292 + /* sign extend */ 1293 + emit_instr(ctx, sll, MIPS_R_A1, src, 0); 1294 + src = MIPS_R_A1; 1295 + } 1296 + if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) { 1297 + emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm); 1298 + } else { 1299 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1300 + emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src); 1301 + } 1302 + /* truncate to 32-bit int */ 1303 + emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0); 1304 + emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset); 1305 + emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO); 1306 + 1307 + emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper); 1308 + emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer); 1309 + emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn)); 1310 + emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT); 1311 + 1312 + ld_skb_common: 1313 + emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9); 1314 + /* delay slot move */ 1315 + emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO); 1316 + 1317 + /* Check the error value */ 1318 + b_off = b_imm(exit_idx, ctx); 1319 + if (is_bad_offset(b_off)) { 1320 + target = j_target(ctx, exit_idx); 1321 + if (target == (unsigned int)-1) 1322 + return -E2BIG; 1323 + 1324 + if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) { 1325 + ctx->offsets[this_idx] |= OFFSETS_B_CONV; 1326 + ctx->long_b_conversion = 1; 
1327 + } 1328 + emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3); 1329 + emit_instr(ctx, nop); 1330 + emit_instr(ctx, j, target); 1331 + emit_instr(ctx, nop); 1332 + } else { 1333 + emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off); 1334 + emit_instr(ctx, nop); 1335 + } 1336 + 1337 + #ifdef __BIG_ENDIAN 1338 + need_swap = false; 1339 + #else 1340 + need_swap = true; 1341 + #endif 1342 + dst = MIPS_R_V0; 1343 + switch (BPF_SIZE(insn->code)) { 1344 + case BPF_B: 1345 + emit_instr(ctx, lbu, dst, 0, MIPS_R_V0); 1346 + break; 1347 + case BPF_H: 1348 + emit_instr(ctx, lhu, dst, 0, MIPS_R_V0); 1349 + if (need_swap) 1350 + emit_instr(ctx, wsbh, dst, dst); 1351 + break; 1352 + case BPF_W: 1353 + emit_instr(ctx, lw, dst, 0, MIPS_R_V0); 1354 + if (need_swap) { 1355 + emit_instr(ctx, wsbh, dst, dst); 1356 + emit_instr(ctx, rotr, dst, dst, 16); 1357 + } 1358 + break; 1359 + case BPF_DW: 1360 + emit_instr(ctx, ld, dst, 0, MIPS_R_V0); 1361 + if (need_swap) { 1362 + emit_instr(ctx, dsbh, dst, dst); 1363 + emit_instr(ctx, dshd, dst, dst); 1364 + } 1365 + break; 1366 + } 1367 + 1368 + break; 1369 + case BPF_ALU | BPF_END | BPF_FROM_BE: 1370 + case BPF_ALU | BPF_END | BPF_FROM_LE: 1371 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1372 + if (dst < 0) 1373 + return dst; 1374 + td = get_reg_val_type(ctx, this_idx, insn->dst_reg); 1375 + if (insn->imm == 64 && td == REG_32BIT) 1376 + emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); 1377 + 1378 + if (insn->imm != 64 && 1379 + (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { 1380 + /* sign extend */ 1381 + emit_instr(ctx, sll, dst, dst, 0); 1382 + } 1383 + 1384 + #ifdef __BIG_ENDIAN 1385 + need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE); 1386 + #else 1387 + need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE); 1388 + #endif 1389 + if (insn->imm == 16) { 1390 + if (need_swap) 1391 + emit_instr(ctx, wsbh, dst, dst); 1392 + emit_instr(ctx, andi, dst, dst, 0xffff); 1393 + } else if (insn->imm == 32) { 1394 + if (need_swap) { 1395 + emit_instr(ctx, wsbh, dst, dst); 1396 + emit_instr(ctx, rotr, dst, dst, 16); 1397 + } 1398 + } else { /* 64-bit*/ 1399 + if (need_swap) { 1400 + emit_instr(ctx, dsbh, dst, dst); 1401 + emit_instr(ctx, dshd, dst, dst); 1402 + } 1403 + } 1404 + break; 1405 + 1406 + case BPF_ST | BPF_B | BPF_MEM: 1407 + case BPF_ST | BPF_H | BPF_MEM: 1408 + case BPF_ST | BPF_W | BPF_MEM: 1409 + case BPF_ST | BPF_DW | BPF_MEM: 1410 + if (insn->dst_reg == BPF_REG_10) { 1411 + ctx->flags |= EBPF_SEEN_FP; 1412 + dst = MIPS_R_SP; 1413 + mem_off = insn->off + MAX_BPF_STACK; 1414 + } else { 1415 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1416 + if (dst < 0) 1417 + return dst; 1418 + mem_off = insn->off; 1419 + } 1420 + gen_imm_to_reg(insn, MIPS_R_AT, ctx); 1421 + switch (BPF_SIZE(insn->code)) { 1422 + case BPF_B: 1423 + emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst); 1424 + break; 1425 + case BPF_H: 1426 + emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); 1427 + break; 1428 + case BPF_W: 1429 + emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst); 1430 + break; 1431 + case BPF_DW: 1432 + emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst); 1433 + break; 1434 + } 1435 + break; 1436 + 1437 + case BPF_LDX | BPF_B | BPF_MEM: 1438 + case BPF_LDX | BPF_H | BPF_MEM: 1439 + case BPF_LDX | BPF_W | BPF_MEM: 1440 + case BPF_LDX | BPF_DW | BPF_MEM: 1441 + if (insn->src_reg == BPF_REG_10) { 1442 + ctx->flags |= EBPF_SEEN_FP; 1443 + src = MIPS_R_SP; 1444 + mem_off = insn->off + MAX_BPF_STACK; 1445 + } else { 1446 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1447 + if (src < 0) 1448 + 
return src; 1449 + mem_off = insn->off; 1450 + } 1451 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1452 + if (dst < 0) 1453 + return dst; 1454 + switch (BPF_SIZE(insn->code)) { 1455 + case BPF_B: 1456 + emit_instr(ctx, lbu, dst, mem_off, src); 1457 + break; 1458 + case BPF_H: 1459 + emit_instr(ctx, lhu, dst, mem_off, src); 1460 + break; 1461 + case BPF_W: 1462 + emit_instr(ctx, lw, dst, mem_off, src); 1463 + break; 1464 + case BPF_DW: 1465 + emit_instr(ctx, ld, dst, mem_off, src); 1466 + break; 1467 + } 1468 + break; 1469 + 1470 + case BPF_STX | BPF_B | BPF_MEM: 1471 + case BPF_STX | BPF_H | BPF_MEM: 1472 + case BPF_STX | BPF_W | BPF_MEM: 1473 + case BPF_STX | BPF_DW | BPF_MEM: 1474 + case BPF_STX | BPF_W | BPF_XADD: 1475 + case BPF_STX | BPF_DW | BPF_XADD: 1476 + if (insn->dst_reg == BPF_REG_10) { 1477 + ctx->flags |= EBPF_SEEN_FP; 1478 + dst = MIPS_R_SP; 1479 + mem_off = insn->off + MAX_BPF_STACK; 1480 + } else { 1481 + dst = ebpf_to_mips_reg(ctx, insn, dst_reg); 1482 + if (dst < 0) 1483 + return dst; 1484 + mem_off = insn->off; 1485 + } 1486 + src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp); 1487 + if (src < 0) 1488 + return dst; 1489 + if (BPF_MODE(insn->code) == BPF_XADD) { 1490 + switch (BPF_SIZE(insn->code)) { 1491 + case BPF_W: 1492 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1493 + emit_instr(ctx, sll, MIPS_R_AT, src, 0); 1494 + src = MIPS_R_AT; 1495 + } 1496 + emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst); 1497 + emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src); 1498 + emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst); 1499 + /* 1500 + * On failure back up to LL (-4 1501 + * instructions of 4 bytes each 1502 + */ 1503 + emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); 1504 + emit_instr(ctx, nop); 1505 + break; 1506 + case BPF_DW: 1507 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1508 + emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); 1509 + emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); 1510 + src = MIPS_R_AT; 1511 + } 1512 + emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst); 1513 + emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src); 1514 + emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst); 1515 + emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4); 1516 + emit_instr(ctx, nop); 1517 + break; 1518 + } 1519 + } else { /* BPF_MEM */ 1520 + switch (BPF_SIZE(insn->code)) { 1521 + case BPF_B: 1522 + emit_instr(ctx, sb, src, mem_off, dst); 1523 + break; 1524 + case BPF_H: 1525 + emit_instr(ctx, sh, src, mem_off, dst); 1526 + break; 1527 + case BPF_W: 1528 + emit_instr(ctx, sw, src, mem_off, dst); 1529 + break; 1530 + case BPF_DW: 1531 + if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { 1532 + emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO); 1533 + emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32); 1534 + src = MIPS_R_AT; 1535 + } 1536 + emit_instr(ctx, sd, src, mem_off, dst); 1537 + break; 1538 + } 1539 + } 1540 + break; 1541 + 1542 + default: 1543 + pr_err("NOT HANDLED %d - (%02x)\n", 1544 + this_idx, (unsigned int)insn->code); 1545 + return -EINVAL; 1546 + } 1547 + return 1; 1548 + } 1549 + 1550 + #define RVT_VISITED_MASK 0xc000000000000000ull 1551 + #define RVT_FALL_THROUGH 0x4000000000000000ull 1552 + #define RVT_BRANCH_TAKEN 0x8000000000000000ull 1553 + #define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN) 1554 + 1555 + static int build_int_body(struct jit_ctx *ctx) 1556 + { 1557 + const struct bpf_prog *prog = ctx->skf; 1558 + const struct bpf_insn *insn; 1559 + int i, r; 1560 + 1561 + for (i = 0; i 
< prog->len; ) { 1562 + insn = prog->insnsi + i; 1563 + if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) { 1564 + /* dead instruction, don't emit it. */ 1565 + i++; 1566 + continue; 1567 + } 1568 + 1569 + if (ctx->target == NULL) 1570 + ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4); 1571 + 1572 + r = build_one_insn(insn, ctx, i, prog->len); 1573 + if (r < 0) 1574 + return r; 1575 + i += r; 1576 + } 1577 + /* epilogue offset */ 1578 + if (ctx->target == NULL) 1579 + ctx->offsets[i] = ctx->idx * 4; 1580 + 1581 + /* 1582 + * All exits have an offset of the epilogue, some offsets may 1583 + * not have been set due to banch-around threading, so set 1584 + * them now. 1585 + */ 1586 + if (ctx->target == NULL) 1587 + for (i = 0; i < prog->len; i++) { 1588 + insn = prog->insnsi + i; 1589 + if (insn->code == (BPF_JMP | BPF_EXIT)) 1590 + ctx->offsets[i] = ctx->idx * 4; 1591 + } 1592 + return 0; 1593 + } 1594 + 1595 + /* return the last idx processed, or negative for error */ 1596 + static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, 1597 + int start_idx, bool follow_taken) 1598 + { 1599 + const struct bpf_prog *prog = ctx->skf; 1600 + const struct bpf_insn *insn; 1601 + u64 exit_rvt = initial_rvt; 1602 + u64 *rvt = ctx->reg_val_types; 1603 + int idx; 1604 + int reg; 1605 + 1606 + for (idx = start_idx; idx < prog->len; idx++) { 1607 + rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt; 1608 + insn = prog->insnsi + idx; 1609 + switch (BPF_CLASS(insn->code)) { 1610 + case BPF_ALU: 1611 + switch (BPF_OP(insn->code)) { 1612 + case BPF_ADD: 1613 + case BPF_SUB: 1614 + case BPF_MUL: 1615 + case BPF_DIV: 1616 + case BPF_OR: 1617 + case BPF_AND: 1618 + case BPF_LSH: 1619 + case BPF_RSH: 1620 + case BPF_NEG: 1621 + case BPF_MOD: 1622 + case BPF_XOR: 1623 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1624 + break; 1625 + case BPF_MOV: 1626 + if (BPF_SRC(insn->code)) { 1627 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1628 + } else { 1629 + /* IMM to REG move*/ 1630 + if (insn->imm >= 0) 1631 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1632 + else 1633 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1634 + } 1635 + break; 1636 + case BPF_END: 1637 + if (insn->imm == 64) 1638 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1639 + else if (insn->imm == 32) 1640 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1641 + else /* insn->imm == 16 */ 1642 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1643 + break; 1644 + } 1645 + rvt[idx] |= RVT_DONE; 1646 + break; 1647 + case BPF_ALU64: 1648 + switch (BPF_OP(insn->code)) { 1649 + case BPF_MOV: 1650 + if (BPF_SRC(insn->code)) { 1651 + /* REG to REG move*/ 1652 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1653 + } else { 1654 + /* IMM to REG move*/ 1655 + if (insn->imm >= 0) 1656 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1657 + else 1658 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); 1659 + } 1660 + break; 1661 + default: 1662 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1663 + } 1664 + rvt[idx] |= RVT_DONE; 1665 + break; 1666 + case BPF_LD: 1667 + switch (BPF_SIZE(insn->code)) { 1668 + case BPF_DW: 1669 + if (BPF_MODE(insn->code) == BPF_IMM) { 1670 + s64 val; 1671 + 1672 + val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32)); 1673 + if (val > 0 && val <= S32_MAX) 1674 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1675 + else if (val >= S32_MIN && val <= S32_MAX) 
1676 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT); 1677 + else 1678 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1679 + rvt[idx] |= RVT_DONE; 1680 + idx++; 1681 + } else { 1682 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1683 + } 1684 + break; 1685 + case BPF_B: 1686 + case BPF_H: 1687 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1688 + break; 1689 + case BPF_W: 1690 + if (BPF_MODE(insn->code) == BPF_IMM) 1691 + set_reg_val_type(&exit_rvt, insn->dst_reg, 1692 + insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT); 1693 + else 1694 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1695 + break; 1696 + } 1697 + rvt[idx] |= RVT_DONE; 1698 + break; 1699 + case BPF_LDX: 1700 + switch (BPF_SIZE(insn->code)) { 1701 + case BPF_DW: 1702 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT); 1703 + break; 1704 + case BPF_B: 1705 + case BPF_H: 1706 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS); 1707 + break; 1708 + case BPF_W: 1709 + set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT); 1710 + break; 1711 + } 1712 + rvt[idx] |= RVT_DONE; 1713 + break; 1714 + case BPF_JMP: 1715 + switch (BPF_OP(insn->code)) { 1716 + case BPF_EXIT: 1717 + rvt[idx] = RVT_DONE | exit_rvt; 1718 + rvt[prog->len] = exit_rvt; 1719 + return idx; 1720 + case BPF_JA: 1721 + rvt[idx] |= RVT_DONE; 1722 + idx += insn->off; 1723 + break; 1724 + case BPF_JEQ: 1725 + case BPF_JGT: 1726 + case BPF_JGE: 1727 + case BPF_JSET: 1728 + case BPF_JNE: 1729 + case BPF_JSGT: 1730 + case BPF_JSGE: 1731 + if (follow_taken) { 1732 + rvt[idx] |= RVT_BRANCH_TAKEN; 1733 + idx += insn->off; 1734 + follow_taken = false; 1735 + } else { 1736 + rvt[idx] |= RVT_FALL_THROUGH; 1737 + } 1738 + break; 1739 + case BPF_CALL: 1740 + set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT); 1741 + /* Upon call return, argument registers are clobbered. */ 1742 + for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++) 1743 + set_reg_val_type(&exit_rvt, reg, REG_64BIT); 1744 + 1745 + rvt[idx] |= RVT_DONE; 1746 + break; 1747 + default: 1748 + WARN(1, "Unhandled BPF_JMP case.\n"); 1749 + rvt[idx] |= RVT_DONE; 1750 + break; 1751 + } 1752 + break; 1753 + default: 1754 + rvt[idx] |= RVT_DONE; 1755 + break; 1756 + } 1757 + } 1758 + return idx; 1759 + } 1760 + 1761 + /* 1762 + * Track the value range (i.e. 32-bit vs. 64-bit) of each register at 1763 + * each eBPF insn. This allows unneeded sign and zero extension 1764 + * operations to be omitted. 1765 + * 1766 + * Doesn't handle yet confluence of control paths with conflicting 1767 + * ranges, but it is good enough for most sane code. 1768 + */ 1769 + static int reg_val_propagate(struct jit_ctx *ctx) 1770 + { 1771 + const struct bpf_prog *prog = ctx->skf; 1772 + u64 exit_rvt; 1773 + int reg; 1774 + int i; 1775 + 1776 + /* 1777 + * 11 registers * 3 bits/reg leaves top bits free for other 1778 + * uses. Bit-62..63 used to see if we have visited an insn. 1779 + */ 1780 + exit_rvt = 0; 1781 + 1782 + /* Upon entry, argument registers are 64-bit. */ 1783 + for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++) 1784 + set_reg_val_type(&exit_rvt, reg, REG_64BIT); 1785 + 1786 + /* 1787 + * First follow all conditional branches on the fall-through 1788 + * edge of control flow.. 1789 + */ 1790 + reg_val_propagate_range(ctx, exit_rvt, 0, false); 1791 + restart_search: 1792 + /* 1793 + * Then repeatedly find the first conditional branch where 1794 + * both edges of control flow have not been taken, and follow 1795 + * the branch taken edge. 
We will end up restarting the 1796 + * search once per conditional branch insn. 1797 + */ 1798 + for (i = 0; i < prog->len; i++) { 1799 + u64 rvt = ctx->reg_val_types[i]; 1800 + 1801 + if ((rvt & RVT_VISITED_MASK) == RVT_DONE || 1802 + (rvt & RVT_VISITED_MASK) == 0) 1803 + continue; 1804 + if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) { 1805 + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true); 1806 + } else { /* RVT_BRANCH_TAKEN */ 1807 + WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n"); 1808 + reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false); 1809 + } 1810 + goto restart_search; 1811 + } 1812 + /* 1813 + * Eventually all conditional branches have been followed on 1814 + * both branches and we are done. Any insn that has not been 1815 + * visited at this point is dead. 1816 + */ 1817 + 1818 + return 0; 1819 + } 1820 + 1821 + static void jit_fill_hole(void *area, unsigned int size) 1822 + { 1823 + u32 *p; 1824 + 1825 + /* We are guaranteed to have aligned memory. */ 1826 + for (p = area; size >= sizeof(u32); size -= sizeof(u32)) 1827 + uasm_i_break(&p, BRK_BUG); /* Increments p */ 1828 + } 1829 + 1830 + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) 1831 + { 1832 + struct bpf_prog *orig_prog = prog; 1833 + bool tmp_blinded = false; 1834 + struct bpf_prog *tmp; 1835 + struct bpf_binary_header *header = NULL; 1836 + struct jit_ctx ctx; 1837 + unsigned int image_size; 1838 + u8 *image_ptr; 1839 + 1840 + if (!bpf_jit_enable || !cpu_has_mips64r2) 1841 + return prog; 1842 + 1843 + tmp = bpf_jit_blind_constants(prog); 1844 + /* If blinding was requested and we failed during blinding, 1845 + * we must fall back to the interpreter. 1846 + */ 1847 + if (IS_ERR(tmp)) 1848 + return orig_prog; 1849 + if (tmp != prog) { 1850 + tmp_blinded = true; 1851 + prog = tmp; 1852 + } 1853 + 1854 + memset(&ctx, 0, sizeof(ctx)); 1855 + 1856 + ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); 1857 + if (ctx.offsets == NULL) 1858 + goto out_err; 1859 + 1860 + ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL); 1861 + if (ctx.reg_val_types == NULL) 1862 + goto out_err; 1863 + 1864 + ctx.skf = prog; 1865 + 1866 + if (reg_val_propagate(&ctx)) 1867 + goto out_err; 1868 + 1869 + /* 1870 + * First pass discovers used resources and instruction offsets 1871 + * assuming short branches are used. 1872 + */ 1873 + if (build_int_body(&ctx)) 1874 + goto out_err; 1875 + 1876 + /* 1877 + * If no calls are made (EBPF_SAVE_RA), then tail call count 1878 + * in $v1, else we must save in n$s4. 1879 + */ 1880 + if (ctx.flags & EBPF_SEEN_TC) { 1881 + if (ctx.flags & EBPF_SAVE_RA) 1882 + ctx.flags |= EBPF_SAVE_S4; 1883 + else 1884 + ctx.flags |= EBPF_TCC_IN_V1; 1885 + } 1886 + 1887 + /* 1888 + * Second pass generates offsets, if any branches are out of 1889 + * range a jump-around long sequence is generated, and we have 1890 + * to try again from the beginning to generate the new 1891 + * offsets. This is done until no additional conversions are 1892 + * necessary. 
1893 + */ 1894 + do { 1895 + ctx.idx = 0; 1896 + ctx.gen_b_offsets = 1; 1897 + ctx.long_b_conversion = 0; 1898 + if (gen_int_prologue(&ctx)) 1899 + goto out_err; 1900 + if (build_int_body(&ctx)) 1901 + goto out_err; 1902 + if (build_int_epilogue(&ctx, MIPS_R_RA)) 1903 + goto out_err; 1904 + } while (ctx.long_b_conversion); 1905 + 1906 + image_size = 4 * ctx.idx; 1907 + 1908 + header = bpf_jit_binary_alloc(image_size, &image_ptr, 1909 + sizeof(u32), jit_fill_hole); 1910 + if (header == NULL) 1911 + goto out_err; 1912 + 1913 + ctx.target = (u32 *)image_ptr; 1914 + 1915 + /* Third pass generates the code */ 1916 + ctx.idx = 0; 1917 + if (gen_int_prologue(&ctx)) 1918 + goto out_err; 1919 + if (build_int_body(&ctx)) 1920 + goto out_err; 1921 + if (build_int_epilogue(&ctx, MIPS_R_RA)) 1922 + goto out_err; 1923 + 1924 + /* Update the icache */ 1925 + flush_icache_range((unsigned long)ctx.target, 1926 + (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); 1927 + 1928 + if (bpf_jit_enable > 1) 1929 + /* Dump JIT code */ 1930 + bpf_jit_dump(prog->len, image_size, 2, ctx.target); 1931 + 1932 + bpf_jit_binary_lock_ro(header); 1933 + prog->bpf_func = (void *)ctx.target; 1934 + prog->jited = 1; 1935 + prog->jited_len = image_size; 1936 + out_normal: 1937 + if (tmp_blinded) 1938 + bpf_jit_prog_release_other(prog, prog == orig_prog ? 1939 + tmp : orig_prog); 1940 + kfree(ctx.offsets); 1941 + kfree(ctx.reg_val_types); 1942 + 1943 + return prog; 1944 + 1945 + out_err: 1946 + prog = orig_prog; 1947 + if (header) 1948 + bpf_jit_binary_free(header); 1949 + goto out_normal; 1950 + }
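
The conditional-jump cases in the MIPS eBPF JIT hunk above keep probing b_imm()/is_bad_offset(): while the PC-relative displacement still fits in a MIPS branch the short form is emitted, otherwise the sense of the compare is inverted and the code branches over a small tail that reaches the target with an absolute j (the OFFSETS_B_CONV path). Below is a user-space sketch of that decision only, assuming the usual signed 16-bit word offset of MIPS I-type branches; the helper names (fits_short_branch, emit_*) are invented for illustration and print pseudo-assembly rather than encoding instructions.

    #include <stdbool.h>
    #include <stdio.h>

    /* MIPS I-type branches carry a signed 16-bit offset counted in words. */
    static bool fits_short_branch(long byte_off)
    {
            long words = byte_off / 4;
            return words >= -32768 && words <= 32767;
    }

    static void emit_branch(const char *op, long byte_off)
    {
            printf("\t%s\t. + 4 + %ld\n", op, byte_off);
    }

    /* Out-of-range case: inverted branch hops over nop, j, nop (12 bytes). */
    static void emit_branch_around_jump(const char *inverted_op,
                                        unsigned long abs_target)
    {
            emit_branch(inverted_op, 4 * 3);
            printf("\tnop\n");                 /* branch delay slot */
            printf("\tj\t0x%lx\n", abs_target);
            printf("\tnop\n");                 /* jump delay slot   */
    }

    int main(void)
    {
            long small_off = 64;               /* fits a short branch     */
            long big_off = 1L << 20;           /* too far for 16-bit word */

            if (fits_short_branch(small_off))
                    emit_branch("beq", small_off);
            if (!fits_short_branch(big_off))
                    emit_branch_around_jump("bne", 0x80001000UL);
            return 0;
    }

This also explains the multi-pass loop in bpf_int_jit_compile(): converting one branch to the long form changes later offsets, so the second pass repeats until long_b_conversion stays clear.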
+3 -4
arch/mips/pci/pci.c
··· 28 29 static int __init pcibios_set_cache_line_size(void) 30 { 31 - struct cpuinfo_mips *c = &current_cpu_data; 32 unsigned int lsize; 33 34 /* 35 * Set PCI cacheline size to that of the highest level in the 36 * cache hierarchy. 37 */ 38 - lsize = c->dcache.linesz; 39 - lsize = c->scache.linesz ? : lsize; 40 - lsize = c->tcache.linesz ? : lsize; 41 42 BUG_ON(!lsize); 43
··· 28 29 static int __init pcibios_set_cache_line_size(void) 30 { 31 unsigned int lsize; 32 33 /* 34 * Set PCI cacheline size to that of the highest level in the 35 * cache hierarchy. 36 */ 37 + lsize = cpu_dcache_line_size(); 38 + lsize = cpu_scache_line_size() ? : lsize; 39 + lsize = cpu_tcache_line_size() ? : lsize; 40 41 BUG_ON(!lsize); 42
+4 -2
arch/mips/vdso/gettimeofday.c
··· 35 " syscall\n" 36 : "=r" (ret), "=r" (error) 37 : "r" (tv), "r" (tz), "r" (nr) 38 - : "memory"); 39 40 return error ? -ret : ret; 41 } ··· 56 " syscall\n" 57 : "=r" (ret), "=r" (error) 58 : "r" (clkid), "r" (ts), "r" (nr) 59 - : "memory"); 60 61 return error ? -ret : ret; 62 }
··· 35 " syscall\n" 36 : "=r" (ret), "=r" (error) 37 : "r" (tv), "r" (tz), "r" (nr) 38 + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 39 + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 40 41 return error ? -ret : ret; 42 } ··· 55 " syscall\n" 56 : "=r" (ret), "=r" (error) 57 : "r" (clkid), "r" (ts), "r" (nr) 58 + : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 59 + "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 60 61 return error ? -ret : ret; 62 }
+2 -1
arch/powerpc/configs/powernv_defconfig
··· 293 CONFIG_DEBUG_KERNEL=y 294 CONFIG_DEBUG_STACK_USAGE=y 295 CONFIG_DEBUG_STACKOVERFLOW=y 296 - CONFIG_LOCKUP_DETECTOR=y 297 CONFIG_LATENCYTOP=y 298 CONFIG_SCHED_TRACER=y 299 CONFIG_BLK_DEV_IO_TRACE=y
··· 293 CONFIG_DEBUG_KERNEL=y 294 CONFIG_DEBUG_STACK_USAGE=y 295 CONFIG_DEBUG_STACKOVERFLOW=y 296 + CONFIG_SOFTLOCKUP_DETECTOR=y 297 + CONFIG_HARDLOCKUP_DETECTOR=y 298 CONFIG_LATENCYTOP=y 299 CONFIG_SCHED_TRACER=y 300 CONFIG_BLK_DEV_IO_TRACE=y
+2 -1
arch/powerpc/configs/ppc64_defconfig
··· 324 CONFIG_DEBUG_KERNEL=y 325 CONFIG_DEBUG_STACK_USAGE=y 326 CONFIG_DEBUG_STACKOVERFLOW=y 327 - CONFIG_LOCKUP_DETECTOR=y 328 CONFIG_DEBUG_MUTEXES=y 329 CONFIG_LATENCYTOP=y 330 CONFIG_SCHED_TRACER=y
··· 324 CONFIG_DEBUG_KERNEL=y 325 CONFIG_DEBUG_STACK_USAGE=y 326 CONFIG_DEBUG_STACKOVERFLOW=y 327 + CONFIG_SOFTLOCKUP_DETECTOR=y 328 + CONFIG_HARDLOCKUP_DETECTOR=y 329 CONFIG_DEBUG_MUTEXES=y 330 CONFIG_LATENCYTOP=y 331 CONFIG_SCHED_TRACER=y
+2 -1
arch/powerpc/configs/pseries_defconfig
··· 291 CONFIG_DEBUG_KERNEL=y 292 CONFIG_DEBUG_STACK_USAGE=y 293 CONFIG_DEBUG_STACKOVERFLOW=y 294 - CONFIG_LOCKUP_DETECTOR=y 295 CONFIG_LATENCYTOP=y 296 CONFIG_SCHED_TRACER=y 297 CONFIG_BLK_DEV_IO_TRACE=y
··· 291 CONFIG_DEBUG_KERNEL=y 292 CONFIG_DEBUG_STACK_USAGE=y 293 CONFIG_DEBUG_STACKOVERFLOW=y 294 + CONFIG_SOFTLOCKUP_DETECTOR=y 295 + CONFIG_HARDLOCKUP_DETECTOR=y 296 CONFIG_LATENCYTOP=y 297 CONFIG_SCHED_TRACER=y 298 CONFIG_BLK_DEV_IO_TRACE=y
+18 -42
arch/powerpc/kernel/entry_64.S
··· 223 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) 224 bne- .Lsyscall_exit_work 225 226 - /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */ 227 - li r7,MSR_FP 228 #ifdef CONFIG_ALTIVEC 229 - oris r7,r7,MSR_VEC@h 230 #endif 231 - and r0,r8,r7 232 - cmpd r0,r7 233 - bne .Lsyscall_restore_math 234 - .Lsyscall_restore_math_cont: 235 236 - cmpld r3,r11 237 ld r5,_CCR(r1) 238 bge- .Lsyscall_error 239 .Lsyscall_error_cont: ··· 276 neg r3,r3 277 std r5,_CCR(r1) 278 b .Lsyscall_error_cont 279 - 280 - .Lsyscall_restore_math: 281 - /* 282 - * Some initial tests from restore_math to avoid the heavyweight 283 - * C code entry and MSR manipulations. 284 - */ 285 - LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK) 286 - and. r0,r0,r8 287 - bne 1f 288 - 289 - ld r7,PACACURRENT(r13) 290 - lbz r0,THREAD+THREAD_LOAD_FP(r7) 291 - #ifdef CONFIG_ALTIVEC 292 - lbz r6,THREAD+THREAD_LOAD_VEC(r7) 293 - add r0,r0,r6 294 - #endif 295 - cmpdi r0,0 296 - beq .Lsyscall_restore_math_cont 297 - 298 - 1: addi r3,r1,STACK_FRAME_OVERHEAD 299 - #ifdef CONFIG_PPC_BOOK3S 300 - li r10,MSR_RI 301 - mtmsrd r10,1 /* Restore RI */ 302 - #endif 303 - bl restore_math 304 - #ifdef CONFIG_PPC_BOOK3S 305 - li r11,0 306 - mtmsrd r11,1 307 - #endif 308 - /* Restore volatiles, reload MSR from updated one */ 309 - ld r8,_MSR(r1) 310 - ld r3,RESULT(r1) 311 - li r11,-MAX_ERRNO 312 - b .Lsyscall_restore_math_cont 313 314 /* Traced system call support */ 315 .Lsyscall_dotrace:
··· 223 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) 224 bne- .Lsyscall_exit_work 225 226 + andi. r0,r8,MSR_FP 227 + beq 2f 228 #ifdef CONFIG_ALTIVEC 229 + andis. r0,r8,MSR_VEC@h 230 + bne 3f 231 #endif 232 + 2: addi r3,r1,STACK_FRAME_OVERHEAD 233 + #ifdef CONFIG_PPC_BOOK3S 234 + li r10,MSR_RI 235 + mtmsrd r10,1 /* Restore RI */ 236 + #endif 237 + bl restore_math 238 + #ifdef CONFIG_PPC_BOOK3S 239 + li r11,0 240 + mtmsrd r11,1 241 + #endif 242 + ld r8,_MSR(r1) 243 + ld r3,RESULT(r1) 244 + li r11,-MAX_ERRNO 245 246 + 3: cmpld r3,r11 247 ld r5,_CCR(r1) 248 bge- .Lsyscall_error 249 .Lsyscall_error_cont: ··· 266 neg r3,r3 267 std r5,_CCR(r1) 268 b .Lsyscall_error_cont 269 270 /* Traced system call support */ 271 .Lsyscall_dotrace:
-4
arch/powerpc/kernel/process.c
··· 511 { 512 unsigned long msr; 513 514 - /* 515 - * Syscall exit makes a similar initial check before branching 516 - * to restore_math. Keep them in synch. 517 - */ 518 if (!msr_tm_active(regs->msr) && 519 !current->thread.load_fp && !loadvec(current->thread)) 520 return;
··· 511 { 512 unsigned long msr; 513 514 if (!msr_tm_active(regs->msr) && 515 !current->thread.load_fp && !loadvec(current->thread)) 516 return;
+3 -3
arch/powerpc/kernel/smp.c
··· 351 hard_irq_disable(); 352 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 353 raw_local_irq_restore(*flags); 354 - cpu_relax(); 355 raw_local_irq_save(*flags); 356 hard_irq_disable(); 357 } ··· 360 static void nmi_ipi_lock(void) 361 { 362 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 363 - cpu_relax(); 364 } 365 366 static void nmi_ipi_unlock(void) ··· 475 nmi_ipi_lock_start(&flags); 476 while (nmi_ipi_busy_count) { 477 nmi_ipi_unlock_end(&flags); 478 - cpu_relax(); 479 nmi_ipi_lock_start(&flags); 480 } 481
··· 351 hard_irq_disable(); 352 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 353 raw_local_irq_restore(*flags); 354 + spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); 355 raw_local_irq_save(*flags); 356 hard_irq_disable(); 357 } ··· 360 static void nmi_ipi_lock(void) 361 { 362 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 363 + spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); 364 } 365 366 static void nmi_ipi_unlock(void) ··· 475 nmi_ipi_lock_start(&flags); 476 while (nmi_ipi_busy_count) { 477 nmi_ipi_unlock_end(&flags); 478 + spin_until_cond(nmi_ipi_busy_count == 0); 479 nmi_ipi_lock_start(&flags); 480 } 481
+36 -13
arch/powerpc/kernel/watchdog.c
··· 71 * This may be called from low level interrupt handlers at some 72 * point in future. 73 */ 74 - local_irq_save(*flags); 75 - while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) 76 - cpu_relax(); 77 } 78 79 static inline void wd_smp_unlock(unsigned long *flags) 80 { 81 clear_bit_unlock(0, &__wd_smp_lock); 82 - local_irq_restore(*flags); 83 } 84 85 static void wd_lockup_ipi(struct pt_regs *regs) ··· 101 nmi_panic(regs, "Hard LOCKUP"); 102 } 103 104 - static void set_cpu_stuck(int cpu, u64 tb) 105 { 106 - cpumask_set_cpu(cpu, &wd_smp_cpus_stuck); 107 - cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); 108 if (cpumask_empty(&wd_smp_cpus_pending)) { 109 wd_smp_last_reset_tb = tb; 110 cpumask_andnot(&wd_smp_cpus_pending, 111 &wd_cpus_enabled, 112 &wd_smp_cpus_stuck); 113 } 114 } 115 116 static void watchdog_smp_panic(int cpu, u64 tb) ··· 144 } 145 smp_flush_nmi_ipi(1000000); 146 147 - /* Take the stuck CPU out of the watch group */ 148 - for_each_cpu(c, &wd_smp_cpus_pending) 149 - set_cpu_stuck(c, tb); 150 151 - out: 152 wd_smp_unlock(&flags); 153 154 printk_safe_flush(); ··· 159 160 if (hardlockup_panic) 161 nmi_panic(NULL, "Hard LOCKUP"); 162 } 163 164 static void wd_smp_clear_cpu_pending(int cpu, u64 tb) ··· 270 271 void arch_touch_nmi_watchdog(void) 272 { 273 int cpu = smp_processor_id(); 274 275 - watchdog_timer_interrupt(cpu); 276 } 277 EXPORT_SYMBOL(arch_touch_nmi_watchdog); 278 ··· 297 298 static int start_wd_on_cpu(unsigned int cpu) 299 { 300 if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { 301 WARN_ON(1); 302 return 0; ··· 313 if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) 314 return 0; 315 316 cpumask_set_cpu(cpu, &wd_cpus_enabled); 317 if (cpumask_weight(&wd_cpus_enabled) == 1) { 318 cpumask_set_cpu(cpu, &wd_smp_cpus_pending); 319 wd_smp_last_reset_tb = get_tb(); 320 } 321 - smp_wmb(); 322 start_watchdog_timer_on(cpu); 323 324 return 0; ··· 328 329 static int stop_wd_on_cpu(unsigned int cpu) 330 { 331 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) 332 return 0; /* Can happen in CPU unplug case */ 333 334 stop_watchdog_timer_on(cpu); 335 336 cpumask_clear_cpu(cpu, &wd_cpus_enabled); 337 wd_smp_clear_cpu_pending(cpu, get_tb()); 338 339 return 0;
··· 71 * This may be called from low level interrupt handlers at some 72 * point in future. 73 */ 74 + raw_local_irq_save(*flags); 75 + hard_irq_disable(); /* Make it soft-NMI safe */ 76 + while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) { 77 + raw_local_irq_restore(*flags); 78 + spin_until_cond(!test_bit(0, &__wd_smp_lock)); 79 + raw_local_irq_save(*flags); 80 + hard_irq_disable(); 81 + } 82 } 83 84 static inline void wd_smp_unlock(unsigned long *flags) 85 { 86 clear_bit_unlock(0, &__wd_smp_lock); 87 + raw_local_irq_restore(*flags); 88 } 89 90 static void wd_lockup_ipi(struct pt_regs *regs) ··· 96 nmi_panic(regs, "Hard LOCKUP"); 97 } 98 99 + static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb) 100 { 101 + cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask); 102 + cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask); 103 if (cpumask_empty(&wd_smp_cpus_pending)) { 104 wd_smp_last_reset_tb = tb; 105 cpumask_andnot(&wd_smp_cpus_pending, 106 &wd_cpus_enabled, 107 &wd_smp_cpus_stuck); 108 } 109 + } 110 + static void set_cpu_stuck(int cpu, u64 tb) 111 + { 112 + set_cpumask_stuck(cpumask_of(cpu), tb); 113 } 114 115 static void watchdog_smp_panic(int cpu, u64 tb) ··· 135 } 136 smp_flush_nmi_ipi(1000000); 137 138 + /* Take the stuck CPUs out of the watch group */ 139 + set_cpumask_stuck(&wd_smp_cpus_pending, tb); 140 141 wd_smp_unlock(&flags); 142 143 printk_safe_flush(); ··· 152 153 if (hardlockup_panic) 154 nmi_panic(NULL, "Hard LOCKUP"); 155 + 156 + return; 157 + 158 + out: 159 + wd_smp_unlock(&flags); 160 } 161 162 static void wd_smp_clear_cpu_pending(int cpu, u64 tb) ··· 258 259 void arch_touch_nmi_watchdog(void) 260 { 261 + unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; 262 int cpu = smp_processor_id(); 263 264 + if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks) 265 + watchdog_timer_interrupt(cpu); 266 } 267 EXPORT_SYMBOL(arch_touch_nmi_watchdog); 268 ··· 283 284 static int start_wd_on_cpu(unsigned int cpu) 285 { 286 + unsigned long flags; 287 + 288 if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { 289 WARN_ON(1); 290 return 0; ··· 297 if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) 298 return 0; 299 300 + wd_smp_lock(&flags); 301 cpumask_set_cpu(cpu, &wd_cpus_enabled); 302 if (cpumask_weight(&wd_cpus_enabled) == 1) { 303 cpumask_set_cpu(cpu, &wd_smp_cpus_pending); 304 wd_smp_last_reset_tb = get_tb(); 305 } 306 + wd_smp_unlock(&flags); 307 + 308 start_watchdog_timer_on(cpu); 309 310 return 0; ··· 310 311 static int stop_wd_on_cpu(unsigned int cpu) 312 { 313 + unsigned long flags; 314 + 315 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) 316 return 0; /* Can happen in CPU unplug case */ 317 318 stop_watchdog_timer_on(cpu); 319 320 + wd_smp_lock(&flags); 321 cpumask_clear_cpu(cpu, &wd_cpus_enabled); 322 + wd_smp_unlock(&flags); 323 + 324 wd_smp_clear_cpu_pending(cpu, get_tb()); 325 326 return 0;
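
The reworked wd_smp_lock() above (like the smp.c hunk before it) is the classic test-and-test-and-set pattern: after a failed test_and_set_bit_lock() the CPU spins on a plain read via spin_until_cond() and only then retries the atomic, re-enabling and re-disabling interrupts around each attempt. A minimal user-space model of the same idea with C11 atomics; the IRQ and hard_irq_disable() handling is omitted and the ttas_* names are made up.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wd_lock;      /* 0 = free, 1 = held */

    /* Retry the expensive atomic only once a cheap read says "free". */
    static void ttas_lock(void)
    {
            while (atomic_exchange_explicit(&wd_lock, 1, memory_order_acquire)) {
                    while (atomic_load_explicit(&wd_lock, memory_order_relaxed))
                            ;       /* read-only spin, like spin_until_cond() */
            }
    }

    static void ttas_unlock(void)
    {
            atomic_store_explicit(&wd_lock, 0, memory_order_release);
    }

    int main(void)
    {
            ttas_lock();
            puts("critical section");
            ttas_unlock();
            return 0;
    }

Spinning on the load instead of the atomic keeps contended waiters from bouncing the cache line, which matters here because the lock can be taken from NMI-ish watchdog context.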
+38 -3
arch/powerpc/platforms/powernv/idle.c
··· 56 */ 57 static u64 pnv_deepest_stop_psscr_val; 58 static u64 pnv_deepest_stop_psscr_mask; 59 static bool deepest_stop_found; 60 61 static int pnv_save_sprs_for_deep_states(void) ··· 186 187 update_subcore_sibling_mask(); 188 189 - if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) 190 - pnv_save_sprs_for_deep_states(); 191 } 192 193 u32 pnv_get_supported_cpuidle_states(void) ··· 408 pnv_deepest_stop_psscr_val; 409 srr1 = power9_idle_stop(psscr); 410 411 - } else if (idle_states & OPAL_PM_WINKLE_ENABLED) { 412 srr1 = power7_idle_insn(PNV_THREAD_WINKLE); 413 } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || 414 (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { ··· 587 max_residency_ns = residency_ns[i]; 588 pnv_deepest_stop_psscr_val = psscr_val[i]; 589 pnv_deepest_stop_psscr_mask = psscr_mask[i]; 590 deepest_stop_found = true; 591 } 592
··· 56 */ 57 static u64 pnv_deepest_stop_psscr_val; 58 static u64 pnv_deepest_stop_psscr_mask; 59 + static u64 pnv_deepest_stop_flag; 60 static bool deepest_stop_found; 61 62 static int pnv_save_sprs_for_deep_states(void) ··· 185 186 update_subcore_sibling_mask(); 187 188 + if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) { 189 + int rc = pnv_save_sprs_for_deep_states(); 190 + 191 + if (likely(!rc)) 192 + return; 193 + 194 + /* 195 + * The stop-api is unable to restore hypervisor 196 + * resources on wakeup from platform idle states which 197 + * lose full context. So disable such states. 198 + */ 199 + supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT; 200 + pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n"); 201 + pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n"); 202 + 203 + if (cpu_has_feature(CPU_FTR_ARCH_300) && 204 + (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) { 205 + /* 206 + * Use the default stop state for CPU-Hotplug 207 + * if available. 208 + */ 209 + if (default_stop_found) { 210 + pnv_deepest_stop_psscr_val = 211 + pnv_default_stop_val; 212 + pnv_deepest_stop_psscr_mask = 213 + pnv_default_stop_mask; 214 + pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n", 215 + pnv_deepest_stop_psscr_val); 216 + } else { /* Fallback to snooze loop for CPU-Hotplug */ 217 + deepest_stop_found = false; 218 + pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n"); 219 + } 220 + } 221 + } 222 } 223 224 u32 pnv_get_supported_cpuidle_states(void) ··· 375 pnv_deepest_stop_psscr_val; 376 srr1 = power9_idle_stop(psscr); 377 378 + } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) && 379 + (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) { 380 srr1 = power7_idle_insn(PNV_THREAD_WINKLE); 381 } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || 382 (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { ··· 553 max_residency_ns = residency_ns[i]; 554 pnv_deepest_stop_psscr_val = psscr_val[i]; 555 pnv_deepest_stop_psscr_mask = psscr_mask[i]; 556 + pnv_deepest_stop_flag = flags[i]; 557 deepest_stop_found = true; 558 } 559
+11 -6
arch/s390/include/asm/tlb.h
··· 47 extern void tlb_table_flush(struct mmu_gather *tlb); 48 extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 49 50 - static inline void tlb_gather_mmu(struct mmu_gather *tlb, 51 - struct mm_struct *mm, 52 - unsigned long start, 53 - unsigned long end) 54 { 55 tlb->mm = mm; 56 tlb->start = start; ··· 75 tlb_flush_mmu_free(tlb); 76 } 77 78 - static inline void tlb_finish_mmu(struct mmu_gather *tlb, 79 - unsigned long start, unsigned long end) 80 { 81 tlb_flush_mmu(tlb); 82 } 83
··· 47 extern void tlb_table_flush(struct mmu_gather *tlb); 48 extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 49 50 + static inline void 51 + arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 52 + unsigned long start, unsigned long end) 53 { 54 tlb->mm = mm; 55 tlb->start = start; ··· 76 tlb_flush_mmu_free(tlb); 77 } 78 79 + static inline void 80 + arch_tlb_finish_mmu(struct mmu_gather *tlb, 81 + unsigned long start, unsigned long end, bool force) 82 { 83 + if (force) { 84 + tlb->start = start; 85 + tlb->end = end; 86 + } 87 + 88 tlb_flush_mmu(tlb); 89 } 90
+2 -1
arch/s390/net/bpf_jit_comp.c
··· 1253 insn_count = bpf_jit_insn(jit, fp, i); 1254 if (insn_count < 0) 1255 return -1; 1256 - jit->addrs[i + 1] = jit->prg; /* Next instruction address */ 1257 } 1258 bpf_jit_epilogue(jit); 1259
··· 1253 insn_count = bpf_jit_insn(jit, fp, i); 1254 if (insn_count < 0) 1255 return -1; 1256 + /* Next instruction address */ 1257 + jit->addrs[i + insn_count] = jit->prg; 1258 } 1259 bpf_jit_epilogue(jit); 1260
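
The one-line s390 change matters because a BPF_LD | BPF_DW | BPF_IMM instruction occupies two eBPF slots: the table of generated-code offsets has to be advanced by the number of slots the translator just consumed, not by one. A toy model of that bookkeeping; insn_len() and the fixed 6-byte emission are invented for the sketch.

    #include <stdio.h>

    /* Slots consumed by an eBPF insn: 64-bit immediate loads take 2. */
    static int insn_len(int is_ld_imm64)
    {
            return is_ld_imm64 ? 2 : 1;
    }

    int main(void)
    {
            int prog[] = { 0, 1, 0 };        /* 1 marks a BPF_LD_IMM64 */
            int nr = sizeof(prog) / sizeof(prog[0]);
            unsigned int addrs[8] = { 0 };
            unsigned int prg = 0;

            for (int i = 0; i < nr; ) {
                    int n = insn_len(prog[i]);

                    prg += 6;                /* pretend 6 bytes were emitted */
                    addrs[i + n] = prg;      /* address of the *next* insn   */
                    i += n;
            }
            /* addrs[2] stays 0: the second half of the 64-bit load is
             * never a jump target, so its entry is irrelevant. */
            for (int i = 0; i <= nr; i++)
                    printf("addrs[%d] = %u\n", i, addrs[i]);
            return 0;
    }

Indexing with i + 1, as before the fix, would have recorded the offset against the dead second slot and left the entry for the real next instruction stale.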
+5 -3
arch/sh/include/asm/tlb.h
··· 36 } 37 38 static inline void 39 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 40 { 41 tlb->mm = mm; 42 tlb->start = start; ··· 48 } 49 50 static inline void 51 - tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 52 { 53 - if (tlb->fullmm) 54 flush_tlb_mm(tlb->mm); 55 56 /* keep the page table cache within bounds */
··· 36 } 37 38 static inline void 39 + arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 40 + unsigned long start, unsigned long end) 41 { 42 tlb->mm = mm; 43 tlb->start = start; ··· 47 } 48 49 static inline void 50 + arch_tlb_finish_mmu(struct mmu_gather *tlb, 51 + unsigned long start, unsigned long end, bool force) 52 { 53 + if (tlb->fullmm || force) 54 flush_tlb_mm(tlb->mm); 55 56 /* keep the page table cache within bounds */
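
Here, and in the s390 and um headers nearby, the mmu_gather teardown grows a force flag so that a generic caller can insist on a TLB flush even when the architecture-side state alone would not trigger one. The sketch below only models that shape in user space: the generic wrapper and its force condition are placeholders, not the actual mm/ code.

    #include <stdbool.h>
    #include <stdio.h>

    struct mmu_gather {
            unsigned long start, end;
            bool fullmm;
    };

    /* Architecture hook, modelled on the sh version above. */
    static void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                                    unsigned long start, unsigned long end,
                                    bool force)
    {
            if (tlb->fullmm || force)
                    printf("flush_tlb_mm(0x%lx-0x%lx)\n", start, end);
    }

    /* Hypothetical generic wrapper; the force condition is a stand-in. */
    static void tlb_finish_mmu(struct mmu_gather *tlb,
                               unsigned long start, unsigned long end)
    {
            bool force = tlb->start != tlb->end;

            arch_tlb_finish_mmu(tlb, start, end, force);
    }

    int main(void)
    {
            struct mmu_gather tlb = { .start = 0x1000, .end = 0x2000 };

            tlb_finish_mmu(&tlb, 0x1000, 0x2000);
            return 0;
    }
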
+16
arch/sparc/include/asm/spitfire.h
··· 47 #define SUN4V_CHIP_NIAGARA5 0x05 48 #define SUN4V_CHIP_SPARC_M6 0x06 49 #define SUN4V_CHIP_SPARC_M7 0x07 50 #define SUN4V_CHIP_SPARC64X 0x8a 51 #define SUN4V_CHIP_SPARC_SN 0x8b 52 #define SUN4V_CHIP_UNKNOWN 0xff 53 54 #ifndef __ASSEMBLY__ 55
··· 47 #define SUN4V_CHIP_NIAGARA5 0x05 48 #define SUN4V_CHIP_SPARC_M6 0x06 49 #define SUN4V_CHIP_SPARC_M7 0x07 50 + #define SUN4V_CHIP_SPARC_M8 0x08 51 #define SUN4V_CHIP_SPARC64X 0x8a 52 #define SUN4V_CHIP_SPARC_SN 0x8b 53 #define SUN4V_CHIP_UNKNOWN 0xff 54 + 55 + /* 56 + * The following CPU_ID_xxx constants are used 57 + * to identify the CPU type in the setup phase 58 + * (see head_64.S) 59 + */ 60 + #define CPU_ID_NIAGARA1 ('1') 61 + #define CPU_ID_NIAGARA2 ('2') 62 + #define CPU_ID_NIAGARA3 ('3') 63 + #define CPU_ID_NIAGARA4 ('4') 64 + #define CPU_ID_NIAGARA5 ('5') 65 + #define CPU_ID_M6 ('6') 66 + #define CPU_ID_M7 ('7') 67 + #define CPU_ID_M8 ('8') 68 + #define CPU_ID_SONOMA1 ('N') 69 70 #ifndef __ASSEMBLY__ 71
+6
arch/sparc/kernel/cpu.c
··· 506 sparc_pmu_type = "sparc-m7"; 507 break; 508 509 case SUN4V_CHIP_SPARC_SN: 510 sparc_cpu_type = "SPARC-SN"; 511 sparc_fpu_type = "SPARC-SN integrated FPU";
··· 506 sparc_pmu_type = "sparc-m7"; 507 break; 508 509 + case SUN4V_CHIP_SPARC_M8: 510 + sparc_cpu_type = "SPARC-M8"; 511 + sparc_fpu_type = "SPARC-M8 integrated FPU"; 512 + sparc_pmu_type = "sparc-m8"; 513 + break; 514 + 515 case SUN4V_CHIP_SPARC_SN: 516 sparc_cpu_type = "SPARC-SN"; 517 sparc_fpu_type = "SPARC-SN integrated FPU";
+1
arch/sparc/kernel/cpumap.c
··· 328 case SUN4V_CHIP_NIAGARA5: 329 case SUN4V_CHIP_SPARC_M6: 330 case SUN4V_CHIP_SPARC_M7: 331 case SUN4V_CHIP_SPARC_SN: 332 case SUN4V_CHIP_SPARC64X: 333 rover_inc_table = niagara_iterate_method;
··· 328 case SUN4V_CHIP_NIAGARA5: 329 case SUN4V_CHIP_SPARC_M6: 330 case SUN4V_CHIP_SPARC_M7: 331 + case SUN4V_CHIP_SPARC_M8: 332 case SUN4V_CHIP_SPARC_SN: 333 case SUN4V_CHIP_SPARC64X: 334 rover_inc_table = niagara_iterate_method;
+14 -8
arch/sparc/kernel/head_64.S
··· 424 nop 425 426 70: ldub [%g1 + 7], %g2 427 - cmp %g2, '3' 428 be,pt %xcc, 5f 429 mov SUN4V_CHIP_NIAGARA3, %g4 430 - cmp %g2, '4' 431 be,pt %xcc, 5f 432 mov SUN4V_CHIP_NIAGARA4, %g4 433 - cmp %g2, '5' 434 be,pt %xcc, 5f 435 mov SUN4V_CHIP_NIAGARA5, %g4 436 - cmp %g2, '6' 437 be,pt %xcc, 5f 438 mov SUN4V_CHIP_SPARC_M6, %g4 439 - cmp %g2, '7' 440 be,pt %xcc, 5f 441 mov SUN4V_CHIP_SPARC_M7, %g4 442 - cmp %g2, 'N' 443 be,pt %xcc, 5f 444 mov SUN4V_CHIP_SPARC_SN, %g4 445 ba,pt %xcc, 49f ··· 451 91: sethi %hi(prom_cpu_compatible), %g1 452 or %g1, %lo(prom_cpu_compatible), %g1 453 ldub [%g1 + 17], %g2 454 - cmp %g2, '1' 455 be,pt %xcc, 5f 456 mov SUN4V_CHIP_NIAGARA1, %g4 457 - cmp %g2, '2' 458 be,pt %xcc, 5f 459 mov SUN4V_CHIP_NIAGARA2, %g4 460 ··· 603 be,pt %xcc, niagara4_patch 604 nop 605 cmp %g1, SUN4V_CHIP_SPARC_M7 606 be,pt %xcc, niagara4_patch 607 nop 608 cmp %g1, SUN4V_CHIP_SPARC_SN
··· 424 nop 425 426 70: ldub [%g1 + 7], %g2 427 + cmp %g2, CPU_ID_NIAGARA3 428 be,pt %xcc, 5f 429 mov SUN4V_CHIP_NIAGARA3, %g4 430 + cmp %g2, CPU_ID_NIAGARA4 431 be,pt %xcc, 5f 432 mov SUN4V_CHIP_NIAGARA4, %g4 433 + cmp %g2, CPU_ID_NIAGARA5 434 be,pt %xcc, 5f 435 mov SUN4V_CHIP_NIAGARA5, %g4 436 + cmp %g2, CPU_ID_M6 437 be,pt %xcc, 5f 438 mov SUN4V_CHIP_SPARC_M6, %g4 439 + cmp %g2, CPU_ID_M7 440 be,pt %xcc, 5f 441 mov SUN4V_CHIP_SPARC_M7, %g4 442 + cmp %g2, CPU_ID_M8 443 + be,pt %xcc, 5f 444 + mov SUN4V_CHIP_SPARC_M8, %g4 445 + cmp %g2, CPU_ID_SONOMA1 446 be,pt %xcc, 5f 447 mov SUN4V_CHIP_SPARC_SN, %g4 448 ba,pt %xcc, 49f ··· 448 91: sethi %hi(prom_cpu_compatible), %g1 449 or %g1, %lo(prom_cpu_compatible), %g1 450 ldub [%g1 + 17], %g2 451 + cmp %g2, CPU_ID_NIAGARA1 452 be,pt %xcc, 5f 453 mov SUN4V_CHIP_NIAGARA1, %g4 454 + cmp %g2, CPU_ID_NIAGARA2 455 be,pt %xcc, 5f 456 mov SUN4V_CHIP_NIAGARA2, %g4 457 ··· 600 be,pt %xcc, niagara4_patch 601 nop 602 cmp %g1, SUN4V_CHIP_SPARC_M7 603 + be,pt %xcc, niagara4_patch 604 + nop 605 + cmp %g1, SUN4V_CHIP_SPARC_M8 606 be,pt %xcc, niagara4_patch 607 nop 608 cmp %g1, SUN4V_CHIP_SPARC_SN
+13 -2
arch/sparc/kernel/setup_64.c
··· 288 289 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 290 &__sun4v_2insn_patch_end); 291 - if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 292 - sun4v_chip_type == SUN4V_CHIP_SPARC_SN) 293 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, 294 &__sun_m7_2insn_patch_end); 295 296 sun4v_hvapi_init(); 297 } ··· 536 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 537 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 538 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 539 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 540 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 541 cap |= HWCAP_SPARC_BLKINIT; ··· 546 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 547 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 548 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 549 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 550 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 551 cap |= HWCAP_SPARC_N2; ··· 577 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 578 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 579 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 580 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 581 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 582 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | ··· 588 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 589 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 590 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 591 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 592 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 593 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
··· 288 289 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 290 &__sun4v_2insn_patch_end); 291 + 292 + switch (sun4v_chip_type) { 293 + case SUN4V_CHIP_SPARC_M7: 294 + case SUN4V_CHIP_SPARC_M8: 295 + case SUN4V_CHIP_SPARC_SN: 296 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, 297 &__sun_m7_2insn_patch_end); 298 + break; 299 + default: 300 + break; 301 + } 302 303 sun4v_hvapi_init(); 304 } ··· 529 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 530 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 531 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 532 + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || 533 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 534 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 535 cap |= HWCAP_SPARC_BLKINIT; ··· 538 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 539 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 540 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 541 + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || 542 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 543 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 544 cap |= HWCAP_SPARC_N2; ··· 568 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 569 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 570 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 571 + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || 572 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 573 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 574 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | ··· 578 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 579 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 580 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 581 + sun4v_chip_type == SUN4V_CHIP_SPARC_M8 || 582 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 583 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 584 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
+13 -1
arch/sparc/mm/init_64.c
··· 1944 break; 1945 case SUN4V_CHIP_SPARC_M7: 1946 case SUN4V_CHIP_SPARC_SN: 1947 - default: 1948 /* M7 and later support 52-bit virtual addresses. */ 1949 sparc64_va_hole_top = 0xfff8000000000000UL; 1950 sparc64_va_hole_bottom = 0x0008000000000000UL; 1951 max_phys_bits = 49; 1952 break; 1953 } 1954 } ··· 2171 */ 2172 switch (sun4v_chip_type) { 2173 case SUN4V_CHIP_SPARC_M7: 2174 case SUN4V_CHIP_SPARC_SN: 2175 pagecv_flag = 0x00; 2176 break; ··· 2324 */ 2325 switch (sun4v_chip_type) { 2326 case SUN4V_CHIP_SPARC_M7: 2327 case SUN4V_CHIP_SPARC_SN: 2328 page_cache4v_flag = _PAGE_CP_4V; 2329 break;
··· 1944 break; 1945 case SUN4V_CHIP_SPARC_M7: 1946 case SUN4V_CHIP_SPARC_SN: 1947 /* M7 and later support 52-bit virtual addresses. */ 1948 sparc64_va_hole_top = 0xfff8000000000000UL; 1949 sparc64_va_hole_bottom = 0x0008000000000000UL; 1950 max_phys_bits = 49; 1951 + break; 1952 + case SUN4V_CHIP_SPARC_M8: 1953 + default: 1954 + /* M8 and later support 54-bit virtual addresses. 1955 + * However, restricting M8 and above VA bits to 53 1956 + * as 4-level page table cannot support more than 1957 + * 53 VA bits. 1958 + */ 1959 + sparc64_va_hole_top = 0xfff0000000000000UL; 1960 + sparc64_va_hole_bottom = 0x0010000000000000UL; 1961 + max_phys_bits = 51; 1962 break; 1963 } 1964 } ··· 2161 */ 2162 switch (sun4v_chip_type) { 2163 case SUN4V_CHIP_SPARC_M7: 2164 + case SUN4V_CHIP_SPARC_M8: 2165 case SUN4V_CHIP_SPARC_SN: 2166 pagecv_flag = 0x00; 2167 break; ··· 2313 */ 2314 switch (sun4v_chip_type) { 2315 case SUN4V_CHIP_SPARC_M7: 2316 + case SUN4V_CHIP_SPARC_M8: 2317 case SUN4V_CHIP_SPARC_SN: 2318 page_cache4v_flag = _PAGE_CP_4V; 2319 break;
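
The 53-bit ceiling mentioned in the new M8 comment follows from the sparc64 paging geometry, assuming the default 8 KB kernel page size: 13 offset bits plus four table levels of 1024 eight-byte entries (10 bits each) gives 13 + 4 x 10 = 53 resolvable VA bits, so the M8's 54-bit hardware range has to be clamped. A one-liner to make the arithmetic explicit:

    #include <stdio.h>

    int main(void)
    {
            const int page_shift = 13;                  /* 8 KB pages (assumed)   */
            const int bits_per_level = page_shift - 3;  /* 1024 8-byte PTEs/level */
            const int levels = 4;

            /* 13 + 4 * 10 = 53 usable VA bits with a 4-level table. */
            printf("max VA bits = %d\n", page_shift + levels * bits_per_level);
            return 0;
    }
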
+10 -3
arch/um/include/asm/tlb.h
··· 45 } 46 47 static inline void 48 - tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 49 { 50 tlb->mm = mm; 51 tlb->start = start; ··· 81 tlb_flush_mmu_free(tlb); 82 } 83 84 - /* tlb_finish_mmu 85 * Called at the end of the shootdown operation to free up any resources 86 * that were required. 87 */ 88 static inline void 89 - tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 90 { 91 tlb_flush_mmu(tlb); 92 93 /* keep the page table cache within bounds */
··· 45 } 46 47 static inline void 48 + arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 49 + unsigned long start, unsigned long end) 50 { 51 tlb->mm = mm; 52 tlb->start = start; ··· 80 tlb_flush_mmu_free(tlb); 81 } 82 83 + /* arch_tlb_finish_mmu 84 * Called at the end of the shootdown operation to free up any resources 85 * that were required. 86 */ 87 static inline void 88 + arch_tlb_finish_mmu(struct mmu_gather *tlb, 89 + unsigned long start, unsigned long end, bool force) 90 { 91 + if (force) { 92 + tlb->start = start; 93 + tlb->end = end; 94 + tlb->need_flush = 1; 95 + } 96 tlb_flush_mmu(tlb); 97 98 /* keep the page table cache within bounds */
+10
arch/x86/include/asm/hypervisor.h
··· 43 44 /* pin current vcpu to specified physical cpu (run rarely) */ 45 void (*pin_vcpu)(int); 46 }; 47 48 extern const struct hypervisor_x86 *x86_hyper; ··· 60 extern void init_hypervisor_platform(void); 61 extern bool hypervisor_x2apic_available(void); 62 extern void hypervisor_pin_vcpu(int cpu); 63 #else 64 static inline void init_hypervisor_platform(void) { } 65 static inline bool hypervisor_x2apic_available(void) { return false; } 66 #endif /* CONFIG_HYPERVISOR_GUEST */ 67 #endif /* _ASM_X86_HYPERVISOR_H */
··· 43 44 /* pin current vcpu to specified physical cpu (run rarely) */ 45 void (*pin_vcpu)(int); 46 + 47 + /* called during init_mem_mapping() to setup early mappings. */ 48 + void (*init_mem_mapping)(void); 49 }; 50 51 extern const struct hypervisor_x86 *x86_hyper; ··· 57 extern void init_hypervisor_platform(void); 58 extern bool hypervisor_x2apic_available(void); 59 extern void hypervisor_pin_vcpu(int cpu); 60 + 61 + static inline void hypervisor_init_mem_mapping(void) 62 + { 63 + if (x86_hyper && x86_hyper->init_mem_mapping) 64 + x86_hyper->init_mem_mapping(); 65 + } 66 #else 67 static inline void init_hypervisor_platform(void) { } 68 static inline bool hypervisor_x2apic_available(void) { return false; } 69 + static inline void hypervisor_init_mem_mapping(void) { } 70 #endif /* CONFIG_HYPERVISOR_GUEST */ 71 #endif /* _ASM_X86_HYPERVISOR_H */
+3
arch/x86/mm/init.c
··· 18 #include <asm/dma.h> /* for MAX_DMA_PFN */ 19 #include <asm/microcode.h> 20 #include <asm/kaslr.h> 21 22 /* 23 * We need to define the tracepoints somewhere, and tlb.c ··· 636 637 load_cr3(swapper_pg_dir); 638 __flush_tlb_all(); 639 640 early_memtest(0, max_pfn_mapped << PAGE_SHIFT); 641 }
··· 18 #include <asm/dma.h> /* for MAX_DMA_PFN */ 19 #include <asm/microcode.h> 20 #include <asm/kaslr.h> 21 + #include <asm/hypervisor.h> 22 23 /* 24 * We need to define the tracepoints somewhere, and tlb.c ··· 635 636 load_cr3(swapper_pg_dir); 637 __flush_tlb_all(); 638 + 639 + hypervisor_init_mem_mapping(); 640 641 early_memtest(0, max_pfn_mapped << PAGE_SHIFT); 642 }
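
Taken together, the hypervisor.h and init.c hunks add an optional per-hypervisor callback behind a NULL-safe static-inline wrapper, which the Xen HVM patch below then fills in to remap its shared-info page once the direct map exists. The same optional-hook shape in a minimal, self-contained form; the struct and function bodies here are illustrative only.

    #include <stdio.h>

    struct hyper_ops {
            const char *name;
            void (*init_mem_mapping)(void);   /* optional */
    };

    static void xen_init_mem_mapping(void)
    {
            puts("switch shared info from early mapping to the direct map");
    }

    static const struct hyper_ops xen_hvm = {
            .name = "Xen HVM",
            .init_mem_mapping = xen_init_mem_mapping,
    };

    static const struct hyper_ops *hyper = &xen_hvm;

    /* NULL-safe wrapper, mirroring hypervisor_init_mem_mapping() above. */
    static void hypervisor_init_mem_mapping(void)
    {
            if (hyper && hyper->init_mem_mapping)
                    hyper->init_mem_mapping();
    }

    int main(void)
    {
            hypervisor_init_mem_mapping();
            return 0;
    }

Callers never need to know whether a given hypervisor implements the hook, which keeps init_mem_mapping() free of hypervisor-specific #ifdefs.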
+37 -22
arch/x86/xen/enlighten_hvm.c
··· 12 #include <asm/setup.h> 13 #include <asm/hypervisor.h> 14 #include <asm/e820/api.h> 15 16 #include <asm/xen/cpuid.h> 17 #include <asm/xen/hypervisor.h> ··· 22 #include "mmu.h" 23 #include "smp.h" 24 25 - void __ref xen_hvm_init_shared_info(void) 26 { 27 struct xen_add_to_physmap xatp; 28 - u64 pa; 29 - 30 - if (HYPERVISOR_shared_info == &xen_dummy_shared_info) { 31 - /* 32 - * Search for a free page starting at 4kB physical address. 33 - * Low memory is preferred to avoid an EPT large page split up 34 - * by the mapping. 35 - * Starting below X86_RESERVE_LOW (usually 64kB) is fine as 36 - * the BIOS used for HVM guests is well behaved and won't 37 - * clobber memory other than the first 4kB. 38 - */ 39 - for (pa = PAGE_SIZE; 40 - !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) || 41 - memblock_is_reserved(pa); 42 - pa += PAGE_SIZE) 43 - ; 44 - 45 - memblock_reserve(pa, PAGE_SIZE); 46 - HYPERVISOR_shared_info = __va(pa); 47 - } 48 49 xatp.domid = DOMID_SELF; 50 xatp.idx = 0; 51 xatp.space = XENMAPSPACE_shared_info; 52 - xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info); 53 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 54 BUG(); 55 } 56 57 static void __init init_hvm_pv_info(void) ··· 166 167 init_hvm_pv_info(); 168 169 xen_hvm_init_shared_info(); 170 171 /* ··· 232 .init_platform = xen_hvm_guest_init, 233 .pin_vcpu = xen_pin_vcpu, 234 .x2apic_available = xen_x2apic_para_available, 235 }; 236 EXPORT_SYMBOL(x86_hyper_xen_hvm);
··· 12 #include <asm/setup.h> 13 #include <asm/hypervisor.h> 14 #include <asm/e820/api.h> 15 + #include <asm/early_ioremap.h> 16 17 #include <asm/xen/cpuid.h> 18 #include <asm/xen/hypervisor.h> ··· 21 #include "mmu.h" 22 #include "smp.h" 23 24 + static unsigned long shared_info_pfn; 25 + 26 + void xen_hvm_init_shared_info(void) 27 { 28 struct xen_add_to_physmap xatp; 29 30 xatp.domid = DOMID_SELF; 31 xatp.idx = 0; 32 xatp.space = XENMAPSPACE_shared_info; 33 + xatp.gpfn = shared_info_pfn; 34 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 35 BUG(); 36 + } 37 + 38 + static void __init reserve_shared_info(void) 39 + { 40 + u64 pa; 41 + 42 + /* 43 + * Search for a free page starting at 4kB physical address. 44 + * Low memory is preferred to avoid an EPT large page split up 45 + * by the mapping. 46 + * Starting below X86_RESERVE_LOW (usually 64kB) is fine as 47 + * the BIOS used for HVM guests is well behaved and won't 48 + * clobber memory other than the first 4kB. 49 + */ 50 + for (pa = PAGE_SIZE; 51 + !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) || 52 + memblock_is_reserved(pa); 53 + pa += PAGE_SIZE) 54 + ; 55 + 56 + shared_info_pfn = PHYS_PFN(pa); 57 + 58 + memblock_reserve(pa, PAGE_SIZE); 59 + HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE); 60 + } 61 + 62 + static void __init xen_hvm_init_mem_mapping(void) 63 + { 64 + early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); 65 + HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); 66 } 67 68 static void __init init_hvm_pv_info(void) ··· 153 154 init_hvm_pv_info(); 155 156 + reserve_shared_info(); 157 xen_hvm_init_shared_info(); 158 159 /* ··· 218 .init_platform = xen_hvm_guest_init, 219 .pin_vcpu = xen_pin_vcpu, 220 .x2apic_available = xen_x2apic_para_available, 221 + .init_mem_mapping = xen_hvm_init_mem_mapping, 222 }; 223 EXPORT_SYMBOL(x86_hyper_xen_hvm);
+2
arch/xtensa/include/asm/Kbuild
··· 1 generic-y += bug.h 2 generic-y += clkdev.h 3 generic-y += div64.h 4 generic-y += dma-contiguous.h 5 generic-y += emergency-restart.h ··· 18 generic-y += local64.h 19 generic-y += mcs_spinlock.h 20 generic-y += mm-arch-hooks.h 21 generic-y += percpu.h 22 generic-y += preempt.h 23 generic-y += rwsem.h
··· 1 generic-y += bug.h 2 generic-y += clkdev.h 3 + generic-y += device.h 4 generic-y += div64.h 5 generic-y += dma-contiguous.h 6 generic-y += emergency-restart.h ··· 17 generic-y += local64.h 18 generic-y += mcs_spinlock.h 19 generic-y += mm-arch-hooks.h 20 + generic-y += param.h 21 generic-y += percpu.h 22 generic-y += preempt.h 23 generic-y += rwsem.h
-15
arch/xtensa/include/asm/device.h
··· 1 - /* 2 - * Arch specific extensions to struct device 3 - * 4 - * This file is released under the GPLv2 5 - */ 6 - #ifndef _ASM_XTENSA_DEVICE_H 7 - #define _ASM_XTENSA_DEVICE_H 8 - 9 - struct dev_archdata { 10 - }; 11 - 12 - struct pdev_archdata { 13 - }; 14 - 15 - #endif /* _ASM_XTENSA_DEVICE_H */
···
-18
arch/xtensa/include/asm/param.h
··· 1 - /* 2 - * include/asm-xtensa/param.h 3 - * 4 - * This file is subject to the terms and conditions of the GNU General Public 5 - * License. See the file "COPYING" in the main directory of this archive 6 - * for more details. 7 - * 8 - * Copyright (C) 2001 - 2005 Tensilica Inc. 9 - */ 10 - #ifndef _XTENSA_PARAM_H 11 - #define _XTENSA_PARAM_H 12 - 13 - #include <uapi/asm/param.h> 14 - 15 - # define HZ CONFIG_HZ /* internal timer frequency */ 16 - # define USER_HZ 100 /* for user interfaces in "ticks" */ 17 - # define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */ 18 - #endif /* _XTENSA_PARAM_H */
···
-2
arch/xtensa/kernel/xtensa_ksyms.c
··· 94 } 95 EXPORT_SYMBOL(__sync_fetch_and_or_4); 96 97 - #ifdef CONFIG_NET 98 /* 99 * Networking support 100 */ 101 EXPORT_SYMBOL(csum_partial); 102 EXPORT_SYMBOL(csum_partial_copy_generic); 103 - #endif /* CONFIG_NET */ 104 105 /* 106 * Architecture-specific symbols
··· 94 } 95 EXPORT_SYMBOL(__sync_fetch_and_or_4); 96 97 /* 98 * Networking support 99 */ 100 EXPORT_SYMBOL(csum_partial); 101 EXPORT_SYMBOL(csum_partial_copy_generic); 102 103 /* 104 * Architecture-specific symbols
+8 -8
arch/xtensa/mm/cache.c
··· 103 clear_page_alias(kvaddr, paddr); 104 preempt_enable(); 105 } 106 107 void copy_user_highpage(struct page *dst, struct page *src, 108 unsigned long vaddr, struct vm_area_struct *vma) ··· 120 copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); 121 preempt_enable(); 122 } 123 - 124 - #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ 125 - 126 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 127 128 /* 129 * Any time the kernel writes to a user page cache page, or it is about to ··· 174 175 /* There shouldn't be an entry in the cache for this page anymore. */ 176 } 177 - 178 179 /* 180 * For now, flush the whole cache. FIXME?? ··· 186 __flush_invalidate_dcache_all(); 187 __invalidate_icache_all(); 188 } 189 190 /* 191 * Remove any entry in the cache for this page. ··· 206 __flush_invalidate_dcache_page_alias(virt, phys); 207 __invalidate_icache_page_alias(virt, phys); 208 } 209 210 - #endif 211 212 void 213 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) ··· 225 226 flush_tlb_page(vma, addr); 227 228 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 229 230 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 231 unsigned long phys = page_to_phys(page); ··· 256 * flush_dcache_page() on the page. 257 */ 258 259 - #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 260 261 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 262 unsigned long vaddr, void *dst, const void *src,
··· 103 clear_page_alias(kvaddr, paddr); 104 preempt_enable(); 105 } 106 + EXPORT_SYMBOL(clear_user_highpage); 107 108 void copy_user_highpage(struct page *dst, struct page *src, 109 unsigned long vaddr, struct vm_area_struct *vma) ··· 119 copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); 120 preempt_enable(); 121 } 122 + EXPORT_SYMBOL(copy_user_highpage); 123 124 /* 125 * Any time the kernel writes to a user page cache page, or it is about to ··· 176 177 /* There shouldn't be an entry in the cache for this page anymore. */ 178 } 179 + EXPORT_SYMBOL(flush_dcache_page); 180 181 /* 182 * For now, flush the whole cache. FIXME?? ··· 188 __flush_invalidate_dcache_all(); 189 __invalidate_icache_all(); 190 } 191 + EXPORT_SYMBOL(local_flush_cache_range); 192 193 /* 194 * Remove any entry in the cache for this page. ··· 207 __flush_invalidate_dcache_page_alias(virt, phys); 208 __invalidate_icache_page_alias(virt, phys); 209 } 210 + EXPORT_SYMBOL(local_flush_cache_page); 211 212 + #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ 213 214 void 215 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) ··· 225 226 flush_tlb_page(vma, addr); 227 228 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) 229 230 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 231 unsigned long phys = page_to_phys(page); ··· 256 * flush_dcache_page() on the page. 257 */ 258 259 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) 260 261 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 262 unsigned long vaddr, void *dst, const void *src,
+17 -5
block/bfq-iosched.h
··· 71 * 72 * bfq_sched_data is the basic scheduler queue. It supports three 73 * ioprio_classes, and can be used either as a toplevel queue or as an 74 - * intermediate queue on a hierarchical setup. @next_in_service 75 - * points to the active entity of the sched_data service trees that 76 - * will be scheduled next. It is used to reduce the number of steps 77 - * needed for each hierarchical-schedule update. 78 * 79 * The supported ioprio_classes are the same as in CFQ, in descending 80 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. 81 * Requests from higher priority queues are served before all the 82 * requests from lower priority queues; among requests of the same 83 * queue requests are served according to B-WF2Q+. 84 - * All the fields are protected by the queue lock of the containing bfqd. 85 */ 86 struct bfq_sched_data { 87 /* entity in service */
··· 71 * 72 * bfq_sched_data is the basic scheduler queue. It supports three 73 * ioprio_classes, and can be used either as a toplevel queue or as an 74 + * intermediate queue in a hierarchical setup. 75 * 76 * The supported ioprio_classes are the same as in CFQ, in descending 77 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. 78 * Requests from higher priority queues are served before all the 79 * requests from lower priority queues; among requests of the same 80 * queue requests are served according to B-WF2Q+. 81 * 82 + * The schedule is implemented by the service trees, plus the field 83 + * @next_in_service, which points to the entity on the active trees 84 + * that will be served next, if 1) no changes in the schedule occur 85 + * before the current in-service entity is expired, 2) the in-service 86 + * queue becomes idle when it expires, and 3) if the entity pointed to by 87 + * in_service_entity is not a queue, then the in-service child entity 88 + * of the entity pointed to by in_service_entity becomes idle on 89 + * expiration. This peculiar definition allows for the following 90 + * optimization, not yet exploited: while a given entity is still in 91 + * service, we already know which is the best candidate for next 92 + * service among the other active entities in the same parent 93 + * entity. We can then quickly compare the timestamps of the 94 + * in-service entity with those of such a best candidate. 95 + * 96 + * All fields are protected by the lock of the containing bfqd. 97 */ 98 struct bfq_sched_data { 99 /* entity in service */
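The comment above mentions an optimization, not yet exploited, of comparing the timestamps of the in-service entity with those of the cached next_in_service candidate. A speculative standalone sketch of what that comparison could look like; it uses simplified mirror structures rather than the real bfq_entity/bfq_sched_data, and ignores the wraparound-safe comparison the real code would need.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified mirrors of the structures described above: only the
 * virtual finish timestamp matters for this illustration. */
struct entity { uint64_t finish; };
struct sched_data {
	struct entity *in_service_entity;
	struct entity *next_in_service;
};

/* Hypothetical helper: with next_in_service cached, deciding whether
 * the best candidate would finish earlier than the entity in service
 * comes down to a single timestamp comparison. */
static bool candidate_finishes_earlier(const struct sched_data *sd)
{
	if (!sd->in_service_entity || !sd->next_in_service)
		return false;
	return sd->next_in_service->finish < sd->in_service_entity->finish;
}

int main(void)
{
	struct entity cur = { .finish = 200 }, next = { .finish = 150 };
	struct sched_data sd = { .in_service_entity = &cur,
				 .next_in_service = &next };

	printf("%d\n", candidate_finishes_earlier(&sd));   /* 1 */
	return 0;
}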
+81 -65
block/bfq-wf2q.c
··· 188 189 /* 190 * This function tells whether entity stops being a candidate for next 191 - * service, according to the following logic. 192 * 193 - * This function is invoked for an entity that is about to be set in 194 - * service. If such an entity is a queue, then the entity is no longer 195 - * a candidate for next service (i.e, a candidate entity to serve 196 - * after the in-service entity is expired). The function then returns 197 - * true. 198 * 199 - * In contrast, the entity could stil be a candidate for next service 200 - * if it is not a queue, and has more than one child. In fact, even if 201 - * one of its children is about to be set in service, other children 202 - * may still be the next to serve. As a consequence, a non-queue 203 - * entity is not a candidate for next-service only if it has only one 204 - * child. And only if this condition holds, then the function returns 205 - * true for a non-queue entity. 206 */ 207 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) 208 { ··· 215 216 bfqg = container_of(entity, struct bfq_group, entity); 217 218 if (bfqg->active_entities == 1) 219 return true; 220 ··· 968 * one of its children receives a new request. 969 * 970 * Basically, this function updates the timestamps of entity and 971 - * inserts entity into its active tree, ater possible extracting it 972 * from its idle tree. 973 */ 974 static void __bfq_activate_entity(struct bfq_entity *entity, ··· 1062 entity->start = entity->finish; 1063 /* 1064 * In addition, if the entity had more than one child 1065 - * when set in service, then was not extracted from 1066 * the active tree. This implies that the position of 1067 * the entity in the active tree may need to be 1068 * changed now, because we have just updated the start ··· 1070 * time in a moment (the requeueing is then, more 1071 * precisely, a repositioning in this case). To 1072 * implement this repositioning, we: 1) dequeue the 1073 - * entity here, 2) update the finish time and 1074 - * requeue the entity according to the new 1075 - * timestamps below. 1076 */ 1077 if (entity->tree) 1078 bfq_active_extract(st, entity); ··· 1118 1119 1120 /** 1121 - * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, 1122 - * and activate, requeue or reposition all ancestors 1123 - * for which such an update becomes necessary. 1124 * @entity: the entity to activate. 1125 * @non_blocking_wait_rq: true if this entity was waiting for a request 1126 * @requeue: true if this is a requeue, which implies that bfqq is ··· 1149 * @ins_into_idle_tree: if false, the entity will not be put into the 1150 * idle tree. 1151 * 1152 - * Deactivates an entity, independently from its previous state. Must 1153 * be invoked only if entity is on a service tree. Extracts the entity 1154 - * from that tree, and if necessary and allowed, puts it on the idle 1155 * tree. 1156 */ 1157 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) ··· 1172 st = bfq_entity_service_tree(entity); 1173 is_in_service = entity == sd->in_service_entity; 1174 1175 - if (is_in_service) 1176 bfq_calc_finish(entity, entity->service); 1177 1178 if (entity->tree == &st->active) 1179 bfq_active_extract(st, entity); ··· 1193 /** 1194 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. 1195 * @entity: the entity to deactivate. 
1196 - * @ins_into_idle_tree: true if the entity can be put on the idle tree 1197 */ 1198 static void bfq_deactivate_entity(struct bfq_entity *entity, 1199 bool ins_into_idle_tree, ··· 1224 */ 1225 bfq_update_next_in_service(sd, NULL); 1226 1227 - if (sd->next_in_service) 1228 /* 1229 - * The parent entity is still backlogged, 1230 - * because next_in_service is not NULL. So, no 1231 - * further upwards deactivation must be 1232 - * performed. Yet, next_in_service has 1233 - * changed. Then the schedule does need to be 1234 - * updated upwards. 1235 */ 1236 break; 1237 1238 /* 1239 * If we get here, then the parent is no more ··· 1523 1524 /* 1525 * If entity is no longer a candidate for next 1526 - * service, then we extract it from its active tree, 1527 - * for the following reason. To further boost the 1528 - * throughput in some special case, BFQ needs to know 1529 - * which is the next candidate entity to serve, while 1530 - * there is already an entity in service. In this 1531 - * respect, to make it easy to compute/update the next 1532 - * candidate entity to serve after the current 1533 - * candidate has been set in service, there is a case 1534 - * where it is necessary to extract the current 1535 - * candidate from its service tree. Such a case is 1536 - * when the entity just set in service cannot be also 1537 - * a candidate for next service. Details about when 1538 - * this conditions holds are reported in the comments 1539 - * on the function bfq_no_longer_next_in_service() 1540 - * invoked below. 1541 */ 1542 if (bfq_no_longer_next_in_service(entity)) 1543 bfq_active_extract(bfq_entity_service_tree(entity), 1544 entity); 1545 1546 /* 1547 - * For the same reason why we may have just extracted 1548 - * entity from its active tree, we may need to update 1549 - * next_in_service for the sched_data of entity too, 1550 - * regardless of whether entity has been extracted. 1551 - * In fact, even if entity has not been extracted, a 1552 - * descendant entity may get extracted. Such an event 1553 - * would cause a change in next_in_service for the 1554 - * level of the descendant entity, and thus possibly 1555 - * back to upper levels. 1556 * 1557 - * We cannot perform the resulting needed update 1558 - * before the end of this loop, because, to know which 1559 - * is the correct next-to-serve candidate entity for 1560 - * each level, we need first to find the leaf entity 1561 - * to set in service. In fact, only after we know 1562 - * which is the next-to-serve leaf entity, we can 1563 - * discover whether the parent entity of the leaf 1564 - * entity becomes the next-to-serve, and so on. 1565 */ 1566 - 1567 } 1568 1569 bfqq = bfq_entity_to_bfqq(entity);
··· 188 189 /* 190 * This function tells whether entity stops being a candidate for next 191 + * service, according to the restrictive definition of the field 192 + * next_in_service. In particular, this function is invoked for an 193 + * entity that is about to be set in service. 194 * 195 + * If entity is a queue, then the entity is no longer a candidate for 196 + * next service according to that definition, because entity is 197 + * about to become the in-service queue. This function then returns 198 + * true if entity is a queue. 199 * 200 + * In contrast, entity could still be a candidate for next service if 201 + * it is not a queue, and has more than one active child. In fact, 202 + * even if one of its children is about to be set in service, other 203 + * active children may still be the next to serve, for the parent 204 + * entity, even according to the above definition. As a consequence, a 205 + * non-queue entity is not a candidate for next-service only if it has 206 + * only one active child. And only if this condition holds, then this 207 + * function returns true for a non-queue entity. 208 */ 209 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) 210 { ··· 213 214 bfqg = container_of(entity, struct bfq_group, entity); 215 216 + /* 217 + * The field active_entities does not always contain the 218 + * actual number of active children entities: it happens to 219 + * not account for the in-service entity in case the latter is 220 + * removed from its active tree (which may get done after 221 + * invoking the function bfq_no_longer_next_in_service in 222 + * bfq_get_next_queue). Fortunately, here, i.e., while 223 + * bfq_no_longer_next_in_service is not yet completed in 224 + * bfq_get_next_queue, bfq_active_extract has not yet been 225 + * invoked, and thus active_entities still coincides with the 226 + * actual number of active entities. 227 + */ 228 if (bfqg->active_entities == 1) 229 return true; 230 ··· 954 * one of its children receives a new request. 955 * 956 * Basically, this function updates the timestamps of entity and 957 + * inserts entity into its active tree, after possibly extracting it 958 * from its idle tree. 959 */ 960 static void __bfq_activate_entity(struct bfq_entity *entity, ··· 1048 entity->start = entity->finish; 1049 /* 1050 * In addition, if the entity had more than one child 1051 + * when set in service, then it was not extracted from 1052 * the active tree. This implies that the position of 1053 * the entity in the active tree may need to be 1054 * changed now, because we have just updated the start ··· 1056 * time in a moment (the requeueing is then, more 1057 * precisely, a repositioning in this case). To 1058 * implement this repositioning, we: 1) dequeue the 1059 + * entity here, 2) update the finish time and requeue 1060 + * the entity according to the new timestamps below. 1061 */ 1062 if (entity->tree) 1063 bfq_active_extract(st, entity); ··· 1105 1106 1107 /** 1108 + * bfq_activate_requeue_entity - activate or requeue an entity representing a 1109 + * bfq_queue, and activate, requeue or reposition 1110 + * all ancestors for which such an update becomes 1111 + * necessary. 1112 * @entity: the entity to activate. 1113 * @non_blocking_wait_rq: true if this entity was waiting for a request 1114 * @requeue: true if this is a requeue, which implies that bfqq is ··· 1135 * @ins_into_idle_tree: if false, the entity will not be put into the 1136 idle tree. 
1137 * 1138 + * Deactivates an entity, independently of its previous state. Must 1139 * be invoked only if entity is on a service tree. Extracts the entity 1140 + * from that tree, and if necessary and allowed, puts it into the idle 1141 * tree. 1142 */ 1143 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) ··· 1158 st = bfq_entity_service_tree(entity); 1159 is_in_service = entity == sd->in_service_entity; 1160 1161 + if (is_in_service) { 1162 bfq_calc_finish(entity, entity->service); 1163 + sd->in_service_entity = NULL; 1164 + } 1165 1166 if (entity->tree == &st->active) 1167 bfq_active_extract(st, entity); ··· 1177 /** 1178 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. 1179 * @entity: the entity to deactivate. 1180 + * @ins_into_idle_tree: true if the entity can be put into the idle tree 1181 */ 1182 static void bfq_deactivate_entity(struct bfq_entity *entity, 1183 bool ins_into_idle_tree, ··· 1208 */ 1209 bfq_update_next_in_service(sd, NULL); 1210 1211 + if (sd->next_in_service || sd->in_service_entity) { 1212 /* 1213 + * The parent entity is still active, because 1214 + * either next_in_service or in_service_entity 1215 + * is not NULL. So, no further upwards 1216 + * deactivation must be performed. Yet, 1217 + * next_in_service has changed. Then the 1218 + * schedule does need to be updated upwards. 1219 + * 1220 + * NOTE If in_service_entity is not NULL, then 1221 + * next_in_service may happen to be NULL, 1222 + * although the parent entity is evidently 1223 + * active. This happens if 1) the entity 1224 + * pointed by in_service_entity is the only 1225 + * active entity in the parent entity, and 2) 1226 + * according to the definition of 1227 + * next_in_service, the in_service_entity 1228 + * cannot be considered as 1229 + * next_in_service. See the comments on the 1230 + * definition of next_in_service for details. 1231 */ 1232 break; 1233 + } 1234 1235 /* 1236 * If we get here, then the parent is no more ··· 1494 1495 /* 1496 * If entity is no longer a candidate for next 1497 + * service, then it must be extracted from its active 1498 + * tree, so as to make sure that it won't be 1499 + * considered when computing next_in_service. See the 1500 + * comments on the function 1501 + * bfq_no_longer_next_in_service() for details. 1502 */ 1503 if (bfq_no_longer_next_in_service(entity)) 1504 bfq_active_extract(bfq_entity_service_tree(entity), 1505 entity); 1506 1507 /* 1508 + * Even if entity is not to be extracted according to 1509 + * the above check, a descendant entity may get 1510 + * extracted in one of the next iterations of this 1511 + * loop. Such an event could cause a change in 1512 + * next_in_service for the level of the descendant 1513 + * entity, and thus possibly back to this level. 1514 * 1515 + * However, we cannot perform the resulting needed 1516 + * update of next_in_service for this level before the 1517 + * end of the whole loop, because, to know which is 1518 + * the correct next-to-serve candidate entity for each 1519 + * level, we need first to find the leaf entity to set 1520 + * in service. In fact, only after we know which is 1521 + * the next-to-serve leaf entity, we can discover 1522 + * whether the parent entity of the leaf entity 1523 + * becomes the next-to-serve, and so on. 1524 */ 1525 } 1526 1527 bfqq = bfq_entity_to_bfqq(entity);
+4 -2
block/bio-integrity.c
··· 387 */ 388 bool __bio_integrity_endio(struct bio *bio) 389 { 390 - if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) { 391 - struct bio_integrity_payload *bip = bio_integrity(bio); 392 393 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); 394 queue_work(kintegrityd_wq, &bip->bip_work); 395 return false;
··· 387 */ 388 bool __bio_integrity_endio(struct bio *bio) 389 { 390 + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 391 + struct bio_integrity_payload *bip = bio_integrity(bio); 392 393 + if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && 394 + (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) { 395 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); 396 queue_work(kintegrityd_wq, &bip->bip_work); 397 return false;
+13 -8
block/blk-mq.c
··· 301 struct elevator_queue *e = q->elevator; 302 struct request *rq; 303 unsigned int tag; 304 305 blk_queue_enter_live(q); 306 data->q = q; 307 if (likely(!data->ctx)) 308 - data->ctx = blk_mq_get_ctx(q); 309 if (likely(!data->hctx)) 310 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); 311 if (op & REQ_NOWAIT) ··· 325 326 tag = blk_mq_get_tag(data); 327 if (tag == BLK_MQ_TAG_FAIL) { 328 blk_queue_exit(q); 329 return NULL; 330 } ··· 361 362 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 363 364 - blk_mq_put_ctx(alloc_data.ctx); 365 - blk_queue_exit(q); 366 - 367 if (!rq) 368 return ERR_PTR(-EWOULDBLOCK); 369 370 rq->__data_len = 0; 371 rq->__sector = (sector_t) -1; ··· 412 413 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 414 415 - blk_queue_exit(q); 416 - 417 if (!rq) 418 return ERR_PTR(-EWOULDBLOCK); 419 420 return rq; 421 } ··· 684 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 685 unsigned long msecs) 686 { 687 - kblockd_schedule_delayed_work(&q->requeue_work, 688 - msecs_to_jiffies(msecs)); 689 } 690 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 691
··· 301 struct elevator_queue *e = q->elevator; 302 struct request *rq; 303 unsigned int tag; 304 + struct blk_mq_ctx *local_ctx = NULL; 305 306 blk_queue_enter_live(q); 307 data->q = q; 308 if (likely(!data->ctx)) 309 + data->ctx = local_ctx = blk_mq_get_ctx(q); 310 if (likely(!data->hctx)) 311 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); 312 if (op & REQ_NOWAIT) ··· 324 325 tag = blk_mq_get_tag(data); 326 if (tag == BLK_MQ_TAG_FAIL) { 327 + if (local_ctx) { 328 + blk_mq_put_ctx(local_ctx); 329 + data->ctx = NULL; 330 + } 331 blk_queue_exit(q); 332 return NULL; 333 } ··· 356 357 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 358 359 if (!rq) 360 return ERR_PTR(-EWOULDBLOCK); 361 + 362 + blk_mq_put_ctx(alloc_data.ctx); 363 + blk_queue_exit(q); 364 365 rq->__data_len = 0; 366 rq->__sector = (sector_t) -1; ··· 407 408 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 409 410 if (!rq) 411 return ERR_PTR(-EWOULDBLOCK); 412 + 413 + blk_queue_exit(q); 414 415 return rq; 416 } ··· 679 void blk_mq_delay_kick_requeue_list(struct request_queue *q, 680 unsigned long msecs) 681 { 682 + kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 683 + msecs_to_jiffies(msecs)); 684 } 685 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 686
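The blk-mq change above applies a general rule for error paths: release only the context this function acquired itself, never one supplied by the caller. A standalone sketch of that pattern; acquire_ctx(), release_ctx() and try_get_tag() are hypothetical stand-ins for blk_mq_get_ctx(), blk_mq_put_ctx() and blk_mq_get_tag().

#include <stdio.h>

struct ctx { int id; };
struct data { struct ctx *ctx; };

/* Hypothetical stand-ins for the blk-mq helpers. */
static struct ctx the_ctx = { 1 };
static struct ctx *acquire_ctx(void) { return &the_ctx; }
static void release_ctx(struct ctx *c) { (void)c; }
static int try_get_tag(struct data *d) { (void)d; return -1; /* simulate failure */ }

static int get_request(struct data *data)
{
	struct ctx *local_ctx = NULL;

	if (!data->ctx)                 /* the caller may have supplied one */
		data->ctx = local_ctx = acquire_ctx();

	if (try_get_tag(data) < 0) {
		/* Undo only what this function acquired itself. */
		if (local_ctx) {
			release_ctx(local_ctx);
			data->ctx = NULL;
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct data d = { NULL };

	printf("get_request: %d, ctx after failure: %p\n",
	       get_request(&d), (void *)d.ctx);
	return 0;
}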
+34 -2
drivers/acpi/spcr.c
··· 17 #include <linux/serial_core.h> 18 19 /* 20 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. 21 * Detect them by examining the OEM fields in the SPCR header, similiar to PCI 22 * quirk detection in pci_mcfg.c. ··· 157 goto done; 158 } 159 160 - if (qdf2400_erratum_44_present(&table->header)) 161 - uart = "qdf2400_e44"; 162 if (xgene_8250_erratum_present(table)) 163 iotype = "mmio32"; 164
··· 17 #include <linux/serial_core.h> 18 19 /* 20 + * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as 21 + * occasionally getting stuck as 1. To avoid the potential for a hang, check 22 + * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART 23 + * implementations, so only do so if an affected platform is detected in 24 + * parse_spcr(). 25 + */ 26 + bool qdf2400_e44_present; 27 + EXPORT_SYMBOL(qdf2400_e44_present); 28 + 29 + /* 30 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. 31 * Detect them by examining the OEM fields in the SPCR header, similiar to PCI 32 * quirk detection in pci_mcfg.c. ··· 147 goto done; 148 } 149 150 + /* 151 + * If the E44 erratum is required, then we need to tell the pl011 152 + * driver to implement the work-around. 153 + * 154 + * The global variable is used by the probe function when it 155 + * creates the UARTs, whether or not they're used as a console. 156 + * 157 + * If the user specifies "traditional" earlycon, the qdf2400_e44 158 + * console name matches the EARLYCON_DECLARE() statement, and 159 + * SPCR is not used. Parameter "earlycon" is false. 160 + * 161 + * If the user specifies "SPCR" earlycon, then we need to update 162 + * the console name so that it also says "qdf2400_e44". Parameter 163 + * "earlycon" is true. 164 + * 165 + * For consistency, if we change the console name, then we do it 166 + * for everyone, not just earlycon. 167 + */ 168 + if (qdf2400_erratum_44_present(&table->header)) { 169 + qdf2400_e44_present = true; 170 + if (earlycon) 171 + uart = "qdf2400_e44"; 172 + } 173 + 174 if (xgene_8250_erratum_present(table)) 175 iotype = "mmio32"; 176
+34 -15
drivers/base/firmware_class.c
··· 30 #include <linux/syscore_ops.h> 31 #include <linux/reboot.h> 32 #include <linux/security.h> 33 - #include <linux/swait.h> 34 35 #include <generated/utsrelease.h> 36 ··· 111 * state of the firmware loading. 112 */ 113 struct fw_state { 114 - struct swait_queue_head wq; 115 enum fw_status status; 116 }; 117 118 static void fw_state_init(struct fw_state *fw_st) 119 { 120 - init_swait_queue_head(&fw_st->wq); 121 fw_st->status = FW_STATUS_UNKNOWN; 122 } 123 ··· 130 { 131 long ret; 132 133 - ret = swait_event_interruptible_timeout(fw_st->wq, 134 - __fw_state_is_done(READ_ONCE(fw_st->status)), 135 - timeout); 136 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) 137 return -ENOENT; 138 if (!ret) ··· 145 WRITE_ONCE(fw_st->status, status); 146 147 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) 148 - swake_up(&fw_st->wq); 149 } 150 151 #define fw_state_start(fw_st) \ 152 __fw_state_set(fw_st, FW_STATUS_LOADING) 153 #define fw_state_done(fw_st) \ 154 __fw_state_set(fw_st, FW_STATUS_DONE) 155 #define fw_state_wait(fw_st) \ 156 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) 157 - 158 - #ifndef CONFIG_FW_LOADER_USER_HELPER 159 - 160 - #define fw_state_is_aborted(fw_st) false 161 - 162 - #else /* CONFIG_FW_LOADER_USER_HELPER */ 163 164 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) 165 { 166 return fw_st->status == status; 167 } 168 169 #define fw_state_aborted(fw_st) \ 170 __fw_state_set(fw_st, FW_STATUS_ABORTED) ··· 173 __fw_state_check(fw_st, FW_STATUS_DONE) 174 #define fw_state_is_loading(fw_st) \ 175 __fw_state_check(fw_st, FW_STATUS_LOADING) 176 - #define fw_state_is_aborted(fw_st) \ 177 - __fw_state_check(fw_st, FW_STATUS_ABORTED) 178 #define fw_state_wait_timeout(fw_st, timeout) \ 179 __fw_state_wait_common(fw_st, timeout) 180 ··· 1196 return 1; /* need to load */ 1197 } 1198 1199 /* called from request_firmware() and request_firmware_work_func() */ 1200 static int 1201 _request_firmware(const struct firmware **firmware_p, const char *name, ··· 1261 1262 out: 1263 if (ret < 0) { 1264 release_firmware(fw); 1265 fw = NULL; 1266 }
··· 30 #include <linux/syscore_ops.h> 31 #include <linux/reboot.h> 32 #include <linux/security.h> 33 34 #include <generated/utsrelease.h> 35 ··· 112 * state of the firmware loading. 113 */ 114 struct fw_state { 115 + struct completion completion; 116 enum fw_status status; 117 }; 118 119 static void fw_state_init(struct fw_state *fw_st) 120 { 121 + init_completion(&fw_st->completion); 122 fw_st->status = FW_STATUS_UNKNOWN; 123 } 124 ··· 131 { 132 long ret; 133 134 + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); 135 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) 136 return -ENOENT; 137 if (!ret) ··· 148 WRITE_ONCE(fw_st->status, status); 149 150 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) 151 + complete_all(&fw_st->completion); 152 } 153 154 #define fw_state_start(fw_st) \ 155 __fw_state_set(fw_st, FW_STATUS_LOADING) 156 #define fw_state_done(fw_st) \ 157 __fw_state_set(fw_st, FW_STATUS_DONE) 158 + #define fw_state_aborted(fw_st) \ 159 + __fw_state_set(fw_st, FW_STATUS_ABORTED) 160 #define fw_state_wait(fw_st) \ 161 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) 162 163 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) 164 { 165 return fw_st->status == status; 166 } 167 + 168 + #define fw_state_is_aborted(fw_st) \ 169 + __fw_state_check(fw_st, FW_STATUS_ABORTED) 170 + 171 + #ifdef CONFIG_FW_LOADER_USER_HELPER 172 173 #define fw_state_aborted(fw_st) \ 174 __fw_state_set(fw_st, FW_STATUS_ABORTED) ··· 175 __fw_state_check(fw_st, FW_STATUS_DONE) 176 #define fw_state_is_loading(fw_st) \ 177 __fw_state_check(fw_st, FW_STATUS_LOADING) 178 #define fw_state_wait_timeout(fw_st, timeout) \ 179 __fw_state_wait_common(fw_st, timeout) 180 ··· 1200 return 1; /* need to load */ 1201 } 1202 1203 + /* 1204 + * Batched requests need only one wake, we need to do this step last due to the 1205 + * fallback mechanism. The buf is protected with kref_get(), and it won't be 1206 + * released until the last user calls release_firmware(). 1207 + * 1208 + * Failed batched requests are possible as well, in such cases we just share 1209 + * the struct firmware_buf and won't release it until all requests are woken 1210 + * and have gone through this same path. 1211 + */ 1212 + static void fw_abort_batch_reqs(struct firmware *fw) 1213 + { 1214 + struct firmware_buf *buf; 1215 + 1216 + /* Loaded directly? */ 1217 + if (!fw || !fw->priv) 1218 + return; 1219 + 1220 + buf = fw->priv; 1221 + if (!fw_state_is_aborted(&buf->fw_st)) 1222 + fw_state_aborted(&buf->fw_st); 1223 + } 1224 + 1225 /* called from request_firmware() and request_firmware_work_func() */ 1226 static int 1227 _request_firmware(const struct firmware **firmware_p, const char *name, ··· 1243 1244 out: 1245 if (ret < 0) { 1246 + fw_abort_batch_reqs(fw); 1247 release_firmware(fw); 1248 fw = NULL; 1249 }
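The swait-to-completion conversion above follows the usual completion pattern: the producer publishes a final status and calls complete_all() exactly once, while any number of waiters block in wait_for_completion_killable_timeout(). A minimal kernel-context sketch of that pattern; struct my_state is made up for illustration and is not the driver's fw_state.

#include <linux/completion.h>
#include <linux/errno.h>

struct my_state {
	struct completion done;
	int status;		/* 0 = pending, >0 = success, <0 = aborted */
};

static void my_state_init(struct my_state *st)
{
	init_completion(&st->done);
	st->status = 0;
}

/* Producer side: publish the final status first, then wake every waiter
 * once; complete_all() keeps the completion "done" for late waiters. */
static void my_state_finish(struct my_state *st, int status)
{
	st->status = status;
	complete_all(&st->done);
}

/* Consumer side: 0 on success, -ETIMEDOUT on timeout, -ERESTARTSYS if a
 * fatal signal arrived, -ENOENT if the producer reported an abort. */
static int my_state_wait(struct my_state *st, unsigned long timeout)
{
	long ret = wait_for_completion_killable_timeout(&st->done, timeout);

	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return st->status < 0 ? -ENOENT : 0;
}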
+61
drivers/block/sunvdc.c
··· 875 printk(KERN_INFO "%s", version); 876 } 877 878 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) 879 { 880 struct mdesc_handle *hp; ··· 940 if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { 941 printk(KERN_ERR PFX "Port id [%llu] too large.\n", 942 vdev->dev_no); 943 goto err_out_release_mdesc; 944 } 945 ··· 1001 if (err) 1002 goto err_out_free_tx_ring; 1003 1004 dev_set_drvdata(&vdev->dev, port); 1005 1006 mdesc_release(hp);
··· 875 printk(KERN_INFO "%s", version); 876 } 877 878 + struct vdc_check_port_data { 879 + int dev_no; 880 + char *type; 881 + }; 882 + 883 + static int vdc_device_probed(struct device *dev, void *arg) 884 + { 885 + struct vio_dev *vdev = to_vio_dev(dev); 886 + struct vdc_check_port_data *port_data; 887 + 888 + port_data = (struct vdc_check_port_data *)arg; 889 + 890 + if ((vdev->dev_no == port_data->dev_no) && 891 + (!(strcmp((char *)&vdev->type, port_data->type))) && 892 + dev_get_drvdata(dev)) { 893 + /* This device has already been configured 894 + * by vdc_port_probe() 895 + */ 896 + return 1; 897 + } else { 898 + return 0; 899 + } 900 + } 901 + 902 + /* Determine whether the VIO device is part of an mpgroup 903 + * by locating all the virtual-device-port nodes associated 904 + * with the parent virtual-device node for the VIO device 905 + * and checking whether any of these nodes are vdc-ports 906 + * which have already been configured. 907 + * 908 + * Returns true if this device is part of an mpgroup and has 909 + * already been probed. 910 + */ 911 + static bool vdc_port_mpgroup_check(struct vio_dev *vdev) 912 + { 913 + struct vdc_check_port_data port_data; 914 + struct device *dev; 915 + 916 + port_data.dev_no = vdev->dev_no; 917 + port_data.type = (char *)&vdev->type; 918 + 919 + dev = device_find_child(vdev->dev.parent, &port_data, 920 + vdc_device_probed); 921 + 922 + if (dev) 923 + return true; 924 + 925 + return false; 926 + } 927 + 928 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) 929 { 930 struct mdesc_handle *hp; ··· 890 if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { 891 printk(KERN_ERR PFX "Port id [%llu] too large.\n", 892 vdev->dev_no); 893 + goto err_out_release_mdesc; 894 + } 895 + 896 + /* Check if this device is part of an mpgroup */ 897 + if (vdc_port_mpgroup_check(vdev)) { 898 + printk(KERN_WARNING 899 + "VIO: Ignoring extra vdisk port %s", 900 + dev_name(&vdev->dev)); 901 goto err_out_release_mdesc; 902 } 903 ··· 943 if (err) 944 goto err_out_free_tx_ring; 945 946 + /* Note that the device driver_data is used to determine 947 + * whether the port has been probed. 948 + */ 949 dev_set_drvdata(&vdev->dev, port); 950 951 mdesc_release(hp);
+2 -2
drivers/block/zram/zram_drv.c
··· 308 struct device_attribute *attr, const char *buf, size_t len) 309 { 310 struct zram *zram = dev_to_zram(dev); 311 - char compressor[CRYPTO_MAX_ALG_NAME]; 312 size_t sz; 313 314 strlcpy(compressor, buf, sizeof(compressor)); ··· 327 return -EBUSY; 328 } 329 330 - strlcpy(zram->compressor, compressor, sizeof(compressor)); 331 up_write(&zram->init_lock); 332 return len; 333 }
··· 308 struct device_attribute *attr, const char *buf, size_t len) 309 { 310 struct zram *zram = dev_to_zram(dev); 311 + char compressor[ARRAY_SIZE(zram->compressor)]; 312 size_t sz; 313 314 strlcpy(compressor, buf, sizeof(compressor)); ··· 327 return -EBUSY; 328 } 329 330 + strcpy(zram->compressor, compressor); 331 up_write(&zram->init_lock); 332 return len; 333 }
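Sizing the scratch buffer from the destination field itself, as done above with ARRAY_SIZE(zram->compressor), guarantees that the later plain copy cannot overrun. A tiny standalone illustration of the idiom, with a made-up struct and snprintf() standing in for the kernel's strlcpy().

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dev { char compressor[16]; };	/* hypothetical destination field */

static void set_compressor(struct dev *d, const char *buf)
{
	char tmp[ARRAY_SIZE(d->compressor)];	/* same capacity as the target */

	/* Truncate the (possibly longer) input to what the target can hold... */
	snprintf(tmp, sizeof(tmp), "%s", buf);
	/* ...so a plain strcpy into the destination is now safe. */
	strcpy(d->compressor, tmp);
}

int main(void)
{
	struct dev d;

	set_compressor(&d, "lzo-with-a-very-long-name");
	printf("%s\n", d.compressor);		/* "lzo-with-a-very" */
	return 0;
}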
+1 -1
drivers/char/random.c
··· 1492 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1493 print_once = true; 1494 #endif 1495 - pr_notice("random: %s called from %pF with crng_init=%d\n", 1496 func_name, caller, crng_init); 1497 } 1498
··· 1492 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1493 print_once = true; 1494 #endif 1495 + pr_notice("random: %s called from %pS with crng_init=%d\n", 1496 func_name, caller, crng_init); 1497 } 1498
+10
drivers/cpuidle/cpuidle-powernv.c
··· 235 return -1; 236 } 237 238 static int powernv_add_idle_states(void) 239 { 240 struct device_node *power_mgt; ··· 249 const char *names[CPUIDLE_STATE_MAX]; 250 u32 has_stop_states = 0; 251 int i, rc; 252 253 /* Currently we have snooze statically defined */ 254 ··· 365 for (i = 0; i < dt_idle_states; i++) { 366 unsigned int exit_latency, target_residency; 367 bool stops_timebase = false; 368 /* 369 * If an idle state has exit latency beyond 370 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
··· 235 return -1; 236 } 237 238 + extern u32 pnv_get_supported_cpuidle_states(void); 239 static int powernv_add_idle_states(void) 240 { 241 struct device_node *power_mgt; ··· 248 const char *names[CPUIDLE_STATE_MAX]; 249 u32 has_stop_states = 0; 250 int i, rc; 251 + u32 supported_flags = pnv_get_supported_cpuidle_states(); 252 + 253 254 /* Currently we have snooze statically defined */ 255 ··· 362 for (i = 0; i < dt_idle_states; i++) { 363 unsigned int exit_latency, target_residency; 364 bool stops_timebase = false; 365 + 366 + /* 367 + * Skip the platform idle state whose flag isn't in 368 + * the supported_cpuidle_states flag mask. 369 + */ 370 + if ((flags[i] & supported_flags) != flags[i]) 371 + continue; 372 /* 373 * If an idle state has exit latency beyond 374 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
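The filter added above keeps an idle state only when every flag bit it sets is also present in the supported mask; (flags[i] & supported_flags) != flags[i] is true exactly when some bit falls outside the mask. A standalone check with made-up flag values:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical flag bits, for illustration only. */
#define IDLE_NAP    0x01u
#define IDLE_SLEEP  0x02u
#define IDLE_WINKLE 0x04u

static int state_supported(uint32_t state_flags, uint32_t supported)
{
	/* All bits of state_flags must be contained in the supported mask. */
	return (state_flags & supported) == state_flags;
}

int main(void)
{
	uint32_t supported = IDLE_NAP | IDLE_SLEEP;

	printf("%d\n", state_supported(IDLE_NAP, supported));                 /* 1 */
	printf("%d\n", state_supported(IDLE_SLEEP | IDLE_WINKLE, supported)); /* 0 */
	return 0;
}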
+4 -4
drivers/crypto/inside-secure/safexcel_hash.c
··· 883 if (ret) 884 return ret; 885 886 - memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); 887 - memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); 888 - 889 - for (i = 0; i < ARRAY_SIZE(istate.state); i++) { 890 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 891 ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 892 ctx->base.needs_inv = true; 893 break; 894 } 895 } 896 897 return 0; 898 }
··· 883 if (ret) 884 return ret; 885 886 + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { 887 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 888 ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 889 ctx->base.needs_inv = true; 890 break; 891 } 892 } 893 + 894 + memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); 895 + memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); 896 897 return 0; 898 }
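Moving the comparison before the memcpy() matters because the loop is meant to detect whether the cached ipad/opad differ from the freshly derived state; comparing after the copy would always see equal values. A standalone sketch of the compare-before-update idiom, with illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define WORDS 5	/* SHA1 state is 5 x 32-bit words */

struct cached { unsigned int state[WORDS]; };

/* Returns true when the cache changed, i.e. downstream users must be
 * invalidated. The check runs before the cache is overwritten. */
static bool update_cache(struct cached *c, const unsigned int *fresh)
{
	bool changed = memcmp(c->state, fresh, sizeof(c->state)) != 0;

	memcpy(c->state, fresh, sizeof(c->state));
	return changed;
}

int main(void)
{
	struct cached c = { { 1, 2, 3, 4, 5 } };
	unsigned int fresh[WORDS] = { 1, 2, 3, 4, 6 };

	printf("needs invalidation: %d\n", update_cache(&c, fresh));	/* 1 */
	printf("needs invalidation: %d\n", update_cache(&c, fresh));	/* 0 */
	return 0;
}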
+3 -2
drivers/dma-buf/sync_file.c
··· 304 { 305 struct sync_file *sync_file = file->private_data; 306 307 - if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) 308 dma_fence_remove_callback(sync_file->fence, &sync_file->cb); 309 dma_fence_put(sync_file->fence); 310 kfree(sync_file); ··· 318 319 poll_wait(file, &sync_file->wq, wait); 320 321 - if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { 322 if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, 323 fence_check_cb_func) < 0) 324 wake_up_all(&sync_file->wq);
··· 304 { 305 struct sync_file *sync_file = file->private_data; 306 307 + if (test_bit(POLL_ENABLED, &sync_file->flags)) 308 dma_fence_remove_callback(sync_file->fence, &sync_file->cb); 309 dma_fence_put(sync_file->fence); 310 kfree(sync_file); ··· 318 319 poll_wait(file, &sync_file->wq, wait); 320 321 + if (list_empty(&sync_file->cb.node) && 322 + !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) { 323 if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, 324 fence_check_cb_func) < 0) 325 wake_up_all(&sync_file->wq);
+1 -1
drivers/gpu/drm/bridge/tc358767.c
··· 1255 1256 /* port@2 is the output port */ 1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); 1258 - if (ret) 1259 return ret; 1260 1261 /* Shut down GPIO is optional */
··· 1255 1256 /* port@2 is the output port */ 1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); 1258 + if (ret && ret != -ENODEV) 1259 return ret; 1260 1261 /* Shut down GPIO is optional */
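The bridge fix above treats -ENODEV from the panel lookup as "optional component absent" rather than a hard failure. A minimal standalone sketch of that convention, with lookup_optional_panel() standing in for drm_of_find_panel_or_bridge():

#include <errno.h>
#include <stdio.h>

/* Hypothetical lookup: 0 on success, -ENODEV when the optional node is
 * simply not described, another negative errno on real errors. */
static int lookup_optional_panel(int present, int broken)
{
	if (broken)
		return -EINVAL;
	return present ? 0 : -ENODEV;
}

static int probe(int present, int broken)
{
	int ret = lookup_optional_panel(present, broken);

	/* Only propagate genuine errors; a missing optional panel is fine. */
	if (ret && ret != -ENODEV)
		return ret;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", probe(1, 0), probe(0, 0), probe(0, 1));	/* 0 0 -22 */
	return 0;
}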
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 270 if (ret) 271 return ret; 272 273 - if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { 274 - DRM_ERROR("relocation %u outside object", i); 275 return -EINVAL; 276 } 277
··· 270 if (ret) 271 return ret; 272 273 + if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { 274 + DRM_ERROR("relocation %u outside object\n", i); 275 return -EINVAL; 276 } 277
+13 -1
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 145 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 146 const struct drm_mode_fb_cmd2 *mode_cmd) 147 { 148 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 149 struct drm_gem_object *obj; 150 struct drm_framebuffer *fb; 151 int i; 152 int ret; 153 154 - for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { 155 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); 156 if (!obj) { 157 DRM_ERROR("failed to lookup gem object\n"); ··· 166 } 167 168 exynos_gem[i] = to_exynos_gem(obj); 169 } 170 171 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
··· 145 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 146 const struct drm_mode_fb_cmd2 *mode_cmd) 147 { 148 + const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); 149 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 150 struct drm_gem_object *obj; 151 struct drm_framebuffer *fb; 152 int i; 153 int ret; 154 155 + for (i = 0; i < info->num_planes; i++) { 156 + unsigned int height = (i == 0) ? mode_cmd->height : 157 + DIV_ROUND_UP(mode_cmd->height, info->vsub); 158 + unsigned long size = height * mode_cmd->pitches[i] + 159 + mode_cmd->offsets[i]; 160 + 161 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); 162 if (!obj) { 163 DRM_ERROR("failed to lookup gem object\n"); ··· 160 } 161 162 exynos_gem[i] = to_exynos_gem(obj); 163 + 164 + if (size > exynos_gem[i]->size) { 165 + i++; 166 + ret = -EINVAL; 167 + goto err; 168 + } 169 } 170 171 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
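The size check added above computes, per plane, the number of stored lines (full height for plane 0, vertically subsampled for the rest) and requires lines * pitch + offset to fit in the backing GEM object. A standalone version of that arithmetic with illustrative NV12-like numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Bytes a plane needs inside its buffer: stored lines times pitch, plus
 * the plane's start offset. */
static unsigned long plane_size(unsigned int plane, unsigned int height,
				unsigned int vsub, unsigned int pitch,
				unsigned int offset)
{
	unsigned int lines = plane ? DIV_ROUND_UP(height, vsub) : height;

	return (unsigned long)lines * pitch + offset;
}

int main(void)
{
	/* e.g. 1920x1080 NV12: the chroma plane is vertically subsampled by 2. */
	printf("luma:   %lu\n", plane_size(0, 1080, 2, 1920, 0));	/* 2073600 */
	printf("chroma: %lu\n", plane_size(1, 1080, 2, 1920, 0));	/* 1036800 */
	return 0;
}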
+22 -5
drivers/gpu/drm/i915/gvt/execlist.c
··· 46 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ 47 ((a)->lrca == (b)->lrca)) 48 49 static int context_switch_events[] = { 50 [RCS] = RCS_AS_CONTEXT_SWITCH, 51 [BCS] = BCS_AS_CONTEXT_SWITCH, ··· 501 static int complete_execlist_workload(struct intel_vgpu_workload *workload) 502 { 503 struct intel_vgpu *vgpu = workload->vgpu; 504 - struct intel_vgpu_execlist *execlist = 505 - &vgpu->execlist[workload->ring_id]; 506 struct intel_vgpu_workload *next_workload; 507 - struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; 508 bool lite_restore = false; 509 int ret; 510 ··· 514 release_shadow_batch_buffer(workload); 515 release_shadow_wa_ctx(&workload->wa_ctx); 516 517 - if (workload->status || vgpu->resetting) 518 goto out; 519 520 - if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { 521 struct execlist_ctx_descriptor_format *this_desc, *next_desc; 522 523 next_workload = container_of(next,
··· 46 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ 47 ((a)->lrca == (b)->lrca)) 48 49 + static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); 50 + 51 static int context_switch_events[] = { 52 [RCS] = RCS_AS_CONTEXT_SWITCH, 53 [BCS] = BCS_AS_CONTEXT_SWITCH, ··· 499 static int complete_execlist_workload(struct intel_vgpu_workload *workload) 500 { 501 struct intel_vgpu *vgpu = workload->vgpu; 502 + int ring_id = workload->ring_id; 503 + struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; 504 struct intel_vgpu_workload *next_workload; 505 + struct list_head *next = workload_q_head(vgpu, ring_id)->next; 506 bool lite_restore = false; 507 int ret; 508 ··· 512 release_shadow_batch_buffer(workload); 513 release_shadow_wa_ctx(&workload->wa_ctx); 514 515 + if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { 516 + /* If workload->status is not successful, the HW GPU 517 + * has hit a hang or something has gone wrong in i915/GVT, 518 + * and GVT won't inject a context switch interrupt to the guest. 519 + * To the guest, this error therefore looks like a vGPU hang, 520 + * so we should emulate a vGPU hang accordingly. If 521 + * there are pending workloads which were already submitted 522 + * by the guest, we should clean them up as the HW GPU would. 523 + * 524 + * If we are in the middle of an engine reset, the pending 525 + * workloads won't be submitted to the HW GPU and will be 526 + * cleaned up later during the reset process, so doing 527 + * the workload clean up here doesn't have any impact. 528 + **/ 529 + clean_workloads(vgpu, ENGINE_MASK(ring_id)); 530 goto out; 531 + } 532 533 + if (!list_empty(workload_q_head(vgpu, ring_id))) { 534 struct execlist_ctx_descriptor_format *this_desc, *next_desc; 535 536 next_workload = container_of(next,
+10 -1
drivers/gpu/drm/i915/gvt/firmware.c
··· 72 struct intel_gvt_device_info *info = &gvt->device_info; 73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev; 74 struct intel_gvt_mmio_info *e; 75 struct gvt_firmware_header *h; 76 void *firmware; 77 void *p; 78 unsigned long size, crc32_start; 79 - int i; 80 int ret; 81 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size; ··· 106 107 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) 108 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); 109 110 memcpy(gvt->firmware.mmio, p, info->mmio_size); 111
··· 72 struct intel_gvt_device_info *info = &gvt->device_info; 73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev; 74 struct intel_gvt_mmio_info *e; 75 + struct gvt_mmio_block *block = gvt->mmio.mmio_block; 76 + int num = gvt->mmio.num_mmio_block; 77 struct gvt_firmware_header *h; 78 void *firmware; 79 void *p; 80 unsigned long size, crc32_start; 81 + int i, j; 82 int ret; 83 84 size = sizeof(*h) + info->mmio_size + info->cfg_space_size; ··· 104 105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) 106 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); 107 + 108 + for (i = 0; i < num; i++, block++) { 109 + for (j = 0; j < block->size; j += 4) 110 + *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) = 111 + I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET( 112 + block->offset) + j)); 113 + } 114 115 memcpy(gvt->firmware.mmio, p, info->mmio_size); 116
+13 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 149 bool active; 150 bool pv_notified; 151 bool failsafe; 152 - bool resetting; 153 void *sched_data; 154 struct vgpu_sched_ctl sched_ctl; 155 ··· 195 unsigned long vgpu_allocated_fence_num; 196 }; 197 198 #define INTEL_GVT_MMIO_HASH_BITS 11 199 200 struct intel_gvt_mmio { ··· 222 #define F_CMD_ACCESSED (1 << 5) 223 /* This reg could be accessed by unaligned address */ 224 #define F_UNALIGN (1 << 6) 225 226 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); 227 unsigned int num_tracked_mmio;
··· 149 bool active; 150 bool pv_notified; 151 bool failsafe; 152 + unsigned int resetting_eng; 153 void *sched_data; 154 struct vgpu_sched_ctl sched_ctl; 155 ··· 195 unsigned long vgpu_allocated_fence_num; 196 }; 197 198 + /* Special MMIO blocks. */ 199 + struct gvt_mmio_block { 200 + unsigned int device; 201 + i915_reg_t offset; 202 + unsigned int size; 203 + gvt_mmio_func read; 204 + gvt_mmio_func write; 205 + }; 206 + 207 #define INTEL_GVT_MMIO_HASH_BITS 11 208 209 struct intel_gvt_mmio { ··· 213 #define F_CMD_ACCESSED (1 << 5) 214 /* This reg could be accessed by unaligned address */ 215 #define F_UNALIGN (1 << 6) 216 + 217 + struct gvt_mmio_block *mmio_block; 218 + unsigned int num_mmio_block; 219 220 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); 221 unsigned int num_tracked_mmio;
+18 -20
drivers/gpu/drm/i915/gvt/handlers.c
··· 2857 return 0; 2858 } 2859 2860 - /* Special MMIO blocks. */ 2861 - static struct gvt_mmio_block { 2862 - unsigned int device; 2863 - i915_reg_t offset; 2864 - unsigned int size; 2865 - gvt_mmio_func read; 2866 - gvt_mmio_func write; 2867 - } gvt_mmio_blocks[] = { 2868 - {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, 2869 - {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, 2870 - {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, 2871 - pvinfo_mmio_read, pvinfo_mmio_write}, 2872 - {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, 2873 - {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, 2874 - {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, 2875 - }; 2876 - 2877 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, 2878 unsigned int offset) 2879 { 2880 unsigned long device = intel_gvt_get_device_type(gvt); 2881 - struct gvt_mmio_block *block = gvt_mmio_blocks; 2882 int i; 2883 2884 - for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { 2885 if (!(device & block->device)) 2886 continue; 2887 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && ··· 2895 vfree(gvt->mmio.mmio_attribute); 2896 gvt->mmio.mmio_attribute = NULL; 2897 } 2898 2899 /** 2900 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device ··· 2945 if (ret) 2946 goto err; 2947 } 2948 2949 gvt_dbg_mmio("traced %u virtual mmio registers\n", 2950 gvt->mmio.num_tracked_mmio); ··· 3028 gvt_mmio_func func; 3029 int ret; 3030 3031 - if (WARN_ON(bytes > 4)) 3032 return -EINVAL; 3033 3034 /*
··· 2857 return 0; 2858 } 2859 2860 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, 2861 unsigned int offset) 2862 { 2863 unsigned long device = intel_gvt_get_device_type(gvt); 2864 + struct gvt_mmio_block *block = gvt->mmio.mmio_block; 2865 + int num = gvt->mmio.num_mmio_block; 2866 int i; 2867 2868 + for (i = 0; i < num; i++, block++) { 2869 if (!(device & block->device)) 2870 continue; 2871 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && ··· 2911 vfree(gvt->mmio.mmio_attribute); 2912 gvt->mmio.mmio_attribute = NULL; 2913 } 2914 + 2915 + /* Special MMIO blocks. */ 2916 + static struct gvt_mmio_block mmio_blocks[] = { 2917 + {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, 2918 + {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, 2919 + {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, 2920 + pvinfo_mmio_read, pvinfo_mmio_write}, 2921 + {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, 2922 + {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, 2923 + {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, 2924 + }; 2925 2926 /** 2927 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device ··· 2950 if (ret) 2951 goto err; 2952 } 2953 + 2954 + gvt->mmio.mmio_block = mmio_blocks; 2955 + gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); 2956 2957 gvt_dbg_mmio("traced %u virtual mmio registers\n", 2958 gvt->mmio.num_tracked_mmio); ··· 3030 gvt_mmio_func func; 3031 int ret; 3032 3033 + if (WARN_ON(bytes > 8)) 3034 return -EINVAL; 3035 3036 /*
+2 -1
drivers/gpu/drm/i915/gvt/scheduler.c
··· 432 433 i915_gem_request_put(fetch_and_zero(&workload->req)); 434 435 - if (!workload->status && !vgpu->resetting) { 436 update_guest_context(workload); 437 438 for_each_set_bit(event, workload->pending_events,
··· 432 433 i915_gem_request_put(fetch_and_zero(&workload->req)); 434 435 + if (!workload->status && !(vgpu->resetting_eng & 436 + ENGINE_MASK(ring_id))) { 437 update_guest_context(workload); 438 439 for_each_set_bit(event, workload->pending_events,
+5 -3
drivers/gpu/drm/i915/gvt/vgpu.c
··· 480 { 481 struct intel_gvt *gvt = vgpu->gvt; 482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 483 484 gvt_dbg_core("------------------------------------------\n"); 485 gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", 486 vgpu->id, dmlr, engine_mask); 487 - vgpu->resetting = true; 488 489 intel_vgpu_stop_schedule(vgpu); 490 /* ··· 499 mutex_lock(&gvt->lock); 500 } 501 502 - intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); 503 504 /* full GPU reset or device model level reset */ 505 if (engine_mask == ALL_ENGINES || dmlr) { ··· 522 } 523 } 524 525 - vgpu->resetting = false; 526 gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 527 gvt_dbg_core("------------------------------------------\n"); 528 }
··· 480 { 481 struct intel_gvt *gvt = vgpu->gvt; 482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 483 + unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; 484 485 gvt_dbg_core("------------------------------------------\n"); 486 gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", 487 vgpu->id, dmlr, engine_mask); 488 + 489 + vgpu->resetting_eng = resetting_eng; 490 491 intel_vgpu_stop_schedule(vgpu); 492 /* ··· 497 mutex_lock(&gvt->lock); 498 } 499 500 + intel_vgpu_reset_execlist(vgpu, resetting_eng); 501 502 /* full GPU reset or device model level reset */ 503 if (engine_mask == ALL_ENGINES || dmlr) { ··· 520 } 521 } 522 523 + vgpu->resetting_eng = 0; 524 gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 525 gvt_dbg_core("------------------------------------------\n"); 526 }
+8 -3
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 43 return true; 44 45 case MUTEX_TRYLOCK_FAILED: 46 do { 47 cpu_relax(); 48 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 49 - case MUTEX_TRYLOCK_SUCCESS: 50 *unlock = true; 51 - return true; 52 } 53 } while (!need_resched()); 54 55 - return false; 56 } 57 58 BUG();
··· 43 return true; 44 45 case MUTEX_TRYLOCK_FAILED: 46 + *unlock = false; 47 + preempt_disable(); 48 do { 49 cpu_relax(); 50 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 51 *unlock = true; 52 + break; 53 } 54 } while (!need_resched()); 55 + preempt_enable(); 56 + return *unlock; 57 58 + case MUTEX_TRYLOCK_SUCCESS: 59 + *unlock = true; 60 + return true; 61 } 62 63 BUG();
+2 -2
drivers/gpu/drm/i915/i915_perf.c
··· 1601 u32 *cs; 1602 int i; 1603 1604 - cs = intel_ring_begin(req, n_flex_regs * 2 + 4); 1605 if (IS_ERR(cs)) 1606 return PTR_ERR(cs); 1607 1608 - *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); 1609 1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
··· 1601 u32 *cs; 1602 int i; 1603 1604 + cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); 1605 if (IS_ERR(cs)) 1606 return PTR_ERR(cs); 1607 1608 + *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); 1609 1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+1
drivers/gpu/drm/i915/intel_color.c
··· 398 } 399 400 /* Program the max register to clamp values > 1.0. */ 401 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), 402 drm_color_lut_extract(lut[i].red, 16)); 403 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
··· 398 } 399 400 /* Program the max register to clamp values > 1.0. */ 401 + i = lut_size - 1; 402 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), 403 drm_color_lut_extract(lut[i].red, 16)); 404 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
+1 -1
drivers/gpu/drm/i915/intel_panel.c
··· 469 470 if (i915.invert_brightness > 0 || 471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 472 - return panel->backlight.max - val; 473 } 474 475 return val;
··· 469 470 if (i915.invert_brightness > 0 || 471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 472 + return panel->backlight.max - val + panel->backlight.min; 473 } 474 475 return val;
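The "+ panel->backlight.min" term keeps the inverted brightness inside the original [min, max] range; without it, inverting a value equal to max would land below min. A small worked example:

#include <stdio.h>

/* Map val in [min, max] to its mirror image in the same range. */
static unsigned int invert_brightness(unsigned int val, unsigned int min,
				      unsigned int max)
{
	return max - val + min;
}

int main(void)
{
	unsigned int min = 10, max = 100;

	printf("%u\n", invert_brightness(min, min, max)); /* 100: dimmest -> brightest */
	printf("%u\n", invert_brightness(max, min, max)); /* 10:  brightest -> dimmest */
	printf("%u\n", invert_brightness(55, min, max));  /* 55:  midpoint maps to itself */
	return 0;
}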
+1 -1
drivers/gpu/drm/msm/Kconfig
··· 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 6 depends on OF && COMMON_CLK 7 depends on MMU 8 - select QCOM_MDT_LOADER 9 select REGULATOR 10 select DRM_KMS_HELPER 11 select DRM_PANEL
··· 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 6 depends on OF && COMMON_CLK 7 depends on MMU 8 + select QCOM_MDT_LOADER if ARCH_QCOM 9 select REGULATOR 10 select DRM_KMS_HELPER 11 select DRM_PANEL
+66 -115
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 15 #include <linux/cpumask.h> 16 #include <linux/qcom_scm.h> 17 #include <linux/dma-mapping.h> 18 - #include <linux/of_reserved_mem.h> 19 #include <linux/soc/qcom/mdt_loader.h> 20 #include "msm_gem.h" 21 #include "msm_mmu.h" ··· 26 27 #define GPU_PAS_ID 13 28 29 - #if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) 30 - 31 static int zap_shader_load_mdt(struct device *dev, const char *fwname) 32 { 33 const struct firmware *fw; 34 phys_addr_t mem_phys; 35 ssize_t mem_size; 36 void *mem_region = NULL; 37 int ret; 38 39 /* Request the MDT file for the firmware */ 40 ret = request_firmware(&fw, fwname, dev); ··· 69 } 70 71 /* Allocate memory for the firmware image */ 72 - mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); 73 if (!mem_region) { 74 ret = -ENOMEM; 75 goto out; ··· 87 DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); 88 89 out: 90 release_firmware(fw); 91 92 return ret; 93 } 94 - #else 95 - static int zap_shader_load_mdt(struct device *dev, const char *fwname) 96 - { 97 - return -ENODEV; 98 - } 99 - #endif 100 101 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 102 struct msm_file_private *ctx) ··· 132 gpu->funcs->flush(gpu); 133 } 134 135 - struct a5xx_hwcg { 136 u32 offset; 137 u32 value; 138 - }; 139 - 140 - static const struct a5xx_hwcg a530_hwcg[] = { 141 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, 142 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, 143 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, ··· 230 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} 231 }; 232 233 - static const struct { 234 - int (*test)(struct adreno_gpu *gpu); 235 - const struct a5xx_hwcg *regs; 236 - unsigned int count; 237 - } a5xx_hwcg_regs[] = { 238 - { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, 239 - }; 240 - 241 - static void _a5xx_enable_hwcg(struct msm_gpu *gpu, 242 - const struct a5xx_hwcg *regs, unsigned int count) 243 { 244 unsigned int i; 245 246 - for (i = 0; i < count; i++) 247 - gpu_write(gpu, regs[i].offset, regs[i].value); 248 249 - gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); 250 - gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); 251 - } 252 - 253 - static void a5xx_enable_hwcg(struct msm_gpu *gpu) 254 - { 255 - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 256 - unsigned int i; 257 - 258 - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { 259 - if (a5xx_hwcg_regs[i].test(adreno_gpu)) { 260 - _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, 261 - a5xx_hwcg_regs[i].count); 262 - return; 263 - } 264 - } 265 } 266 267 static int a5xx_me_init(struct msm_gpu *gpu) ··· 368 return ret; 369 } 370 371 - /* Set up a child device to "own" the zap shader */ 372 - static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev) 373 - { 374 - struct device_node *node; 375 - int ret; 376 - 377 - if (dev->parent) 378 - return 0; 379 - 380 - /* Find the sub-node for the zap shader */ 381 - node = of_get_child_by_name(parent->of_node, "zap-shader"); 382 - if (!node) { 383 - DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n"); 384 - return -ENODEV; 385 - } 386 - 387 - dev->parent = parent; 388 - dev->of_node = node; 389 - dev_set_name(dev, "adreno_zap_shader"); 390 - 391 - ret = device_register(dev); 392 - if (ret) { 393 - DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n"); 394 - goto out; 395 - } 396 - 397 - ret = of_reserved_mem_device_init(dev); 398 - if (ret) { 399 - DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n"); 400 - device_unregister(dev); 401 - } 402 - 403 - out: 404 - if (ret) 405 - 
dev->parent = NULL; 406 - 407 - return ret; 408 - } 409 - 410 static int a5xx_zap_shader_init(struct msm_gpu *gpu) 411 { 412 static bool loaded; ··· 396 return -ENODEV; 397 } 398 399 - ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); 400 - 401 - if (!ret) 402 - ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev, 403 - adreno_gpu->info->zapfw); 404 405 loaded = !ret; 406 ··· 493 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); 494 495 /* Enable HWCG */ 496 - a5xx_enable_hwcg(gpu); 497 498 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); 499 ··· 638 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); 639 640 DBG("%s", gpu->name); 641 - 642 - if (a5xx_gpu->zap_dev.parent) 643 - device_unregister(&a5xx_gpu->zap_dev); 644 645 if (a5xx_gpu->pm4_bo) { 646 if (a5xx_gpu->pm4_iova) ··· 865 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 866 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 867 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, 868 - 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, 869 - 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, 870 - 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 871 - 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, 872 - 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 873 - 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 874 - 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 875 - 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, 876 - 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, 877 - 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, 878 - 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 879 - 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, 880 - 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 881 - 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, 882 - 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 883 - 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, 884 - 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, 885 - 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, 886 - 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 887 - 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, 888 - 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, 889 - 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, 890 - 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 891 - 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, 892 - ~0 893 }; 894 895 static void a5xx_dump(struct msm_gpu *gpu) ··· 964 { 965 seq_printf(m, "status: %08x\n", 966 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); 967 adreno_show(gpu, m); 968 } 969 #endif 970
··· 15 #include <linux/cpumask.h> 16 #include <linux/qcom_scm.h> 17 #include <linux/dma-mapping.h> 18 + #include <linux/of_address.h> 19 #include <linux/soc/qcom/mdt_loader.h> 20 #include "msm_gem.h" 21 #include "msm_mmu.h" ··· 26 27 #define GPU_PAS_ID 13 28 29 static int zap_shader_load_mdt(struct device *dev, const char *fwname) 30 { 31 const struct firmware *fw; 32 + struct device_node *np; 33 + struct resource r; 34 phys_addr_t mem_phys; 35 ssize_t mem_size; 36 void *mem_region = NULL; 37 int ret; 38 + 39 + if (!IS_ENABLED(CONFIG_ARCH_QCOM)) 40 + return -EINVAL; 41 + 42 + np = of_get_child_by_name(dev->of_node, "zap-shader"); 43 + if (!np) 44 + return -ENODEV; 45 + 46 + np = of_parse_phandle(np, "memory-region", 0); 47 + if (!np) 48 + return -EINVAL; 49 + 50 + ret = of_address_to_resource(np, 0, &r); 51 + if (ret) 52 + return ret; 53 + 54 + mem_phys = r.start; 55 + mem_size = resource_size(&r); 56 57 /* Request the MDT file for the firmware */ 58 ret = request_firmware(&fw, fwname, dev); ··· 51 } 52 53 /* Allocate memory for the firmware image */ 54 + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); 55 if (!mem_region) { 56 ret = -ENOMEM; 57 goto out; ··· 69 DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); 70 71 out: 72 + if (mem_region) 73 + memunmap(mem_region); 74 + 75 release_firmware(fw); 76 77 return ret; 78 } 79 80 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 81 struct msm_file_private *ctx) ··· 117 gpu->funcs->flush(gpu); 118 } 119 120 + static const struct { 121 u32 offset; 122 u32 value; 123 + } a5xx_hwcg[] = { 124 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, 125 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, 126 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, ··· 217 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} 218 }; 219 220 + void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) 221 { 222 unsigned int i; 223 224 + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) 225 + gpu_write(gpu, a5xx_hwcg[i].offset, 226 + state ? a5xx_hwcg[i].value : 0); 227 228 + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); 229 + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 
0x182 : 0x180); 230 } 231 232 static int a5xx_me_init(struct msm_gpu *gpu) ··· 377 return ret; 378 } 379 380 static int a5xx_zap_shader_init(struct msm_gpu *gpu) 381 { 382 static bool loaded; ··· 444 return -ENODEV; 445 } 446 447 + ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw); 448 449 loaded = !ret; 450 ··· 545 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); 546 547 /* Enable HWCG */ 548 + a5xx_set_hwcg(gpu, true); 549 550 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); 551 ··· 690 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); 691 692 DBG("%s", gpu->name); 693 694 if (a5xx_gpu->pm4_bo) { 695 if (a5xx_gpu->pm4_iova) ··· 920 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 921 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 922 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, 923 + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, 924 + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, 925 + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 926 + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, 927 + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, 928 + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 929 + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 930 + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, 931 + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, 932 + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 933 + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 934 + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, 935 + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, 936 + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, 937 + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, 938 + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 939 + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, 940 + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 941 + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 942 + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 943 + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 944 + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 945 + 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F, 946 + 0xB9A0, 0xB9BF, ~0 947 }; 948 949 static void a5xx_dump(struct msm_gpu *gpu) ··· 1020 { 1021 seq_printf(m, "status: %08x\n", 1022 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); 1023 + 1024 + /* 1025 + * Temporarily disable hardware clock gating before going into 1026 + * adreno_show to avoid issues while reading the registers 1027 + */ 1028 + a5xx_set_hwcg(gpu, false); 1029 adreno_show(gpu, m); 1030 + a5xx_set_hwcg(gpu, true); 1031 } 1032 #endif 1033
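The a5xx_gpu.c change above drops the synthetic "zap shader" child device and instead resolves the zap-shader node's memory-region phandle directly, mapping the reserved carve-out write-combined with memremap() for the MDT loader. A minimal sketch of that lookup-and-map flow, assuming the same node and property names (the function name and error handling here are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Resolve zap-shader/memory-region and map the carve-out write-combined. */
static void *map_zap_region(struct device *dev, size_t *size)
{
        struct device_node *child, *mem;
        struct resource r;
        void *virt;
        int ret;

        child = of_get_child_by_name(dev->of_node, "zap-shader");
        if (!child)
                return ERR_PTR(-ENODEV);

        mem = of_parse_phandle(child, "memory-region", 0);
        of_node_put(child);
        if (!mem)
                return ERR_PTR(-EINVAL);

        ret = of_address_to_resource(mem, 0, &r);
        of_node_put(mem);
        if (ret)
                return ERR_PTR(ret);

        /* Static reserved memory, so memremap() rather than a DMA alloc;
         * release with memunmap() once the firmware has been copied in. */
        virt = memremap(r.start, resource_size(&r), MEMREMAP_WC);
        if (!virt)
                return ERR_PTR(-ENOMEM);

        *size = resource_size(&r);
        return virt;
}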
+1 -2
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
··· 36 uint32_t gpmu_dwords; 37 38 uint32_t lm_leakage; 39 - 40 - struct device zap_dev; 41 }; 42 43 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) ··· 57 } 58 59 bool a5xx_idle(struct msm_gpu *gpu); 60 61 #endif /* __A5XX_GPU_H__ */
··· 36 uint32_t gpmu_dwords; 37 38 uint32_t lm_leakage; 39 }; 40 41 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) ··· 59 } 60 61 bool a5xx_idle(struct msm_gpu *gpu); 62 + void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); 63 64 #endif /* __A5XX_GPU_H__ */
+9 -2
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 48 *value = adreno_gpu->base.fast_rate; 49 return 0; 50 case MSM_PARAM_TIMESTAMP: 51 - if (adreno_gpu->funcs->get_timestamp) 52 - return adreno_gpu->funcs->get_timestamp(gpu, value); 53 return -EINVAL; 54 default: 55 DBG("%s: invalid param: %u", gpu->name, param);
··· 48 *value = adreno_gpu->base.fast_rate; 49 return 0; 50 case MSM_PARAM_TIMESTAMP: 51 + if (adreno_gpu->funcs->get_timestamp) { 52 + int ret; 53 + 54 + pm_runtime_get_sync(&gpu->pdev->dev); 55 + ret = adreno_gpu->funcs->get_timestamp(gpu, value); 56 + pm_runtime_put_autosuspend(&gpu->pdev->dev); 57 + 58 + return ret; 59 + } 60 return -EINVAL; 61 default: 62 DBG("%s: invalid param: %u", gpu->name, param);
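The adreno_gpu.c hunk brackets the get_timestamp() callback with runtime-PM calls so the GPU is powered while the counter register is read. A small sketch of that pattern, with read_hw_counter() standing in for the real callback (hypothetical helper, not the msm API):

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

/* Hypothetical register read standing in for funcs->get_timestamp(). */
static int read_hw_counter(struct device *dev, u64 *value)
{
        *value = 0;     /* a real driver would read a hardware counter here */
        return 0;
}

static int read_counter_powered(struct device *dev, u64 *value)
{
        int ret;

        pm_runtime_get_sync(dev);        /* resume the device if suspended */
        ret = read_hw_counter(dev, value);
        pm_runtime_put_autosuspend(dev); /* drop the ref, arm autosuspend */

        return ret;
}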
+7 -7
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 2137 struct msm_dsi_phy_clk_request *clk_req) 2138 { 2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2140 2141 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2142 clk_req->escclk_rate = msm_host->esc_clk_rate; ··· 2287 struct drm_display_mode *mode) 2288 { 2289 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2290 - int ret; 2291 2292 if (msm_host->mode) { 2293 drm_mode_destroy(msm_host->dev, msm_host->mode); ··· 2297 if (!msm_host->mode) { 2298 pr_err("%s: cannot duplicate mode\n", __func__); 2299 return -ENOMEM; 2300 - } 2301 - 2302 - ret = dsi_calc_clk_rate(msm_host); 2303 - if (ret) { 2304 - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2305 - return ret; 2306 } 2307 2308 return 0;
··· 2137 struct msm_dsi_phy_clk_request *clk_req) 2138 { 2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2140 + int ret; 2141 + 2142 + ret = dsi_calc_clk_rate(msm_host); 2143 + if (ret) { 2144 + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2145 + return; 2146 + } 2147 2148 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2149 clk_req->escclk_rate = msm_host->esc_clk_rate; ··· 2280 struct drm_display_mode *mode) 2281 { 2282 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2283 2284 if (msm_host->mode) { 2285 drm_mode_destroy(msm_host->dev, msm_host->mode); ··· 2291 if (!msm_host->mode) { 2292 pr_err("%s: cannot duplicate mode\n", __func__); 2293 return -ENOMEM; 2294 } 2295 2296 return 0;
+10 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
··· 221 struct mdp5_ctl *ctl = mdp5_cstate->ctl; 222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; 223 unsigned long flags; 224 - enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 225 - enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 226 int i, plane_cnt = 0; 227 bool bg_alpha_enabled = false; 228 u32 mixer_op_mode = 0; ··· 753 if (!handle) { 754 DBG("Cursor off"); 755 cursor_enable = false; 756 goto set_cursor; 757 } 758 ··· 776 mdp5_crtc->cursor.height = height; 777 778 get_roi(crtc, &roi_w, &roi_h); 779 780 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 781 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), ··· 807 crtc_flush(crtc, flush_mask); 808 809 end: 810 if (old_bo) { 811 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); 812 /* enable vblank to complete cursor work: */ ··· 840 841 get_roi(crtc, &roi_w, &roi_h); 842 843 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 844 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 845 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | ··· 852 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 853 854 crtc_flush(crtc, flush_mask); 855 856 return 0; 857 }
··· 221 struct mdp5_ctl *ctl = mdp5_cstate->ctl; 222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; 223 unsigned long flags; 224 + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; 225 + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; 226 int i, plane_cnt = 0; 227 bool bg_alpha_enabled = false; 228 u32 mixer_op_mode = 0; ··· 753 if (!handle) { 754 DBG("Cursor off"); 755 cursor_enable = false; 756 + mdp5_enable(mdp5_kms); 757 goto set_cursor; 758 } 759 ··· 775 mdp5_crtc->cursor.height = height; 776 777 get_roi(crtc, &roi_w, &roi_h); 778 + 779 + mdp5_enable(mdp5_kms); 780 781 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 782 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), ··· 804 crtc_flush(crtc, flush_mask); 805 806 end: 807 + mdp5_disable(mdp5_kms); 808 if (old_bo) { 809 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); 810 /* enable vblank to complete cursor work: */ ··· 836 837 get_roi(crtc, &roi_w, &roi_h); 838 839 + mdp5_enable(mdp5_kms); 840 + 841 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 842 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 843 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | ··· 846 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 847 848 crtc_flush(crtc, flush_mask); 849 + 850 + mdp5_disable(mdp5_kms); 851 852 return 0; 853 }
+1 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
··· 299 struct mdp5_interface *intf = mdp5_encoder->intf; 300 301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 302 - mdp5_cmd_encoder_disable(encoder); 303 else 304 mdp5_vid_encoder_enable(encoder); 305 }
··· 299 struct mdp5_interface *intf = mdp5_encoder->intf; 300 301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 302 + mdp5_cmd_encoder_enable(encoder); 303 else 304 mdp5_vid_encoder_enable(encoder); 305 }
+6 -6
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
··· 502 const char *name, bool mandatory) 503 { 504 struct device *dev = &pdev->dev; 505 - struct clk *clk = devm_clk_get(dev, name); 506 if (IS_ERR(clk) && mandatory) { 507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 508 return PTR_ERR(clk); ··· 887 } 888 889 /* mandatory clocks: */ 890 - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); 891 if (ret) 892 goto fail; 893 - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); 894 if (ret) 895 goto fail; 896 - ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); 897 if (ret) 898 goto fail; 899 - ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); 900 if (ret) 901 goto fail; 902 903 /* optional clocks: */ 904 - get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); 905 906 /* we need to set a default rate before enabling. Set a safe 907 * rate first, then figure out hw revision, and then set a
··· 502 const char *name, bool mandatory) 503 { 504 struct device *dev = &pdev->dev; 505 + struct clk *clk = msm_clk_get(pdev, name); 506 if (IS_ERR(clk) && mandatory) { 507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 508 return PTR_ERR(clk); ··· 887 } 888 889 /* mandatory clocks: */ 890 + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); 891 if (ret) 892 goto fail; 893 + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); 894 if (ret) 895 goto fail; 896 + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); 897 if (ret) 898 goto fail; 899 + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); 900 if (ret) 901 goto fail; 902 903 /* optional clocks: */ 904 + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); 905 906 /* we need to set a default rate before enabling. Set a safe 907 * rate first, then figure out hw revision, and then set a
+2 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 890 struct mdp5_hw_pipe *right_hwpipe; 891 const struct mdp_format *format; 892 uint32_t nplanes, config = 0; 893 - struct phase_step step = { 0 }; 894 - struct pixel_ext pe = { 0 }; 895 uint32_t hdecm = 0, vdecm = 0; 896 uint32_t pix_format; 897 unsigned int rotation;
··· 890 struct mdp5_hw_pipe *right_hwpipe; 891 const struct mdp_format *format; 892 uint32_t nplanes, config = 0; 893 + struct phase_step step = { { 0 } }; 894 + struct pixel_ext pe = { { 0 } }; 895 uint32_t hdecm = 0, vdecm = 0; 896 uint32_t pix_format; 897 unsigned int rotation;
+9 -3
drivers/gpu/drm/msm/msm_gem.c
··· 383 struct page **pages; 384 385 vma = add_vma(obj, aspace); 386 - if (IS_ERR(vma)) 387 - return PTR_ERR(vma); 388 389 pages = get_pages(obj); 390 if (IS_ERR(pages)) { ··· 407 408 fail: 409 del_vma(vma); 410 - 411 mutex_unlock(&msm_obj->lock); 412 return ret; 413 } ··· 930 if (use_vram) { 931 struct msm_gem_vma *vma; 932 struct page **pages; 933 934 vma = add_vma(obj, NULL); 935 if (IS_ERR(vma)) { 936 ret = PTR_ERR(vma); 937 goto fail;
··· 383 struct page **pages; 384 385 vma = add_vma(obj, aspace); 386 + if (IS_ERR(vma)) { 387 + ret = PTR_ERR(vma); 388 + goto unlock; 389 + } 390 391 pages = get_pages(obj); 392 if (IS_ERR(pages)) { ··· 405 406 fail: 407 del_vma(vma); 408 + unlock: 409 mutex_unlock(&msm_obj->lock); 410 return ret; 411 } ··· 928 if (use_vram) { 929 struct msm_gem_vma *vma; 930 struct page **pages; 931 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 932 + 933 + mutex_lock(&msm_obj->lock); 934 935 vma = add_vma(obj, NULL); 936 + mutex_unlock(&msm_obj->lock); 937 if (IS_ERR(vma)) { 938 ret = PTR_ERR(vma); 939 goto fail;
+3 -3
drivers/gpu/drm/msm/msm_gem_submit.c
··· 34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) 35 { 36 struct msm_gem_submit *submit; 37 - uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + 38 - (nr_cmds * sizeof(submit->cmd[0])); 39 40 if (sz > SIZE_MAX) 41 return NULL; ··· 451 if (ret) 452 goto out; 453 454 - if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { 455 ret = submit_fence_sync(submit); 456 if (ret) 457 goto out;
··· 34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) 35 { 36 struct msm_gem_submit *submit; 37 + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + 38 + ((u64)nr_cmds * sizeof(submit->cmd[0])); 39 40 if (sz > SIZE_MAX) 41 return NULL; ··· 451 if (ret) 452 goto out; 453 454 + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { 455 ret = submit_fence_sync(submit); 456 if (ret) 457 goto out;
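Two things change in msm_gem_submit.c above: the MSM_SUBMIT_NO_IMPLICIT test now looks at args->flags instead of args->fence, and the allocation size is computed with the user-supplied counts widened to u64 so the multiply-and-add cannot wrap on 32-bit before the SIZE_MAX check. A sketch of the widening idiom with placeholder structures:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Placeholder element types; only their sizes matter for the idiom. */
struct bo_entry { u64 iova; u32 handle; u32 flags; };
struct cmd_entry { u32 type; u32 size; };

static void *alloc_submit(u32 nr_bos, u32 nr_cmds)
{
        /* do the arithmetic in 64 bits so a huge nr_bos cannot wrap */
        u64 sz = sizeof(struct bo_entry) * (u64)nr_bos +
                 sizeof(struct cmd_entry) * (u64)nr_cmds;

        if (sz > SIZE_MAX)      /* reject anything that cannot fit size_t */
                return NULL;

        return kmalloc(sz, GFP_KERNEL);
}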
+1 -1
drivers/gpu/drm/msm/msm_gem_vma.c
··· 42 msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 43 struct msm_gem_vma *vma, struct sg_table *sgt) 44 { 45 - if (!vma->iova) 46 return; 47 48 if (aspace->mmu) {
··· 42 msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 43 struct msm_gem_vma *vma, struct sg_table *sgt) 44 { 45 + if (!aspace || !vma->iova) 46 return; 47 48 if (aspace->mmu) {
+2
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
··· 267 /* Create output path objects for each VBIOS display path. */ 268 i = -1; 269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { 270 if (dcbE.type == DCB_OUTPUT_UNUSED) 271 continue; 272 if (dcbE.type == DCB_OUTPUT_EOL)
··· 267 /* Create output path objects for each VBIOS display path. */ 268 i = -1; 269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { 270 + if (ver < 0x40) /* No support for chipsets prior to NV50. */ 271 + break; 272 if (dcbE.type == DCB_OUTPUT_UNUSED) 273 continue; 274 if (dcbE.type == DCB_OUTPUT_EOL)
+20 -21
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 500 static int vop_enable(struct drm_crtc *crtc) 501 { 502 struct vop *vop = to_vop(crtc); 503 - int ret; 504 505 ret = pm_runtime_get_sync(vop->dev); 506 if (ret < 0) { ··· 533 } 534 535 memcpy(vop->regs, vop->regsbak, vop->len); 536 vop_cfg_done(vop); 537 538 /* ··· 580 static void vop_crtc_disable(struct drm_crtc *crtc) 581 { 582 struct vop *vop = to_vop(crtc); 583 - int i; 584 585 WARN_ON(vop->event); 586 587 rockchip_drm_psr_deactivate(&vop->crtc); 588 - 589 - /* 590 - * We need to make sure that all windows are disabled before we 591 - * disable that crtc. Otherwise we might try to scan from a destroyed 592 - * buffer later. 593 - */ 594 - for (i = 0; i < vop->data->win_size; i++) { 595 - struct vop_win *vop_win = &vop->win[i]; 596 - const struct vop_win_data *win = vop_win->data; 597 - 598 - spin_lock(&vop->reg_lock); 599 - VOP_WIN_SET(vop, win, enable, 0); 600 - spin_unlock(&vop->reg_lock); 601 - } 602 - 603 - vop_cfg_done(vop); 604 605 drm_crtc_vblank_off(crtc); 606 ··· 679 * Src.x1 can be odd when do clip, but yuv plane start point 680 * need align with 2 pixel. 681 */ 682 - if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) 683 return -EINVAL; 684 685 return 0; 686 } ··· 763 spin_lock(&vop->reg_lock); 764 765 VOP_WIN_SET(vop, win, format, format); 766 - VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); 767 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 768 if (is_yuv_support(fb->format->format)) { 769 int hsub = drm_format_horz_chroma_subsampling(fb->format->format); ··· 777 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 778 779 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 780 - VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2); 781 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 782 } 783
··· 500 static int vop_enable(struct drm_crtc *crtc) 501 { 502 struct vop *vop = to_vop(crtc); 503 + int ret, i; 504 505 ret = pm_runtime_get_sync(vop->dev); 506 if (ret < 0) { ··· 533 } 534 535 memcpy(vop->regs, vop->regsbak, vop->len); 536 + /* 537 + * We need to make sure that all windows are disabled before we 538 + * enable the crtc. Otherwise we might try to scan from a destroyed 539 + * buffer later. 540 + */ 541 + for (i = 0; i < vop->data->win_size; i++) { 542 + struct vop_win *vop_win = &vop->win[i]; 543 + const struct vop_win_data *win = vop_win->data; 544 + 545 + spin_lock(&vop->reg_lock); 546 + VOP_WIN_SET(vop, win, enable, 0); 547 + spin_unlock(&vop->reg_lock); 548 + } 549 + 550 vop_cfg_done(vop); 551 552 /* ··· 566 static void vop_crtc_disable(struct drm_crtc *crtc) 567 { 568 struct vop *vop = to_vop(crtc); 569 570 WARN_ON(vop->event); 571 572 rockchip_drm_psr_deactivate(&vop->crtc); 573 574 drm_crtc_vblank_off(crtc); 575 ··· 682 * Src.x1 can be odd when do clip, but yuv plane start point 683 * need align with 2 pixel. 684 */ 685 + if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { 686 + DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); 687 return -EINVAL; 688 + } 689 690 return 0; 691 } ··· 764 spin_lock(&vop->reg_lock); 765 766 VOP_WIN_SET(vop, win, format, format); 767 + VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); 768 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 769 if (is_yuv_support(fb->format->format)) { 770 int hsub = drm_format_horz_chroma_subsampling(fb->format->format); ··· 778 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 779 780 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 781 + VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); 782 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 783 } 784
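Besides moving the window shutdown loop into vop_enable(), the rockchip_drm_vop.c hunk programs the virtual stride with DIV_ROUND_UP(pitch, 4) instead of pitch >> 2, presumably so a pitch that is not a multiple of four bytes rounds up to a full word instead of truncating. Illustrative numbers only:

#include <linux/kernel.h>
#include <linux/types.h>

/* e.g. a 1438-byte pitch: 1438 >> 2 = 359 words (one short), while
 * DIV_ROUND_UP gives 360 and covers the trailing partial word. */
static u32 pitch_in_words(u32 pitch_bytes)
{
        return DIV_ROUND_UP(pitch_bytes, 4);
}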
+3
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
··· 282 283 act_height = (src_h + vskiplines - 1) / vskiplines; 284 285 return GET_SCL_FT_BILI_DN(act_height, dst_h); 286 } 287
··· 282 283 act_height = (src_h + vskiplines - 1) / vskiplines; 284 285 + if (act_height == dst_h) 286 + return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines; 287 + 288 return GET_SCL_FT_BILI_DN(act_height, dst_h); 289 } 290
-1
drivers/gpu/drm/stm/Kconfig
··· 7 select DRM_PANEL 8 select VIDEOMODE_HELPERS 9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA 10 - default y 11 12 help 13 Enable support for the on-chip display controller on
··· 7 select DRM_PANEL 8 select VIDEOMODE_HELPERS 9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA 10 11 help 12 Enable support for the on-chip display controller on
+1 -1
drivers/i2c/busses/Kconfig
··· 983 984 config I2C_VERSATILE 985 tristate "ARM Versatile/Realview I2C bus support" 986 - depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST 987 select I2C_ALGOBIT 988 help 989 Say yes if you want to support the I2C serial bus on ARMs Versatile
··· 983 984 config I2C_VERSATILE 985 tristate "ARM Versatile/Realview I2C bus support" 986 + depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST 987 select I2C_ALGOBIT 988 help 989 Say yes if you want to support the I2C serial bus on ARMs Versatile
+5 -1
drivers/i2c/busses/i2c-designware-platdrv.c
··· 298 } 299 300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); 301 /* 302 * Find bus speed from the "clock-frequency" device property, ACPI 303 * or by using fast mode if neither is set. ··· 322 if (dev->clk_freq != 100000 && dev->clk_freq != 400000 323 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { 324 dev_err(&pdev->dev, 325 - "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); 326 ret = -EINVAL; 327 goto exit_reset; 328 }
··· 298 } 299 300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); 301 + /* Some broken DSTDs use 1MiHz instead of 1MHz */ 302 + if (acpi_speed == 1048576) 303 + acpi_speed = 1000000; 304 /* 305 * Find bus speed from the "clock-frequency" device property, ACPI 306 * or by using fast mode if neither is set. ··· 319 if (dev->clk_freq != 100000 && dev->clk_freq != 400000 320 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { 321 dev_err(&pdev->dev, 322 + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", 323 + dev->clk_freq); 324 ret = -EINVAL; 325 goto exit_reset; 326 }
+15 -4
drivers/i2c/i2c-core-acpi.c
··· 230 dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); 231 } 232 233 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, 234 void *data, void **return_value) 235 { ··· 299 } 300 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); 301 302 - static int i2c_acpi_match_adapter(struct device *dev, void *data) 303 { 304 struct i2c_adapter *adapter = i2c_verify_adapter(dev); 305 ··· 309 return ACPI_HANDLE(dev) == (acpi_handle)data; 310 } 311 312 - static int i2c_acpi_match_device(struct device *dev, void *data) 313 { 314 return ACPI_COMPANION(dev) == data; 315 } ··· 319 struct device *dev; 320 321 dev = bus_find_device(&i2c_bus_type, NULL, handle, 322 - i2c_acpi_match_adapter); 323 return dev ? i2c_verify_adapter(dev) : NULL; 324 } 325 ··· 327 { 328 struct device *dev; 329 330 - dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); 331 return dev ? i2c_verify_client(dev) : NULL; 332 } 333
··· 230 dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); 231 } 232 233 + const struct acpi_device_id * 234 + i2c_acpi_match_device(const struct acpi_device_id *matches, 235 + struct i2c_client *client) 236 + { 237 + if (!(client && matches)) 238 + return NULL; 239 + 240 + return acpi_match_device(matches, &client->dev); 241 + } 242 + 243 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, 244 void *data, void **return_value) 245 { ··· 289 } 290 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); 291 292 + static int i2c_acpi_find_match_adapter(struct device *dev, void *data) 293 { 294 struct i2c_adapter *adapter = i2c_verify_adapter(dev); 295 ··· 299 return ACPI_HANDLE(dev) == (acpi_handle)data; 300 } 301 302 + static int i2c_acpi_find_match_device(struct device *dev, void *data) 303 { 304 return ACPI_COMPANION(dev) == data; 305 } ··· 309 struct device *dev; 310 311 dev = bus_find_device(&i2c_bus_type, NULL, handle, 312 + i2c_acpi_find_match_adapter); 313 return dev ? i2c_verify_adapter(dev) : NULL; 314 } 315 ··· 317 { 318 struct device *dev; 319 320 + dev = bus_find_device(&i2c_bus_type, NULL, adev, 321 + i2c_acpi_find_match_device); 322 return dev ? i2c_verify_client(dev) : NULL; 323 } 324
+1
drivers/i2c/i2c-core-base.c
··· 357 * Tree match table entry is supplied for the probing device. 358 */ 359 if (!driver->id_table && 360 !i2c_of_match_device(dev->driver->of_match_table, client)) 361 return -ENODEV; 362
··· 357 * Tree match table entry is supplied for the probing device. 358 */ 359 if (!driver->id_table && 360 + !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && 361 !i2c_of_match_device(dev->driver->of_match_table, client)) 362 return -ENODEV; 363
+9
drivers/i2c/i2c-core.h
··· 31 int i2c_check_7bit_addr_validity_strict(unsigned short addr); 32 33 #ifdef CONFIG_ACPI 34 void i2c_acpi_register_devices(struct i2c_adapter *adap); 35 #else /* CONFIG_ACPI */ 36 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } 37 #endif /* CONFIG_ACPI */ 38 extern struct notifier_block i2c_acpi_notifier; 39
··· 31 int i2c_check_7bit_addr_validity_strict(unsigned short addr); 32 33 #ifdef CONFIG_ACPI 34 + const struct acpi_device_id * 35 + i2c_acpi_match_device(const struct acpi_device_id *matches, 36 + struct i2c_client *client); 37 void i2c_acpi_register_devices(struct i2c_adapter *adap); 38 #else /* CONFIG_ACPI */ 39 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } 40 + static inline const struct acpi_device_id * 41 + i2c_acpi_match_device(const struct acpi_device_id *matches, 42 + struct i2c_client *client) 43 + { 44 + return NULL; 45 + } 46 #endif /* CONFIG_ACPI */ 47 extern struct notifier_block i2c_acpi_notifier; 48
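Taken together, the three i2c core hunks above let a client driver bind through its ACPI match table alone, where previously a driver without an i2c_device_id table could only match via OF. A hypothetical driver relying on that path ("ABCD0001" is an invented HID, and the id argument will be NULL when matched this way):

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client,
                     const struct i2c_device_id *id)
{
        /* id is NULL here: the match came from the ACPI table below */
        dev_info(&client->dev, "bound via ACPI match\n");
        return 0;
}

static const struct acpi_device_id foo_acpi_ids[] = {
        { "ABCD0001", 0 },
        { }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_ids);

static struct i2c_driver foo_driver = {
        .driver = {
                .name = "foo",
                .acpi_match_table = ACPI_PTR(foo_acpi_ids),
        },
        .probe = foo_probe,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");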
+1 -1
drivers/i2c/muxes/Kconfig
··· 83 different sets of pins at run-time. 84 85 This driver can also be built as a module. If so, the module will be 86 - called pinctrl-i2cmux. 87 88 config I2C_MUX_REG 89 tristate "Register-based I2C multiplexer"
··· 83 different sets of pins at run-time. 84 85 This driver can also be built as a module. If so, the module will be 86 + called i2c-mux-pinctrl. 87 88 config I2C_MUX_REG 89 tristate "Register-based I2C multiplexer"
+1 -8
drivers/iio/accel/bmc150-accel-core.c
··· 193 struct regmap *regmap; 194 int irq; 195 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; 196 - atomic_t active_intr; 197 struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; 198 struct mutex mutex; 199 u8 fifo_mode, watermark; ··· 491 dev_err(dev, "Error updating reg_int_en\n"); 492 goto out_fix_power_state; 493 } 494 - 495 - if (state) 496 - atomic_inc(&data->active_intr); 497 - else 498 - atomic_dec(&data->active_intr); 499 500 return 0; 501 ··· 1704 struct bmc150_accel_data *data = iio_priv(indio_dev); 1705 1706 mutex_lock(&data->mutex); 1707 - if (atomic_read(&data->active_intr)) 1708 - bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); 1709 bmc150_accel_fifo_set_mode(data); 1710 mutex_unlock(&data->mutex); 1711
··· 193 struct regmap *regmap; 194 int irq; 195 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; 196 struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; 197 struct mutex mutex; 198 u8 fifo_mode, watermark; ··· 492 dev_err(dev, "Error updating reg_int_en\n"); 493 goto out_fix_power_state; 494 } 495 496 return 0; 497 ··· 1710 struct bmc150_accel_data *data = iio_priv(indio_dev); 1711 1712 mutex_lock(&data->mutex); 1713 + bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); 1714 bmc150_accel_fifo_set_mode(data); 1715 mutex_unlock(&data->mutex); 1716
+32
drivers/iio/accel/st_accel_core.c
··· 166 .mask_ihl = 0x02, 167 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 168 }, 169 .multi_read_bit = true, 170 .bootime = 2, 171 }, ··· 237 .addr_od = 0x22, 238 .mask_od = 0x40, 239 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 240 }, 241 .multi_read_bit = true, 242 .bootime = 2, ··· 324 .en_mask = 0x08, 325 }, 326 }, 327 .multi_read_bit = false, 328 .bootime = 2, 329 }, ··· 391 .mask_int1 = 0x04, 392 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 393 }, 394 .multi_read_bit = true, 395 .bootime = 2, /* guess */ 396 }, ··· 452 .addr_od = 0x22, 453 .mask_od = 0x40, 454 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 455 }, 456 .multi_read_bit = false, 457 .bootime = 2, /* guess */ ··· 519 .addr_ihl = 0x22, 520 .mask_ihl = 0x80, 521 }, 522 .multi_read_bit = true, 523 .bootime = 2, 524 }, ··· 570 .addr = 0x21, 571 .mask_int1 = 0x04, 572 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 573 }, 574 .multi_read_bit = false, 575 .bootime = 2, ··· 641 .addr_ihl = 0x25, 642 .mask_ihl = 0x02, 643 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 644 }, 645 .multi_read_bit = true, 646 .bootime = 2,
··· 166 .mask_ihl = 0x02, 167 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 168 }, 169 + .sim = { 170 + .addr = 0x23, 171 + .value = BIT(0), 172 + }, 173 .multi_read_bit = true, 174 .bootime = 2, 175 }, ··· 233 .addr_od = 0x22, 234 .mask_od = 0x40, 235 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 236 + }, 237 + .sim = { 238 + .addr = 0x23, 239 + .value = BIT(0), 240 }, 241 .multi_read_bit = true, 242 .bootime = 2, ··· 316 .en_mask = 0x08, 317 }, 318 }, 319 + .sim = { 320 + .addr = 0x24, 321 + .value = BIT(0), 322 + }, 323 .multi_read_bit = false, 324 .bootime = 2, 325 }, ··· 379 .mask_int1 = 0x04, 380 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 381 }, 382 + .sim = { 383 + .addr = 0x21, 384 + .value = BIT(1), 385 + }, 386 .multi_read_bit = true, 387 .bootime = 2, /* guess */ 388 }, ··· 436 .addr_od = 0x22, 437 .mask_od = 0x40, 438 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 439 + }, 440 + .sim = { 441 + .addr = 0x21, 442 + .value = BIT(7), 443 }, 444 .multi_read_bit = false, 445 .bootime = 2, /* guess */ ··· 499 .addr_ihl = 0x22, 500 .mask_ihl = 0x80, 501 }, 502 + .sim = { 503 + .addr = 0x23, 504 + .value = BIT(0), 505 + }, 506 .multi_read_bit = true, 507 .bootime = 2, 508 }, ··· 546 .addr = 0x21, 547 .mask_int1 = 0x04, 548 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 549 + }, 550 + .sim = { 551 + .addr = 0x21, 552 + .value = BIT(1), 553 }, 554 .multi_read_bit = false, 555 .bootime = 2, ··· 613 .addr_ihl = 0x25, 614 .mask_ihl = 0x02, 615 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 616 + }, 617 + .sim = { 618 + .addr = 0x23, 619 + .value = BIT(0), 620 }, 621 .multi_read_bit = true, 622 .bootime = 2,
+26
drivers/iio/adc/aspeed_adc.c
··· 22 23 #include <linux/iio/iio.h> 24 #include <linux/iio/driver.h> 25 26 #define ASPEED_RESOLUTION_BITS 10 27 #define ASPEED_CLOCKS_PER_SAMPLE 12 ··· 39 40 #define ASPEED_ENGINE_ENABLE BIT(0) 41 42 struct aspeed_adc_model_data { 43 const char *model_name; 44 unsigned int min_sampling_rate; // Hz 45 unsigned int max_sampling_rate; // Hz 46 unsigned int vref_voltage; // mV 47 }; 48 49 struct aspeed_adc_data { ··· 218 goto scaler_error; 219 } 220 221 /* Start all channels in normal mode. */ 222 ret = clk_prepare_enable(data->clk_scaler->clk); 223 if (ret) ··· 299 .vref_voltage = 1800, // mV 300 .min_sampling_rate = 1, 301 .max_sampling_rate = 1000000, 302 }; 303 304 static const struct of_device_id aspeed_adc_matches[] = {
··· 22 23 #include <linux/iio/iio.h> 24 #include <linux/iio/driver.h> 25 + #include <linux/iopoll.h> 26 27 #define ASPEED_RESOLUTION_BITS 10 28 #define ASPEED_CLOCKS_PER_SAMPLE 12 ··· 38 39 #define ASPEED_ENGINE_ENABLE BIT(0) 40 41 + #define ASPEED_ADC_CTRL_INIT_RDY BIT(8) 42 + 43 + #define ASPEED_ADC_INIT_POLLING_TIME 500 44 + #define ASPEED_ADC_INIT_TIMEOUT 500000 45 + 46 struct aspeed_adc_model_data { 47 const char *model_name; 48 unsigned int min_sampling_rate; // Hz 49 unsigned int max_sampling_rate; // Hz 50 unsigned int vref_voltage; // mV 51 + bool wait_init_sequence; 52 }; 53 54 struct aspeed_adc_data { ··· 211 goto scaler_error; 212 } 213 214 + model_data = of_device_get_match_data(&pdev->dev); 215 + 216 + if (model_data->wait_init_sequence) { 217 + /* Enable engine in normal mode. */ 218 + writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, 219 + data->base + ASPEED_REG_ENGINE_CONTROL); 220 + 221 + /* Wait for initial sequence complete. */ 222 + ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, 223 + adc_engine_control_reg_val, 224 + adc_engine_control_reg_val & 225 + ASPEED_ADC_CTRL_INIT_RDY, 226 + ASPEED_ADC_INIT_POLLING_TIME, 227 + ASPEED_ADC_INIT_TIMEOUT); 228 + if (ret) 229 + goto scaler_error; 230 + } 231 + 232 /* Start all channels in normal mode. */ 233 ret = clk_prepare_enable(data->clk_scaler->clk); 234 if (ret) ··· 274 .vref_voltage = 1800, // mV 275 .min_sampling_rate = 1, 276 .max_sampling_rate = 1000000, 277 + .wait_init_sequence = true, 278 }; 279 280 static const struct of_device_id aspeed_adc_matches[] = {
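The aspeed_adc change waits for the controller's initialization-ready bit by polling the engine control register with readl_poll_timeout() before starting the channels. A standalone sketch of that helper (the register name and offset are stand-ins; the bit and timeouts follow the diff):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define ENGINE_CONTROL          0x00            /* illustrative offset */
#define CTRL_INIT_RDY           BIT(8)

static int wait_adc_ready(void __iomem *base)
{
        u32 val;

        /* poll every 500us, give up after 500ms */
        return readl_poll_timeout(base + ENGINE_CONTROL, val,
                                  val & CTRL_INIT_RDY, 500, 500000);
}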
+41 -1
drivers/iio/adc/axp288_adc.c
··· 28 #include <linux/iio/driver.h> 29 30 #define AXP288_ADC_EN_MASK 0xF1 31 32 enum axp288_adc_id { 33 AXP288_ADC_TS, ··· 123 return IIO_VAL_INT; 124 } 125 126 static int axp288_adc_read_raw(struct iio_dev *indio_dev, 127 struct iio_chan_spec const *chan, 128 int *val, int *val2, long mask) ··· 153 mutex_lock(&indio_dev->mlock); 154 switch (mask) { 155 case IIO_CHAN_INFO_RAW: 156 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 157 break; 158 default: 159 ret = -EINVAL; ··· 170 mutex_unlock(&indio_dev->mlock); 171 172 return ret; 173 } 174 175 static const struct iio_info axp288_adc_iio_info = { ··· 209 * Set ADC to enabled state at all time, including system suspend. 210 * otherwise internal fuel gauge functionality may be affected. 211 */ 212 - ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 213 if (ret) { 214 dev_err(&pdev->dev, "unable to enable ADC device\n"); 215 return ret;
··· 28 #include <linux/iio/driver.h> 29 30 #define AXP288_ADC_EN_MASK 0xF1 31 + #define AXP288_ADC_TS_PIN_GPADC 0xF2 32 + #define AXP288_ADC_TS_PIN_ON 0xF3 33 34 enum axp288_adc_id { 35 AXP288_ADC_TS, ··· 121 return IIO_VAL_INT; 122 } 123 124 + static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, 125 + unsigned long address) 126 + { 127 + int ret; 128 + 129 + /* channels other than GPADC do not need to switch TS pin */ 130 + if (address != AXP288_GP_ADC_H) 131 + return 0; 132 + 133 + ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); 134 + if (ret) 135 + return ret; 136 + 137 + /* When switching to the GPADC pin give things some time to settle */ 138 + if (mode == AXP288_ADC_TS_PIN_GPADC) 139 + usleep_range(6000, 10000); 140 + 141 + return 0; 142 + } 143 + 144 static int axp288_adc_read_raw(struct iio_dev *indio_dev, 145 struct iio_chan_spec const *chan, 146 int *val, int *val2, long mask) ··· 131 mutex_lock(&indio_dev->mlock); 132 switch (mask) { 133 case IIO_CHAN_INFO_RAW: 134 + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, 135 + chan->address)) { 136 + dev_err(&indio_dev->dev, "GPADC mode\n"); 137 + ret = -EINVAL; 138 + break; 139 + } 140 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 141 + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, 142 + chan->address)) 143 + dev_err(&indio_dev->dev, "TS pin restore\n"); 144 break; 145 default: 146 ret = -EINVAL; ··· 139 mutex_unlock(&indio_dev->mlock); 140 141 return ret; 142 + } 143 + 144 + static int axp288_adc_set_state(struct regmap *regmap) 145 + { 146 + /* ADC should be always enabled for internal FG to function */ 147 + if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) 148 + return -EIO; 149 + 150 + return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 151 } 152 153 static const struct iio_info axp288_adc_iio_info = { ··· 169 * Set ADC to enabled state at all time, including system suspend. 170 * otherwise internal fuel gauge functionality may be affected. 171 */ 172 + ret = axp288_adc_set_state(axp20x->regmap); 173 if (ret) { 174 dev_err(&pdev->dev, "unable to enable ADC device\n"); 175 return ret;
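The axp288_adc fix routes the shared TS pin to the GPADC before sampling that channel, waits for the bias to settle, and restores the pin afterwards. The general shape of such a switch-settle-sample-restore sequence over regmap, with placeholder register names and values rather than the AXP288 ones:

#include <linux/delay.h>
#include <linux/regmap.h>

#define MUX_CTRL_REG    0x84    /* placeholder */
#define MUX_SEL_ALT     0xf2    /* placeholder: route the pin to the ADC */
#define MUX_SEL_DEFAULT 0xf3    /* placeholder: normal routing */

static int sample_muxed_channel(struct regmap *map, unsigned int data_reg,
                                unsigned int *val)
{
        int ret;

        ret = regmap_write(map, MUX_CTRL_REG, MUX_SEL_ALT);
        if (ret)
                return ret;

        usleep_range(6000, 10000);      /* let the input settle */

        ret = regmap_read(map, data_reg, val);

        /* best-effort restore; the sample result is still returned */
        regmap_write(map, MUX_CTRL_REG, MUX_SEL_DEFAULT);

        return ret;
}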
+1 -2
drivers/iio/adc/sun4i-gpadc-iio.c
··· 256 257 err: 258 pm_runtime_put_autosuspend(indio_dev->dev.parent); 259 mutex_unlock(&info->mutex); 260 261 return ret; ··· 366 complete(&info->completion); 367 368 out: 369 - disable_irq_nosync(info->temp_data_irq); 370 return IRQ_HANDLED; 371 } 372 ··· 380 complete(&info->completion); 381 382 out: 383 - disable_irq_nosync(info->fifo_data_irq); 384 return IRQ_HANDLED; 385 } 386
··· 256 257 err: 258 pm_runtime_put_autosuspend(indio_dev->dev.parent); 259 + disable_irq(irq); 260 mutex_unlock(&info->mutex); 261 262 return ret; ··· 365 complete(&info->completion); 366 367 out: 368 return IRQ_HANDLED; 369 } 370 ··· 380 complete(&info->completion); 381 382 out: 383 return IRQ_HANDLED; 384 } 385
+1 -1
drivers/iio/adc/vf610_adc.c
··· 77 #define VF610_ADC_ADSTS_MASK 0x300 78 #define VF610_ADC_ADLPC_EN 0x80 79 #define VF610_ADC_ADHSC_EN 0x400 80 - #define VF610_ADC_REFSEL_VALT 0x100 81 #define VF610_ADC_REFSEL_VBG 0x1000 82 #define VF610_ADC_ADTRG_HARD 0x2000 83 #define VF610_ADC_AVGS_8 0x4000
··· 77 #define VF610_ADC_ADSTS_MASK 0x300 78 #define VF610_ADC_ADLPC_EN 0x80 79 #define VF610_ADC_ADHSC_EN 0x400 80 + #define VF610_ADC_REFSEL_VALT 0x800 81 #define VF610_ADC_REFSEL_VBG 0x1000 82 #define VF610_ADC_ADTRG_HARD 0x2000 83 #define VF610_ADC_AVGS_8 0x4000
+29
drivers/iio/common/st_sensors/st_sensors_core.c
··· 550 } 551 EXPORT_SYMBOL(st_sensors_read_info_raw); 552 553 int st_sensors_check_device_support(struct iio_dev *indio_dev, 554 int num_sensors_list, 555 const struct st_sensor_settings *sensor_settings) ··· 598 indio_dev->name); 599 return -ENODEV; 600 } 601 602 if (sensor_settings[i].wai_addr) { 603 err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
··· 550 } 551 EXPORT_SYMBOL(st_sensors_read_info_raw); 552 553 + static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, 554 + const struct st_sensor_settings *sensor_settings) 555 + { 556 + struct st_sensor_data *sdata = iio_priv(indio_dev); 557 + struct device_node *np = sdata->dev->of_node; 558 + struct st_sensors_platform_data *pdata; 559 + 560 + pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; 561 + if (((np && of_property_read_bool(np, "spi-3wire")) || 562 + (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { 563 + int err; 564 + 565 + err = sdata->tf->write_byte(&sdata->tb, sdata->dev, 566 + sensor_settings->sim.addr, 567 + sensor_settings->sim.value); 568 + if (err < 0) { 569 + dev_err(&indio_dev->dev, 570 + "failed to init interface mode\n"); 571 + return err; 572 + } 573 + } 574 + 575 + return 0; 576 + } 577 + 578 int st_sensors_check_device_support(struct iio_dev *indio_dev, 579 int num_sensors_list, 580 const struct st_sensor_settings *sensor_settings) ··· 573 indio_dev->name); 574 return -ENODEV; 575 } 576 + 577 + err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); 578 + if (err < 0) 579 + return err; 580 581 if (sensor_settings[i].wai_addr) { 582 err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
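The st_sensors_core addition writes the sensor's SIM (interface mode) bit, using the per-sensor .sim address/value pairs introduced in st_accel_core.c, but only when the firmware node or platform data asks for 3-wire SPI. A condensed sketch of that gate (write_reg() stands in for the driver's transfer-function write_byte()):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/types.h>

static int write_reg(struct device *dev, u8 addr, u8 val)
{
        /* placeholder for sdata->tf->write_byte(&sdata->tb, dev, addr, val) */
        return 0;
}

static int maybe_enable_3wire(struct device *dev, u8 sim_addr, u8 sim_val)
{
        if (!of_property_read_bool(dev->of_node, "spi-3wire"))
                return 0;       /* 4-wire default: leave the register alone */

        return write_reg(dev, sim_addr, sim_val);
}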
+1 -1
drivers/iio/light/tsl2563.c
··· 626 struct tsl2563_chip *chip = iio_priv(dev_info); 627 628 iio_push_event(dev_info, 629 - IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 630 0, 631 IIO_EV_TYPE_THRESH, 632 IIO_EV_DIR_EITHER),
··· 626 struct tsl2563_chip *chip = iio_priv(dev_info); 627 628 iio_push_event(dev_info, 629 + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 630 0, 631 IIO_EV_TYPE_THRESH, 632 IIO_EV_DIR_EITHER),
+1 -1
drivers/iio/pressure/st_pressure_core.c
··· 456 .mask_od = 0x40, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 458 }, 459 - .multi_read_bit = true, 460 .bootime = 2, 461 }, 462 };
··· 456 .mask_od = 0x40, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 458 }, 459 + .multi_read_bit = false, 460 .bootime = 2, 461 }, 462 };
+48 -14
drivers/infiniband/core/addr.c
··· 61 void (*callback)(int status, struct sockaddr *src_addr, 62 struct rdma_dev_addr *addr, void *context); 63 unsigned long timeout; 64 int status; 65 u32 seq; 66 }; ··· 296 } 297 EXPORT_SYMBOL(rdma_translate_ip); 298 299 - static void set_timeout(unsigned long time) 300 { 301 unsigned long delay; 302 ··· 304 if ((long)delay < 0) 305 delay = 0; 306 307 - mod_delayed_work(addr_wq, &work, delay); 308 } 309 310 static void queue_req(struct addr_req *req) ··· 319 320 list_add(&req->list, &temp_req->list); 321 322 - if (req_list.next == &req->list) 323 - set_timeout(req->timeout); 324 mutex_unlock(&lock); 325 } 326 ··· 574 return ret; 575 } 576 577 static void process_req(struct work_struct *work) 578 { 579 struct addr_req *req, *temp_req; ··· 622 true, req->seq); 623 if (req->status && time_after_eq(jiffies, req->timeout)) 624 req->status = -ETIMEDOUT; 625 - else if (req->status == -ENODATA) 626 continue; 627 } 628 list_move_tail(&req->list, &done_list); 629 } 630 631 - if (!list_empty(&req_list)) { 632 - req = list_entry(req_list.next, struct addr_req, list); 633 - set_timeout(req->timeout); 634 - } 635 mutex_unlock(&lock); 636 637 list_for_each_entry_safe(req, temp_req, &done_list, list) { 638 list_del(&req->list); 639 req->callback(req->status, (struct sockaddr *) &req->src_addr, 640 req->addr, req->context); 641 put_client(req->client); ··· 681 req->context = context; 682 req->client = client; 683 atomic_inc(&client->refcount); 684 req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); 685 686 req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); ··· 736 req->status = -ECANCELED; 737 req->timeout = jiffies; 738 list_move(&req->list, &req_list); 739 - set_timeout(req->timeout); 740 break; 741 } 742 } ··· 842 if (event == NETEVENT_NEIGH_UPDATE) { 843 struct neighbour *neigh = ctx; 844 845 - if (neigh->nud_state & NUD_VALID) { 846 - set_timeout(jiffies); 847 - } 848 } 849 return 0; 850 } ··· 854 855 int addr_init(void) 856 { 857 - addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); 858 if (!addr_wq) 859 return -ENOMEM; 860
··· 61 void (*callback)(int status, struct sockaddr *src_addr, 62 struct rdma_dev_addr *addr, void *context); 63 unsigned long timeout; 64 + struct delayed_work work; 65 int status; 66 u32 seq; 67 }; ··· 295 } 296 EXPORT_SYMBOL(rdma_translate_ip); 297 298 + static void set_timeout(struct delayed_work *delayed_work, unsigned long time) 299 { 300 unsigned long delay; 301 ··· 303 if ((long)delay < 0) 304 delay = 0; 305 306 + mod_delayed_work(addr_wq, delayed_work, delay); 307 } 308 309 static void queue_req(struct addr_req *req) ··· 318 319 list_add(&req->list, &temp_req->list); 320 321 + set_timeout(&req->work, req->timeout); 322 mutex_unlock(&lock); 323 } 324 ··· 574 return ret; 575 } 576 577 + static void process_one_req(struct work_struct *_work) 578 + { 579 + struct addr_req *req; 580 + struct sockaddr *src_in, *dst_in; 581 + 582 + mutex_lock(&lock); 583 + req = container_of(_work, struct addr_req, work.work); 584 + 585 + if (req->status == -ENODATA) { 586 + src_in = (struct sockaddr *)&req->src_addr; 587 + dst_in = (struct sockaddr *)&req->dst_addr; 588 + req->status = addr_resolve(src_in, dst_in, req->addr, 589 + true, req->seq); 590 + if (req->status && time_after_eq(jiffies, req->timeout)) { 591 + req->status = -ETIMEDOUT; 592 + } else if (req->status == -ENODATA) { 593 + /* requeue the work for retrying again */ 594 + set_timeout(&req->work, req->timeout); 595 + mutex_unlock(&lock); 596 + return; 597 + } 598 + } 599 + list_del(&req->list); 600 + mutex_unlock(&lock); 601 + 602 + req->callback(req->status, (struct sockaddr *)&req->src_addr, 603 + req->addr, req->context); 604 + put_client(req->client); 605 + kfree(req); 606 + } 607 + 608 static void process_req(struct work_struct *work) 609 { 610 struct addr_req *req, *temp_req; ··· 591 true, req->seq); 592 if (req->status && time_after_eq(jiffies, req->timeout)) 593 req->status = -ETIMEDOUT; 594 + else if (req->status == -ENODATA) { 595 + set_timeout(&req->work, req->timeout); 596 continue; 597 + } 598 } 599 list_move_tail(&req->list, &done_list); 600 } 601 602 mutex_unlock(&lock); 603 604 list_for_each_entry_safe(req, temp_req, &done_list, list) { 605 list_del(&req->list); 606 + /* It is safe to cancel other work items from this work item 607 + * because at a time there can be only one work item running 608 + * with this single threaded work queue. 609 + */ 610 + cancel_delayed_work(&req->work); 611 req->callback(req->status, (struct sockaddr *) &req->src_addr, 612 req->addr, req->context); 613 put_client(req->client); ··· 647 req->context = context; 648 req->client = client; 649 atomic_inc(&client->refcount); 650 + INIT_DELAYED_WORK(&req->work, process_one_req); 651 req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); 652 653 req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); ··· 701 req->status = -ECANCELED; 702 req->timeout = jiffies; 703 list_move(&req->list, &req_list); 704 + set_timeout(&req->work, req->timeout); 705 break; 706 } 707 } ··· 807 if (event == NETEVENT_NEIGH_UPDATE) { 808 struct neighbour *neigh = ctx; 809 810 + if (neigh->nud_state & NUD_VALID) 811 + set_timeout(&work, jiffies); 812 } 813 return 0; 814 } ··· 820 821 int addr_init(void) 822 { 823 + addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); 824 if (!addr_wq) 825 return -ENOMEM; 826
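The core/addr.c rework gives every outstanding address-resolution request its own delayed_work (process_one_req) on an ordered workqueue, rather than one global work item that rescans the whole list; a request that is still unresolved simply requeues itself. A minimal sketch of the per-item pattern with an invented request structure:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_request {
        struct delayed_work work;
        int retries;
        int status;
};

static struct workqueue_struct *req_wq;

static void resolve_one(struct work_struct *_work)
{
        struct my_request *req =
                container_of(_work, struct my_request, work.work);

        if (req->status == -EAGAIN && req->retries-- > 0) {
                /* not resolved yet: requeue just this one request */
                mod_delayed_work(req_wq, &req->work, HZ);
                return;
        }

        kfree(req);     /* resolved, failed, or out of retries */
}

static int queue_request(void)
{
        struct my_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return -ENOMEM;

        INIT_DELAYED_WORK(&req->work, resolve_one);
        req->status = -EAGAIN;
        req->retries = 3;
        queue_delayed_work(req_wq, &req->work, 0);
        return 0;
}

static int init_request_queue(void)
{
        /* one handler at a time, like the alloc_ordered_workqueue() above */
        req_wq = alloc_ordered_workqueue("my_addr", WQ_MEM_RECLAIM);
        return req_wq ? 0 : -ENOMEM;
}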
+1 -1
drivers/infiniband/core/uverbs_cmd.c
··· 1153 int out_len) 1154 { 1155 struct ib_uverbs_resize_cq cmd; 1156 - struct ib_uverbs_resize_cq_resp resp; 1157 struct ib_udata udata; 1158 struct ib_cq *cq; 1159 int ret = -EINVAL;
··· 1153 int out_len) 1154 { 1155 struct ib_uverbs_resize_cq cmd; 1156 + struct ib_uverbs_resize_cq_resp resp = {}; 1157 struct ib_udata udata; 1158 struct ib_cq *cq; 1159 int ret = -EINVAL;
+1 -2
drivers/infiniband/core/uverbs_main.c
··· 250 if (atomic_dec_and_test(&file->device->refcount)) 251 ib_uverbs_comp_dev(file->device); 252 253 kfree(file); 254 } 255 ··· 918 static int ib_uverbs_close(struct inode *inode, struct file *filp) 919 { 920 struct ib_uverbs_file *file = filp->private_data; 921 - struct ib_uverbs_device *dev = file->device; 922 923 mutex_lock(&file->cleanup_mutex); 924 if (file->ucontext) { ··· 939 ib_uverbs_release_async_event_file); 940 941 kref_put(&file->ref, ib_uverbs_release_file); 942 - kobject_put(&dev->kobj); 943 944 return 0; 945 }
··· 250 if (atomic_dec_and_test(&file->device->refcount)) 251 ib_uverbs_comp_dev(file->device); 252 253 + kobject_put(&file->device->kobj); 254 kfree(file); 255 } 256 ··· 917 static int ib_uverbs_close(struct inode *inode, struct file *filp) 918 { 919 struct ib_uverbs_file *file = filp->private_data; 920 921 mutex_lock(&file->cleanup_mutex); 922 if (file->ucontext) { ··· 939 ib_uverbs_release_async_event_file); 940 941 kref_put(&file->ref, ib_uverbs_release_file); 942 943 return 0; 944 }
-1
drivers/infiniband/core/verbs.c
··· 895 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 896 [IB_QPS_RESET] = { 897 [IB_QPS_RESET] = { .valid = 1 }, 898 - [IB_QPS_ERR] = { .valid = 1 }, 899 [IB_QPS_INIT] = { 900 .valid = 1, 901 .req_param = {
··· 895 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 896 [IB_QPS_RESET] = { 897 [IB_QPS_RESET] = { .valid = 1 }, 898 [IB_QPS_INIT] = { 899 .valid = 1, 900 .req_param = {
+1 -1
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
··· 733 continue; 734 735 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); 736 - if (IS_ERR(free_mr->mr_free_qp[i])) { 737 dev_err(dev, "Create loop qp failed!\n"); 738 goto create_lp_qp_failed; 739 }
··· 733 continue; 734 735 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); 736 + if (!free_mr->mr_free_qp[i]) { 737 dev_err(dev, "Create loop qp failed!\n"); 738 goto create_lp_qp_failed; 739 }
+1 -1
drivers/infiniband/hw/mlx5/odp.c
··· 939 940 if (qp->ibqp.qp_type != IB_QPT_RC) { 941 av = *wqe; 942 - if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) 943 *wqe += sizeof(struct mlx5_av); 944 else 945 *wqe += sizeof(struct mlx5_base_av);
··· 939 940 if (qp->ibqp.qp_type != IB_QPT_RC) { 941 av = *wqe; 942 + if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) 943 *wqe += sizeof(struct mlx5_av); 944 else 945 *wqe += sizeof(struct mlx5_base_av);
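The odp.c one-liner tests the big-endian dqp_dct field against a properly converted constant: with a __be32 field, the flag is converted with cpu_to_be32() before the bitwise test (the replacement also switches to the MLX5_EXTENDED_UD_AV flag). The idiom in isolation, with an invented flag value:

#include <asm/byteorder.h>
#include <linux/types.h>

#define EXT_AV_FLAG     (1U << 31)      /* invented flag, host byte order */

static bool av_is_extended(__be32 dqp_dct)
{
        /* convert the constant, not the field, then test the bit */
        return (dqp_dct & cpu_to_be32(EXT_AV_FLAG)) != 0;
}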
+1
drivers/infiniband/ulp/ipoib/ipoib.h
··· 336 unsigned long flags; 337 338 struct rw_semaphore vlan_rwsem; 339 340 struct rb_root path_tree; 341 struct list_head path_list;
··· 336 unsigned long flags; 337 338 struct rw_semaphore vlan_rwsem; 339 + struct mutex mcast_mutex; 340 341 struct rb_root path_tree; 342 struct list_head path_list;
-1
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 511 case IB_CM_REQ_RECEIVED: 512 return ipoib_cm_req_handler(cm_id, event); 513 case IB_CM_DREQ_RECEIVED: 514 - p = cm_id->context; 515 ib_send_cm_drep(cm_id, NULL, 0); 516 /* Fall through */ 517 case IB_CM_REJ_RECEIVED:
··· 511 case IB_CM_REQ_RECEIVED: 512 return ipoib_cm_req_handler(cm_id, event); 513 case IB_CM_DREQ_RECEIVED: 514 ib_send_cm_drep(cm_id, NULL, 0); 515 /* Fall through */ 516 case IB_CM_REJ_RECEIVED:
+2 -1
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 52 IPOIB_NETDEV_STAT(tx_bytes), 53 IPOIB_NETDEV_STAT(tx_errors), 54 IPOIB_NETDEV_STAT(rx_dropped), 55 - IPOIB_NETDEV_STAT(tx_dropped) 56 }; 57 58 #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
··· 52 IPOIB_NETDEV_STAT(tx_bytes), 53 IPOIB_NETDEV_STAT(tx_errors), 54 IPOIB_NETDEV_STAT(rx_dropped), 55 + IPOIB_NETDEV_STAT(tx_dropped), 56 + IPOIB_NETDEV_STAT(multicast), 57 }; 58 59 #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+24 -1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 256 257 ++dev->stats.rx_packets; 258 dev->stats.rx_bytes += skb->len; 259 260 skb->dev = dev; 261 if ((dev->features & NETIF_F_RXCSUM) && ··· 711 return pending; 712 } 713 714 int ipoib_ib_dev_stop_default(struct net_device *dev) 715 { 716 struct ipoib_dev_priv *priv = ipoib_priv(dev); ··· 751 */ 752 qp_attr.qp_state = IB_QPS_ERR; 753 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) 754 - ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); 755 756 /* Wait for all sends and receives to complete */ 757 begin = jiffies;
··· 256 257 ++dev->stats.rx_packets; 258 dev->stats.rx_bytes += skb->len; 259 + if (skb->pkt_type == PACKET_MULTICAST) 260 + dev->stats.multicast++; 261 262 skb->dev = dev; 263 if ((dev->features & NETIF_F_RXCSUM) && ··· 709 return pending; 710 } 711 712 + static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, 713 + struct ib_qp *qp, 714 + enum ib_qp_state new_state) 715 + { 716 + struct ib_qp_attr qp_attr; 717 + struct ib_qp_init_attr query_init_attr; 718 + int ret; 719 + 720 + ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); 721 + if (ret) { 722 + ipoib_warn(priv, "%s: Failed to query QP\n", __func__); 723 + return; 724 + } 725 + /* print according to the new-state and the previous state.*/ 726 + if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) 727 + ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); 728 + else 729 + ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", 730 + new_state, qp_attr.qp_state); 731 + } 732 + 733 int ipoib_ib_dev_stop_default(struct net_device *dev) 734 { 735 struct ipoib_dev_priv *priv = ipoib_priv(dev); ··· 728 */ 729 qp_attr.qp_state = IB_QPS_ERR; 730 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) 731 + check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); 732 733 /* Wait for all sends and receives to complete */ 734 begin = jiffies;
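Besides counting received multicast packets, the ipoib_ib.c hunk adds check_qp_movement_and_print(): if moving the QP to ERR fails, it queries the current state and, since core/verbs.c above no longer treats RESET to ERR as a valid transition, only emits a debug message when the QP turns out to still be in RESET. A compact sketch of that query-and-classify step (names here are illustrative):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static void move_qp_to_err(struct ib_qp *qp)
{
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct ib_qp_init_attr init_attr;

        if (!ib_modify_qp(qp, &attr, IB_QP_STATE))
                return;         /* transition succeeded */

        if (ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
                return;         /* cannot even query; nothing more to say */

        if (attr.qp_state == IB_QPS_RESET)
                pr_debug("QP was still in RESET; ERR transition is a no-op\n");
        else
                pr_warn("failed to move QP to ERR from state %d\n",
                        attr.qp_state);
}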
+12 -7
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1560 int i, wait_flushed = 0; 1561 1562 init_completion(&priv->ntbl.flushed); 1563 1564 spin_lock_irqsave(&priv->lock, flags); 1565 ··· 1605 1606 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1607 init_completion(&priv->ntbl.deleted); 1608 - set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1609 1610 /* Stop GC if called at init fail need to cancel work */ 1611 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); ··· 1847 .ndo_tx_timeout = ipoib_timeout, 1848 .ndo_set_rx_mode = ipoib_set_mcast_list, 1849 .ndo_get_iflink = ipoib_get_iflink, 1850 }; 1851 1852 void ipoib_setup_common(struct net_device *dev) ··· 1878 priv->dev = dev; 1879 spin_lock_init(&priv->lock); 1880 init_rwsem(&priv->vlan_rwsem); 1881 1882 INIT_LIST_HEAD(&priv->path_list); 1883 INIT_LIST_HEAD(&priv->child_intfs); ··· 2175 priv->dev->dev_id = port - 1; 2176 2177 result = ib_query_port(hca, port, &attr); 2178 - if (!result) 2179 - priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 2180 - else { 2181 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 2182 hca->name, port); 2183 goto device_init_failed; 2184 } 2185 2186 /* MTU will be reset when mcast join happens */ 2187 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); ··· 2213 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 2214 hca->name, port, result); 2215 goto device_init_failed; 2216 - } else 2217 - memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 2218 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2219 2220 result = ipoib_dev_init(priv->dev, hca, port); 2221 - if (result < 0) { 2222 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 2223 hca->name, port, result); 2224 goto device_init_failed; ··· 2369 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 2370 #ifdef CONFIG_INFINIBAND_IPOIB_CM 2371 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 2372 #endif 2373 2374 /*
··· 1560 int i, wait_flushed = 0; 1561 1562 init_completion(&priv->ntbl.flushed); 1563 + set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1564 1565 spin_lock_irqsave(&priv->lock, flags); 1566 ··· 1604 1605 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1606 init_completion(&priv->ntbl.deleted); 1607 1608 /* Stop GC if called at init fail need to cancel work */ 1609 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); ··· 1847 .ndo_tx_timeout = ipoib_timeout, 1848 .ndo_set_rx_mode = ipoib_set_mcast_list, 1849 .ndo_get_iflink = ipoib_get_iflink, 1850 + .ndo_get_stats64 = ipoib_get_stats, 1851 }; 1852 1853 void ipoib_setup_common(struct net_device *dev) ··· 1877 priv->dev = dev; 1878 spin_lock_init(&priv->lock); 1879 init_rwsem(&priv->vlan_rwsem); 1880 + mutex_init(&priv->mcast_mutex); 1881 1882 INIT_LIST_HEAD(&priv->path_list); 1883 INIT_LIST_HEAD(&priv->child_intfs); ··· 2173 priv->dev->dev_id = port - 1; 2174 2175 result = ib_query_port(hca, port, &attr); 2176 + if (result) { 2177 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 2178 hca->name, port); 2179 goto device_init_failed; 2180 } 2181 + 2182 + priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 2183 2184 /* MTU will be reset when mcast join happens */ 2185 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); ··· 2211 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 2212 hca->name, port, result); 2213 goto device_init_failed; 2214 + } 2215 + 2216 + memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, 2217 + sizeof(union ib_gid)); 2218 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2219 2220 result = ipoib_dev_init(priv->dev, hca, port); 2221 + if (result) { 2222 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 2223 hca->name, port, result); 2224 goto device_init_failed; ··· 2365 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 2366 #ifdef CONFIG_INFINIBAND_IPOIB_CM 2367 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 2368 + ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); 2369 #endif 2370 2371 /*
+11 -22
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 684 int ipoib_mcast_stop_thread(struct net_device *dev) 685 { 686 struct ipoib_dev_priv *priv = ipoib_priv(dev); 687 - unsigned long flags; 688 689 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 690 691 - spin_lock_irqsave(&priv->lock, flags); 692 - cancel_delayed_work(&priv->mcast_task); 693 - spin_unlock_irqrestore(&priv->lock, flags); 694 - 695 - flush_workqueue(priv->wq); 696 697 return 0; 698 } ··· 742 void ipoib_mcast_remove_list(struct list_head *remove_list) 743 { 744 struct ipoib_mcast *mcast, *tmcast; 745 746 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { 747 ipoib_mcast_leave(mcast->dev, mcast); ··· 841 struct ipoib_mcast *mcast, *tmcast; 842 unsigned long flags; 843 844 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 845 846 spin_lock_irqsave(&priv->lock, flags); ··· 860 861 spin_unlock_irqrestore(&priv->lock, flags); 862 863 - /* 864 - * make sure the in-flight joins have finished before we attempt 865 - * to leave 866 - */ 867 - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 868 - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 869 - wait_for_completion(&mcast->done); 870 - 871 ipoib_mcast_remove_list(&remove_list); 872 } 873 874 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) ··· 978 spin_unlock(&priv->lock); 979 netif_addr_unlock(dev); 980 local_irq_restore(flags); 981 - 982 - /* 983 - * make sure the in-flight joins have finished before we attempt 984 - * to leave 985 - */ 986 - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) 987 - if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 988 - wait_for_completion(&mcast->done); 989 990 ipoib_mcast_remove_list(&remove_list); 991
··· 684 int ipoib_mcast_stop_thread(struct net_device *dev) 685 { 686 struct ipoib_dev_priv *priv = ipoib_priv(dev); 687 688 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 689 690 + cancel_delayed_work_sync(&priv->mcast_task); 691 692 return 0; 693 } ··· 747 void ipoib_mcast_remove_list(struct list_head *remove_list) 748 { 749 struct ipoib_mcast *mcast, *tmcast; 750 + 751 + /* 752 + * make sure the in-flight joins have finished before we attempt 753 + * to leave 754 + */ 755 + list_for_each_entry_safe(mcast, tmcast, remove_list, list) 756 + if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 757 + wait_for_completion(&mcast->done); 758 759 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { 760 ipoib_mcast_leave(mcast->dev, mcast); ··· 838 struct ipoib_mcast *mcast, *tmcast; 839 unsigned long flags; 840 841 + mutex_lock(&priv->mcast_mutex); 842 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 843 844 spin_lock_irqsave(&priv->lock, flags); ··· 856 857 spin_unlock_irqrestore(&priv->lock, flags); 858 859 ipoib_mcast_remove_list(&remove_list); 860 + mutex_unlock(&priv->mcast_mutex); 861 } 862 863 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) ··· 981 spin_unlock(&priv->lock); 982 netif_addr_unlock(dev); 983 local_irq_restore(flags); 984 985 ipoib_mcast_remove_list(&remove_list); 986
+7
drivers/iommu/arm-smmu.c
··· 1519 1520 if (using_legacy_binding) { 1521 ret = arm_smmu_register_legacy_master(dev, &smmu); 1522 if (ret) 1523 goto out_free; 1524 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
··· 1519 1520 if (using_legacy_binding) { 1521 ret = arm_smmu_register_legacy_master(dev, &smmu); 1522 + 1523 + /* 1524 + * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() 1525 + * will allocate/initialise a new one. Thus we need to update fwspec for 1526 + * later use. 1527 + */ 1528 + fwspec = dev->iommu_fwspec; 1529 if (ret) 1530 goto out_free; 1531 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
+13 -15
drivers/isdn/hysdn/hysdn_proclog.c
··· 44 char log_name[15]; /* log filename */ 45 struct log_data *log_head, *log_tail; /* head and tail for queue */ 46 int if_used; /* open count for interface */ 47 - int volatile del_lock; /* lock for delete operations */ 48 unsigned char logtmp[LOG_MAX_LINELEN]; 49 wait_queue_head_t rd_queue; 50 }; ··· 101 { 102 struct log_data *ib; 103 struct procdata *pd = card->proclog; 104 - int i; 105 unsigned long flags; 106 107 if (!pd) ··· 124 else 125 pd->log_tail->next = ib; /* follows existing messages */ 126 pd->log_tail = ib; /* new tail */ 127 - i = pd->del_lock++; /* get lock state */ 128 - spin_unlock_irqrestore(&card->hysdn_lock, flags); 129 130 /* delete old entrys */ 131 - if (!i) 132 - while (pd->log_head->next) { 133 - if ((pd->log_head->usage_cnt <= 0) && 134 - (pd->log_head->next->usage_cnt <= 0)) { 135 - ib = pd->log_head; 136 - pd->log_head = pd->log_head->next; 137 - kfree(ib); 138 - } else 139 - break; 140 - } /* pd->log_head->next */ 141 - pd->del_lock--; /* release lock level */ 142 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ 143 } /* put_log_buffer */ 144
··· 44 char log_name[15]; /* log filename */ 45 struct log_data *log_head, *log_tail; /* head and tail for queue */ 46 int if_used; /* open count for interface */ 47 unsigned char logtmp[LOG_MAX_LINELEN]; 48 wait_queue_head_t rd_queue; 49 }; ··· 102 { 103 struct log_data *ib; 104 struct procdata *pd = card->proclog; 105 unsigned long flags; 106 107 if (!pd) ··· 126 else 127 pd->log_tail->next = ib; /* follows existing messages */ 128 pd->log_tail = ib; /* new tail */ 129 130 /* delete old entrys */ 131 + while (pd->log_head->next) { 132 + if ((pd->log_head->usage_cnt <= 0) && 133 + (pd->log_head->next->usage_cnt <= 0)) { 134 + ib = pd->log_head; 135 + pd->log_head = pd->log_head->next; 136 + kfree(ib); 137 + } else { 138 + break; 139 + } 140 + } /* pd->log_head->next */ 141 + 142 + spin_unlock_irqrestore(&card->hysdn_lock, flags); 143 + 144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ 145 } /* put_log_buffer */ 146
+6
drivers/misc/mei/pci-me.c
··· 216 pci_set_drvdata(pdev, dev); 217 218 /* 219 * For not wake-able HW runtime pm framework 220 * can't be used on pci device level. 221 * Use domain runtime pm callbacks instead.
··· 216 pci_set_drvdata(pdev, dev); 217 218 /* 219 + * MEI requires to resume from runtime suspend mode 220 + * in order to perform link reset flow upon system suspend. 221 + */ 222 + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 223 + 224 + /* 225 * For not wake-able HW runtime pm framework 226 * can't be used on pci device level. 227 * Use domain runtime pm callbacks instead.
+6
drivers/misc/mei/pci-txe.c
··· 138 pci_set_drvdata(pdev, dev); 139 140 /* 141 * For not wake-able HW runtime pm framework 142 * can't be used on pci device level. 143 * Use domain runtime pm callbacks instead.
··· 138 pci_set_drvdata(pdev, dev); 139 140 /* 141 + * MEI requires to resume from runtime suspend mode 142 + * in order to perform link reset flow upon system suspend. 143 + */ 144 + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 145 + 146 + /* 147 * For not wake-able HW runtime pm framework 148 * can't be used on pci device level. 149 * Use domain runtime pm callbacks instead.
+2
drivers/mmc/core/block.c
··· 2170 * from being accepted. 2171 */ 2172 card = md->queue.card; 2173 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); 2174 blk_set_queue_dying(md->queue.queue); 2175 mmc_cleanup_queue(&md->queue); 2176 if (md->disk->flags & GENHD_FL_UP) {
··· 2170 * from being accepted. 2171 */ 2172 card = md->queue.card; 2173 + spin_lock_irq(md->queue.queue->queue_lock); 2174 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); 2175 + spin_unlock_irq(md->queue.queue->queue_lock); 2176 blk_set_queue_dying(md->queue.queue); 2177 mmc_cleanup_queue(&md->queue); 2178 if (md->disk->flags & GENHD_FL_UP) {
+1 -1
drivers/mmc/core/mmc.c
··· 1289 static int mmc_select_hs400es(struct mmc_card *card) 1290 { 1291 struct mmc_host *host = card->host; 1292 - int err = 0; 1293 u8 val; 1294 1295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
··· 1289 static int mmc_select_hs400es(struct mmc_card *card) 1290 { 1291 struct mmc_host *host = card->host; 1292 + int err = -EINVAL; 1293 u8 val; 1294 1295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+1 -1
drivers/mmc/host/omap_hsmmc.c
··· 2086 mmc->max_seg_size = mmc->max_req_size; 2087 2088 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 2089 - MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; 2090 2091 mmc->caps |= mmc_pdata(host)->caps; 2092 if (mmc->caps & MMC_CAP_8_BIT_DATA)
··· 2086 mmc->max_seg_size = mmc->max_req_size; 2087 2088 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 2089 + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; 2090 2091 mmc->caps |= mmc_pdata(host)->caps; 2092 if (mmc->caps & MMC_CAP_8_BIT_DATA)
+1
drivers/mtd/mtd_blkdevs.c
··· 113 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 114 if (tr->writesect(dev, block, buf)) 115 return BLK_STS_IOERR; 116 default: 117 return BLK_STS_IOERR; 118 }
··· 113 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 114 if (tr->writesect(dev, block, buf)) 115 return BLK_STS_IOERR; 116 + return BLK_STS_OK; 117 default: 118 return BLK_STS_IOERR; 119 }
+1 -1
drivers/mtd/nand/atmel/nand-controller.c
··· 1201 * tRC < 30ns implies EDO mode. This controller does not support this 1202 * mode. 1203 */ 1204 - if (conf->timings.sdr.tRC_min < 30) 1205 return -ENOTSUPP; 1206 1207 atmel_smc_cs_conf_init(smcconf);
··· 1201 * tRC < 30ns implies EDO mode. This controller does not support this 1202 * mode. 1203 */ 1204 + if (conf->timings.sdr.tRC_min < 30000) 1205 return -ENOTSUPP; 1206 1207 atmel_smc_cs_conf_init(smcconf);
+6 -15
drivers/mtd/nand/atmel/pmecc.c
··· 945 */ 946 struct platform_device *pdev = to_platform_device(userdev); 947 const struct atmel_pmecc_caps *caps; 948 949 /* No PMECC engine available. */ 950 if (!of_property_read_bool(userdev->of_node, ··· 954 955 caps = &at91sam9g45_caps; 956 957 - /* 958 - * Try to find the NFC subnode and extract the associated caps 959 - * from there. 960 - */ 961 - np = of_find_compatible_node(userdev->of_node, NULL, 962 - "atmel,sama5d3-nfc"); 963 - if (np) { 964 - const struct of_device_id *match; 965 - 966 - match = of_match_node(atmel_pmecc_legacy_match, np); 967 - if (match && match->data) 968 - caps = match->data; 969 - 970 - of_node_put(np); 971 - } 972 973 pmecc = atmel_pmecc_create(pdev, caps, 1, 2); 974 }
··· 945 */ 946 struct platform_device *pdev = to_platform_device(userdev); 947 const struct atmel_pmecc_caps *caps; 948 + const struct of_device_id *match; 949 950 /* No PMECC engine available. */ 951 if (!of_property_read_bool(userdev->of_node, ··· 953 954 caps = &at91sam9g45_caps; 955 956 + /* Find the caps associated to the NAND dev node. */ 957 + match = of_match_node(atmel_pmecc_legacy_match, 958 + userdev->of_node); 959 + if (match && match->data) 960 + caps = match->data; 961 962 pmecc = atmel_pmecc_create(pdev, caps, 1, 2); 963 }
+10 -3
drivers/mtd/nand/nand_base.c
··· 65 66 if (!section) { 67 oobregion->offset = 0; 68 - oobregion->length = 4; 69 } else { 70 oobregion->offset = 6; 71 oobregion->length = ecc->total - 4; 72 } ··· 1131 * Ensure the timing mode has been changed on the chip side 1132 * before changing timings on the controller side. 1133 */ 1134 - if (chip->onfi_version) { 1135 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { 1136 chip->onfi_timing_mode_default, 1137 }; ··· 2749 * @buf: the data to write 2750 * @oob_required: must write chip->oob_poi to OOB 2751 * @page: page number to write 2752 - * @cached: cached programming 2753 * @raw: use _raw version of write_page 2754 */ 2755 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
··· 65 66 if (!section) { 67 oobregion->offset = 0; 68 + if (mtd->oobsize == 16) 69 + oobregion->length = 4; 70 + else 71 + oobregion->length = 3; 72 } else { 73 + if (mtd->oobsize == 8) 74 + return -ERANGE; 75 + 76 oobregion->offset = 6; 77 oobregion->length = ecc->total - 4; 78 } ··· 1125 * Ensure the timing mode has been changed on the chip side 1126 * before changing timings on the controller side. 1127 */ 1128 + if (chip->onfi_version && 1129 + (le16_to_cpu(chip->onfi_params.opt_cmd) & 1130 + ONFI_OPT_CMD_SET_GET_FEATURES)) { 1131 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { 1132 chip->onfi_timing_mode_default, 1133 }; ··· 2741 * @buf: the data to write 2742 * @oob_required: must write chip->oob_poi to OOB 2743 * @page: page number to write 2744 * @raw: use _raw version of write_page 2745 */ 2746 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+3 -3
drivers/mtd/nand/nand_timings.c
··· 311 struct nand_sdr_timings *timings = &iface->timings.sdr; 312 313 /* microseconds -> picoseconds */ 314 - timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); 315 - timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); 316 - timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); 317 318 /* nanoseconds -> picoseconds */ 319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
··· 311 struct nand_sdr_timings *timings = &iface->timings.sdr; 312 313 /* microseconds -> picoseconds */ 314 + timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog); 315 + timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers); 316 + timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r); 317 318 /* nanoseconds -> picoseconds */ 319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
+4
drivers/mtd/nand/sunxi_nand.c
··· 1728 */ 1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period; 1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); 1731 1732 /* 1733 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
··· 1728 */ 1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period; 1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); 1731 + if (real_clk_rate <= 0) { 1732 + dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); 1733 + return -EINVAL; 1734 + } 1735 1736 /* 1737 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+38
drivers/net/dsa/mt7530.c
··· 625 * all finished. 626 */ 627 mt7623_pad_clk_setup(ds); 628 } 629 } 630
··· 625 * all finished. 626 */ 627 mt7623_pad_clk_setup(ds); 628 + } else { 629 + u16 lcl_adv = 0, rmt_adv = 0; 630 + u8 flowctrl; 631 + u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; 632 + 633 + switch (phydev->speed) { 634 + case SPEED_1000: 635 + mcr |= PMCR_FORCE_SPEED_1000; 636 + break; 637 + case SPEED_100: 638 + mcr |= PMCR_FORCE_SPEED_100; 639 + break; 640 + }; 641 + 642 + if (phydev->link) 643 + mcr |= PMCR_FORCE_LNK; 644 + 645 + if (phydev->duplex) { 646 + mcr |= PMCR_FORCE_FDX; 647 + 648 + if (phydev->pause) 649 + rmt_adv = LPA_PAUSE_CAP; 650 + if (phydev->asym_pause) 651 + rmt_adv |= LPA_PAUSE_ASYM; 652 + 653 + if (phydev->advertising & ADVERTISED_Pause) 654 + lcl_adv |= ADVERTISE_PAUSE_CAP; 655 + if (phydev->advertising & ADVERTISED_Asym_Pause) 656 + lcl_adv |= ADVERTISE_PAUSE_ASYM; 657 + 658 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 659 + 660 + if (flowctrl & FLOW_CTRL_TX) 661 + mcr |= PMCR_TX_FC_EN; 662 + if (flowctrl & FLOW_CTRL_RX) 663 + mcr |= PMCR_RX_FC_EN; 664 + } 665 + mt7530_write(priv, MT7530_PMCR_P(port), mcr); 666 } 667 } 668
+1
drivers/net/dsa/mt7530.h
··· 151 #define PMCR_TX_FC_EN BIT(5) 152 #define PMCR_RX_FC_EN BIT(4) 153 #define PMCR_FORCE_SPEED_1000 BIT(3) 154 #define PMCR_FORCE_FDX BIT(1) 155 #define PMCR_FORCE_LNK BIT(0) 156 #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
··· 151 #define PMCR_TX_FC_EN BIT(5) 152 #define PMCR_RX_FC_EN BIT(4) 153 #define PMCR_FORCE_SPEED_1000 BIT(3) 154 + #define PMCR_FORCE_SPEED_100 BIT(2) 155 #define PMCR_FORCE_FDX BIT(1) 156 #define PMCR_FORCE_LNK BIT(0) 157 #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+3 -3
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1785 1786 xgene_enet_gpiod_get(pdata); 1787 1788 - if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1789 - pdata->clk = devm_clk_get(&pdev->dev, NULL); 1790 - if (IS_ERR(pdata->clk)) { 1791 /* Abort if the clock is defined but couldn't be 1792 * retrived. Always abort if the clock is missing on 1793 * DT system as the driver can't cope with this case.
··· 1785 1786 xgene_enet_gpiod_get(pdata); 1787 1788 + pdata->clk = devm_clk_get(&pdev->dev, NULL); 1789 + if (IS_ERR(pdata->clk)) { 1790 + if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1791 /* Abort if the clock is defined but couldn't be 1792 * retrived. Always abort if the clock is missing on 1793 * DT system as the driver can't cope with this case.
+1
drivers/net/ethernet/broadcom/b44.c
··· 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2369 2370 spin_lock_init(&bp->lock); 2371 2372 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2373 bp->tx_pending = B44_DEF_TX_RING_PENDING;
··· 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2369 2370 spin_lock_init(&bp->lock); 2371 + u64_stats_init(&bp->hw_stats.syncp); 2372 2373 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2374 bp->tx_pending = B44_DEF_TX_RING_PENDING;
+4
drivers/net/ethernet/broadcom/bcmsysport.c
··· 449 p = (char *)&dev->stats; 450 else 451 p = (char *)priv; 452 p += s->stat_offset; 453 data[j] = *(unsigned long *)p; 454 j++;
··· 449 p = (char *)&dev->stats; 450 else 451 p = (char *)priv; 452 + 453 + if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) 454 + continue; 455 + 456 p += s->stat_offset; 457 data[j] = *(unsigned long *)p; 458 j++;
+14 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 111 static void send_request_unmap(struct ibmvnic_adapter *, u8); 112 static void send_login(struct ibmvnic_adapter *adapter); 113 static void send_cap_queries(struct ibmvnic_adapter *adapter); 114 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 115 static int ibmvnic_init(struct ibmvnic_adapter *); 116 static void release_crq_queue(struct ibmvnic_adapter *); ··· 652 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 653 unsigned long timeout = msecs_to_jiffies(30000); 654 struct device *dev = &adapter->vdev->dev; 655 656 do { 657 if (adapter->renegotiate) { ··· 664 if (!wait_for_completion_timeout(&adapter->init_done, 665 timeout)) { 666 dev_err(dev, "Capabilities query timeout\n"); 667 return -1; 668 } 669 } ··· 3018 *req_value, 3019 (long int)be64_to_cpu(crq->request_capability_rsp. 3020 number), name); 3021 - release_sub_crqs(adapter); 3022 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3023 ibmvnic_send_req_caps(adapter, 1); 3024 return;
··· 111 static void send_request_unmap(struct ibmvnic_adapter *, u8); 112 static void send_login(struct ibmvnic_adapter *adapter); 113 static void send_cap_queries(struct ibmvnic_adapter *adapter); 114 + static int init_sub_crqs(struct ibmvnic_adapter *); 115 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 116 static int ibmvnic_init(struct ibmvnic_adapter *); 117 static void release_crq_queue(struct ibmvnic_adapter *); ··· 651 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 652 unsigned long timeout = msecs_to_jiffies(30000); 653 struct device *dev = &adapter->vdev->dev; 654 + int rc; 655 656 do { 657 if (adapter->renegotiate) { ··· 662 if (!wait_for_completion_timeout(&adapter->init_done, 663 timeout)) { 664 dev_err(dev, "Capabilities query timeout\n"); 665 + return -1; 666 + } 667 + rc = init_sub_crqs(adapter); 668 + if (rc) { 669 + dev_err(dev, 670 + "Initialization of SCRQ's failed\n"); 671 + return -1; 672 + } 673 + rc = init_sub_crq_irqs(adapter); 674 + if (rc) { 675 + dev_err(dev, 676 + "Initialization of SCRQ's irqs failed\n"); 677 return -1; 678 } 679 } ··· 3004 *req_value, 3005 (long int)be64_to_cpu(crq->request_capability_rsp. 3006 number), name); 3007 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3008 ibmvnic_send_req_caps(adapter, 1); 3009 return;
+2
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1113 if (!tx_ring->tx_bi) 1114 goto err; 1115 1116 /* round up to nearest 4K */ 1117 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1118 /* add u32 for head writeback, align after this takes care of
··· 1113 if (!tx_ring->tx_bi) 1114 goto err; 1115 1116 + u64_stats_init(&tx_ring->syncp); 1117 + 1118 /* round up to nearest 4K */ 1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1120 /* add u32 for head writeback, align after this takes care of
+4
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 2988 if (!tx_ring->tx_buffer_info) 2989 goto err; 2990 2991 /* round up to nearest 4K */ 2992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2993 tx_ring->size = ALIGN(tx_ring->size, 4096); ··· 3047 rx_ring->rx_buffer_info = vzalloc(size); 3048 if (!rx_ring->rx_buffer_info) 3049 goto err; 3050 3051 /* Round up to nearest 4K */ 3052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
··· 2988 if (!tx_ring->tx_buffer_info) 2989 goto err; 2990 2991 + u64_stats_init(&tx_ring->syncp); 2992 + 2993 /* round up to nearest 4K */ 2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2995 tx_ring->size = ALIGN(tx_ring->size, 4096); ··· 3045 rx_ring->rx_buffer_info = vzalloc(size); 3046 if (!rx_ring->rx_buffer_info) 3047 goto err; 3048 + 3049 + u64_stats_init(&rx_ring->syncp); 3050 3051 /* Round up to nearest 4K */ 3052 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+8 -7
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 223 struct ethtool_wolinfo *wol) 224 { 225 struct mlx4_en_priv *priv = netdev_priv(netdev); 226 int err = 0; 227 u64 config = 0; 228 u64 mask; ··· 236 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 237 MLX4_DEV_CAP_FLAG_WOL_PORT2; 238 239 - if (!(priv->mdev->dev->caps.flags & mask)) { 240 wol->supported = 0; 241 wol->wolopts = 0; 242 return; 243 } 244 245 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 246 if (err) { ··· 253 return; 254 } 255 256 - if (config & MLX4_EN_WOL_MAGIC) 257 - wol->supported = WAKE_MAGIC; 258 - else 259 - wol->supported = 0; 260 - 261 - if (config & MLX4_EN_WOL_ENABLED) 262 wol->wolopts = WAKE_MAGIC; 263 else 264 wol->wolopts = 0;
··· 223 struct ethtool_wolinfo *wol) 224 { 225 struct mlx4_en_priv *priv = netdev_priv(netdev); 226 + struct mlx4_caps *caps = &priv->mdev->dev->caps; 227 int err = 0; 228 u64 config = 0; 229 u64 mask; ··· 235 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 236 MLX4_DEV_CAP_FLAG_WOL_PORT2; 237 238 + if (!(caps->flags & mask)) { 239 wol->supported = 0; 240 wol->wolopts = 0; 241 return; 242 } 243 + 244 + if (caps->wol_port[priv->port]) 245 + wol->supported = WAKE_MAGIC; 246 + else 247 + wol->supported = 0; 248 249 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 250 if (err) { ··· 247 return; 248 } 249 250 + if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC)) 251 wol->wolopts = WAKE_MAGIC; 252 else 253 wol->wolopts = 0;
+18 -11
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 574 * header, the HW adds it. To address that, we are subtracting the pseudo 575 * header checksum from the checksum value provided by the HW. 576 */ 577 - static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 578 - struct iphdr *iph) 579 { 580 __u16 length_for_csum = 0; 581 __wsum csum_pseudo_header = 0; 582 583 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 584 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 585 - length_for_csum, iph->protocol, 0); 586 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 587 } 588 589 #if IS_ENABLED(CONFIG_IPV6) ··· 599 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 600 struct ipv6hdr *ipv6h) 601 { 602 __wsum csum_pseudo_hdr = 0; 603 604 - if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || 605 - ipv6h->nexthdr == IPPROTO_HOPOPTS)) 606 return -1; 607 - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 608 609 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 610 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 611 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 612 - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); 613 614 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 615 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); ··· 635 } 636 637 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 638 - get_fixed_ipv4_csum(hw_checksum, skb, hdr); 639 #if IS_ENABLED(CONFIG_IPV6) 640 - else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 641 - if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) 642 - return -1; 643 #endif 644 return 0; 645 }
··· 574 * header, the HW adds it. To address that, we are subtracting the pseudo 575 * header checksum from the checksum value provided by the HW. 576 */ 577 + static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 578 + struct iphdr *iph) 579 { 580 __u16 length_for_csum = 0; 581 __wsum csum_pseudo_header = 0; 582 + __u8 ipproto = iph->protocol; 583 + 584 + if (unlikely(ipproto == IPPROTO_SCTP)) 585 + return -1; 586 587 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 588 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 589 + length_for_csum, ipproto, 0); 590 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 591 + return 0; 592 } 593 594 #if IS_ENABLED(CONFIG_IPV6) ··· 594 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 595 struct ipv6hdr *ipv6h) 596 { 597 + __u8 nexthdr = ipv6h->nexthdr; 598 __wsum csum_pseudo_hdr = 0; 599 600 + if (unlikely(nexthdr == IPPROTO_FRAGMENT || 601 + nexthdr == IPPROTO_HOPOPTS || 602 + nexthdr == IPPROTO_SCTP)) 603 return -1; 604 + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); 605 606 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 607 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 608 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 609 + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, 610 + (__force __wsum)htons(nexthdr)); 611 612 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 613 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); ··· 627 } 628 629 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 630 + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); 631 #if IS_ENABLED(CONFIG_IPV6) 632 + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 633 + return get_fixed_ipv6_csum(hw_checksum, skb, hdr); 634 #endif 635 return 0; 636 }
+7 -2
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 159 [32] = "Loopback source checks support", 160 [33] = "RoCEv2 support", 161 [34] = "DMFS Sniffer support (UC & MC)", 162 - [35] = "QinQ VST mode support", 163 - [36] = "sl to vl mapping table change event support" 164 }; 165 int i; 166 ··· 765 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 766 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 767 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 768 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 769 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 770 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 ··· 922 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 923 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 924 dev_cap->flags = flags | (u64)ext_flags << 32; 925 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 926 dev_cap->reserved_uars = field >> 4; 927 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
··· 159 [32] = "Loopback source checks support", 160 [33] = "RoCEv2 support", 161 [34] = "DMFS Sniffer support (UC & MC)", 162 + [35] = "Diag counters per port", 163 + [36] = "QinQ VST mode support", 164 + [37] = "sl to vl mapping table change event support", 165 }; 166 int i; 167 ··· 764 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 765 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 766 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 767 + #define QUERY_DEV_CAP_WOL_OFFSET 0x43 768 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 769 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 770 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 ··· 920 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 921 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 922 dev_cap->flags = flags | (u64)ext_flags << 32; 923 + MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); 924 + dev_cap->wol_port[1] = !!(field & 0x20); 925 + dev_cap->wol_port[2] = !!(field & 0x40); 926 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 927 dev_cap->reserved_uars = field >> 4; 928 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
+1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 129 u32 dmfs_high_rate_qpn_range; 130 struct mlx4_rate_limit_caps rl_caps; 131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; 132 }; 133 134 struct mlx4_func_cap {
··· 129 u32 dmfs_high_rate_qpn_range; 130 struct mlx4_rate_limit_caps rl_caps; 131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; 132 + bool wol_port[MLX4_MAX_PORTS + 1]; 133 }; 134 135 struct mlx4_func_cap {
+2
drivers/net/ethernet/mellanox/mlx4/main.c
··· 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 427 428 /* Save uar page shift */ 429 if (!mlx4_is_slave(dev)) {
··· 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 427 + dev->caps.wol_port[1] = dev_cap->wol_port[1]; 428 + dev->caps.wol_port[2] = dev_cap->wol_port[2]; 429 430 /* Save uar page shift */ 431 if (!mlx4_is_slave(dev)) {
+24 -13
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 626 627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 628 orig_dev); 629 - if (WARN_ON(!bridge_port)) 630 - return -EINVAL; 631 632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 633 MLXSW_SP_FLOOD_TYPE_UC, ··· 711 712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 713 orig_dev); 714 - if (WARN_ON(!bridge_port)) 715 - return -EINVAL; 716 717 if (!bridge_port->bridge_device->multicast_enabled) 718 return 0; ··· 1283 return 0; 1284 1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1286 - if (WARN_ON(!bridge_port)) 1287 - return -EINVAL; 1288 1289 bridge_device = bridge_port->bridge_device; 1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1291 bridge_device, 1292 mdb->vid); 1293 - if (WARN_ON(!mlxsw_sp_port_vlan)) 1294 - return -EINVAL; 1295 1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1297 ··· 1407 int err = 0; 1408 1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1410 - if (WARN_ON(!bridge_port)) 1411 - return -EINVAL; 1412 1413 bridge_device = bridge_port->bridge_device; 1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1415 bridge_device, 1416 mdb->vid); 1417 - if (WARN_ON(!mlxsw_sp_port_vlan)) 1418 - return -EINVAL; 1419 1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1421 ··· 1974 1975 } 1976 1977 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1978 { 1979 struct mlxsw_sp_bridge *bridge; ··· 2007 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2008 { 2009 mlxsw_sp_fdb_fini(mlxsw_sp); 2010 - WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); 2011 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2012 kfree(mlxsw_sp->bridge); 2013 }
··· 626 627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 628 orig_dev); 629 + if (!bridge_port) 630 + return 0; 631 632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 633 MLXSW_SP_FLOOD_TYPE_UC, ··· 711 712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 713 orig_dev); 714 + if (!bridge_port) 715 + return 0; 716 717 if (!bridge_port->bridge_device->multicast_enabled) 718 return 0; ··· 1283 return 0; 1284 1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1286 + if (!bridge_port) 1287 + return 0; 1288 1289 bridge_device = bridge_port->bridge_device; 1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1291 bridge_device, 1292 mdb->vid); 1293 + if (!mlxsw_sp_port_vlan) 1294 + return 0; 1295 1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1297 ··· 1407 int err = 0; 1408 1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1410 + if (!bridge_port) 1411 + return 0; 1412 1413 bridge_device = bridge_port->bridge_device; 1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1415 bridge_device, 1416 mdb->vid); 1417 + if (!mlxsw_sp_port_vlan) 1418 + return 0; 1419 1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1421 ··· 1974 1975 } 1976 1977 + static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) 1978 + { 1979 + struct mlxsw_sp_mid *mid, *tmp; 1980 + 1981 + list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { 1982 + list_del(&mid->list); 1983 + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); 1984 + kfree(mid); 1985 + } 1986 + } 1987 + 1988 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1989 { 1990 struct mlxsw_sp_bridge *bridge; ··· 1996 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 1997 { 1998 mlxsw_sp_fdb_fini(mlxsw_sp); 1999 + mlxsw_sp_mids_fini(mlxsw_sp); 2000 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2001 kfree(mlxsw_sp->bridge); 2002 }
+2
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 513 tx_ring->idx = idx; 514 tx_ring->r_vec = r_vec; 515 tx_ring->is_xdp = is_xdp; 516 517 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 518 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); ··· 533 534 rx_ring->idx = idx; 535 rx_ring->r_vec = r_vec; 536 537 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 538 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
··· 513 tx_ring->idx = idx; 514 tx_ring->r_vec = r_vec; 515 tx_ring->is_xdp = is_xdp; 516 + u64_stats_init(&tx_ring->r_vec->tx_sync); 517 518 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 519 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); ··· 532 533 rx_ring->idx = idx; 534 rx_ring->r_vec = r_vec; 535 + u64_stats_init(&rx_ring->r_vec->rx_sync); 536 537 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 538 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
+1 -1
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 256 - if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 257 goto err; 258 259 return 0;
··· 253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 256 + if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) 257 goto err; 258 259 return 0;
+95 -16
drivers/net/ethernet/ti/cpts.c
··· 31 32 #include "cpts.h" 33 34 #define cpts_read32(c, r) readl_relaxed(&c->reg->r) 35 #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) 36 37 static int event_expired(struct cpts_event *event) 38 { ··· 86 return removed ? 0 : -1; 87 } 88 89 /* 90 * Returns zero if matching event type was found. 91 */ ··· 151 event->low = lo; 152 type = event_type(event); 153 switch (type) { 154 case CPTS_EV_PUSH: 155 case CPTS_EV_RX: 156 - case CPTS_EV_TX: 157 list_del_init(&event->list); 158 list_add_tail(&event->list, &cpts->events); 159 break; ··· 280 return -EOPNOTSUPP; 281 } 282 283 static struct ptp_clock_info cpts_info = { 284 .owner = THIS_MODULE, 285 .name = "CTPS timer", ··· 310 .gettime64 = cpts_ptp_gettime, 311 .settime64 = cpts_ptp_settime, 312 .enable = cpts_ptp_enable, 313 }; 314 - 315 - static void cpts_overflow_check(struct work_struct *work) 316 - { 317 - struct timespec64 ts; 318 - struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); 319 - 320 - cpts_ptp_gettime(&cpts->info, &ts); 321 - pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); 322 - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 323 - } 324 325 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 326 u16 ts_seqid, u8 ts_msgtype) ··· 364 return 0; 365 366 spin_lock_irqsave(&cpts->lock, flags); 367 - cpts_fifo_read(cpts, CPTS_EV_PUSH); 368 list_for_each_safe(this, next, &cpts->events) { 369 event = list_entry(this, struct cpts_event, list); 370 if (event_expired(event)) { ··· 381 list_add(&event->list, &cpts->pool); 382 break; 383 } 384 } 385 spin_unlock_irqrestore(&cpts->lock, flags); 386 ··· 436 { 437 int err, i; 438 439 INIT_LIST_HEAD(&cpts->events); 440 INIT_LIST_HEAD(&cpts->pool); 441 for (i = 0; i < CPTS_MAX_EVENTS; i++) ··· 457 } 458 cpts->phc_index = ptp_clock_index(cpts->clock); 459 460 - schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 461 return 0; 462 463 err_ptp: ··· 471 if (WARN_ON(!cpts->clock)) 472 return; 473 474 - cancel_delayed_work_sync(&cpts->overflow_work); 475 - 476 ptp_clock_unregister(cpts->clock); 477 cpts->clock = NULL; 478 479 cpts_write32(cpts, 0, int_enable); 480 cpts_write32(cpts, 0, control); 481 482 clk_disable(cpts->refclk); 483 } ··· 556 cpts->dev = dev; 557 cpts->reg = (struct cpsw_cpts __iomem *)regs; 558 spin_lock_init(&cpts->lock); 559 - INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); 560 561 ret = cpts_of_parse(cpts, node); 562 if (ret)
··· 31 32 #include "cpts.h" 33 34 + #define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ 35 + 36 + struct cpts_skb_cb_data { 37 + unsigned long tmo; 38 + }; 39 + 40 #define cpts_read32(c, r) readl_relaxed(&c->reg->r) 41 #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) 42 + 43 + static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 44 + u16 ts_seqid, u8 ts_msgtype); 45 46 static int event_expired(struct cpts_event *event) 47 { ··· 77 return removed ? 0 : -1; 78 } 79 80 + static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) 81 + { 82 + struct sk_buff *skb, *tmp; 83 + u16 seqid; 84 + u8 mtype; 85 + bool found = false; 86 + 87 + mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; 88 + seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; 89 + 90 + /* no need to grab txq.lock as access is always done under cpts->lock */ 91 + skb_queue_walk_safe(&cpts->txq, skb, tmp) { 92 + struct skb_shared_hwtstamps ssh; 93 + unsigned int class = ptp_classify_raw(skb); 94 + struct cpts_skb_cb_data *skb_cb = 95 + (struct cpts_skb_cb_data *)skb->cb; 96 + 97 + if (cpts_match(skb, class, seqid, mtype)) { 98 + u64 ns = timecounter_cyc2time(&cpts->tc, event->low); 99 + 100 + memset(&ssh, 0, sizeof(ssh)); 101 + ssh.hwtstamp = ns_to_ktime(ns); 102 + skb_tstamp_tx(skb, &ssh); 103 + found = true; 104 + __skb_unlink(skb, &cpts->txq); 105 + dev_consume_skb_any(skb); 106 + dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", 107 + mtype, seqid); 108 + } else if (time_after(jiffies, skb_cb->tmo)) { 109 + /* timeout any expired skbs over 1s */ 110 + dev_dbg(cpts->dev, 111 + "expiring tx timestamp mtype %u seqid %04x\n", 112 + mtype, seqid); 113 + __skb_unlink(skb, &cpts->txq); 114 + dev_consume_skb_any(skb); 115 + } 116 + } 117 + 118 + return found; 119 + } 120 + 121 /* 122 * Returns zero if matching event type was found. 
123 */ ··· 101 event->low = lo; 102 type = event_type(event); 103 switch (type) { 104 + case CPTS_EV_TX: 105 + if (cpts_match_tx_ts(cpts, event)) { 106 + /* if the new event matches an existing skb, 107 + * then don't queue it 108 + */ 109 + break; 110 + } 111 case CPTS_EV_PUSH: 112 case CPTS_EV_RX: 113 list_del_init(&event->list); 114 list_add_tail(&event->list, &cpts->events); 115 break; ··· 224 return -EOPNOTSUPP; 225 } 226 227 + static long cpts_overflow_check(struct ptp_clock_info *ptp) 228 + { 229 + struct cpts *cpts = container_of(ptp, struct cpts, info); 230 + unsigned long delay = cpts->ov_check_period; 231 + struct timespec64 ts; 232 + unsigned long flags; 233 + 234 + spin_lock_irqsave(&cpts->lock, flags); 235 + ts = ns_to_timespec64(timecounter_read(&cpts->tc)); 236 + 237 + if (!skb_queue_empty(&cpts->txq)) 238 + delay = CPTS_SKB_TX_WORK_TIMEOUT; 239 + spin_unlock_irqrestore(&cpts->lock, flags); 240 + 241 + pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); 242 + return (long)delay; 243 + } 244 + 245 static struct ptp_clock_info cpts_info = { 246 .owner = THIS_MODULE, 247 .name = "CTPS timer", ··· 236 .gettime64 = cpts_ptp_gettime, 237 .settime64 = cpts_ptp_settime, 238 .enable = cpts_ptp_enable, 239 + .do_aux_work = cpts_overflow_check, 240 }; 241 242 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 243 u16 ts_seqid, u8 ts_msgtype) ··· 299 return 0; 300 301 spin_lock_irqsave(&cpts->lock, flags); 302 + cpts_fifo_read(cpts, -1); 303 list_for_each_safe(this, next, &cpts->events) { 304 event = list_entry(this, struct cpts_event, list); 305 if (event_expired(event)) { ··· 316 list_add(&event->list, &cpts->pool); 317 break; 318 } 319 + } 320 + 321 + if (ev_type == CPTS_EV_TX && !ns) { 322 + struct cpts_skb_cb_data *skb_cb = 323 + (struct cpts_skb_cb_data *)skb->cb; 324 + /* Not found, add frame to queue for processing later. 325 + * The periodic FIFO check will handle this. 326 + */ 327 + skb_get(skb); 328 + /* get the timestamp for timeouts */ 329 + skb_cb->tmo = jiffies + msecs_to_jiffies(100); 330 + __skb_queue_tail(&cpts->txq, skb); 331 + ptp_schedule_worker(cpts->clock, 0); 332 } 333 spin_unlock_irqrestore(&cpts->lock, flags); 334 ··· 358 { 359 int err, i; 360 361 + skb_queue_head_init(&cpts->txq); 362 INIT_LIST_HEAD(&cpts->events); 363 INIT_LIST_HEAD(&cpts->pool); 364 for (i = 0; i < CPTS_MAX_EVENTS; i++) ··· 378 } 379 cpts->phc_index = ptp_clock_index(cpts->clock); 380 381 + ptp_schedule_worker(cpts->clock, cpts->ov_check_period); 382 return 0; 383 384 err_ptp: ··· 392 if (WARN_ON(!cpts->clock)) 393 return; 394 395 ptp_clock_unregister(cpts->clock); 396 cpts->clock = NULL; 397 398 cpts_write32(cpts, 0, int_enable); 399 cpts_write32(cpts, 0, control); 400 + 401 + /* Drop all packet */ 402 + skb_queue_purge(&cpts->txq); 403 404 clk_disable(cpts->refclk); 405 } ··· 476 cpts->dev = dev; 477 cpts->reg = (struct cpsw_cpts __iomem *)regs; 478 spin_lock_init(&cpts->lock); 479 480 ret = cpts_of_parse(cpts, node); 481 if (ret)
+1 -1
drivers/net/ethernet/ti/cpts.h
··· 119 u32 cc_mult; /* for the nominal frequency */ 120 struct cyclecounter cc; 121 struct timecounter tc; 122 - struct delayed_work overflow_work; 123 int phc_index; 124 struct clk *refclk; 125 struct list_head events; 126 struct list_head pool; 127 struct cpts_event pool_data[CPTS_MAX_EVENTS]; 128 unsigned long ov_check_period; 129 }; 130 131 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
··· 119 u32 cc_mult; /* for the nominal frequency */ 120 struct cyclecounter cc; 121 struct timecounter tc; 122 int phc_index; 123 struct clk *refclk; 124 struct list_head events; 125 struct list_head pool; 126 struct cpts_event pool_data[CPTS_MAX_EVENTS]; 127 unsigned long ov_check_period; 128 + struct sk_buff_head txq; 129 }; 130 131 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+1 -1
drivers/net/geneve.c
··· 1091 if (data[IFLA_GENEVE_ID]) { 1092 __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); 1093 1094 - if (vni >= GENEVE_VID_MASK) 1095 return -ERANGE; 1096 } 1097
··· 1091 if (data[IFLA_GENEVE_ID]) { 1092 __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); 1093 1094 + if (vni >= GENEVE_N_VID) 1095 return -ERANGE; 1096 } 1097
+1 -1
drivers/net/gtp.c
··· 364 365 gtp->dev = dev; 366 367 - dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 368 if (!dev->tstats) 369 return -ENOMEM; 370
··· 364 365 gtp->dev = dev; 366 367 + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 368 if (!dev->tstats) 369 return -ENOMEM; 370
+2 -1
drivers/net/hyperv/hyperv_net.h
··· 765 u32 max_chn; 766 u32 num_chn; 767 768 - refcount_t sc_offered; 769 770 struct rndis_device *extension; 771
··· 765 u32 max_chn; 766 u32 num_chn; 767 768 + atomic_t open_chn; 769 + wait_queue_head_t subchan_open; 770 771 struct rndis_device *extension; 772
+3
drivers/net/hyperv/netvsc.c
··· 78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 80 init_completion(&net_device->channel_init_wait); 81 82 return net_device; 83 } ··· 1303 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1304 1305 nvchan->channel = device->channel; 1306 } 1307 1308 /* Enable NAPI handler before init callbacks */
··· 78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 80 init_completion(&net_device->channel_init_wait); 81 + init_waitqueue_head(&net_device->subchan_open); 82 83 return net_device; 84 } ··· 1302 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1303 1304 nvchan->channel = device->channel; 1305 + u64_stats_init(&nvchan->tx_stats.syncp); 1306 + u64_stats_init(&nvchan->rx_stats.syncp); 1307 } 1308 1309 /* Enable NAPI handler before init callbacks */
+8 -6
drivers/net/hyperv/rndis_filter.c
··· 1048 else 1049 netif_napi_del(&nvchan->napi); 1050 1051 - if (refcount_dec_and_test(&nvscdev->sc_offered)) 1052 - complete(&nvscdev->channel_init_wait); 1053 } 1054 1055 int rndis_filter_device_add(struct hv_device *dev, ··· 1089 net_device = net_device_ctx->nvdev; 1090 net_device->max_chn = 1; 1091 net_device->num_chn = 1; 1092 - 1093 - refcount_set(&net_device->sc_offered, 0); 1094 1095 net_device->extension = rndis_device; 1096 rndis_device->ndev = net; ··· 1219 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, 1220 net_device->num_chn); 1221 1222 num_rss_qs = net_device->num_chn - 1; 1223 if (num_rss_qs == 0) 1224 return 0; 1225 1226 - refcount_set(&net_device->sc_offered, num_rss_qs); 1227 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); 1228 1229 init_packet = &net_device->channel_init_pkt; ··· 1240 if (ret) 1241 goto out; 1242 1243 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1244 ret = -ENODEV; 1245 goto out; 1246 } 1247 - wait_for_completion(&net_device->channel_init_wait); 1248 1249 net_device->num_chn = 1 + 1250 init_packet->msg.v5_msg.subchn_comp.num_subchannels; 1251 1252 /* ignore failues from setting rss parameters, still have channels */ 1253 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
··· 1048 else 1049 netif_napi_del(&nvchan->napi); 1050 1051 + atomic_inc(&nvscdev->open_chn); 1052 + wake_up(&nvscdev->subchan_open); 1053 } 1054 1055 int rndis_filter_device_add(struct hv_device *dev, ··· 1089 net_device = net_device_ctx->nvdev; 1090 net_device->max_chn = 1; 1091 net_device->num_chn = 1; 1092 1093 net_device->extension = rndis_device; 1094 rndis_device->ndev = net; ··· 1221 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, 1222 net_device->num_chn); 1223 1224 + atomic_set(&net_device->open_chn, 1); 1225 num_rss_qs = net_device->num_chn - 1; 1226 if (num_rss_qs == 0) 1227 return 0; 1228 1229 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); 1230 1231 init_packet = &net_device->channel_init_pkt; ··· 1242 if (ret) 1243 goto out; 1244 1245 + wait_for_completion(&net_device->channel_init_wait); 1246 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1247 ret = -ENODEV; 1248 goto out; 1249 } 1250 1251 net_device->num_chn = 1 + 1252 init_packet->msg.v5_msg.subchn_comp.num_subchannels; 1253 + 1254 + /* wait for all sub channels to open */ 1255 + wait_event(net_device->subchan_open, 1256 + atomic_read(&net_device->open_chn) == net_device->num_chn); 1257 1258 /* ignore failues from setting rss parameters, still have channels */ 1259 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+1 -1
drivers/net/ipvlan/ipvlan_main.c
··· 192 193 netdev_lockdep_set_classes(dev); 194 195 - ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); 196 if (!ipvlan->pcpu_stats) 197 return -ENOMEM; 198
··· 192 193 netdev_lockdep_set_classes(dev); 194 195 + ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); 196 if (!ipvlan->pcpu_stats) 197 return -ENOMEM; 198
+10 -8
drivers/net/ppp/ppp_generic.c
··· 1915 spin_unlock(&pch->downl); 1916 /* see if there is anything from the attached unit to be sent */ 1917 if (skb_queue_empty(&pch->file.xq)) { 1918 - read_lock(&pch->upl); 1919 ppp = pch->ppp; 1920 if (ppp) 1921 - ppp_xmit_process(ppp); 1922 - read_unlock(&pch->upl); 1923 } 1924 } 1925 1926 static void ppp_channel_push(struct channel *pch) 1927 { 1928 - local_bh_disable(); 1929 - 1930 - __ppp_channel_push(pch); 1931 - 1932 - local_bh_enable(); 1933 } 1934 1935 /*
··· 1915 spin_unlock(&pch->downl); 1916 /* see if there is anything from the attached unit to be sent */ 1917 if (skb_queue_empty(&pch->file.xq)) { 1918 ppp = pch->ppp; 1919 if (ppp) 1920 + __ppp_xmit_process(ppp); 1921 } 1922 } 1923 1924 static void ppp_channel_push(struct channel *pch) 1925 { 1926 + read_lock_bh(&pch->upl); 1927 + if (pch->ppp) { 1928 + (*this_cpu_ptr(pch->ppp->xmit_recursion))++; 1929 + __ppp_channel_push(pch); 1930 + (*this_cpu_ptr(pch->ppp->xmit_recursion))--; 1931 + } else { 1932 + __ppp_channel_push(pch); 1933 + } 1934 + read_unlock_bh(&pch->upl); 1935 } 1936 1937 /*
+1
drivers/net/usb/asix.h
··· 209 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 210 struct asix_rx_fixup_info *rx); 211 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); 212 213 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 214 gfp_t flags);
··· 209 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 210 struct asix_rx_fixup_info *rx); 211 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); 212 + void asix_rx_fixup_common_free(struct asix_common_private *dp); 213 214 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 215 gfp_t flags);
+43 -10
drivers/net/usb/asix_common.c
··· 75 value, index, data, size); 76 } 77 78 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 79 struct asix_rx_fixup_info *rx) 80 { ··· 120 if (size != ((~rx->header >> 16) & 0x7ff)) { 121 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", 122 rx->remaining); 123 - if (rx->ax_skb) { 124 - kfree_skb(rx->ax_skb); 125 - rx->ax_skb = NULL; 126 - /* Discard the incomplete netdev Ethernet frame 127 - * and assume the Data header is at the start of 128 - * the current URB socket buffer. 129 - */ 130 - } 131 - rx->remaining = 0; 132 } 133 } 134 ··· 152 if (size != ((~rx->header >> 16) & 0x7ff)) { 153 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", 154 rx->header, offset); 155 return 0; 156 } 157 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 158 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 159 size); 160 return 0; 161 } 162 ··· 183 if (rx->ax_skb) { 184 skb_put_data(rx->ax_skb, skb->data + offset, 185 copy_length); 186 - if (!rx->remaining) 187 usbnet_skb_return(dev, rx->ax_skb); 188 } 189 190 offset += (copy_length + 1) & 0xfffe; ··· 195 if (skb->len != offset) { 196 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", 197 skb->len, offset); 198 return 0; 199 } 200 ··· 208 struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; 209 210 return asix_rx_fixup_internal(dev, skb, rx); 211 } 212 213 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
··· 75 value, index, data, size); 76 } 77 78 + static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) 79 + { 80 + /* Reset the variables that have a lifetime outside of 81 + * asix_rx_fixup_internal() so that future processing starts from a 82 + * known set of initial conditions. 83 + */ 84 + 85 + if (rx->ax_skb) { 86 + /* Discard any incomplete Ethernet frame in the netdev buffer */ 87 + kfree_skb(rx->ax_skb); 88 + rx->ax_skb = NULL; 89 + } 90 + 91 + /* Assume the Data header 32-bit word is at the start of the current 92 + * or next URB socket buffer so reset all the state variables. 93 + */ 94 + rx->remaining = 0; 95 + rx->split_head = false; 96 + rx->header = 0; 97 + } 98 + 99 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 100 struct asix_rx_fixup_info *rx) 101 { ··· 99 if (size != ((~rx->header >> 16) & 0x7ff)) { 100 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", 101 rx->remaining); 102 + reset_asix_rx_fixup_info(rx); 103 } 104 } 105 ··· 139 if (size != ((~rx->header >> 16) & 0x7ff)) { 140 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", 141 rx->header, offset); 142 + reset_asix_rx_fixup_info(rx); 143 return 0; 144 } 145 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 146 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 147 size); 148 + reset_asix_rx_fixup_info(rx); 149 return 0; 150 } 151 ··· 168 if (rx->ax_skb) { 169 skb_put_data(rx->ax_skb, skb->data + offset, 170 copy_length); 171 + if (!rx->remaining) { 172 usbnet_skb_return(dev, rx->ax_skb); 173 + rx->ax_skb = NULL; 174 + } 175 } 176 177 offset += (copy_length + 1) & 0xfffe; ··· 178 if (skb->len != offset) { 179 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", 180 skb->len, offset); 181 + reset_asix_rx_fixup_info(rx); 182 return 0; 183 } 184 ··· 190 struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; 191 192 return asix_rx_fixup_internal(dev, skb, rx); 193 + } 194 + 195 + void asix_rx_fixup_common_free(struct asix_common_private *dp) 196 + { 197 + struct asix_rx_fixup_info *rx; 198 + 199 + if (!dp) 200 + return; 201 + 202 + rx = &dp->rx_fixup_info; 203 + 204 + if (rx->ax_skb) { 205 + kfree_skb(rx->ax_skb); 206 + rx->ax_skb = NULL; 207 + } 208 } 209 210 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+1
drivers/net/usb/asix_devices.c
··· 764 765 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) 766 { 767 kfree(dev->driver_priv); 768 } 769
··· 764 765 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) 766 { 767 + asix_rx_fixup_common_free(dev->driver_priv); 768 kfree(dev->driver_priv); 769 } 770
+9 -9
drivers/net/usb/lan78xx.c
··· 2367 /* Init LTM */ 2368 lan78xx_init_ltm(dev); 2369 2370 - dev->net->hard_header_len += TX_OVERHEAD; 2371 - dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 2372 - 2373 if (dev->udev->speed == USB_SPEED_SUPER) { 2374 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2375 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; ··· 2852 return ret; 2853 } 2854 2855 /* Init all registers */ 2856 ret = lan78xx_reset(dev); 2857 2858 - lan78xx_mdio_init(dev); 2859 2860 dev->net->flags |= IFF_MULTICAST; 2861 2862 pdata->wol = WAKE_MAGIC; 2863 2864 - return 0; 2865 } 2866 2867 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) ··· 3525 udev = interface_to_usbdev(intf); 3526 udev = usb_get_dev(udev); 3527 3528 - ret = -ENOMEM; 3529 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3530 if (!netdev) { 3531 - dev_err(&intf->dev, "Error: OOM\n"); 3532 - goto out1; 3533 } 3534 3535 /* netdev_printk() needs this */ ··· 3610 ret = register_netdev(netdev); 3611 if (ret != 0) { 3612 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3613 - goto out2; 3614 } 3615 3616 usb_set_intfdata(intf, dev);
··· 2367 /* Init LTM */ 2368 lan78xx_init_ltm(dev); 2369 2370 if (dev->udev->speed == USB_SPEED_SUPER) { 2371 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2372 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; ··· 2855 return ret; 2856 } 2857 2858 + dev->net->hard_header_len += TX_OVERHEAD; 2859 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 2860 + 2861 /* Init all registers */ 2862 ret = lan78xx_reset(dev); 2863 2864 + ret = lan78xx_mdio_init(dev); 2865 2866 dev->net->flags |= IFF_MULTICAST; 2867 2868 pdata->wol = WAKE_MAGIC; 2869 2870 + return ret; 2871 } 2872 2873 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) ··· 3525 udev = interface_to_usbdev(intf); 3526 udev = usb_get_dev(udev); 3527 3528 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3529 if (!netdev) { 3530 + dev_err(&intf->dev, "Error: OOM\n"); 3531 + ret = -ENOMEM; 3532 + goto out1; 3533 } 3534 3535 /* netdev_printk() needs this */ ··· 3610 ret = register_netdev(netdev); 3611 if (ret != 0) { 3612 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3613 + goto out3; 3614 } 3615 3616 usb_set_intfdata(intf, dev);
+6 -1
drivers/net/usb/qmi_wwan.c
··· 1175 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 1176 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1177 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1178 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1179 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1180 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ ··· 1341 static void qmi_wwan_disconnect(struct usb_interface *intf) 1342 { 1343 struct usbnet *dev = usb_get_intfdata(intf); 1344 - struct qmi_wwan_state *info = (void *)&dev->data; 1345 struct list_head *iter; 1346 struct net_device *ldev; 1347 1348 if (info->flags & QMI_WWAN_FLAG_MUX) { 1349 if (!rtnl_trylock()) { 1350 restart_syscall();
··· 1175 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 1176 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1177 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1178 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1179 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1180 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1181 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ ··· 1340 static void qmi_wwan_disconnect(struct usb_interface *intf) 1341 { 1342 struct usbnet *dev = usb_get_intfdata(intf); 1343 + struct qmi_wwan_state *info; 1344 struct list_head *iter; 1345 struct net_device *ldev; 1346 1347 + /* called twice if separate control and data intf */ 1348 + if (!dev) 1349 + return; 1350 + info = (void *)&dev->data; 1351 if (info->flags & QMI_WWAN_FLAG_MUX) { 1352 if (!rtnl_trylock()) { 1353 restart_syscall();
+1
drivers/net/vxlan.c
··· 623 624 out: 625 skb_gro_remcsum_cleanup(skb, &grc); 626 NAPI_GRO_CB(skb)->flush |= flush; 627 628 return pp;
··· 623 624 out: 625 skb_gro_remcsum_cleanup(skb, &grc); 626 + skb->remcsum_offload = 0; 627 NAPI_GRO_CB(skb)->flush |= flush; 628 629 return pp;
+25 -10
drivers/nvme/host/core.c
··· 336 337 c.directive.opcode = nvme_admin_directive_recv; 338 c.directive.nsid = cpu_to_le32(nsid); 339 - c.directive.numd = cpu_to_le32(sizeof(*s)); 340 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 341 c.directive.dtype = NVME_DIR_STREAMS; 342 ··· 1509 blk_queue_write_cache(q, vwc, vwc); 1510 } 1511 1512 - static void nvme_configure_apst(struct nvme_ctrl *ctrl) 1513 { 1514 /* 1515 * APST (Autonomous Power State Transition) lets us program a ··· 1538 * then don't do anything. 1539 */ 1540 if (!ctrl->apsta) 1541 - return; 1542 1543 if (ctrl->npss > 31) { 1544 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 1545 - return; 1546 } 1547 1548 table = kzalloc(sizeof(*table), GFP_KERNEL); 1549 if (!table) 1550 - return; 1551 1552 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 1553 /* Turn off APST. */ ··· 1629 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 1630 1631 kfree(table); 1632 } 1633 1634 static void nvme_set_latency_tolerance(struct device *dev, s32 val) ··· 1836 * In fabrics we need to verify the cntlid matches the 1837 * admin connect 1838 */ 1839 - if (ctrl->cntlid != le16_to_cpu(id->cntlid)) 1840 ret = -EINVAL; 1841 1842 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 1843 dev_err(ctrl->device, 1844 "keep-alive support is mandatory for fabrics\n"); 1845 ret = -EINVAL; 1846 } 1847 } else { 1848 ctrl->cntlid = le16_to_cpu(id->cntlid); ··· 1860 else if (!ctrl->apst_enabled && prev_apst_enabled) 1861 dev_pm_qos_hide_latency_tolerance(ctrl->device); 1862 1863 - nvme_configure_apst(ctrl); 1864 - nvme_configure_directives(ctrl); 1865 1866 ctrl->identified = true; 1867 1868 return ret; 1869 } 1870 EXPORT_SYMBOL_GPL(nvme_init_identify); ··· 2017 if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) 2018 return sprintf(buf, "eui.%8phN\n", ns->eui); 2019 2020 - while (ctrl->serial[serial_len - 1] == ' ') 2021 serial_len--; 2022 - while (ctrl->model[model_len - 1] == ' ') 2023 model_len--; 2024 2025 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
··· 336 337 c.directive.opcode = nvme_admin_directive_recv; 338 c.directive.nsid = cpu_to_le32(nsid); 339 + c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); 340 c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; 341 c.directive.dtype = NVME_DIR_STREAMS; 342 ··· 1509 blk_queue_write_cache(q, vwc, vwc); 1510 } 1511 1512 + static int nvme_configure_apst(struct nvme_ctrl *ctrl) 1513 { 1514 /* 1515 * APST (Autonomous Power State Transition) lets us program a ··· 1538 * then don't do anything. 1539 */ 1540 if (!ctrl->apsta) 1541 + return 0; 1542 1543 if (ctrl->npss > 31) { 1544 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 1545 + return 0; 1546 } 1547 1548 table = kzalloc(sizeof(*table), GFP_KERNEL); 1549 if (!table) 1550 + return 0; 1551 1552 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 1553 /* Turn off APST. */ ··· 1629 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 1630 1631 kfree(table); 1632 + return ret; 1633 } 1634 1635 static void nvme_set_latency_tolerance(struct device *dev, s32 val) ··· 1835 * In fabrics we need to verify the cntlid matches the 1836 * admin connect 1837 */ 1838 + if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 1839 ret = -EINVAL; 1840 + goto out_free; 1841 + } 1842 1843 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 1844 dev_err(ctrl->device, 1845 "keep-alive support is mandatory for fabrics\n"); 1846 ret = -EINVAL; 1847 + goto out_free; 1848 } 1849 } else { 1850 ctrl->cntlid = le16_to_cpu(id->cntlid); ··· 1856 else if (!ctrl->apst_enabled && prev_apst_enabled) 1857 dev_pm_qos_hide_latency_tolerance(ctrl->device); 1858 1859 + ret = nvme_configure_apst(ctrl); 1860 + if (ret < 0) 1861 + return ret; 1862 + 1863 + ret = nvme_configure_directives(ctrl); 1864 + if (ret < 0) 1865 + return ret; 1866 1867 ctrl->identified = true; 1868 1869 + return 0; 1870 + 1871 + out_free: 1872 + kfree(id); 1873 return ret; 1874 } 1875 EXPORT_SYMBOL_GPL(nvme_init_identify); ··· 2004 if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) 2005 return sprintf(buf, "eui.%8phN\n", ns->eui); 2006 2007 + while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' || 2008 + ctrl->serial[serial_len - 1] == '\0')) 2009 serial_len--; 2010 + while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' || 2011 + ctrl->model[model_len - 1] == '\0')) 2012 model_len--; 2013 2014 return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
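A note on the numd fix at the top of this hunk: NVMe "number of dwords" fields are zero-based dword counts rather than byte counts, so the value has to be (size in bytes / 4) - 1. As a purely illustrative example, for a 32-byte parameter structure the field should carry 32/4 - 1 = 7, whereas the old code wrote the byte count 32.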
+7 -11
drivers/nvme/host/pci.c
··· 1558 if (dev->cmb) { 1559 iounmap(dev->cmb); 1560 dev->cmb = NULL; 1561 - if (dev->cmbsz) { 1562 - sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1563 - &dev_attr_cmb.attr, NULL); 1564 - dev->cmbsz = 0; 1565 - } 1566 } 1567 } 1568 ··· 1951 1952 /* 1953 * CMBs can currently only exist on >=1.2 PCIe devices. We only 1954 - * populate sysfs if a CMB is implemented. Note that we add the 1955 - * CMB attribute to the nvme_ctrl kobj which removes the need to remove 1956 - * it on exit. Since nvme_dev_attrs_group has no name we can pass 1957 - * NULL as final argument to sysfs_add_file_to_group. 1958 */ 1959 1960 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { 1961 dev->cmb = nvme_map_cmb(dev); 1962 - 1963 - if (dev->cmbsz) { 1964 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1965 &dev_attr_cmb.attr, NULL)) 1966 dev_warn(dev->ctrl.device,
··· 1558 if (dev->cmb) { 1559 iounmap(dev->cmb); 1560 dev->cmb = NULL; 1561 + sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1562 + &dev_attr_cmb.attr, NULL); 1563 + dev->cmbsz = 0; 1564 } 1565 } 1566 ··· 1953 1954 /* 1955 * CMBs can currently only exist on >=1.2 PCIe devices. We only 1956 + * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group 1957 + * has no name we can pass NULL as final argument to 1958 + * sysfs_add_file_to_group. 1959 */ 1960 1961 if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { 1962 dev->cmb = nvme_map_cmb(dev); 1963 + if (dev->cmb) { 1964 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1965 &dev_attr_cmb.attr, NULL)) 1966 dev_warn(dev->ctrl.device,
+186 -30
drivers/nvme/target/fc.c
··· 114 struct kref ref; 115 }; 116 117 struct nvmet_fc_tgt_queue { 118 bool ninetypercent; 119 u16 qid; ··· 137 struct nvmet_fc_tgt_assoc *assoc; 138 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ 139 struct list_head fod_list; 140 struct workqueue_struct *work_q; 141 struct kref ref; 142 } __aligned(sizeof(unsigned long long)); ··· 230 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 231 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 232 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 233 234 235 /* *********************** FC-NVME DMA Handling **************************** */ ··· 472 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 473 { 474 static struct nvmet_fc_fcp_iod *fod; 475 - unsigned long flags; 476 477 - spin_lock_irqsave(&queue->qlock, flags); 478 fod = list_first_entry_or_null(&queue->fod_list, 479 struct nvmet_fc_fcp_iod, fcp_list); 480 if (fod) { ··· 486 * will "inherit" that reference. 487 */ 488 } 489 - spin_unlock_irqrestore(&queue->qlock, flags); 490 return fod; 491 } 492 493 494 static void 495 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, ··· 516 { 517 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 518 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 519 unsigned long flags; 520 521 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, ··· 524 525 fcpreq->nvmet_fc_private = NULL; 526 527 - spin_lock_irqsave(&queue->qlock, flags); 528 - list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 529 fod->active = false; 530 fod->abort = false; 531 fod->aborted = false; 532 fod->writedataactive = false; 533 fod->fcpreq = NULL; 534 - spin_unlock_irqrestore(&queue->qlock, flags); 535 - 536 - /* 537 - * release the reference taken at queue lookup and fod allocation 538 - */ 539 - nvmet_fc_tgt_q_put(queue); 540 541 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 542 } 543 544 static int ··· 633 queue->port = assoc->tgtport->port; 634 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); 635 INIT_LIST_HEAD(&queue->fod_list); 636 atomic_set(&queue->connected, 0); 637 atomic_set(&queue->sqtail, 0); 638 atomic_set(&queue->rsn, 1); ··· 704 { 705 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 706 struct nvmet_fc_fcp_iod *fod = queue->fod; 707 unsigned long flags; 708 int i, writedataactive; 709 bool disconnect; ··· 732 &tgtport->fc_target_port, fod->fcpreq); 733 } 734 } 735 } 736 spin_unlock_irqrestore(&queue->qlock, flags); 737 ··· 2268 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2269 * layer for processing. 2270 * 2271 - * The nvmet-fc layer will copy cmd payload to an internal structure for 2272 - * processing. As such, upon completion of the routine, the LLDD may 2273 - * immediately free/reuse the CMD IU buffer passed in the call. 2274 * 2275 - * If this routine returns error, the lldd should abort the exchange. 2276 * 2277 * @target_port: pointer to the (registered) target port the FCP CMD IU 2278 * was received on. ··· 2317 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2318 struct nvmet_fc_tgt_queue *queue; 2319 struct nvmet_fc_fcp_iod *fod; 2320 2321 /* validate iu, so the connection id can be used to find the queue */ 2322 if ((cmdiubuf_len != sizeof(*cmdiu)) || ··· 2339 * when the fod is freed. 
2340 */ 2341 2342 fod = nvmet_fc_alloc_fcp_iod(queue); 2343 - if (!fod) { 2344 /* release the queue lookup reference */ 2345 nvmet_fc_tgt_q_put(queue); 2346 return -ENOENT; 2347 } 2348 2349 - fcpreq->nvmet_fc_private = fod; 2350 - fod->fcpreq = fcpreq; 2351 - /* 2352 - * put all admin cmds on hw queue id 0. All io commands go to 2353 - * the respective hw queue based on a modulo basis 2354 - */ 2355 - fcpreq->hwqid = queue->qid ? 2356 - ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 2357 - memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2358 2359 - if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) 2360 - queue_work_on(queue->cpu, queue->work_q, &fod->work); 2361 - else 2362 - nvmet_fc_handle_fcp_rqst(tgtport, fod); 2363 2364 - return 0; 2365 } 2366 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2367
··· 114 struct kref ref; 115 }; 116 117 + struct nvmet_fc_defer_fcp_req { 118 + struct list_head req_list; 119 + struct nvmefc_tgt_fcp_req *fcp_req; 120 + }; 121 + 122 struct nvmet_fc_tgt_queue { 123 bool ninetypercent; 124 u16 qid; ··· 132 struct nvmet_fc_tgt_assoc *assoc; 133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ 134 struct list_head fod_list; 135 + struct list_head pending_cmd_list; 136 + struct list_head avail_defer_list; 137 struct workqueue_struct *work_q; 138 struct kref ref; 139 } __aligned(sizeof(unsigned long long)); ··· 223 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 224 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 225 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 226 + static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, 227 + struct nvmet_fc_fcp_iod *fod); 228 229 230 /* *********************** FC-NVME DMA Handling **************************** */ ··· 463 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 464 { 465 static struct nvmet_fc_fcp_iod *fod; 466 467 + lockdep_assert_held(&queue->qlock); 468 + 469 fod = list_first_entry_or_null(&queue->fod_list, 470 struct nvmet_fc_fcp_iod, fcp_list); 471 if (fod) { ··· 477 * will "inherit" that reference. 478 */ 479 } 480 return fod; 481 } 482 483 + 484 + static void 485 + nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, 486 + struct nvmet_fc_tgt_queue *queue, 487 + struct nvmefc_tgt_fcp_req *fcpreq) 488 + { 489 + struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; 490 + 491 + /* 492 + * put all admin cmds on hw queue id 0. All io commands go to 493 + * the respective hw queue based on a modulo basis 494 + */ 495 + fcpreq->hwqid = queue->qid ? 496 + ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; 497 + 498 + if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) 499 + queue_work_on(queue->cpu, queue->work_q, &fod->work); 500 + else 501 + nvmet_fc_handle_fcp_rqst(tgtport, fod); 502 + } 503 504 static void 505 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, ··· 488 { 489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 490 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 491 + struct nvmet_fc_defer_fcp_req *deferfcp; 492 unsigned long flags; 493 494 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, ··· 495 496 fcpreq->nvmet_fc_private = NULL; 497 498 fod->active = false; 499 fod->abort = false; 500 fod->aborted = false; 501 fod->writedataactive = false; 502 fod->fcpreq = NULL; 503 504 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); 505 + 506 + spin_lock_irqsave(&queue->qlock, flags); 507 + deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 508 + struct nvmet_fc_defer_fcp_req, req_list); 509 + if (!deferfcp) { 510 + list_add_tail(&fod->fcp_list, &fod->queue->fod_list); 511 + spin_unlock_irqrestore(&queue->qlock, flags); 512 + 513 + /* Release reference taken at queue lookup and fod allocation */ 514 + nvmet_fc_tgt_q_put(queue); 515 + return; 516 + } 517 + 518 + /* Re-use the fod for the next pending cmd that was deferred */ 519 + list_del(&deferfcp->req_list); 520 + 521 + fcpreq = deferfcp->fcp_req; 522 + 523 + /* deferfcp can be reused for another IO at a later date */ 524 + list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); 525 + 526 + spin_unlock_irqrestore(&queue->qlock, flags); 527 + 528 + /* Save NVME CMD IO in fod */ 529 + memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); 530 + 531 + /* Setup new fcpreq to be processed */ 532 + fcpreq->rspaddr 
= NULL; 533 + fcpreq->rsplen = 0; 534 + fcpreq->nvmet_fc_private = fod; 535 + fod->fcpreq = fcpreq; 536 + fod->active = true; 537 + 538 + /* inform LLDD IO is now being processed */ 539 + tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); 540 + 541 + /* Submit deferred IO for processing */ 542 + nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 543 + 544 + /* 545 + * Leave the queue lookup get reference taken when 546 + * fod was originally allocated. 547 + */ 548 } 549 550 static int ··· 569 queue->port = assoc->tgtport->port; 570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); 571 INIT_LIST_HEAD(&queue->fod_list); 572 + INIT_LIST_HEAD(&queue->avail_defer_list); 573 + INIT_LIST_HEAD(&queue->pending_cmd_list); 574 atomic_set(&queue->connected, 0); 575 atomic_set(&queue->sqtail, 0); 576 atomic_set(&queue->rsn, 1); ··· 638 { 639 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 640 struct nvmet_fc_fcp_iod *fod = queue->fod; 641 + struct nvmet_fc_defer_fcp_req *deferfcp; 642 unsigned long flags; 643 int i, writedataactive; 644 bool disconnect; ··· 665 &tgtport->fc_target_port, fod->fcpreq); 666 } 667 } 668 + } 669 + 670 + /* Cleanup defer'ed IOs in queue */ 671 + list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) { 672 + list_del(&deferfcp->req_list); 673 + kfree(deferfcp); 674 + } 675 + 676 + for (;;) { 677 + deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, 678 + struct nvmet_fc_defer_fcp_req, req_list); 679 + if (!deferfcp) 680 + break; 681 + 682 + list_del(&deferfcp->req_list); 683 + spin_unlock_irqrestore(&queue->qlock, flags); 684 + 685 + tgtport->ops->defer_rcv(&tgtport->fc_target_port, 686 + deferfcp->fcp_req); 687 + 688 + tgtport->ops->fcp_abort(&tgtport->fc_target_port, 689 + deferfcp->fcp_req); 690 + 691 + tgtport->ops->fcp_req_release(&tgtport->fc_target_port, 692 + deferfcp->fcp_req); 693 + 694 + kfree(deferfcp); 695 + 696 + spin_lock_irqsave(&queue->qlock, flags); 697 } 698 spin_unlock_irqrestore(&queue->qlock, flags); 699 ··· 2172 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2173 * layer for processing. 2174 * 2175 + * The nvmet_fc layer allocates a local job structure (struct 2176 + * nvmet_fc_fcp_iod) from the queue for the io and copies the 2177 + * CMD IU buffer to the job structure. As such, on a successful 2178 + * completion (returns 0), the LLDD may immediately free/reuse 2179 + * the CMD IU buffer passed in the call. 2180 * 2181 + * However, in some circumstances, due to the packetized nature of FC 2182 + * and the api of the FC LLDD which may issue a hw command to send the 2183 + * response, but the LLDD may not get the hw completion for that command 2184 + * and upcall the nvmet_fc layer before a new command may be 2185 + * asynchronously received - its possible for a command to be received 2186 + * before the LLDD and nvmet_fc have recycled the job structure. It gives 2187 + * the appearance of more commands received than fits in the sq. 2188 + * To alleviate this scenario, a temporary queue is maintained in the 2189 + * transport for pending LLDD requests waiting for a queue job structure. 2190 + * In these "overrun" cases, a temporary queue element is allocated 2191 + * the LLDD request and CMD iu buffer information remembered, and the 2192 + * routine returns a -EOVERFLOW status. Subsequently, when a queue job 2193 + * structure is freed, it is immediately reallocated for anything on the 2194 + * pending request list. 
The LLDDs defer_rcv() callback is called, 2195 + * informing the LLDD that it may reuse the CMD IU buffer, and the io 2196 + * is then started normally with the transport. 2197 + * 2198 + * The LLDD, when receiving an -EOVERFLOW completion status, is to treat 2199 + * the completion as successful but must not reuse the CMD IU buffer 2200 + * until the LLDD's defer_rcv() callback has been called for the 2201 + * corresponding struct nvmefc_tgt_fcp_req pointer. 2202 + * 2203 + * If there is any other condition in which an error occurs, the 2204 + * transport will return a non-zero status indicating the error. 2205 + * In all cases other than -EOVERFLOW, the transport has not accepted the 2206 + * request and the LLDD should abort the exchange. 2207 * 2208 * @target_port: pointer to the (registered) target port the FCP CMD IU 2209 * was received on. ··· 2194 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2195 struct nvmet_fc_tgt_queue *queue; 2196 struct nvmet_fc_fcp_iod *fod; 2197 + struct nvmet_fc_defer_fcp_req *deferfcp; 2198 + unsigned long flags; 2199 2200 /* validate iu, so the connection id can be used to find the queue */ 2201 if ((cmdiubuf_len != sizeof(*cmdiu)) || ··· 2214 * when the fod is freed. 2215 */ 2216 2217 + spin_lock_irqsave(&queue->qlock, flags); 2218 + 2219 fod = nvmet_fc_alloc_fcp_iod(queue); 2220 + if (fod) { 2221 + spin_unlock_irqrestore(&queue->qlock, flags); 2222 + 2223 + fcpreq->nvmet_fc_private = fod; 2224 + fod->fcpreq = fcpreq; 2225 + 2226 + memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); 2227 + 2228 + nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); 2229 + 2230 + return 0; 2231 + } 2232 + 2233 + if (!tgtport->ops->defer_rcv) { 2234 + spin_unlock_irqrestore(&queue->qlock, flags); 2235 /* release the queue lookup reference */ 2236 nvmet_fc_tgt_q_put(queue); 2237 return -ENOENT; 2238 } 2239 2240 + deferfcp = list_first_entry_or_null(&queue->avail_defer_list, 2241 + struct nvmet_fc_defer_fcp_req, req_list); 2242 + if (deferfcp) { 2243 + /* Just re-use one that was previously allocated */ 2244 + list_del(&deferfcp->req_list); 2245 + } else { 2246 + spin_unlock_irqrestore(&queue->qlock, flags); 2247 2248 + /* Now we need to dynamically allocate one */ 2249 + deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); 2250 + if (!deferfcp) { 2251 + /* release the queue lookup reference */ 2252 + nvmet_fc_tgt_q_put(queue); 2253 + return -ENOMEM; 2254 + } 2255 + spin_lock_irqsave(&queue->qlock, flags); 2256 + } 2257 2258 + /* For now, use rspaddr / rsplen to save payload information */ 2259 + fcpreq->rspaddr = cmdiubuf; 2260 + fcpreq->rsplen = cmdiubuf_len; 2261 + deferfcp->fcp_req = fcpreq; 2262 + 2263 + /* defer processing till a fod becomes available */ 2264 + list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); 2265 + 2266 + /* NOTE: the queue lookup reference is still valid */ 2267 + 2268 + spin_unlock_irqrestore(&queue->qlock, flags); 2269 + 2270 + return -EOVERFLOW; 2271 } 2272 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2273
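The long kernel-doc comment above defines the new contract between the transport and the LLDD: a return of -EOVERFLOW means the command was queued, but the CMD IU buffer remains owned by the transport until the LLDD's defer_rcv() callback fires. As a rough, hypothetical sketch only (nothing below is part of this merge; struct my_lldd_cmd and the my_lldd_* helpers are invented), an LLDD written against this API might handle the return codes along these lines:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/nvme-fc-driver.h>

struct my_lldd_cmd {				/* hypothetical per-IO context */
	struct nvmet_fc_target_port	*tgtport;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
	void				*cmdiu;
	u32				cmdiu_len;
};

/* hypothetical helpers: hand the receive buffer back to hw / kill the exchange */
static void my_lldd_repost_cmd_buffer(struct my_lldd_cmd *cmd) { }
static void my_lldd_abort_exchange(struct my_lldd_cmd *cmd) { }

static void my_lldd_rcv_cmd(struct my_lldd_cmd *cmd)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(cmd->tgtport, &cmd->tgt_fcp_req,
				   cmd->cmdiu, cmd->cmdiu_len);
	switch (ret) {
	case 0:
		/* transport copied the CMD IU; the buffer is free again */
		my_lldd_repost_cmd_buffer(cmd);
		break;
	case -EOVERFLOW:
		/*
		 * Queued: the CMD IU buffer stays with the transport until
		 * .defer_rcv() is invoked for this nvmefc_tgt_fcp_req.
		 */
		break;
	default:
		/* not accepted: abort the exchange, then reclaim the buffer */
		my_lldd_abort_exchange(cmd);
		my_lldd_repost_cmd_buffer(cmd);
		break;
	}
}

/* wired into the driver's nvmet_fc_target_template as .defer_rcv */
static void my_lldd_defer_rcv(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct my_lldd_cmd *cmd = container_of(fcpreq, struct my_lldd_cmd,
					       tgt_fcp_req);

	/* the transport has consumed the CMD IU; repost the receive buffer */
	my_lldd_repost_cmd_buffer(cmd);
}

The lpfc hunks further down in this merge follow the same shape: on -EOVERFLOW the driver keeps the receive buffer and only reposts it from its defer_rcv() handler.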
+35
drivers/pci/pci.c
··· 4260 EXPORT_SYMBOL_GPL(pci_reset_function); 4261 4262 /** 4263 * pci_try_reset_function - quiesce and reset a PCI device function 4264 * @dev: PCI device to reset 4265 *
··· 4260 EXPORT_SYMBOL_GPL(pci_reset_function); 4261 4262 /** 4263 + * pci_reset_function_locked - quiesce and reset a PCI device function 4264 + * @dev: PCI device to reset 4265 + * 4266 + * Some devices allow an individual function to be reset without affecting 4267 + * other functions in the same device. The PCI device must be responsive 4268 + * to PCI config space in order to use this function. 4269 + * 4270 + * This function does not just reset the PCI portion of a device, but 4271 + * clears all the state associated with the device. This function differs 4272 + * from __pci_reset_function() in that it saves and restores device state 4273 + * over the reset. It also differs from pci_reset_function() in that it 4274 + * requires the PCI device lock to be held. 4275 + * 4276 + * Returns 0 if the device function was successfully reset or negative if the 4277 + * device doesn't support resetting a single function. 4278 + */ 4279 + int pci_reset_function_locked(struct pci_dev *dev) 4280 + { 4281 + int rc; 4282 + 4283 + rc = pci_probe_reset_function(dev); 4284 + if (rc) 4285 + return rc; 4286 + 4287 + pci_dev_save_and_disable(dev); 4288 + 4289 + rc = __pci_reset_function_locked(dev); 4290 + 4291 + pci_dev_restore(dev); 4292 + 4293 + return rc; 4294 + } 4295 + EXPORT_SYMBOL_GPL(pci_reset_function_locked); 4296 + 4297 + /** 4298 * pci_try_reset_function - quiesce and reset a PCI device function 4299 * @dev: PCI device to reset 4300 *
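Not part of the merge, but for orientation: the kernel-doc above requires the PCI device lock to already be held. One caller context where that is naturally satisfied is a driver's probe() routine, since the driver core holds the device lock across probe, so a hypothetical user could look roughly like this:

#include <linux/pci.h>

static int my_driver_probe(struct pci_dev *pdev,
			   const struct pci_device_id *id)
{
	int ret;

	/*
	 * The device lock is held by the driver core for the duration of
	 * probe(), so the _locked variant may be called directly here.
	 */
	ret = pci_reset_function_locked(pdev);
	if (ret)
		dev_warn(&pdev->dev, "function reset failed: %d\n", ret);

	/* ... rest of probe ... */
	return 0;
}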
+7
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1548 }, 1549 }, 1550 { 1551 .ident = "Acer Chromebook R11 (Cyan)", 1552 .matches = { 1553 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
··· 1548 }, 1549 }, 1550 { 1551 + .ident = "HP Chromebook 11 G5 (Setzer)", 1552 + .matches = { 1553 + DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1554 + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), 1555 + }, 1556 + }, 1557 + { 1558 .ident = "Acer Chromebook R11 (Cyan)", 1559 .matches = { 1560 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+3 -3
drivers/pinctrl/intel/pinctrl-merrifield.c
··· 343 344 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; 345 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; 346 - static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; 347 - static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; 348 - static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; 349 static const unsigned int mrfld_pwm0_pins[] = { 144 }; 350 static const unsigned int mrfld_pwm1_pins[] = { 145 }; 351 static const unsigned int mrfld_pwm2_pins[] = { 132 };
··· 343 344 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; 345 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; 346 + static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 }; 347 + static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 }; 348 + static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 }; 349 static const unsigned int mrfld_pwm0_pins[] = { 144 }; 350 static const unsigned int mrfld_pwm1_pins[] = { 145 }; 351 static const unsigned int mrfld_pwm2_pins[] = { 132 };
+18 -7
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
··· 37 #define IRQ_STATUS 0x10 38 #define IRQ_WKUP 0x18 39 40 - #define NB_FUNCS 2 41 #define GPIO_PER_REG 32 42 43 /** ··· 126 .funcs = {_func1, "gpio"} \ 127 } 128 129 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ 130 _f1, _f2) \ 131 { \ ··· 181 PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), 182 PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), 183 PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), 184 - PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"), 185 PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), 186 PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), 187 PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), 188 PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), 189 - PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"), 190 }; 191 192 const struct armada_37xx_pin_data armada_37xx_pin_nb = { ··· 198 }; 199 200 const struct armada_37xx_pin_data armada_37xx_pin_sb = { 201 - .nr_pins = 29, 202 .name = "GPIO2", 203 .groups = armada_37xx_sb_groups, 204 .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), ··· 219 { 220 int f; 221 222 - for (f = 0; f < NB_FUNCS; f++) 223 if (!strcmp(grp->funcs[f], func)) 224 return f; 225 ··· 806 for (j = 0; j < grp->extra_npins; j++) 807 grp->pins[i+j] = grp->extra_pin + j; 808 809 - for (f = 0; f < NB_FUNCS; f++) { 810 int ret; 811 /* check for unique functions and count groups */ 812 ret = armada_37xx_add_function(info->funcs, &funcsize, ··· 858 struct armada_37xx_pin_group *gp = &info->groups[g]; 859 int f; 860 861 - for (f = 0; f < NB_FUNCS; f++) { 862 if (strcmp(gp->funcs[f], name) == 0) { 863 *groups = gp->name; 864 groups++;
··· 37 #define IRQ_STATUS 0x10 38 #define IRQ_WKUP 0x18 39 40 + #define NB_FUNCS 3 41 #define GPIO_PER_REG 32 42 43 /** ··· 126 .funcs = {_func1, "gpio"} \ 127 } 128 129 + #define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \ 130 + { \ 131 + .name = _name, \ 132 + .start_pin = _start, \ 133 + .npins = _nr, \ 134 + .reg_mask = _mask, \ 135 + .val = {_v1, _v2, _v3}, \ 136 + .funcs = {_f1, _f2, "gpio"} \ 137 + } 138 + 139 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ 140 _f1, _f2) \ 141 { \ ··· 171 PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), 172 PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), 173 PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), 174 + PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"), 175 PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), 176 PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), 177 PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), 178 PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), 179 + PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14), 180 + "mii", "mii_err"), 181 }; 182 183 const struct armada_37xx_pin_data armada_37xx_pin_nb = { ··· 187 }; 188 189 const struct armada_37xx_pin_data armada_37xx_pin_sb = { 190 + .nr_pins = 30, 191 .name = "GPIO2", 192 .groups = armada_37xx_sb_groups, 193 .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), ··· 208 { 209 int f; 210 211 + for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) 212 if (!strcmp(grp->funcs[f], func)) 213 return f; 214 ··· 795 for (j = 0; j < grp->extra_npins; j++) 796 grp->pins[i+j] = grp->extra_pin + j; 797 798 + for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) { 799 int ret; 800 /* check for unique functions and count groups */ 801 ret = armada_37xx_add_function(info->funcs, &funcsize, ··· 847 struct armada_37xx_pin_group *gp = &info->groups[g]; 848 int f; 849 850 + for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) { 851 if (strcmp(gp->funcs[f], name) == 0) { 852 *groups = gp->name; 853 groups++;
+1
drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
··· 918 SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ 919 PINCTRL_SUN7I_A20), 920 SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ 921 SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ 922 SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ 923 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
··· 918 SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ 919 PINCTRL_SUN7I_A20), 920 SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ 921 + SUNXI_FUNCTION(0x5, "sim"), /* DET */ 922 SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ 923 SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ 924 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+1 -1
drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
··· 1084 static const int usb1_muxvals[] = {0, 0}; 1085 static const unsigned usb2_pins[] = {184, 185}; 1086 static const int usb2_muxvals[] = {0, 0}; 1087 - static const unsigned usb3_pins[] = {186, 187}; 1088 static const int usb3_muxvals[] = {0, 0}; 1089 static const unsigned port_range0_pins[] = { 1090 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
··· 1084 static const int usb1_muxvals[] = {0, 0}; 1085 static const unsigned usb2_pins[] = {184, 185}; 1086 static const int usb2_muxvals[] = {0, 0}; 1087 + static const unsigned usb3_pins[] = {187, 188}; 1088 static const int usb3_muxvals[] = {0, 0}; 1089 static const unsigned port_range0_pins[] = { 1090 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
+7 -4
drivers/pinctrl/zte/pinctrl-zx.c
··· 64 struct zx_pinctrl_soc_info *info = zpctl->info; 65 const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; 66 struct zx_pin_data *data = pindesc->drv_data; 67 - struct zx_mux_desc *mux = data->muxes; 68 - u32 mask = (1 << data->width) - 1; 69 - u32 offset = data->offset; 70 - u32 bitpos = data->bitpos; 71 struct function_desc *func; 72 unsigned long flags; 73 u32 val, mval; ··· 73 /* Skip reserved pin */ 74 if (!data) 75 return -EINVAL; 76 77 func = pinmux_generic_get_function(pctldev, func_selector); 78 if (!func)
··· 64 struct zx_pinctrl_soc_info *info = zpctl->info; 65 const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; 66 struct zx_pin_data *data = pindesc->drv_data; 67 + struct zx_mux_desc *mux; 68 + u32 mask, offset, bitpos; 69 struct function_desc *func; 70 unsigned long flags; 71 u32 val, mval; ··· 75 /* Skip reserved pin */ 76 if (!data) 77 return -EINVAL; 78 + 79 + mux = data->muxes; 80 + mask = (1 << data->width) - 1; 81 + offset = data->offset; 82 + bitpos = data->bitpos; 83 84 func = pinmux_generic_get_function(pctldev, func_selector); 85 if (!func)
+42
drivers/ptp/ptp_clock.c
··· 28 #include <linux/slab.h> 29 #include <linux/syscalls.h> 30 #include <linux/uaccess.h> 31 32 #include "ptp_private.h" 33 ··· 185 kfree(ptp); 186 } 187 188 /* public interface */ 189 190 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ··· 230 mutex_init(&ptp->tsevq_mux); 231 mutex_init(&ptp->pincfg_mux); 232 init_waitqueue_head(&ptp->tsev_wq); 233 234 err = ptp_populate_pin_groups(ptp); 235 if (err) ··· 287 no_device: 288 ptp_cleanup_pin_groups(ptp); 289 no_pin_groups: 290 mutex_destroy(&ptp->tsevq_mux); 291 mutex_destroy(&ptp->pincfg_mux); 292 ida_simple_remove(&ptp_clocks_map, index); ··· 304 { 305 ptp->defunct = 1; 306 wake_up_interruptible(&ptp->tsev_wq); 307 308 /* Release the clock's resources. */ 309 if (ptp->pps_source) ··· 374 return pin ? i : -1; 375 } 376 EXPORT_SYMBOL(ptp_find_pin); 377 378 /* module operations */ 379
··· 28 #include <linux/slab.h> 29 #include <linux/syscalls.h> 30 #include <linux/uaccess.h> 31 + #include <uapi/linux/sched/types.h> 32 33 #include "ptp_private.h" 34 ··· 184 kfree(ptp); 185 } 186 187 + static void ptp_aux_kworker(struct kthread_work *work) 188 + { 189 + struct ptp_clock *ptp = container_of(work, struct ptp_clock, 190 + aux_work.work); 191 + struct ptp_clock_info *info = ptp->info; 192 + long delay; 193 + 194 + delay = info->do_aux_work(info); 195 + 196 + if (delay >= 0) 197 + kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay); 198 + } 199 + 200 /* public interface */ 201 202 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ··· 216 mutex_init(&ptp->tsevq_mux); 217 mutex_init(&ptp->pincfg_mux); 218 init_waitqueue_head(&ptp->tsev_wq); 219 + 220 + if (ptp->info->do_aux_work) { 221 + char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index); 222 + 223 + kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); 224 + ptp->kworker = kthread_create_worker(0, worker_name ? 225 + worker_name : info->name); 226 + kfree(worker_name); 227 + if (IS_ERR(ptp->kworker)) { 228 + err = PTR_ERR(ptp->kworker); 229 + pr_err("failed to create ptp aux_worker %d\n", err); 230 + goto kworker_err; 231 + } 232 + } 233 234 err = ptp_populate_pin_groups(ptp); 235 if (err) ··· 259 no_device: 260 ptp_cleanup_pin_groups(ptp); 261 no_pin_groups: 262 + if (ptp->kworker) 263 + kthread_destroy_worker(ptp->kworker); 264 + kworker_err: 265 mutex_destroy(&ptp->tsevq_mux); 266 mutex_destroy(&ptp->pincfg_mux); 267 ida_simple_remove(&ptp_clocks_map, index); ··· 273 { 274 ptp->defunct = 1; 275 wake_up_interruptible(&ptp->tsev_wq); 276 + 277 + if (ptp->kworker) { 278 + kthread_cancel_delayed_work_sync(&ptp->aux_work); 279 + kthread_destroy_worker(ptp->kworker); 280 + } 281 282 /* Release the clock's resources. */ 283 if (ptp->pps_source) ··· 338 return pin ? i : -1; 339 } 340 EXPORT_SYMBOL(ptp_find_pin); 341 + 342 + int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) 343 + { 344 + return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay); 345 + } 346 + EXPORT_SYMBOL(ptp_schedule_worker); 347 348 /* module operations */ 349
+3
drivers/ptp/ptp_private.h
··· 22 23 #include <linux/cdev.h> 24 #include <linux/device.h> 25 #include <linux/mutex.h> 26 #include <linux/posix-clock.h> 27 #include <linux/ptp_clock.h> ··· 57 struct attribute_group pin_attr_group; 58 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ 59 const struct attribute_group *pin_attr_groups[2]; 60 }; 61 62 /*
··· 22 23 #include <linux/cdev.h> 24 #include <linux/device.h> 25 + #include <linux/kthread.h> 26 #include <linux/mutex.h> 27 #include <linux/posix-clock.h> 28 #include <linux/ptp_clock.h> ··· 56 struct attribute_group pin_attr_group; 57 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ 58 const struct attribute_group *pin_attr_groups[2]; 59 + struct kthread_worker *kworker; 60 + struct kthread_delayed_work aux_work; 61 }; 62 63 /*
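Taken together, the ptp_clock.c and ptp_private.h hunks above give every PTP clock an optional kthread worker: a driver opts in by filling in a do_aux_work callback in its ptp_clock_info, the callback's return value is the delay in jiffies before it runs again (a negative value stops rescheduling), and ptp_schedule_worker() lets the driver kick the worker explicitly. A minimal, hypothetical driver-side sketch (all my_phc_* names are invented):

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

/* runs in the clock's dedicated kthread worker, not in IRQ context */
static long my_phc_do_aux_work(struct ptp_clock_info *info)
{
	/* e.g. drain timestamp FIFOs or age the hardware clock here */
	return HZ;			/* run again in one second */
}

static struct ptp_clock_info my_phc_info = {
	.owner		= THIS_MODULE,
	.name		= "my_phc",
	.max_adj	= 100000,
	/* ... the usual gettime64/settime64/adjfine/adjtime ops ... */
	.do_aux_work	= my_phc_do_aux_work,
};

/* e.g. from an interrupt handler: run the aux worker immediately */
static void my_phc_kick(struct ptp_clock *clock)
{
	ptp_schedule_worker(clock, 0);
}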
+2 -2
drivers/s390/net/qeth_l3_main.c
··· 2512 struct rtable *rt = (struct rtable *) dst; 2513 __be32 *pkey = &ip_hdr(skb)->daddr; 2514 2515 - if (rt->rt_gateway) 2516 pkey = &rt->rt_gateway; 2517 2518 /* IPv4 */ ··· 2523 struct rt6_info *rt = (struct rt6_info *) dst; 2524 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; 2525 2526 - if (!ipv6_addr_any(&rt->rt6i_gateway)) 2527 pkey = &rt->rt6i_gateway; 2528 2529 /* IPv6 */
··· 2512 struct rtable *rt = (struct rtable *) dst; 2513 __be32 *pkey = &ip_hdr(skb)->daddr; 2514 2515 + if (rt && rt->rt_gateway) 2516 pkey = &rt->rt_gateway; 2517 2518 /* IPv4 */ ··· 2523 struct rt6_info *rt = (struct rt6_info *) dst; 2524 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; 2525 2526 + if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) 2527 pkey = &rt->rt6i_gateway; 2528 2529 /* IPv6 */
+4 -3
drivers/scsi/aacraid/aachba.c
··· 3198 return -EBUSY; 3199 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3200 return -EFAULT; 3201 - if (qd.cnum == -1) 3202 qd.cnum = qd.id; 3203 - else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 3204 - { 3205 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3206 return -EINVAL; 3207 qd.instance = dev->scsi_host_ptr->host_no;
··· 3198 return -EBUSY; 3199 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3200 return -EFAULT; 3201 + if (qd.cnum == -1) { 3202 + if (qd.id < 0 || qd.id >= dev->maximum_num_containers) 3203 + return -EINVAL; 3204 qd.cnum = qd.id; 3205 + } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { 3206 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3207 return -EINVAL; 3208 qd.instance = dev->scsi_host_ptr->host_no;
+15 -53
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 2624 }; 2625 2626 /** 2627 - * bnx2fc_percpu_thread_create - Create a receive thread for an 2628 - * online CPU 2629 * 2630 * @cpu: cpu index for the online cpu 2631 */ 2632 - static void bnx2fc_percpu_thread_create(unsigned int cpu) 2633 { 2634 struct bnx2fc_percpu_s *p; 2635 struct task_struct *thread; ··· 2638 thread = kthread_create_on_node(bnx2fc_percpu_io_thread, 2639 (void *)p, cpu_to_node(cpu), 2640 "bnx2fc_thread/%d", cpu); 2641 /* bind thread to the cpu */ 2642 - if (likely(!IS_ERR(thread))) { 2643 - kthread_bind(thread, cpu); 2644 - p->iothread = thread; 2645 - wake_up_process(thread); 2646 - } 2647 } 2648 2649 - static void bnx2fc_percpu_thread_destroy(unsigned int cpu) 2650 { 2651 struct bnx2fc_percpu_s *p; 2652 struct task_struct *thread; ··· 2662 thread = p->iothread; 2663 p->iothread = NULL; 2664 2665 - 2666 /* Free all work in the list */ 2667 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2668 list_del_init(&work->list); ··· 2673 2674 if (thread) 2675 kthread_stop(thread); 2676 - } 2677 - 2678 - 2679 - static int bnx2fc_cpu_online(unsigned int cpu) 2680 - { 2681 - printk(PFX "CPU %x online: Create Rx thread\n", cpu); 2682 - bnx2fc_percpu_thread_create(cpu); 2683 - return 0; 2684 - } 2685 - 2686 - static int bnx2fc_cpu_dead(unsigned int cpu) 2687 - { 2688 - printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); 2689 - bnx2fc_percpu_thread_destroy(cpu); 2690 return 0; 2691 } 2692 ··· 2747 spin_lock_init(&p->fp_work_lock); 2748 } 2749 2750 - get_online_cpus(); 2751 - 2752 - for_each_online_cpu(cpu) 2753 - bnx2fc_percpu_thread_create(cpu); 2754 - 2755 - rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2756 - "scsi/bnx2fc:online", 2757 - bnx2fc_cpu_online, NULL); 2758 if (rc < 0) 2759 - goto stop_threads; 2760 bnx2fc_online_state = rc; 2761 2762 - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", 2763 - NULL, bnx2fc_cpu_dead); 2764 - put_online_cpus(); 2765 - 2766 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2767 - 2768 return 0; 2769 2770 - stop_threads: 2771 - for_each_online_cpu(cpu) 2772 - bnx2fc_percpu_thread_destroy(cpu); 2773 - put_online_cpus(); 2774 kthread_stop(l2_thread); 2775 free_wq: 2776 destroy_workqueue(bnx2fc_wq); ··· 2775 struct fcoe_percpu_s *bg; 2776 struct task_struct *l2_thread; 2777 struct sk_buff *skb; 2778 - unsigned int cpu = 0; 2779 2780 /* 2781 * NOTE: Since cnic calls register_driver routine rtnl_lock, ··· 2815 if (l2_thread) 2816 kthread_stop(l2_thread); 2817 2818 - get_online_cpus(); 2819 - /* Destroy per cpu threads */ 2820 - for_each_online_cpu(cpu) { 2821 - bnx2fc_percpu_thread_destroy(cpu); 2822 - } 2823 - 2824 - cpuhp_remove_state_nocalls(bnx2fc_online_state); 2825 - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); 2826 - 2827 - put_online_cpus(); 2828 2829 destroy_workqueue(bnx2fc_wq); 2830 /*
··· 2624 }; 2625 2626 /** 2627 + * bnx2fc_cpu_online - Create a receive thread for an online CPU 2628 * 2629 * @cpu: cpu index for the online cpu 2630 */ 2631 + static int bnx2fc_cpu_online(unsigned int cpu) 2632 { 2633 struct bnx2fc_percpu_s *p; 2634 struct task_struct *thread; ··· 2639 thread = kthread_create_on_node(bnx2fc_percpu_io_thread, 2640 (void *)p, cpu_to_node(cpu), 2641 "bnx2fc_thread/%d", cpu); 2642 + if (IS_ERR(thread)) 2643 + return PTR_ERR(thread); 2644 + 2645 /* bind thread to the cpu */ 2646 + kthread_bind(thread, cpu); 2647 + p->iothread = thread; 2648 + wake_up_process(thread); 2649 + return 0; 2650 } 2651 2652 + static int bnx2fc_cpu_offline(unsigned int cpu) 2653 { 2654 struct bnx2fc_percpu_s *p; 2655 struct task_struct *thread; ··· 2661 thread = p->iothread; 2662 p->iothread = NULL; 2663 2664 /* Free all work in the list */ 2665 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2666 list_del_init(&work->list); ··· 2673 2674 if (thread) 2675 kthread_stop(thread); 2676 return 0; 2677 } 2678 ··· 2761 spin_lock_init(&p->fp_work_lock); 2762 } 2763 2764 + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", 2765 + bnx2fc_cpu_online, bnx2fc_cpu_offline); 2766 if (rc < 0) 2767 + goto stop_thread; 2768 bnx2fc_online_state = rc; 2769 2770 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2771 return 0; 2772 2773 + stop_thread: 2774 kthread_stop(l2_thread); 2775 free_wq: 2776 destroy_workqueue(bnx2fc_wq); ··· 2803 struct fcoe_percpu_s *bg; 2804 struct task_struct *l2_thread; 2805 struct sk_buff *skb; 2806 2807 /* 2808 * NOTE: Since cnic calls register_driver routine rtnl_lock, ··· 2844 if (l2_thread) 2845 kthread_stop(l2_thread); 2846 2847 + cpuhp_remove_state(bnx2fc_online_state); 2848 2849 destroy_workqueue(bnx2fc_wq); 2850 /*
+23 -22
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1008 return work; 1009 } 1010 1011 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) 1012 { 1013 struct fcoe_cqe *cq; ··· 1064 /* Unsolicited event notification */ 1065 bnx2fc_process_unsol_compl(tgt, wqe); 1066 } else { 1067 - /* Pending work request completion */ 1068 - struct bnx2fc_work *work = NULL; 1069 - struct bnx2fc_percpu_s *fps = NULL; 1070 - unsigned int cpu = wqe % num_possible_cpus(); 1071 - 1072 - fps = &per_cpu(bnx2fc_percpu, cpu); 1073 - spin_lock_bh(&fps->fp_work_lock); 1074 - if (unlikely(!fps->iothread)) 1075 - goto unlock; 1076 - 1077 - work = bnx2fc_alloc_work(tgt, wqe); 1078 - if (work) 1079 - list_add_tail(&work->list, 1080 - &fps->work_list); 1081 - unlock: 1082 - spin_unlock_bh(&fps->fp_work_lock); 1083 - 1084 - /* Pending work request completion */ 1085 - if (fps->iothread && work) 1086 - wake_up_process(fps->iothread); 1087 - else 1088 - bnx2fc_process_cq_compl(tgt, wqe); 1089 num_free_sqes++; 1090 } 1091 cqe++;
··· 1008 return work; 1009 } 1010 1011 + /* Pending work request completion */ 1012 + static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) 1013 + { 1014 + unsigned int cpu = wqe % num_possible_cpus(); 1015 + struct bnx2fc_percpu_s *fps; 1016 + struct bnx2fc_work *work; 1017 + 1018 + fps = &per_cpu(bnx2fc_percpu, cpu); 1019 + spin_lock_bh(&fps->fp_work_lock); 1020 + if (fps->iothread) { 1021 + work = bnx2fc_alloc_work(tgt, wqe); 1022 + if (work) { 1023 + list_add_tail(&work->list, &fps->work_list); 1024 + wake_up_process(fps->iothread); 1025 + spin_unlock_bh(&fps->fp_work_lock); 1026 + return; 1027 + } 1028 + } 1029 + spin_unlock_bh(&fps->fp_work_lock); 1030 + bnx2fc_process_cq_compl(tgt, wqe); 1031 + } 1032 + 1033 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) 1034 { 1035 struct fcoe_cqe *cq; ··· 1042 /* Unsolicited event notification */ 1043 bnx2fc_process_unsol_compl(tgt, wqe); 1044 } else { 1045 + bnx2fc_pending_work(tgt, wqe); 1046 num_free_sqes++; 1047 } 1048 cqe++;
+15 -49
drivers/scsi/bnx2i/bnx2i_init.c
··· 404 405 406 /** 407 - * bnx2i_percpu_thread_create - Create a receive thread for an 408 - * online CPU 409 * 410 * @cpu: cpu index for the online cpu 411 */ 412 - static void bnx2i_percpu_thread_create(unsigned int cpu) 413 { 414 struct bnx2i_percpu_s *p; 415 struct task_struct *thread; ··· 418 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, 419 cpu_to_node(cpu), 420 "bnx2i_thread/%d", cpu); 421 /* bind thread to the cpu */ 422 - if (likely(!IS_ERR(thread))) { 423 - kthread_bind(thread, cpu); 424 - p->iothread = thread; 425 - wake_up_process(thread); 426 - } 427 } 428 429 - 430 - static void bnx2i_percpu_thread_destroy(unsigned int cpu) 431 { 432 struct bnx2i_percpu_s *p; 433 struct task_struct *thread; ··· 451 spin_unlock_bh(&p->p_work_lock); 452 if (thread) 453 kthread_stop(thread); 454 - } 455 - 456 - static int bnx2i_cpu_online(unsigned int cpu) 457 - { 458 - pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); 459 - bnx2i_percpu_thread_create(cpu); 460 - return 0; 461 - } 462 - 463 - static int bnx2i_cpu_dead(unsigned int cpu) 464 - { 465 - pr_info("CPU %x offline: Remove Rx thread\n", cpu); 466 - bnx2i_percpu_thread_destroy(cpu); 467 return 0; 468 } 469 ··· 498 p->iothread = NULL; 499 } 500 501 - get_online_cpus(); 502 - 503 - for_each_online_cpu(cpu) 504 - bnx2i_percpu_thread_create(cpu); 505 - 506 - err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 507 - "scsi/bnx2i:online", 508 - bnx2i_cpu_online, NULL); 509 if (err < 0) 510 - goto remove_threads; 511 bnx2i_online_state = err; 512 - 513 - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", 514 - NULL, bnx2i_cpu_dead); 515 - put_online_cpus(); 516 return 0; 517 518 - remove_threads: 519 - for_each_online_cpu(cpu) 520 - bnx2i_percpu_thread_destroy(cpu); 521 - put_online_cpus(); 522 cnic_unregister_driver(CNIC_ULP_ISCSI); 523 unreg_xport: 524 iscsi_unregister_transport(&bnx2i_iscsi_transport); ··· 525 static void __exit bnx2i_mod_exit(void) 526 { 527 struct bnx2i_hba *hba; 528 - unsigned cpu = 0; 529 530 mutex_lock(&bnx2i_dev_lock); 531 while (!list_empty(&adapter_list)) { ··· 542 } 543 mutex_unlock(&bnx2i_dev_lock); 544 545 - get_online_cpus(); 546 - 547 - for_each_online_cpu(cpu) 548 - bnx2i_percpu_thread_destroy(cpu); 549 - 550 - cpuhp_remove_state_nocalls(bnx2i_online_state); 551 - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); 552 - put_online_cpus(); 553 554 iscsi_unregister_transport(&bnx2i_iscsi_transport); 555 cnic_unregister_driver(CNIC_ULP_ISCSI);
··· 404 405 406 /** 407 + * bnx2i_cpu_online - Create a receive thread for an online CPU 408 * 409 * @cpu: cpu index for the online cpu 410 */ 411 + static int bnx2i_cpu_online(unsigned int cpu) 412 { 413 struct bnx2i_percpu_s *p; 414 struct task_struct *thread; ··· 419 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, 420 cpu_to_node(cpu), 421 "bnx2i_thread/%d", cpu); 422 + if (IS_ERR(thread)) 423 + return PTR_ERR(thread); 424 + 425 /* bind thread to the cpu */ 426 + kthread_bind(thread, cpu); 427 + p->iothread = thread; 428 + wake_up_process(thread); 429 + return 0; 430 } 431 432 + static int bnx2i_cpu_offline(unsigned int cpu) 433 { 434 struct bnx2i_percpu_s *p; 435 struct task_struct *thread; ··· 451 spin_unlock_bh(&p->p_work_lock); 452 if (thread) 453 kthread_stop(thread); 454 return 0; 455 } 456 ··· 511 p->iothread = NULL; 512 } 513 514 + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", 515 + bnx2i_cpu_online, bnx2i_cpu_offline); 516 if (err < 0) 517 + goto unreg_driver; 518 bnx2i_online_state = err; 519 return 0; 520 521 + unreg_driver: 522 cnic_unregister_driver(CNIC_ULP_ISCSI); 523 unreg_xport: 524 iscsi_unregister_transport(&bnx2i_iscsi_transport); ··· 551 static void __exit bnx2i_mod_exit(void) 552 { 553 struct bnx2i_hba *hba; 554 555 mutex_lock(&bnx2i_dev_lock); 556 while (!list_empty(&adapter_list)) { ··· 569 } 570 mutex_unlock(&bnx2i_dev_lock); 571 572 + cpuhp_remove_state(bnx2i_online_state); 573 574 iscsi_unregister_transport(&bnx2i_iscsi_transport); 575 cnic_unregister_driver(CNIC_ULP_ISCSI);
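Both bnx2fc and bnx2i above move from open-coded for_each_online_cpu() thread creation plus _nocalls hotplug notifiers to a single cpuhp_setup_state() registration, which also invokes the online callback for CPUs that are already up. Reduced to a hypothetical skeleton (my_* names invented), the pattern is:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static enum cpuhp_state my_hp_state;

static int my_cpu_online(unsigned int cpu)
{
	/* create and bind the per-CPU worker thread for @cpu */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* flush pending work and stop the per-CPU thread for @cpu */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/my_drv:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_hp_state = ret;	/* dynamic state id, kept for teardown */
	return 0;
}

static void __exit my_exit(void)
{
	/* runs my_cpu_offline() on all online CPUs, then removes the state */
	cpuhp_remove_state(my_hp_state);
}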
+3 -1
drivers/scsi/lpfc/lpfc_attr.c
··· 205 atomic_read(&tgtp->xmt_ls_rsp_error)); 206 207 len += snprintf(buf+len, PAGE_SIZE-len, 208 - "FCP: Rcv %08x Release %08x Drop %08x\n", 209 atomic_read(&tgtp->rcv_fcp_cmd_in), 210 atomic_read(&tgtp->xmt_fcp_release), 211 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 212
··· 205 atomic_read(&tgtp->xmt_ls_rsp_error)); 206 207 len += snprintf(buf+len, PAGE_SIZE-len, 208 + "FCP: Rcv %08x Defer %08x Release %08x " 209 + "Drop %08x\n", 210 atomic_read(&tgtp->rcv_fcp_cmd_in), 211 + atomic_read(&tgtp->rcv_fcp_cmd_defer), 212 atomic_read(&tgtp->xmt_fcp_release), 213 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 214
+4 -1
drivers/scsi/lpfc/lpfc_debugfs.c
··· 782 atomic_read(&tgtp->xmt_ls_rsp_error)); 783 784 len += snprintf(buf + len, size - len, 785 - "FCP: Rcv %08x Drop %08x\n", 786 atomic_read(&tgtp->rcv_fcp_cmd_in), 787 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 788 789 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
··· 782 atomic_read(&tgtp->xmt_ls_rsp_error)); 783 784 len += snprintf(buf + len, size - len, 785 + "FCP: Rcv %08x Defer %08x Release %08x " 786 + "Drop %08x\n", 787 atomic_read(&tgtp->rcv_fcp_cmd_in), 788 + atomic_read(&tgtp->rcv_fcp_cmd_defer), 789 + atomic_read(&tgtp->xmt_fcp_release), 790 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 791 792 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
+30
drivers/scsi/lpfc/lpfc_nvmet.c
··· 841 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 842 } 843 844 static struct nvmet_fc_target_template lpfc_tgttemplate = { 845 .targetport_delete = lpfc_nvmet_targetport_delete, 846 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 847 .fcp_op = lpfc_nvmet_xmt_fcp_op, 848 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 849 .fcp_req_release = lpfc_nvmet_xmt_fcp_release, 850 851 .max_hw_queues = 1, 852 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, ··· 1520 if (rc == 0) { 1521 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1522 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1523 return; 1524 } 1525
··· 841 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 842 } 843 844 + static void 845 + lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, 846 + struct nvmefc_tgt_fcp_req *rsp) 847 + { 848 + struct lpfc_nvmet_tgtport *tgtp; 849 + struct lpfc_nvmet_rcv_ctx *ctxp = 850 + container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 851 + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; 852 + struct lpfc_hba *phba = ctxp->phba; 853 + 854 + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", 855 + ctxp->oxid, ctxp->size, smp_processor_id()); 856 + 857 + tgtp = phba->targetport->private; 858 + atomic_inc(&tgtp->rcv_fcp_cmd_defer); 859 + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 860 + } 861 + 862 static struct nvmet_fc_target_template lpfc_tgttemplate = { 863 .targetport_delete = lpfc_nvmet_targetport_delete, 864 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 865 .fcp_op = lpfc_nvmet_xmt_fcp_op, 866 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 867 .fcp_req_release = lpfc_nvmet_xmt_fcp_release, 868 + .defer_rcv = lpfc_nvmet_defer_rcv, 869 870 .max_hw_queues = 1, 871 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, ··· 1501 if (rc == 0) { 1502 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1503 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1504 + return; 1505 + } 1506 + 1507 + /* Processing of FCP command is deferred */ 1508 + if (rc == -EOVERFLOW) { 1509 + lpfc_nvmeio_data(phba, 1510 + "NVMET RCV BUSY: xri x%x sz %d from %06x\n", 1511 + oxid, size, sid); 1512 + /* defer reposting rcv buffer till .defer_rcv callback */ 1513 + ctxp->rqb_buffer = nvmebuf; 1514 + atomic_inc(&tgtp->rcv_fcp_cmd_out); 1515 return; 1516 } 1517
+1
drivers/scsi/lpfc/lpfc_nvmet.h
··· 49 atomic_t rcv_fcp_cmd_in; 50 atomic_t rcv_fcp_cmd_out; 51 atomic_t rcv_fcp_cmd_drop; 52 atomic_t xmt_fcp_release; 53 54 /* Stats counters - lpfc_nvmet_xmt_fcp_op */
··· 49 atomic_t rcv_fcp_cmd_in; 50 atomic_t rcv_fcp_cmd_out; 51 atomic_t rcv_fcp_cmd_drop; 52 + atomic_t rcv_fcp_cmd_defer; 53 atomic_t xmt_fcp_release; 54 55 /* Stats counters - lpfc_nvmet_xmt_fcp_op */
+2 -1
drivers/scsi/qedf/qedf.h
··· 528 #define QEDF_WRITE (1 << 0) 529 #define MAX_FIBRE_LUNS 0xffffffff 530 531 - #define QEDF_MAX_NUM_CQS 8 532 533 /* 534 * PCI function probe defines
··· 528 #define QEDF_WRITE (1 << 0) 529 #define MAX_FIBRE_LUNS 0xffffffff 530 531 + #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ 532 + num_online_cpus()) 533 534 /* 535 * PCI function probe defines
+9 -11
drivers/scsi/qedf/qedf_main.c
··· 2760 * we allocation is the minimum off: 2761 * 2762 * Number of CPUs 2763 - * Number of MSI-X vectors 2764 - * Max number allocated in hardware (QEDF_MAX_NUM_CQS) 2765 */ 2766 - qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, 2767 - num_online_cpus()); 2768 2769 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 2770 qedf->num_queues); ··· 2960 goto err1; 2961 } 2962 2963 /* queue allocation code should come here 2964 * order should be 2965 * slowpath_start ··· 2981 goto err2; 2982 } 2983 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 2984 - 2985 - /* Learn information crucial for qedf to progress */ 2986 - rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 2987 - if (rc) { 2988 - QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); 2989 - goto err1; 2990 - } 2991 2992 /* Record BDQ producer doorbell addresses */ 2993 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
··· 2760 * we allocation is the minimum off: 2761 * 2762 * Number of CPUs 2763 + * Number allocated by qed for our PCI function 2764 */ 2765 + qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); 2766 2767 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 2768 qedf->num_queues); ··· 2962 goto err1; 2963 } 2964 2965 + /* Learn information crucial for qedf to progress */ 2966 + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 2967 + if (rc) { 2968 + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); 2969 + goto err1; 2970 + } 2971 + 2972 /* queue allocation code should come here 2973 * order should be 2974 * slowpath_start ··· 2976 goto err2; 2977 } 2978 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 2979 2980 /* Record BDQ producer doorbell addresses */ 2981 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
-30
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 500 static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 501 { 502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 503 - unsigned long flags; 504 505 /* 506 * Ensure that the complete FCP WRITE payload has been received. 507 * Otherwise return an exception via CHECK_CONDITION status. 508 */ 509 cmd->cmd_in_wq = 0; 510 - 511 - spin_lock_irqsave(&cmd->cmd_lock, flags); 512 - cmd->data_work = 1; 513 - if (cmd->aborted) { 514 - cmd->data_work_free = 1; 515 - spin_unlock_irqrestore(&cmd->cmd_lock, flags); 516 - 517 - tcm_qla2xxx_free_cmd(cmd); 518 - return; 519 - } 520 - spin_unlock_irqrestore(&cmd->cmd_lock, flags); 521 522 cmd->qpair->tgt_counters.qla_core_ret_ctio++; 523 if (!cmd->write_data_transferred) { ··· 753 qlt_xmit_tm_rsp(mcmd); 754 } 755 756 - #define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) 757 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 758 { 759 struct qla_tgt_cmd *cmd = container_of(se_cmd, 760 struct qla_tgt_cmd, se_cmd); 761 - unsigned long flags; 762 763 if (qlt_abort_cmd(cmd)) 764 return; 765 - 766 - spin_lock_irqsave(&cmd->cmd_lock, flags); 767 - if ((cmd->state == QLA_TGT_STATE_NEW)|| 768 - ((cmd->state == QLA_TGT_STATE_DATA_IN) && 769 - DATA_WORK_NOT_FREE(cmd))) { 770 - cmd->data_work_free = 1; 771 - spin_unlock_irqrestore(&cmd->cmd_lock, flags); 772 - /* 773 - * cmd has not reached fw, Use this trigger to free it. 774 - */ 775 - tcm_qla2xxx_free_cmd(cmd); 776 - return; 777 - } 778 - spin_unlock_irqrestore(&cmd->cmd_lock, flags); 779 - return; 780 - 781 } 782 783 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
··· 500 static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 501 { 502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 503 504 /* 505 * Ensure that the complete FCP WRITE payload has been received. 506 * Otherwise return an exception via CHECK_CONDITION status. 507 */ 508 cmd->cmd_in_wq = 0; 509 510 cmd->qpair->tgt_counters.qla_core_ret_ctio++; 511 if (!cmd->write_data_transferred) { ··· 765 qlt_xmit_tm_rsp(mcmd); 766 } 767 768 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 769 { 770 struct qla_tgt_cmd *cmd = container_of(se_cmd, 771 struct qla_tgt_cmd, se_cmd); 772 773 if (qlt_abort_cmd(cmd)) 774 return; 775 } 776 777 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+1 -30
drivers/scsi/sg.c
··· 751 return count; 752 } 753 754 - static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) 755 - { 756 - switch (hp->dxfer_direction) { 757 - case SG_DXFER_NONE: 758 - if (hp->dxferp || hp->dxfer_len > 0) 759 - return false; 760 - return true; 761 - case SG_DXFER_FROM_DEV: 762 - /* 763 - * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp 764 - * can either be NULL or != NULL so there's no point in checking 765 - * it either. So just return true. 766 - */ 767 - return true; 768 - case SG_DXFER_TO_DEV: 769 - case SG_DXFER_TO_FROM_DEV: 770 - if (!hp->dxferp || hp->dxfer_len == 0) 771 - return false; 772 - return true; 773 - case SG_DXFER_UNKNOWN: 774 - if ((!hp->dxferp && hp->dxfer_len) || 775 - (hp->dxferp && hp->dxfer_len == 0)) 776 - return false; 777 - return true; 778 - default: 779 - return false; 780 - } 781 - } 782 - 783 static int 784 sg_common_write(Sg_fd * sfp, Sg_request * srp, 785 unsigned char *cmnd, int timeout, int blocking) ··· 771 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 772 (int) cmnd[0], (int) hp->cmd_len)); 773 774 - if (!sg_is_valid_dxfer(hp)) 775 return -EINVAL; 776 777 k = sg_start_req(srp, cmnd);
··· 751 return count; 752 } 753 754 static int 755 sg_common_write(Sg_fd * sfp, Sg_request * srp, 756 unsigned char *cmnd, int timeout, int blocking) ··· 800 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 801 (int) cmnd[0], (int) hp->cmd_len)); 802 803 + if (hp->dxfer_len >= SZ_256M) 804 return -EINVAL; 805 806 k = sg_start_req(srp, cmnd);
+3
drivers/staging/comedi/comedi_fops.c
··· 2396 continue; 2397 } 2398 2399 wp = async->buf_write_ptr; 2400 n1 = min(n, async->prealloc_bufsz - wp); 2401 n2 = n - n1; ··· 2529 } 2530 continue; 2531 } 2532 rp = async->buf_read_ptr; 2533 n1 = min(n, async->prealloc_bufsz - rp); 2534 n2 = n - n1;
··· 2396 continue; 2397 } 2398 2399 + set_current_state(TASK_RUNNING); 2400 wp = async->buf_write_ptr; 2401 n1 = min(n, async->prealloc_bufsz - wp); 2402 n2 = n - n1; ··· 2528 } 2529 continue; 2530 } 2531 + 2532 + set_current_state(TASK_RUNNING); 2533 rp = async->buf_read_ptr; 2534 n1 = min(n, async->prealloc_bufsz - rp); 2535 n2 = n - n1;
+1 -1
drivers/staging/iio/resolver/ad2s1210.c
··· 472 long m) 473 { 474 struct ad2s1210_state *st = iio_priv(indio_dev); 475 - bool negative; 476 int ret = 0; 477 u16 pos; 478 s16 vel;
··· 472 long m) 473 { 474 struct ad2s1210_state *st = iio_priv(indio_dev); 475 + u16 negative; 476 int ret = 0; 477 u16 pos; 478 s16 vel;
+12 -4
drivers/target/iscsi/cxgbit/cxgbit_cm.c
··· 1510 1511 if (!cnp) { 1512 pr_info("%s stid %d lookup failure\n", __func__, stid); 1513 - return; 1514 } 1515 1516 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1517 cxgbit_put_cnp(cnp); 1518 } 1519 1520 static void ··· 1532 1533 if (!cnp) { 1534 pr_info("%s stid %d lookup failure\n", __func__, stid); 1535 - return; 1536 } 1537 1538 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1539 cxgbit_put_cnp(cnp); 1540 } 1541 1542 static void ··· 1823 struct tid_info *t = lldi->tids; 1824 1825 csk = lookup_tid(t, tid); 1826 - if (unlikely(!csk)) 1827 pr_err("can't find connection for tid %u.\n", tid); 1828 - else 1829 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); 1830 1831 cxgbit_put_csk(csk); 1832 } 1833 1834 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
··· 1510 1511 if (!cnp) { 1512 pr_info("%s stid %d lookup failure\n", __func__, stid); 1513 + goto rel_skb; 1514 } 1515 1516 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1517 cxgbit_put_cnp(cnp); 1518 + rel_skb: 1519 + __kfree_skb(skb); 1520 } 1521 1522 static void ··· 1530 1531 if (!cnp) { 1532 pr_info("%s stid %d lookup failure\n", __func__, stid); 1533 + goto rel_skb; 1534 } 1535 1536 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1537 cxgbit_put_cnp(cnp); 1538 + rel_skb: 1539 + __kfree_skb(skb); 1540 } 1541 1542 static void ··· 1819 struct tid_info *t = lldi->tids; 1820 1821 csk = lookup_tid(t, tid); 1822 + if (unlikely(!csk)) { 1823 pr_err("can't find connection for tid %u.\n", tid); 1824 + goto rel_skb; 1825 + } else { 1826 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); 1827 + } 1828 1829 cxgbit_put_csk(csk); 1830 + rel_skb: 1831 + __kfree_skb(skb); 1832 } 1833 1834 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
+7 -5
drivers/target/iscsi/cxgbit/cxgbit_target.c
··· 827 828 static void 829 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, 830 - unsigned int nents) 831 { 832 struct skb_seq_state st; 833 const u8 *buf; ··· 846 } 847 848 consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, 849 - buf_len, consumed); 850 } 851 } 852 ··· 912 struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; 913 u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); 914 915 - cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); 916 } 917 918 cmd->write_data_done += pdu_cb->dlen; ··· 1069 cmd->se_cmd.data_length); 1070 1071 if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { 1072 sg_off = data_offset / PAGE_SIZE; 1073 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1074 - sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); 1075 1076 - cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); 1077 } 1078 1079 check_payload:
··· 827 828 static void 829 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, 830 + unsigned int nents, u32 skip) 831 { 832 struct skb_seq_state st; 833 const u8 *buf; ··· 846 } 847 848 consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, 849 + buf_len, skip + consumed); 850 } 851 } 852 ··· 912 struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; 913 u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); 914 915 + cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0); 916 } 917 918 cmd->write_data_done += pdu_cb->dlen; ··· 1069 cmd->se_cmd.data_length); 1070 1071 if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { 1072 + u32 skip = data_offset % PAGE_SIZE; 1073 + 1074 sg_off = data_offset / PAGE_SIZE; 1075 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1076 + sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE)); 1077 1078 + cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip); 1079 } 1080 1081 check_payload:
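The fix above threads an intra-page `skip` through the copy: the scatterlist entry count has to cover skip + data_len, and the copy starts skip bytes into the first page. A standalone arithmetic sketch with example numbers shows why the old nents estimate could be one page short; PAGE_SIZE and the values are illustrative.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    /* Round-up division, as in the kernel's DIV_ROUND_UP(). */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long data_offset = 6000;    /* example unaligned offset */
        unsigned long data_len    = 3000;    /* example payload length   */

        unsigned long sg_off = data_offset / PAGE_SIZE;   /* first sg entry   */
        unsigned long skip   = data_offset % PAGE_SIZE;   /* intra-page start */

        /* Without the intra-page skip the estimate is one page short:
         * the copy really spans bytes 1904..4903, i.e. two pages. */
        unsigned long nents_wrong = DIV_ROUND_UP(data_len, PAGE_SIZE);
        unsigned long nents_right = DIV_ROUND_UP(skip + data_len, PAGE_SIZE);

        printf("sg_off=%lu skip=%lu\n", sg_off, skip);
        printf("nents without skip: %lu, with skip: %lu\n",
               nents_wrong, nents_right);
        return 0;
    }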
+4 -2
drivers/target/iscsi/iscsi_target.c
··· 418 return 0; 419 } 420 np->np_thread_state = ISCSI_NP_THREAD_RESET; 421 422 if (np->np_thread) { 423 spin_unlock_bh(&np->np_thread_lock); ··· 2168 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2169 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2170 cmd->data_direction = DMA_NONE; 2171 cmd->text_in_ptr = NULL; 2172 2173 return 0; ··· 3489 return text_length; 3490 3491 if (completed) { 3492 - hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3493 } else { 3494 - hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; 3495 cmd->read_data_done += text_length; 3496 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 3497 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
··· 418 return 0; 419 } 420 np->np_thread_state = ISCSI_NP_THREAD_RESET; 421 + atomic_inc(&np->np_reset_count); 422 423 if (np->np_thread) { 424 spin_unlock_bh(&np->np_thread_lock); ··· 2167 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2168 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2169 cmd->data_direction = DMA_NONE; 2170 + kfree(cmd->text_in_ptr); 2171 cmd->text_in_ptr = NULL; 2172 2173 return 0; ··· 3487 return text_length; 3488 3489 if (completed) { 3490 + hdr->flags = ISCSI_FLAG_CMD_FINAL; 3491 } else { 3492 + hdr->flags = ISCSI_FLAG_TEXT_CONTINUE; 3493 cmd->read_data_done += text_length; 3494 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 3495 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+5 -2
drivers/target/iscsi/iscsi_target_login.c
··· 1243 flush_signals(current); 1244 1245 spin_lock_bh(&np->np_thread_lock); 1246 - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1247 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1248 complete(&np->np_restart_comp); 1249 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { 1250 spin_unlock_bh(&np->np_thread_lock); 1251 goto exit; ··· 1280 goto exit; 1281 } else if (rc < 0) { 1282 spin_lock_bh(&np->np_thread_lock); 1283 - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1284 spin_unlock_bh(&np->np_thread_lock); 1285 complete(&np->np_restart_comp); 1286 iscsit_put_transport(conn->conn_transport);
··· 1243 flush_signals(current); 1244 1245 spin_lock_bh(&np->np_thread_lock); 1246 + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { 1247 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1248 + spin_unlock_bh(&np->np_thread_lock); 1249 complete(&np->np_restart_comp); 1250 + return 1; 1251 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { 1252 spin_unlock_bh(&np->np_thread_lock); 1253 goto exit; ··· 1278 goto exit; 1279 } else if (rc < 0) { 1280 spin_lock_bh(&np->np_thread_lock); 1281 + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { 1282 + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1283 spin_unlock_bh(&np->np_thread_lock); 1284 complete(&np->np_restart_comp); 1285 iscsit_put_transport(conn->conn_transport);
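The login thread now consumes a pending reset with atomic_dec_if_positive(), which only decrements when the counter is above zero and returns the value minus one, so a result >= 0 means a reset was actually queued. The following standalone sketch reproduces that contract with C11 atomics; it is an illustration of the semantics, not the kernel primitive itself.

    #include <stdatomic.h>
    #include <stdio.h>

    /*
     * Decrement *v only if it is positive.  Returns the decremented value,
     * or a negative number when nothing was pending, mirroring what
     * atomic_dec_if_positive() reports.
     */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0) {
            if (atomic_compare_exchange_weak(v, &old, old - 1))
                return old - 1;
            /* 'old' was reloaded by the failed exchange; retry */
        }
        return old - 1;
    }

    int main(void)
    {
        atomic_int reset_count = 1;

        /* First caller consumes the pending reset... */
        printf("%d\n", dec_if_positive(&reset_count));   /* 0  -> handle reset */
        /* ...a second caller sees nothing pending. */
        printf("%d\n", dec_if_positive(&reset_count));   /* -1 -> no reset     */
        return 0;
    }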
+2 -2
drivers/target/target_core_tpg.c
··· 364 mutex_lock(&tpg->acl_node_mutex); 365 if (acl->dynamic_node_acl) 366 acl->dynamic_node_acl = 0; 367 - list_del(&acl->acl_list); 368 mutex_unlock(&tpg->acl_node_mutex); 369 370 target_shutdown_sessions(acl); ··· 548 * in transport_deregister_session(). 549 */ 550 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 551 - list_del(&nacl->acl_list); 552 553 core_tpg_wait_for_nacl_pr_ref(nacl); 554 core_free_device_list_for_node(nacl, se_tpg);
··· 364 mutex_lock(&tpg->acl_node_mutex); 365 if (acl->dynamic_node_acl) 366 acl->dynamic_node_acl = 0; 367 + list_del_init(&acl->acl_list); 368 mutex_unlock(&tpg->acl_node_mutex); 369 370 target_shutdown_sessions(acl); ··· 548 * in transport_deregister_session(). 549 */ 550 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 551 + list_del_init(&nacl->acl_list); 552 553 core_tpg_wait_for_nacl_pr_ref(nacl); 554 core_free_device_list_for_node(nacl, se_tpg);
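These hunks, and the matching ones in target_core_transport.c below, switch list_del() to list_del_init(), which re-initializes the removed entry to point at itself so a later list_empty() test or a second removal is safe. A minimal userspace rendering of the idea is sketched here, assuming a circular doubly-linked list in the style of the kernel's list_head; it is not the kernel implementation.

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *entry, struct list_head *head)
    {
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
    }

    /*
     * Plain list_del() only unlinks; the entry's own pointers are left
     * stale (the kernel even poisons them).  list_del_init() additionally
     * re-initializes the entry so it is a valid empty list afterwards.
     */
    static void list_del_init(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
    }

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
        struct list_head head, node;

        INIT_LIST_HEAD(&head);
        list_add(&node, &head);

        list_del_init(&node);
        /* Safe: a later list_empty() check or a second removal sees a
         * self-pointing empty entry instead of stale pointers. */
        printf("node empty after list_del_init: %d\n", list_empty(&node));
        return 0;
    }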
+2 -2
drivers/target/target_core_transport.c
··· 466 } 467 468 mutex_lock(&se_tpg->acl_node_mutex); 469 - list_del(&nacl->acl_list); 470 mutex_unlock(&se_tpg->acl_node_mutex); 471 472 core_tpg_wait_for_nacl_pr_ref(nacl); ··· 538 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 539 540 if (se_nacl->dynamic_stop) 541 - list_del(&se_nacl->acl_list); 542 } 543 mutex_unlock(&se_tpg->acl_node_mutex); 544
··· 466 } 467 468 mutex_lock(&se_tpg->acl_node_mutex); 469 + list_del_init(&nacl->acl_list); 470 mutex_unlock(&se_tpg->acl_node_mutex); 471 472 core_tpg_wait_for_nacl_pr_ref(nacl); ··· 538 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 539 540 if (se_nacl->dynamic_stop) 541 + list_del_init(&se_nacl->acl_list); 542 } 543 mutex_unlock(&se_tpg->acl_node_mutex); 544
+7 -6
drivers/target/target_core_user.c
··· 563 block_remaining); 564 to_offset = get_block_offset_user(udev, dbi, 565 block_remaining); 566 - offset = DATA_BLOCK_SIZE - block_remaining; 567 - to += offset; 568 569 if (*iov_cnt != 0 && 570 to_offset == iov_tail(*iov)) { ··· 573 (*iov)->iov_len = copy_bytes; 574 } 575 if (copy_data) { 576 - memcpy(to, from + sg->length - sg_remaining, 577 - copy_bytes); 578 tcmu_flush_dcache_range(to, copy_bytes); 579 } 580 sg_remaining -= copy_bytes; ··· 637 copy_bytes = min_t(size_t, sg_remaining, 638 block_remaining); 639 offset = DATA_BLOCK_SIZE - block_remaining; 640 - from += offset; 641 tcmu_flush_dcache_range(from, copy_bytes); 642 - memcpy(to + sg->length - sg_remaining, from, 643 copy_bytes); 644 645 sg_remaining -= copy_bytes; ··· 1432 if (udev->dev_config[0]) 1433 snprintf(str + used, size - used, "/%s", udev->dev_config); 1434 1435 info->name = str; 1436 1437 return 0;
··· 563 block_remaining); 564 to_offset = get_block_offset_user(udev, dbi, 565 block_remaining); 566 567 if (*iov_cnt != 0 && 568 to_offset == iov_tail(*iov)) { ··· 575 (*iov)->iov_len = copy_bytes; 576 } 577 if (copy_data) { 578 + offset = DATA_BLOCK_SIZE - block_remaining; 579 + memcpy(to + offset, 580 + from + sg->length - sg_remaining, 581 + copy_bytes); 582 tcmu_flush_dcache_range(to, copy_bytes); 583 } 584 sg_remaining -= copy_bytes; ··· 637 copy_bytes = min_t(size_t, sg_remaining, 638 block_remaining); 639 offset = DATA_BLOCK_SIZE - block_remaining; 640 tcmu_flush_dcache_range(from, copy_bytes); 641 + memcpy(to + sg->length - sg_remaining, from + offset, 642 copy_bytes); 643 644 sg_remaining -= copy_bytes; ··· 1433 if (udev->dev_config[0]) 1434 snprintf(str + used, size - used, "/%s", udev->dev_config); 1435 1436 + /* If the old string exists, free it */ 1437 + kfree(info->name); 1438 info->name = str; 1439 1440 return 0;
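The first hunk above moves the `offset` computation next to the memcpy() and applies it to the destination (`to + offset`) so each chunk lands at the right position inside the fixed-size data block instead of repeatedly advancing the base pointer. A standalone sketch of that bookkeeping, with made-up block and segment sizes, is below.

    #include <stdio.h>
    #include <string.h>

    #define DATA_BLOCK_SIZE 16               /* illustrative block size */

    int main(void)
    {
        /* Simulated scatterlist: several small source segments. */
        const char *segs[] = { "0123", "45678", "9abcde" };
        char to[DATA_BLOCK_SIZE + 1] = { 0 };
        size_t block_remaining = DATA_BLOCK_SIZE;
        size_t i;

        for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++) {
            size_t sg_remaining = strlen(segs[i]);
            size_t copy_bytes = sg_remaining < block_remaining ?
                                sg_remaining : block_remaining;
            /* How much of the block is already filled. */
            size_t offset = DATA_BLOCK_SIZE - block_remaining;

            /* The fix in a nutshell: index the destination by 'offset'
             * rather than using the bare block base for every segment. */
            memcpy(to + offset, segs[i], copy_bytes);

            block_remaining -= copy_bytes;
        }

        printf("block now holds: %s\n", to);  /* 0123456789abcde */
        return 0;
    }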
+9
drivers/thunderbolt/eeprom.c
··· 333 int res; 334 enum tb_port_type type; 335 336 port = &sw->ports[header->index]; 337 port->disabled = header->port_disabled; 338 if (port->disabled)
··· 333 int res; 334 enum tb_port_type type; 335 336 + /* 337 + * Some DROMs list more ports than the controller actually has 338 + * so we skip those but allow the parser to continue. 339 + */ 340 + if (header->index > sw->config.max_port_number) { 341 + dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n"); 342 + return 0; 343 + } 344 + 345 port = &sw->ports[header->index]; 346 port->disabled = header->port_disabled; 347 if (port->disabled)
+17 -6
drivers/tty/serial/8250/8250_core.c
··· 1043 if (up->dl_write) 1044 uart->dl_write = up->dl_write; 1045 1046 - if (serial8250_isa_config != NULL) 1047 - serial8250_isa_config(0, &uart->port, 1048 - &uart->capabilities); 1049 1050 - ret = uart_add_one_port(&serial8250_reg, &uart->port); 1051 - if (ret == 0) 1052 - ret = uart->port.line; 1053 } 1054 mutex_unlock(&serial_mutex); 1055
··· 1043 if (up->dl_write) 1044 uart->dl_write = up->dl_write; 1045 1046 + if (uart->port.type != PORT_8250_CIR) { 1047 + if (serial8250_isa_config != NULL) 1048 + serial8250_isa_config(0, &uart->port, 1049 + &uart->capabilities); 1050 1051 + ret = uart_add_one_port(&serial8250_reg, 1052 + &uart->port); 1053 + if (ret == 0) 1054 + ret = uart->port.line; 1055 + } else { 1056 + dev_info(uart->port.dev, 1057 + "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", 1058 + uart->port.iobase, 1059 + (unsigned long long)uart->port.mapbase, 1060 + uart->port.irq); 1061 + 1062 + ret = 0; 1063 + } 1064 } 1065 mutex_unlock(&serial_mutex); 1066
+19 -18
drivers/tty/serial/amba-pl011.c
··· 142 .fixed_options = true, 143 }; 144 145 - /* 146 - * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as 147 - * occasionally getting stuck as 1. To avoid the potential for a hang, check 148 - * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART 149 - * implementations, so only do so if an affected platform is detected in 150 - * parse_spcr(). 151 - */ 152 - static bool qdf2400_e44_present = false; 153 - 154 static struct vendor_data vendor_qdt_qdf2400_e44 = { 155 .reg_offset = pl011_std_offsets, 156 .fr_busy = UART011_FR_TXFE, ··· 157 .always_enabled = true, 158 .fixed_options = true, 159 }; 160 161 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { 162 [REG_DR] = UART01x_DR, ··· 2368 resource_size_t addr; 2369 int i; 2370 2371 - if (strcmp(name, "qdf2400_e44") == 0) { 2372 - pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2373 - qdf2400_e44_present = true; 2374 - } else if (strcmp(name, "pl011") != 0) { 2375 return -ENODEV; 2376 - } 2377 2378 if (uart_parse_earlycon(options, &iotype, &addr, &options)) 2379 return -ENODEV; ··· 2729 } 2730 uap->port.irq = ret; 2731 2732 - uap->reg_offset = vendor_sbsa.reg_offset; 2733 - uap->vendor = qdf2400_e44_present ? 2734 - &vendor_qdt_qdf2400_e44 : &vendor_sbsa; 2735 uap->fifosize = 32; 2736 - uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; 2737 uap->port.ops = &sbsa_uart_pops; 2738 uap->fixed_baud = baudrate; 2739
··· 142 .fixed_options = true, 143 }; 144 145 + #ifdef CONFIG_ACPI_SPCR_TABLE 146 static struct vendor_data vendor_qdt_qdf2400_e44 = { 147 .reg_offset = pl011_std_offsets, 148 .fr_busy = UART011_FR_TXFE, ··· 165 .always_enabled = true, 166 .fixed_options = true, 167 }; 168 + #endif 169 170 static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { 171 [REG_DR] = UART01x_DR, ··· 2375 resource_size_t addr; 2376 int i; 2377 2378 + /* 2379 + * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum 2380 + * have a distinct console name, so make sure we check for that. 2381 + * The actual implementation of the erratum occurs in the probe 2382 + * function. 2383 + */ 2384 + if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) 2385 return -ENODEV; 2386 2387 if (uart_parse_earlycon(options, &iotype, &addr, &options)) 2388 return -ENODEV; ··· 2734 } 2735 uap->port.irq = ret; 2736 2737 + #ifdef CONFIG_ACPI_SPCR_TABLE 2738 + if (qdf2400_e44_present) { 2739 + dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n"); 2740 + uap->vendor = &vendor_qdt_qdf2400_e44; 2741 + } else 2742 + #endif 2743 + uap->vendor = &vendor_sbsa; 2744 + 2745 + uap->reg_offset = uap->vendor->reg_offset; 2746 uap->fifosize = 32; 2747 + uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; 2748 uap->port.ops = &sbsa_uart_pops; 2749 uap->fixed_baud = baudrate; 2750
+3 -1
drivers/usb/core/hcd.c
··· 1888 /* No more submits can occur */ 1889 spin_lock_irq(&hcd_urb_list_lock); 1890 rescan: 1891 - list_for_each_entry (urb, &ep->urb_list, urb_list) { 1892 int is_in; 1893 1894 if (urb->unlinked) ··· 2485 } 2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { 2487 hcd = hcd->shared_hcd; 2488 if (hcd->rh_registered) { 2489 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2490
··· 1888 /* No more submits can occur */ 1889 spin_lock_irq(&hcd_urb_list_lock); 1890 rescan: 1891 + list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { 1892 int is_in; 1893 1894 if (urb->unlinked) ··· 2485 } 2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { 2487 hcd = hcd->shared_hcd; 2488 + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); 2489 + set_bit(HCD_FLAG_DEAD, &hcd->flags); 2490 if (hcd->rh_registered) { 2491 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2492
+6 -4
drivers/usb/core/hub.c
··· 4725 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, 4726 u16 portchange) 4727 { 4728 - int status, i; 4729 unsigned unit_load; 4730 struct usb_device *hdev = hub->hdev; 4731 struct usb_hcd *hcd = bus_to_hcd(hdev->bus); ··· 4930 4931 done: 4932 hub_port_disable(hub, port1, 1); 4933 - if (hcd->driver->relinquish_port && !hub->hdev->parent) 4934 - hcd->driver->relinquish_port(hcd, port1); 4935 - 4936 } 4937 4938 /* Handle physical or logical connection change events.
··· 4725 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, 4726 u16 portchange) 4727 { 4728 + int status = -ENODEV; 4729 + int i; 4730 unsigned unit_load; 4731 struct usb_device *hdev = hub->hdev; 4732 struct usb_hcd *hcd = bus_to_hcd(hdev->bus); ··· 4929 4930 done: 4931 hub_port_disable(hub, port1, 1); 4932 + if (hcd->driver->relinquish_port && !hub->hdev->parent) { 4933 + if (status != -ENOTCONN && status != -ENODEV) 4934 + hcd->driver->relinquish_port(hcd, port1); 4935 + } 4936 } 4937 4938 /* Handle physical or logical connection change events.
+4
drivers/usb/core/quirks.c
··· 150 /* appletouch */ 151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 152 153 /* Avision AV600U */ 154 { USB_DEVICE(0x0638, 0x0a13), .driver_info = 155 USB_QUIRK_STRING_FETCH_255 }, ··· 252 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, 253 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, 254 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, 255 256 /* Logitech Optical Mouse M90/M100 */ 257 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
··· 150 /* appletouch */ 151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 152 153 + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ 154 + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, 155 + 156 /* Avision AV600U */ 157 { USB_DEVICE(0x0638, 0x0a13), .driver_info = 158 USB_QUIRK_STRING_FETCH_255 }, ··· 249 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, 250 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, 251 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, 252 + { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, 253 254 /* Logitech Optical Mouse M90/M100 */ 255 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+32 -1
drivers/usb/dwc3/gadget.c
··· 896 if (!node) { 897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 898 899 if (speed == USB_SPEED_HIGH) { 900 struct usb_ep *ep = &dep->endpoint; 901 - trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); 902 } 903 } else { 904 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
··· 896 if (!node) { 897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 898 899 + /* 900 + * USB Specification 2.0 Section 5.9.2 states that: "If 901 + * there is only a single transaction in the microframe, 902 + * only a DATA0 data packet PID is used. If there are 903 + * two transactions per microframe, DATA1 is used for 904 + * the first transaction data packet and DATA0 is used 905 + * for the second transaction data packet. If there are 906 + * three transactions per microframe, DATA2 is used for 907 + * the first transaction data packet, DATA1 is used for 908 + * the second, and DATA0 is used for the third." 909 + * 910 + * IOW, we should satisfy the following cases: 911 + * 912 + * 1) length <= maxpacket 913 + * - DATA0 914 + * 915 + * 2) maxpacket < length <= (2 * maxpacket) 916 + * - DATA1, DATA0 917 + * 918 + * 3) (2 * maxpacket) < length <= (3 * maxpacket) 919 + * - DATA2, DATA1, DATA0 920 + */ 921 if (speed == USB_SPEED_HIGH) { 922 struct usb_ep *ep = &dep->endpoint; 923 + unsigned int mult = ep->mult - 1; 924 + unsigned int maxp = usb_endpoint_maxp(ep->desc); 925 + 926 + if (length <= (2 * maxp)) 927 + mult--; 928 + 929 + if (length <= maxp) 930 + mult--; 931 + 932 + trb->size |= DWC3_TRB_SIZE_PCM1(mult); 933 } 934 } else { 935 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
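The added comment spells out how many transactions a high-speed isochronous microframe needs for a given payload; the PCM1 field then encodes the starting packet ID accordingly. The standalone calculation below shows the same rule, assuming `maxp` is the endpoint's max packet size and `trans` the configured transactions per microframe; it only illustrates the arithmetic and is not driver code.

    #include <stdio.h>

    /*
     * Starting packet-ID index for a high-speed isochronous microframe:
     * 0 -> DATA0 only, 1 -> DATA1 then DATA0, 2 -> DATA2, DATA1, DATA0.
     */
    static int pcm1_for_length(unsigned int length, unsigned int maxp,
                               unsigned int trans)
    {
        int mult = (int)trans - 1;

        if (length <= 2 * maxp)
            mult--;
        if (length <= maxp)
            mult--;

        return mult < 0 ? 0 : mult;          /* clamp for short requests */
    }

    int main(void)
    {
        unsigned int maxp = 1024;            /* typical HS isoc wMaxPacketSize */

        printf("%d\n", pcm1_for_length(512,  maxp, 3));  /* 0: DATA0          */
        printf("%d\n", pcm1_for_length(1500, maxp, 3));  /* 1: DATA1, DATA0   */
        printf("%d\n", pcm1_for_length(2500, maxp, 3));  /* 2: DATA2..DATA0   */
        return 0;
    }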
+18 -7
drivers/usb/gadget/udc/renesas_usb3.c
··· 838 return usb3_req; 839 } 840 841 static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, 842 struct renesas_usb3_request *usb3_req, int status) 843 { 844 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); 845 unsigned long flags; 846 847 - dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", 848 - usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, 849 - status); 850 - usb3_req->req.status = status; 851 spin_lock_irqsave(&usb3->lock, flags); 852 - usb3_ep->started = false; 853 - list_del_init(&usb3_req->queue); 854 spin_unlock_irqrestore(&usb3->lock, flags); 855 - usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); 856 } 857 858 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
··· 838 return usb3_req; 839 } 840 841 + static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep, 842 + struct renesas_usb3_request *usb3_req, 843 + int status) 844 + { 845 + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); 846 + 847 + dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", 848 + usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, 849 + status); 850 + usb3_req->req.status = status; 851 + usb3_ep->started = false; 852 + list_del_init(&usb3_req->queue); 853 + spin_unlock(&usb3->lock); 854 + usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); 855 + spin_lock(&usb3->lock); 856 + } 857 + 858 static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, 859 struct renesas_usb3_request *usb3_req, int status) 860 { 861 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); 862 unsigned long flags; 863 864 spin_lock_irqsave(&usb3->lock, flags); 865 + __usb3_request_done(usb3_ep, usb3_req, status); 866 spin_unlock_irqrestore(&usb3->lock, flags); 867 } 868 869 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
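The refactor above keeps the request bookkeeping under the driver spinlock but releases it around usb_gadget_giveback_request(), since the completion callback may call back into the driver and must not run with the lock held. A minimal pthread rendering of that "drop the lock around the callback" shape follows; the names and types are invented for the sketch.

    #include <pthread.h>
    #include <stdio.h>

    struct dev {
        pthread_mutex_t lock;
        int in_flight;                       /* state guarded by 'lock' */
    };

    /* Completion callback supplied by an upper layer; it may re-enter the
     * driver and take 'lock' itself, so it must be invoked unlocked. */
    static void complete_cb(struct dev *d)
    {
        pthread_mutex_lock(&d->lock);
        printf("callback sees %d requests in flight\n", d->in_flight);
        pthread_mutex_unlock(&d->lock);
    }

    static void request_done_locked(struct dev *d)
    {
        /* caller holds d->lock */
        d->in_flight--;                      /* update state under the lock */

        pthread_mutex_unlock(&d->lock);      /* drop the lock ...           */
        complete_cb(d);                      /* ... just for the callback   */
        pthread_mutex_lock(&d->lock);        /* reacquire for the caller    */
    }

    int main(void)
    {
        struct dev d = { .in_flight = 1 };

        pthread_mutex_init(&d.lock, NULL);
        pthread_mutex_lock(&d.lock);
        request_done_locked(&d);
        pthread_mutex_unlock(&d.lock);
        pthread_mutex_destroy(&d.lock);
        return 0;
    }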
+32 -5
drivers/usb/host/pci-quirks.c
··· 98 AMD_CHIPSET_HUDSON2, 99 AMD_CHIPSET_BOLTON, 100 AMD_CHIPSET_YANGTZE, 101 AMD_CHIPSET_UNKNOWN, 102 }; 103 ··· 142 pinfo->sb_type.gen = AMD_CHIPSET_SB700; 143 else if (rev >= 0x40 && rev <= 0x4f) 144 pinfo->sb_type.gen = AMD_CHIPSET_SB800; 145 } else { 146 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 147 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); ··· 266 { 267 /* Make sure amd chipset type has already been initialized */ 268 usb_amd_find_chipset_info(); 269 - if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) 270 - return 0; 271 - 272 - dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); 273 - return 1; 274 } 275 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); 276 ··· 1157 } 1158 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, 1159 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
··· 98 AMD_CHIPSET_HUDSON2, 99 AMD_CHIPSET_BOLTON, 100 AMD_CHIPSET_YANGTZE, 101 + AMD_CHIPSET_TAISHAN, 102 AMD_CHIPSET_UNKNOWN, 103 }; 104 ··· 141 pinfo->sb_type.gen = AMD_CHIPSET_SB700; 142 else if (rev >= 0x40 && rev <= 0x4f) 143 pinfo->sb_type.gen = AMD_CHIPSET_SB800; 144 + } 145 + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 146 + 0x145c, NULL); 147 + if (pinfo->smbus_dev) { 148 + pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; 149 } else { 150 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 151 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); ··· 260 { 261 /* Make sure amd chipset type has already been initialized */ 262 usb_amd_find_chipset_info(); 263 + if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || 264 + amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { 265 + dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); 266 + return 1; 267 + } 268 + return 0; 269 } 270 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); 271 ··· 1150 } 1151 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, 1152 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); 1153 + 1154 + bool usb_xhci_needs_pci_reset(struct pci_dev *pdev) 1155 + { 1156 + /* 1157 + * Our dear uPD72020{1,2} friend only partially resets when 1158 + * asked to via the XHCI interface, and may end up doing DMA 1159 + * at the wrong addresses, as it keeps the top 32bit of some 1160 + * addresses from its previous programming under obscure 1161 + * circumstances. 1162 + * Give it a good wack at probe time. Unfortunately, this 1163 + * needs to happen before we've had a chance to discover any 1164 + * quirk, or the system will be in a rather bad state. 1165 + */ 1166 + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && 1167 + (pdev->device == 0x0014 || pdev->device == 0x0015)) 1168 + return true; 1169 + 1170 + return false; 1171 + } 1172 + EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
+1
drivers/usb/host/pci-quirks.h
··· 15 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); 16 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 17 void sb800_prefetch(struct device *dev, int on); 18 #else 19 struct pci_dev; 20 static inline void usb_amd_quirk_pll_disable(void) {}
··· 15 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); 16 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 17 void sb800_prefetch(struct device *dev, int on); 18 + bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); 19 #else 20 struct pci_dev; 21 static inline void usb_amd_quirk_pll_disable(void) {}
+7
drivers/usb/host/xhci-pci.c
··· 284 285 driver = (struct hc_driver *)id->driver_data; 286 287 /* Prevent runtime suspending between USB-2 and USB-3 initialization */ 288 pm_runtime_get_noresume(&dev->dev); 289
··· 284 285 driver = (struct hc_driver *)id->driver_data; 286 287 + /* For some HW implementation, a XHCI reset is just not enough... */ 288 + if (usb_xhci_needs_pci_reset(dev)) { 289 + dev_info(&dev->dev, "Resetting\n"); 290 + if (pci_reset_function_locked(dev)) 291 + dev_warn(&dev->dev, "Reset failed"); 292 + } 293 + 294 /* Prevent runtime suspending between USB-2 and USB-3 initialization */ 295 pm_runtime_get_noresume(&dev->dev); 296
+1
drivers/usb/musb/musb_host.c
··· 139 "Could not flush host TX%d fifo: csr: %04x\n", 140 ep->epnum, csr)) 141 return; 142 } 143 } 144
··· 139 "Could not flush host TX%d fifo: csr: %04x\n", 140 ep->epnum, csr)) 141 return; 142 + mdelay(1); 143 } 144 } 145
+9 -8
drivers/usb/phy/phy-msm-usb.c
··· 197 struct regulator *v3p3; 198 struct regulator *v1p8; 199 struct regulator *vddcx; 200 201 struct reset_control *phy_rst; 202 struct reset_control *link_rst; ··· 1732 1733 static int msm_otg_probe(struct platform_device *pdev) 1734 { 1735 - struct regulator_bulk_data regs[3]; 1736 int ret = 0; 1737 struct device_node *np = pdev->dev.of_node; 1738 struct msm_otg_platform_data *pdata; ··· 1817 return motg->irq; 1818 } 1819 1820 - regs[0].supply = "vddcx"; 1821 - regs[1].supply = "v3p3"; 1822 - regs[2].supply = "v1p8"; 1823 1824 - ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); 1825 if (ret) 1826 return ret; 1827 1828 - motg->vddcx = regs[0].consumer; 1829 - motg->v3p3 = regs[1].consumer; 1830 - motg->v1p8 = regs[2].consumer; 1831 1832 clk_set_rate(motg->clk, 60000000); 1833
··· 197 struct regulator *v3p3; 198 struct regulator *v1p8; 199 struct regulator *vddcx; 200 + struct regulator_bulk_data supplies[3]; 201 202 struct reset_control *phy_rst; 203 struct reset_control *link_rst; ··· 1731 1732 static int msm_otg_probe(struct platform_device *pdev) 1733 { 1734 int ret = 0; 1735 struct device_node *np = pdev->dev.of_node; 1736 struct msm_otg_platform_data *pdata; ··· 1817 return motg->irq; 1818 } 1819 1820 + motg->supplies[0].supply = "vddcx"; 1821 + motg->supplies[1].supply = "v3p3"; 1822 + motg->supplies[2].supply = "v1p8"; 1823 1824 + ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies), 1825 + motg->supplies); 1826 if (ret) 1827 return ret; 1828 1829 + motg->vddcx = motg->supplies[0].consumer; 1830 + motg->v3p3 = motg->supplies[1].consumer; 1831 + motg->v1p8 = motg->supplies[2].consumer; 1832 1833 clk_set_rate(motg->clk, 60000000); 1834
+1 -4
drivers/usb/renesas_usbhs/mod_gadget.c
··· 639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); 640 struct usbhs_pipe *pipe; 641 unsigned long flags; 642 - int ret = 0; 643 644 spin_lock_irqsave(&uep->lock, flags); 645 pipe = usbhsg_uep_to_pipe(uep); 646 - if (!pipe) { 647 - ret = -EINVAL; 648 goto out; 649 - } 650 651 usbhsg_pipe_disable(uep); 652 usbhs_pipe_free(pipe);
··· 639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); 640 struct usbhs_pipe *pipe; 641 unsigned long flags; 642 643 spin_lock_irqsave(&uep->lock, flags); 644 pipe = usbhsg_uep_to_pipe(uep); 645 + if (!pipe) 646 goto out; 647 648 usbhsg_pipe_disable(uep); 649 usbhs_pipe_free(pipe);
+7 -2
drivers/usb/renesas_usbhs/rcar3.c
··· 20 /* Low Power Status register (LPSTS) */ 21 #define LPSTS_SUSPM 0x4000 22 23 - /* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ 24 #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ 25 #define UGCTRL2_USB0SEL_OTG 0x00000030 26 27 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) 28 { ··· 38 { 39 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); 40 41 - usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); 42 43 if (enable) { 44 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
··· 20 /* Low Power Status register (LPSTS) */ 21 #define LPSTS_SUSPM 0x4000 22 23 + /* 24 + * USB General control register 2 (UGCTRL2) 25 + * Remarks: bit[31:11] and bit[9:6] should be 0 26 + */ 27 #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ 28 #define UGCTRL2_USB0SEL_OTG 0x00000030 29 + #define UGCTRL2_VBUSSEL 0x00000400 30 31 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) 32 { ··· 34 { 35 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); 36 37 + usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG | 38 + UGCTRL2_VBUSSEL); 39 40 if (enable) { 41 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+1
drivers/usb/serial/cp210x.c
··· 142 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 143 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 144 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ 145 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 146 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 147 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
··· 142 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 143 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 144 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ 145 + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ 146 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 147 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 148 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+2
drivers/usb/serial/option.c
··· 2025 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ 2026 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ 2027 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2028 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 2029 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 2030 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
··· 2025 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ 2026 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ 2027 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2028 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 2029 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2030 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 2031 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 2032 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+2
drivers/usb/serial/pl2303.c
··· 52 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 53 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 54 .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, 55 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, 56 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, 57 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
··· 52 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 53 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 54 .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, 55 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), 56 + .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, 57 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, 58 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, 59 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+1
drivers/usb/serial/pl2303.h
··· 27 #define ATEN_VENDOR_ID 0x0557 28 #define ATEN_VENDOR_ID2 0x0547 29 #define ATEN_PRODUCT_ID 0x2008 30 #define ATEN_PRODUCT_ID2 0x2118 31 32 #define IODATA_VENDOR_ID 0x04bb
··· 27 #define ATEN_VENDOR_ID 0x0557 28 #define ATEN_VENDOR_ID2 0x0547 29 #define ATEN_PRODUCT_ID 0x2008 30 + #define ATEN_PRODUCT_UC485 0x2021 31 #define ATEN_PRODUCT_ID2 0x2118 32 33 #define IODATA_VENDOR_ID 0x04bb
+2 -2
drivers/usb/storage/unusual_uas.h
··· 124 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ 125 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, 126 "Initio Corporation", 127 - "", 128 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 - US_FL_NO_ATA_1X), 130 131 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 132 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
··· 124 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ 125 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, 126 "Initio Corporation", 127 + "INIC-3069", 128 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 + US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), 130 131 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 132 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+12 -6
drivers/usb/storage/usb.c
··· 315 { 316 struct us_data *us = (struct us_data *)__us; 317 struct Scsi_Host *host = us_to_host(us); 318 319 for (;;) { 320 usb_stor_dbg(us, "*** thread sleeping\n"); ··· 331 scsi_lock(host); 332 333 /* When we are called with no command pending, we're done */ 334 if (us->srb == NULL) { 335 scsi_unlock(host); 336 mutex_unlock(&us->dev_mutex); ··· 400 /* lock access to the state */ 401 scsi_lock(host); 402 403 - /* indicate that the command is done */ 404 - if (us->srb->result != DID_ABORT << 16) { 405 - usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", 406 - us->srb->result); 407 - us->srb->scsi_done(us->srb); 408 - } else { 409 SkipForAbort: 410 usb_stor_dbg(us, "scsi command aborted\n"); 411 } 412 413 /* ··· 428 429 /* unlock the device pointers */ 430 mutex_unlock(&us->dev_mutex); 431 } /* for (;;) */ 432 433 /* Wait until we are told to stop */
··· 315 { 316 struct us_data *us = (struct us_data *)__us; 317 struct Scsi_Host *host = us_to_host(us); 318 + struct scsi_cmnd *srb; 319 320 for (;;) { 321 usb_stor_dbg(us, "*** thread sleeping\n"); ··· 330 scsi_lock(host); 331 332 /* When we are called with no command pending, we're done */ 333 + srb = us->srb; 334 if (us->srb == NULL) { 335 scsi_unlock(host); 336 mutex_unlock(&us->dev_mutex); ··· 398 /* lock access to the state */ 399 scsi_lock(host); 400 401 + /* was the command aborted? */ 402 + if (us->srb->result == DID_ABORT << 16) { 403 SkipForAbort: 404 usb_stor_dbg(us, "scsi command aborted\n"); 405 + srb = NULL; /* Don't call srb->scsi_done() */ 406 } 407 408 /* ··· 429 430 /* unlock the device pointers */ 431 mutex_unlock(&us->dev_mutex); 432 + 433 + /* now that the locks are released, notify the SCSI core */ 434 + if (srb) { 435 + usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", 436 + srb->result); 437 + srb->scsi_done(srb); 438 + } 439 } /* for (;;) */ 440 441 /* Wait until we are told to stop */
+7 -1
drivers/video/fbdev/efifb.c
··· 17 #include <asm/efi.h> 18 19 static bool request_mem_succeeded = false; 20 21 static struct fb_var_screeninfo efifb_defined = { 22 .activate = FB_ACTIVATE_NOW, ··· 100 screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); 101 else if (!strncmp(this_opt, "width:", 6)) 102 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); 103 } 104 } 105 ··· 258 info->apertures->ranges[0].base = efifb_fix.smem_start; 259 info->apertures->ranges[0].size = size_remap; 260 261 - info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); 262 if (!info->screen_base) { 263 pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", 264 efifb_fix.smem_len, efifb_fix.smem_start);
··· 17 #include <asm/efi.h> 18 19 static bool request_mem_succeeded = false; 20 + static bool nowc = false; 21 22 static struct fb_var_screeninfo efifb_defined = { 23 .activate = FB_ACTIVATE_NOW, ··· 99 screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); 100 else if (!strncmp(this_opt, "width:", 6)) 101 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); 102 + else if (!strcmp(this_opt, "nowc")) 103 + nowc = true; 104 } 105 } 106 ··· 255 info->apertures->ranges[0].base = efifb_fix.smem_start; 256 info->apertures->ranges[0].size = size_remap; 257 258 + if (nowc) 259 + info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 260 + else 261 + info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); 262 if (!info->screen_base) { 263 pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", 264 efifb_fix.smem_len, efifb_fix.smem_start);
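With the new "nowc" token the probe path maps the framebuffer with a plain ioremap() instead of ioremap_wc(). The fragment below is a standalone sketch of that option-parsing shape, a comma-separated option string toggling a flag that later selects the mapping flavour; the map_* helpers are stand-ins, not the kernel's ioremap APIs.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool nowc;

    /* Stand-ins for the two mapping flavours used by the sketch. */
    static void map_write_combined(void) { puts("mapping write-combined"); }
    static void map_uncached(void)       { puts("mapping uncached"); }

    static void parse_options(char *options)
    {
        char *opt;

        for (opt = strtok(options, ","); opt; opt = strtok(NULL, ",")) {
            if (!strcmp(opt, "nowc"))
                nowc = true;
            /* other tokens (width:, height:, ...) would be handled here */
        }
    }

    int main(void)
    {
        char opts[] = "width:1024,nowc";

        parse_options(opts);
        if (nowc)
            map_uncached();
        else
            map_write_combined();
        return 0;
    }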
+3 -7
drivers/video/fbdev/imxfb.c
··· 1073 imxfb_disable_controller(fbi); 1074 1075 unregister_framebuffer(info); 1076 - 1077 pdata = dev_get_platdata(&pdev->dev); 1078 if (pdata && pdata->exit) 1079 pdata->exit(fbi->pdev); 1080 - 1081 - fb_dealloc_cmap(&info->cmap); 1082 - kfree(info->pseudo_palette); 1083 - framebuffer_release(info); 1084 - 1085 dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, 1086 fbi->map_dma); 1087 - 1088 iounmap(fbi->regs); 1089 release_mem_region(res->start, resource_size(res)); 1090 1091 return 0; 1092 }
··· 1073 imxfb_disable_controller(fbi); 1074 1075 unregister_framebuffer(info); 1076 + fb_dealloc_cmap(&info->cmap); 1077 pdata = dev_get_platdata(&pdev->dev); 1078 if (pdata && pdata->exit) 1079 pdata->exit(fbi->pdev); 1080 dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, 1081 fbi->map_dma); 1082 iounmap(fbi->regs); 1083 release_mem_region(res->start, resource_size(res)); 1084 + kfree(info->pseudo_palette); 1085 + framebuffer_release(info); 1086 1087 return 0; 1088 }
-1
drivers/video/fbdev/omap2/omapfb/dss/core.c
··· 193 194 static int __init omap_dss_probe(struct platform_device *pdev) 195 { 196 - struct omap_dss_board_info *pdata = pdev->dev.platform_data; 197 int r; 198 199 core.pdev = pdev;
··· 193 194 static int __init omap_dss_probe(struct platform_device *pdev) 195 { 196 int r; 197 198 core.pdev = pdev;
+1 -1
drivers/xen/events/events_base.c
··· 574 575 static void enable_pirq(struct irq_data *data) 576 { 577 - startup_pirq(data); 578 } 579 580 static void disable_pirq(struct irq_data *data)
··· 574 575 static void enable_pirq(struct irq_data *data) 576 { 577 + enable_dynirq(data); 578 } 579 580 static void disable_pirq(struct irq_data *data)
+2 -1
drivers/xen/xenbus/xenbus_xs.c
··· 857 struct list_head *ent; 858 struct xs_watch_event *event; 859 860 for (;;) { 861 wait_event_interruptible(watch_events_waitq, 862 !list_empty(&watch_events)); ··· 927 task = kthread_run(xenwatch_thread, NULL, "xenwatch"); 928 if (IS_ERR(task)) 929 return PTR_ERR(task); 930 - xenwatch_pid = task->pid; 931 932 /* shutdown watches for kexec boot */ 933 xs_reset_watches();
··· 857 struct list_head *ent; 858 struct xs_watch_event *event; 859 860 + xenwatch_pid = current->pid; 861 + 862 for (;;) { 863 wait_event_interruptible(watch_events_waitq, 864 !list_empty(&watch_events)); ··· 925 task = kthread_run(xenwatch_thread, NULL, "xenwatch"); 926 if (IS_ERR(task)) 927 return PTR_ERR(task); 928 929 /* shutdown watches for kexec boot */ 930 xs_reset_watches();
+5 -4
fs/fuse/file.c
··· 46 { 47 struct fuse_file *ff; 48 49 - ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 50 if (unlikely(!ff)) 51 return NULL; 52 ··· 609 struct fuse_io_priv *io = req->io; 610 ssize_t pos = -1; 611 612 - fuse_release_user_pages(req, !io->write); 613 614 if (io->write) { 615 if (req->misc.write.in.size != req->misc.write.out.size) ··· 1316 loff_t *ppos, int flags) 1317 { 1318 int write = flags & FUSE_DIO_WRITE; 1319 - bool should_dirty = !write && iter_is_iovec(iter); 1320 int cuse = flags & FUSE_DIO_CUSE; 1321 struct file *file = io->file; 1322 struct inode *inode = file->f_mapping->host; ··· 1345 inode_unlock(inode); 1346 } 1347 1348 while (count) { 1349 size_t nres; 1350 fl_owner_t owner = current->files; ··· 1360 nres = fuse_send_read(req, io, pos, nbytes, owner); 1361 1362 if (!io->async) 1363 - fuse_release_user_pages(req, should_dirty); 1364 if (req->out.h.error) { 1365 err = req->out.h.error; 1366 break; ··· 1669 err_free: 1670 fuse_request_free(req); 1671 err: 1672 end_page_writeback(page); 1673 return error; 1674 }
··· 46 { 47 struct fuse_file *ff; 48 49 + ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL); 50 if (unlikely(!ff)) 51 return NULL; 52 ··· 609 struct fuse_io_priv *io = req->io; 610 ssize_t pos = -1; 611 612 + fuse_release_user_pages(req, io->should_dirty); 613 614 if (io->write) { 615 if (req->misc.write.in.size != req->misc.write.out.size) ··· 1316 loff_t *ppos, int flags) 1317 { 1318 int write = flags & FUSE_DIO_WRITE; 1319 int cuse = flags & FUSE_DIO_CUSE; 1320 struct file *file = io->file; 1321 struct inode *inode = file->f_mapping->host; ··· 1346 inode_unlock(inode); 1347 } 1348 1349 + io->should_dirty = !write && iter_is_iovec(iter); 1350 while (count) { 1351 size_t nres; 1352 fl_owner_t owner = current->files; ··· 1360 nres = fuse_send_read(req, io, pos, nbytes, owner); 1361 1362 if (!io->async) 1363 + fuse_release_user_pages(req, io->should_dirty); 1364 if (req->out.h.error) { 1365 err = req->out.h.error; 1366 break; ··· 1669 err_free: 1670 fuse_request_free(req); 1671 err: 1672 + mapping_set_error(page->mapping, error); 1673 end_page_writeback(page); 1674 return error; 1675 }
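The fuse fix computes the "should the user pages be dirtied" decision once per I/O, stores it in the per-I/O container, and has both the synchronous path and the async completion consume that stored value, so the two paths can no longer disagree. A compact standalone sketch of carrying a submit-time decision inside the request object follows; all names are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-I/O container: the decision is recorded once at submit time. */
    struct io_priv {
        bool write;
        bool should_dirty;
    };

    struct request {
        struct io_priv *io;
    };

    static void release_user_pages(bool dirty)
    {
        printf("releasing pages, dirty=%d\n", dirty);
    }

    /* Completion (possibly asynchronous): reads the stored decision. */
    static void request_end(struct request *req)
    {
        release_user_pages(req->io->should_dirty);
    }

    static void do_io(struct io_priv *io, bool iter_is_user_iovec)
    {
        struct request req = { .io = io };

        /* Decide once, up front: only reads into user iovecs dirty pages. */
        io->should_dirty = !io->write && iter_is_user_iovec;

        /* ... submit; later the completion runs with the same answer: */
        request_end(&req);
    }

    int main(void)
    {
        struct io_priv io = { .write = false };

        do_io(&io, true);                    /* read into user memory -> dirty */
        return 0;
    }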
+1
fs/fuse/fuse_i.h
··· 249 size_t size; 250 __u64 offset; 251 bool write; 252 int err; 253 struct kiocb *iocb; 254 struct file *file;
··· 249 size_t size; 250 __u64 offset; 251 bool write; 252 + bool should_dirty; 253 int err; 254 struct kiocb *iocb; 255 struct file *file;
+1
fs/nfs/Kconfig
··· 121 config PNFS_BLOCK 122 tristate 123 depends on NFS_V4_1 && BLK_DEV_DM 124 default NFS_V4 125 126 config PNFS_FLEXFILE_LAYOUT
··· 121 config PNFS_BLOCK 122 tristate 123 depends on NFS_V4_1 && BLK_DEV_DM 124 + depends on 64BIT || LBDAF 125 default NFS_V4 126 127 config PNFS_FLEXFILE_LAYOUT
+1
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 32 { 33 nfs4_print_deviceid(&mirror_ds->id_node.deviceid); 34 nfs4_pnfs_ds_put(mirror_ds->ds); 35 kfree_rcu(mirror_ds, id_node.rcu); 36 } 37
··· 32 { 33 nfs4_print_deviceid(&mirror_ds->id_node.deviceid); 34 nfs4_pnfs_ds_put(mirror_ds->ds); 35 + kfree(mirror_ds->ds_versions); 36 kfree_rcu(mirror_ds, id_node.rcu); 37 } 38
+48 -22
fs/nfs/nfs4proc.c
··· 1659 return state; 1660 } 1661 1662 static struct nfs4_state * 1663 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1664 { 1665 - struct inode *inode; 1666 - struct nfs4_state *state = NULL; 1667 - int ret; 1668 1669 if (!data->rpc_done) { 1670 state = nfs4_try_open_cached(data); ··· 1712 goto out; 1713 } 1714 1715 - ret = -EAGAIN; 1716 - if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1717 - goto err; 1718 - inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label); 1719 - ret = PTR_ERR(inode); 1720 - if (IS_ERR(inode)) 1721 - goto err; 1722 - ret = -ENOMEM; 1723 - state = nfs4_get_open_state(inode, data->owner); 1724 - if (state == NULL) 1725 - goto err_put_inode; 1726 if (data->o_res.delegation_type != 0) 1727 nfs4_opendata_check_deleg(data, state); 1728 update_open_stateid(state, &data->o_res.stateid, NULL, 1729 data->o_arg.fmode); 1730 - iput(inode); 1731 out: 1732 nfs_release_seqid(data->o_arg.seqid); 1733 return state; 1734 - err_put_inode: 1735 - iput(inode); 1736 - err: 1737 - return ERR_PTR(ret); 1738 } 1739 1740 static struct nfs4_state * ··· 2099 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2100 case NFS4_OPEN_CLAIM_FH: 2101 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2102 - nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2103 } 2104 data->timestamp = jiffies; 2105 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, ··· 2580 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2581 clear_bit(NFS_OPEN_STATE, &state->flags); 2582 stateid->type = NFS4_INVALID_STATEID_TYPE; 2583 - } 2584 - if (status != NFS_OK) 2585 return status; 2586 if (nfs_open_stateid_recover_openmode(state)) 2587 return -NFS4ERR_OPENMODE; 2588 return NFS_OK;
··· 1659 return state; 1660 } 1661 1662 + static struct inode * 1663 + nfs4_opendata_get_inode(struct nfs4_opendata *data) 1664 + { 1665 + struct inode *inode; 1666 + 1667 + switch (data->o_arg.claim) { 1668 + case NFS4_OPEN_CLAIM_NULL: 1669 + case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1670 + case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1671 + if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1672 + return ERR_PTR(-EAGAIN); 1673 + inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 1674 + &data->f_attr, data->f_label); 1675 + break; 1676 + default: 1677 + inode = d_inode(data->dentry); 1678 + ihold(inode); 1679 + nfs_refresh_inode(inode, &data->f_attr); 1680 + } 1681 + return inode; 1682 + } 1683 + 1684 + static struct nfs4_state * 1685 + nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 1686 + { 1687 + struct nfs4_state *state; 1688 + struct inode *inode; 1689 + 1690 + inode = nfs4_opendata_get_inode(data); 1691 + if (IS_ERR(inode)) 1692 + return ERR_CAST(inode); 1693 + if (data->state != NULL && data->state->inode == inode) { 1694 + state = data->state; 1695 + atomic_inc(&state->count); 1696 + } else 1697 + state = nfs4_get_open_state(inode, data->owner); 1698 + iput(inode); 1699 + if (state == NULL) 1700 + state = ERR_PTR(-ENOMEM); 1701 + return state; 1702 + } 1703 + 1704 static struct nfs4_state * 1705 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1706 { 1707 + struct nfs4_state *state; 1708 1709 if (!data->rpc_done) { 1710 state = nfs4_try_open_cached(data); ··· 1672 goto out; 1673 } 1674 1675 + state = nfs4_opendata_find_nfs4_state(data); 1676 + if (IS_ERR(state)) 1677 + goto out; 1678 + 1679 if (data->o_res.delegation_type != 0) 1680 nfs4_opendata_check_deleg(data, state); 1681 update_open_stateid(state, &data->o_res.stateid, NULL, 1682 data->o_arg.fmode); 1683 out: 1684 nfs_release_seqid(data->o_arg.seqid); 1685 return state; 1686 } 1687 1688 static struct nfs4_state * ··· 2071 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2072 case NFS4_OPEN_CLAIM_FH: 2073 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2074 } 2075 data->timestamp = jiffies; 2076 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, ··· 2553 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2554 clear_bit(NFS_OPEN_STATE, &state->flags); 2555 stateid->type = NFS4_INVALID_STATEID_TYPE; 2556 return status; 2557 + } 2558 if (nfs_open_stateid_recover_openmode(state)) 2559 return -NFS4ERR_OPENMODE; 2560 return NFS_OK;
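The refactor above splits "which inode does this open refer to?" out of _nfs4_opendata_to_nfs4_state(): claim types that carry attributes look up a fresh inode from the reply, the others reuse the dentry's already-known inode, and the open state is then derived from whichever inode was chosen. A standalone sketch of that claim-driven selection, with invented enum and struct names, is below.

    #include <stdio.h>

    enum open_claim { CLAIM_NULL, CLAIM_FH, CLAIM_DELEGATE_CUR };

    struct inode { int ino; };

    struct opendata {
        enum open_claim claim;
        int have_attrs;                  /* did the OPEN reply carry attributes? */
        struct inode *dentry_inode;      /* inode the VFS already knows about    */
    };

    /* Pick the inode this open refers to, keyed off the claim type. */
    static struct inode *opendata_get_inode(struct opendata *d)
    {
        static struct inode fresh = { .ino = 42 };   /* stand-in for a lookup */

        switch (d->claim) {
        case CLAIM_NULL:
        case CLAIM_DELEGATE_CUR:
            if (!d->have_attrs)
                return NULL;             /* the driver would return -EAGAIN */
            return &fresh;               /* inode built from the reply      */
        default:                         /* e.g. CLAIM_FH                   */
            return d->dentry_inode;      /* reuse the already-known inode   */
        }
    }

    int main(void)
    {
        struct inode known = { .ino = 7 };
        struct opendata d = { .claim = CLAIM_FH, .dentry_inode = &known };
        struct inode *inode = opendata_get_inode(&d);

        printf("open resolved to inode %d\n", inode ? inode->ino : -1);
        return 0;
    }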
+4 -4
fs/proc/meminfo.c
··· 106 global_node_page_state(NR_FILE_MAPPED)); 107 show_val_kb(m, "Shmem: ", i.sharedram); 108 show_val_kb(m, "Slab: ", 109 - global_page_state(NR_SLAB_RECLAIMABLE) + 110 - global_page_state(NR_SLAB_UNRECLAIMABLE)); 111 112 show_val_kb(m, "SReclaimable: ", 113 - global_page_state(NR_SLAB_RECLAIMABLE)); 114 show_val_kb(m, "SUnreclaim: ", 115 - global_page_state(NR_SLAB_UNRECLAIMABLE)); 116 seq_printf(m, "KernelStack: %8lu kB\n", 117 global_page_state(NR_KERNEL_STACK_KB)); 118 show_val_kb(m, "PageTables: ",
··· 106 global_node_page_state(NR_FILE_MAPPED)); 107 show_val_kb(m, "Shmem: ", i.sharedram); 108 show_val_kb(m, "Slab: ", 109 + global_node_page_state(NR_SLAB_RECLAIMABLE) + 110 + global_node_page_state(NR_SLAB_UNRECLAIMABLE)); 111 112 show_val_kb(m, "SReclaimable: ", 113 + global_node_page_state(NR_SLAB_RECLAIMABLE)); 114 show_val_kb(m, "SUnreclaim: ", 115 + global_node_page_state(NR_SLAB_UNRECLAIMABLE)); 116 seq_printf(m, "KernelStack: %8lu kB\n", 117 global_page_state(NR_KERNEL_STACK_KB)); 118 show_val_kb(m, "PageTables: ",
+5 -2
fs/proc/task_mmu.c
··· 16 #include <linux/mmu_notifier.h> 17 #include <linux/page_idle.h> 18 #include <linux/shmem_fs.h> 19 20 #include <asm/elf.h> 21 - #include <linux/uaccess.h> 22 #include <asm/tlbflush.h> 23 #include "internal.h" 24 ··· 1009 struct mm_struct *mm; 1010 struct vm_area_struct *vma; 1011 enum clear_refs_types type; 1012 int itype; 1013 int rv; 1014 ··· 1056 } 1057 1058 down_read(&mm->mmap_sem); 1059 if (type == CLEAR_REFS_SOFT_DIRTY) { 1060 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1061 if (!(vma->vm_flags & VM_SOFTDIRTY)) ··· 1078 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); 1079 if (type == CLEAR_REFS_SOFT_DIRTY) 1080 mmu_notifier_invalidate_range_end(mm, 0, -1); 1081 - flush_tlb_mm(mm); 1082 up_read(&mm->mmap_sem); 1083 out_mm: 1084 mmput(mm);
··· 16 #include <linux/mmu_notifier.h> 17 #include <linux/page_idle.h> 18 #include <linux/shmem_fs.h> 19 + #include <linux/uaccess.h> 20 21 #include <asm/elf.h> 22 + #include <asm/tlb.h> 23 #include <asm/tlbflush.h> 24 #include "internal.h" 25 ··· 1008 struct mm_struct *mm; 1009 struct vm_area_struct *vma; 1010 enum clear_refs_types type; 1011 + struct mmu_gather tlb; 1012 int itype; 1013 int rv; 1014 ··· 1054 } 1055 1056 down_read(&mm->mmap_sem); 1057 + tlb_gather_mmu(&tlb, mm, 0, -1); 1058 if (type == CLEAR_REFS_SOFT_DIRTY) { 1059 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1060 if (!(vma->vm_flags & VM_SOFTDIRTY)) ··· 1075 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); 1076 if (type == CLEAR_REFS_SOFT_DIRTY) 1077 mmu_notifier_invalidate_range_end(mm, 0, -1); 1078 + tlb_finish_mmu(&tlb, 0, -1); 1079 up_read(&mm->mmap_sem); 1080 out_mm: 1081 mmput(mm);
+2 -2
fs/userfaultfd.c
··· 1600 uffdio_copy.len); 1601 mmput(ctx->mm); 1602 } else { 1603 - return -ENOSPC; 1604 } 1605 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1606 return -EFAULT; ··· 1647 uffdio_zeropage.range.len); 1648 mmput(ctx->mm); 1649 } else { 1650 - return -ENOSPC; 1651 } 1652 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1653 return -EFAULT;
··· 1600 uffdio_copy.len); 1601 mmput(ctx->mm); 1602 } else { 1603 + return -ESRCH; 1604 } 1605 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1606 return -EFAULT; ··· 1647 uffdio_zeropage.range.len); 1648 mmput(ctx->mm); 1649 } else { 1650 + return -ESRCH; 1651 } 1652 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1653 return -EFAULT;
+7 -5
fs/xfs/xfs_inode.c
··· 874 case S_IFREG: 875 case S_IFDIR: 876 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 877 - uint64_t di_flags2 = 0; 878 uint di_flags = 0; 879 880 if (S_ISDIR(mode)) { ··· 910 di_flags |= XFS_DIFLAG_NODEFRAG; 911 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 912 di_flags |= XFS_DIFLAG_FILESTREAM; 913 - if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX) 914 - di_flags2 |= XFS_DIFLAG2_DAX; 915 916 ip->i_d.di_flags |= di_flags; 917 - ip->i_d.di_flags2 |= di_flags2; 918 } 919 if (pip && 920 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && 921 pip->i_d.di_version == 3 && 922 ip->i_d.di_version == 3) { 923 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { 924 - ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; 925 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize; 926 } 927 } 928 /* FALLTHROUGH */ 929 case S_IFLNK:
··· 874 case S_IFREG: 875 case S_IFDIR: 876 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 877 uint di_flags = 0; 878 879 if (S_ISDIR(mode)) { ··· 911 di_flags |= XFS_DIFLAG_NODEFRAG; 912 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 913 di_flags |= XFS_DIFLAG_FILESTREAM; 914 915 ip->i_d.di_flags |= di_flags; 916 } 917 if (pip && 918 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && 919 pip->i_d.di_version == 3 && 920 ip->i_d.di_version == 3) { 921 + uint64_t di_flags2 = 0; 922 + 923 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { 924 + di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; 925 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize; 926 } 927 + if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX) 928 + di_flags2 |= XFS_DIFLAG2_DAX; 929 + 930 + ip->i_d.di_flags2 |= di_flags2; 931 } 932 /* FALLTHROUGH */ 933 case S_IFLNK:
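The hunk moves the DAX bit of di_flags2 (together with the COW extent size bit) so both are inherited only inside the block that already requires a version-3 inode on parent and child, since the flags2 field is not meaningful on older inode versions. A small standalone sketch of "inherit extended flags only when the format supports them", with invented flag names, follows.

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG2_DAX        (1ULL << 0)     /* illustrative flag bits */
    #define FLAG2_COWEXTSIZE (1ULL << 1)

    struct inode {
        int      version;                    /* on-disk inode version             */
        uint64_t flags2;                     /* only meaningful when version >= 3 */
    };

    static void inherit_flags2(struct inode *child, const struct inode *parent)
    {
        uint64_t flags2 = 0;

        /* Extended flags exist only in the v3 inode format, so inherit
         * them only when both sides actually use that format. */
        if (parent->version < 3 || child->version < 3)
            return;

        if (parent->flags2 & FLAG2_COWEXTSIZE)
            flags2 |= FLAG2_COWEXTSIZE;
        if (parent->flags2 & FLAG2_DAX)
            flags2 |= FLAG2_DAX;

        child->flags2 |= flags2;
    }

    int main(void)
    {
        struct inode parent = { .version = 3, .flags2 = FLAG2_DAX };
        struct inode child  = { .version = 3 };

        inherit_flags2(&child, &parent);
        printf("child flags2: %#llx\n", (unsigned long long)child.flags2);
        return 0;
    }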
+1
fs/xfs/xfs_log_cil.c
··· 539 540 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); 541 queue_work(xfs_discard_wq, &ctx->discard_endio_work); 542 } 543 544 static void
··· 539 540 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); 541 queue_work(xfs_discard_wq, &ctx->discard_endio_work); 542 + bio_put(bio); 543 } 544 545 static void
+4 -3
include/asm-generic/tlb.h
··· 112 113 #define HAVE_GENERIC_MMU_GATHER 114 115 - void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); 116 void tlb_flush_mmu(struct mmu_gather *tlb); 117 - void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 118 - unsigned long end); 119 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, 120 int page_size); 121
··· 112 113 #define HAVE_GENERIC_MMU_GATHER 114 115 + void arch_tlb_gather_mmu(struct mmu_gather *tlb, 116 + struct mm_struct *mm, unsigned long start, unsigned long end); 117 void tlb_flush_mmu(struct mmu_gather *tlb); 118 + void arch_tlb_finish_mmu(struct mmu_gather *tlb, 119 + unsigned long start, unsigned long end, bool force); 120 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, 121 int page_size); 122
+1
include/linux/acpi.h
··· 1209 #endif 1210 1211 #ifdef CONFIG_ACPI_SPCR_TABLE 1212 int parse_spcr(bool earlycon); 1213 #else 1214 static inline int parse_spcr(bool earlycon) { return 0; }
··· 1209 #endif 1210 1211 #ifdef CONFIG_ACPI_SPCR_TABLE 1212 + extern bool qdf2400_e44_present; 1213 int parse_spcr(bool earlycon); 1214 #else 1215 static inline int parse_spcr(bool earlycon) { return 0; }
-2
include/linux/cpuhotplug.h
··· 39 CPUHP_PCI_XGENE_DEAD, 40 CPUHP_IOMMU_INTEL_DEAD, 41 CPUHP_LUSTRE_CFS_DEAD, 42 - CPUHP_SCSI_BNX2FC_DEAD, 43 - CPUHP_SCSI_BNX2I_DEAD, 44 CPUHP_WORKQUEUE_PREP, 45 CPUHP_POWER_NUMA_PREPARE, 46 CPUHP_HRTIMERS_PREPARE,
··· 39 CPUHP_PCI_XGENE_DEAD, 40 CPUHP_IOMMU_INTEL_DEAD, 41 CPUHP_LUSTRE_CFS_DEAD, 42 CPUHP_WORKQUEUE_PREP, 43 CPUHP_POWER_NUMA_PREPARE, 44 CPUHP_HRTIMERS_PREPARE,
+1 -1
include/linux/device.h
··· 843 * hibernation, system resume and during runtime PM transitions 844 * along with subsystem-level and driver-level callbacks. 845 * @pins: For device pin management. 846 - * See Documentation/pinctrl.txt for details. 847 * @msi_list: Hosts MSI descriptors 848 * @msi_domain: The generic MSI domain this device is using. 849 * @numa_node: NUMA node this device is close to.
··· 843 * hibernation, system resume and during runtime PM transitions 844 * along with subsystem-level and driver-level callbacks. 845 * @pins: For device pin management. 846 + * See Documentation/driver-api/pinctl.rst for details. 847 * @msi_list: Hosts MSI descriptors 848 * @msi_domain: The generic MSI domain this device is using. 849 * @numa_node: NUMA node this device is close to.
+2 -1
include/linux/i2c.h
··· 689 #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ 690 #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 691 #define I2C_CLASS_SPD (1<<7) /* Memory modules */ 692 - #define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ 693 694 /* Internal numbers to terminate lists */ 695 #define I2C_CLIENT_END 0xfffeU
··· 689 #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ 690 #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 691 #define I2C_CLASS_SPD (1<<7) /* Memory modules */ 692 + /* Warn users that the adapter doesn't support classes anymore */ 693 + #define I2C_CLASS_DEPRECATED (1<<8) 694 695 /* Internal numbers to terminate lists */ 696 #define I2C_CLIENT_END 0xfffeU
+7
include/linux/iio/common/st_sensors.h
··· 105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; 106 }; 107 108 /** 109 * struct st_sensor_bdu - ST sensor device block data update 110 * @addr: address of the register. ··· 202 * @bdu: Block data update register. 203 * @das: Data Alignment Selection register. 204 * @drdy_irq: Data ready register of the sensor. 205 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. 206 * @bootime: samples to discard when sensor passing from power-down to power-up. 207 */ ··· 219 struct st_sensor_bdu bdu; 220 struct st_sensor_das das; 221 struct st_sensor_data_ready_irq drdy_irq; 222 bool multi_read_bit; 223 unsigned int bootime; 224 };
··· 105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; 106 }; 107 108 + struct st_sensor_sim { 109 + u8 addr; 110 + u8 value; 111 + }; 112 + 113 /** 114 * struct st_sensor_bdu - ST sensor device block data update 115 * @addr: address of the register. ··· 197 * @bdu: Block data update register. 198 * @das: Data Alignment Selection register. 199 * @drdy_irq: Data ready register of the sensor. 200 + * @sim: SPI serial interface mode register of the sensor. 201 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. 202 * @bootime: samples to discard when sensor passing from power-down to power-up. 203 */ ··· 213 struct st_sensor_bdu bdu; 214 struct st_sensor_das das; 215 struct st_sensor_data_ready_irq drdy_irq; 216 + struct st_sensor_sim sim; 217 bool multi_read_bit; 218 unsigned int bootime; 219 };
+1
include/linux/mlx4/device.h
··· 620 u32 dmfs_high_rate_qpn_base; 621 u32 dmfs_high_rate_qpn_range; 622 u32 vf_caps; 623 struct mlx4_rate_limit_caps rl_caps; 624 }; 625
··· 620 u32 dmfs_high_rate_qpn_base; 621 u32 dmfs_high_rate_qpn_range; 622 u32 vf_caps; 623 + bool wol_port[MLX4_MAX_PORTS + 1]; 624 struct mlx4_rate_limit_caps rl_caps; 625 }; 626
-1
include/linux/mlx5/qp.h
··· 212 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff 213 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 214 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 215 - #define MLX5_WQE_AV_EXT 0x80000000 216 217 enum { 218 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
··· 212 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff 213 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 214 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 215 216 enum { 217 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+38 -26
include/linux/mm_types.h
··· 487 /* numa_scan_seq prevents two threads setting pte_numa */ 488 int numa_scan_seq; 489 #endif 490 - #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 491 /* 492 * An operation with batched TLB flushing is going on. Anything that 493 * can move process memory needs to flush the TLB when moving a 494 * PROT_NONE or PROT_NUMA mapped page. 495 */ 496 - bool tlb_flush_pending; 497 - #endif 498 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 499 /* See flush_tlb_batched_pending() */ 500 bool tlb_flush_batched; ··· 520 return mm->cpu_vm_mask_var; 521 } 522 523 - #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 524 /* 525 * Memory barriers to keep this state in sync are graciously provided by 526 * the page table locks, outside of which no page table modifications happen. 527 - * The barriers below prevent the compiler from re-ordering the instructions 528 - * around the memory barriers that are already present in the code. 529 */ 530 static inline bool mm_tlb_flush_pending(struct mm_struct *mm) 531 { 532 - barrier(); 533 - return mm->tlb_flush_pending; 534 } 535 - static inline void set_tlb_flush_pending(struct mm_struct *mm) 536 { 537 - mm->tlb_flush_pending = true; 538 539 /* 540 - * Guarantee that the tlb_flush_pending store does not leak into the 541 * critical section updating the page tables 542 */ 543 smp_mb__before_spinlock(); 544 } 545 /* Clearing is done after a TLB flush, which also provides a barrier. */ 546 - static inline void clear_tlb_flush_pending(struct mm_struct *mm) 547 { 548 - barrier(); 549 - mm->tlb_flush_pending = false; 550 } 551 - #else 552 - static inline bool mm_tlb_flush_pending(struct mm_struct *mm) 553 - { 554 - return false; 555 - } 556 - static inline void set_tlb_flush_pending(struct mm_struct *mm) 557 - { 558 - } 559 - static inline void clear_tlb_flush_pending(struct mm_struct *mm) 560 - { 561 - } 562 - #endif 563 564 struct vm_fault; 565
··· 487 /* numa_scan_seq prevents two threads setting pte_numa */ 488 int numa_scan_seq; 489 #endif 490 /* 491 * An operation with batched TLB flushing is going on. Anything that 492 * can move process memory needs to flush the TLB when moving a 493 * PROT_NONE or PROT_NUMA mapped page. 494 */ 495 + atomic_t tlb_flush_pending; 496 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 497 /* See flush_tlb_batched_pending() */ 498 bool tlb_flush_batched; ··· 522 return mm->cpu_vm_mask_var; 523 } 524 525 + struct mmu_gather; 526 + extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 527 + unsigned long start, unsigned long end); 528 + extern void tlb_finish_mmu(struct mmu_gather *tlb, 529 + unsigned long start, unsigned long end); 530 + 531 /* 532 * Memory barriers to keep this state in sync are graciously provided by 533 * the page table locks, outside of which no page table modifications happen. 534 + * The barriers are used to ensure that tlb_flush_pending updates, which 535 + * happen while the lock is not taken, and the PTE updates, which happen 536 + * while the lock is taken, are serialized. 537 */ 538 static inline bool mm_tlb_flush_pending(struct mm_struct *mm) 539 { 540 + return atomic_read(&mm->tlb_flush_pending) > 0; 541 } 542 + 543 + /* 544 + * Returns true if at least two of the above TLB batching threads are running in parallel. 545 + */ 546 + static inline bool mm_tlb_flush_nested(struct mm_struct *mm) 547 { 548 + return atomic_read(&mm->tlb_flush_pending) > 1; 549 + } 550 + 551 + static inline void init_tlb_flush_pending(struct mm_struct *mm) 552 + { 553 + atomic_set(&mm->tlb_flush_pending, 0); 554 + } 555 + 556 + static inline void inc_tlb_flush_pending(struct mm_struct *mm) 557 + { 558 + atomic_inc(&mm->tlb_flush_pending); 559 560 /* 561 + * Guarantee that the tlb_flush_pending increase does not leak into the 562 + * critical section updating the page tables 563 */ 564 smp_mb__before_spinlock(); 565 } 566 + 567 /* Clearing is done after a TLB flush, which also provides a barrier. */ 568 + static inline void dec_tlb_flush_pending(struct mm_struct *mm) 569 { 570 + /* 571 + * Guarantee that the tlb_flush_pending does not leak into the 572 + * critical section, since we must order the PTE change and changes to 573 + * the pending TLB flush indication. We could have relied on TLB flush 574 + * as a memory barrier, but this behavior is not clearly documented. 575 + */ 576 + smp_mb__before_atomic(); 577 + atomic_dec(&mm->tlb_flush_pending); 578 } 579 580 struct vm_fault; 581
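The mm_types.h hunk above turns tlb_flush_pending from an on/off flag into a counter: mm_tlb_flush_pending() means at least one batched flush is in flight, mm_tlb_flush_nested() means more than one, and the inc/dec helpers bracket each batching operation. A minimal userspace sketch of that counter contract, using C11 atomics rather than the kernel's atomic_t (names here are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the pending-flush counter; the helpers mirror the
     * kernel ones above but this is not the kernel implementation. */
    struct mm_model { atomic_int tlb_flush_pending; };

    static void init_pending(struct mm_model *mm)  { atomic_store(&mm->tlb_flush_pending, 0); }
    static void inc_pending(struct mm_model *mm)   { atomic_fetch_add(&mm->tlb_flush_pending, 1); }
    static void dec_pending(struct mm_model *mm)   { atomic_fetch_sub(&mm->tlb_flush_pending, 1); }
    static bool flush_pending(struct mm_model *mm) { return atomic_load(&mm->tlb_flush_pending) > 0; }
    static bool flush_nested(struct mm_model *mm)  { return atomic_load(&mm->tlb_flush_pending) > 1; }

    int main(void)
    {
        struct mm_model mm;

        init_pending(&mm);
        inc_pending(&mm);              /* first batching operation starts */
        inc_pending(&mm);              /* second one on the same mm */
        printf("pending=%d nested=%d\n", flush_pending(&mm), flush_nested(&mm));
        dec_pending(&mm);
        dec_pending(&mm);
        printf("pending=%d\n", flush_pending(&mm));
        return 0;
    }

With two increments outstanding the first line prints pending=1 nested=1, which is exactly the state the new tlb_finish_mmu() uses to decide on a forced flush.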
+3 -3
include/linux/mtd/nand.h
··· 681 * @tWW_min: WP# transition to WE# low 682 */ 683 struct nand_sdr_timings { 684 - u32 tBERS_max; 685 u32 tCCS_min; 686 - u32 tPROG_max; 687 - u32 tR_max; 688 u32 tALH_min; 689 u32 tADL_min; 690 u32 tALS_min;
··· 681 * @tWW_min: WP# transition to WE# low 682 */ 683 struct nand_sdr_timings { 684 + u64 tBERS_max; 685 u32 tCCS_min; 686 + u64 tPROG_max; 687 + u64 tR_max; 688 u32 tALH_min; 689 u32 tADL_min; 690 u32 tALS_min;
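The nand.h hunk widens the maximum-timing fields because, with the timings kept in picoseconds as elsewhere in this structure, realistic erase/program/read maxima no longer fit in 32 bits. A small standalone illustration of the overflow (the 10 ms figure is a hypothetical example, not taken from any datasheet):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical 10 ms block-erase maximum, expressed in picoseconds. */
        uint64_t tBERS_ps = 10ULL * 1000 * 1000 * 1000;   /* 1e10 ps */

        /* What a u32 field would have kept of it. */
        uint32_t truncated = (uint32_t)tBERS_ps;

        printf("64-bit: %llu ps\n", (unsigned long long)tBERS_ps);
        printf("32-bit: %u ps (truncated)\n", truncated);
        return 0;
    }

Since UINT32_MAX is roughly 4.29e9, anything above about 4.3 ms in picoseconds silently wraps in a u32, hence the move to u64 for the _max fields.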
+7
include/linux/nvme-fc-driver.h
··· 346 * indicating an FC transport Aborted status. 347 * Entrypoint is Mandatory. 348 * 349 * @max_hw_queues: indicates the maximum number of hw queues the LLDD 350 * supports for cpu affinitization. 351 * Value is Mandatory. Must be at least 1. ··· 850 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport, 851 struct nvmefc_tgt_fcp_req *fcpreq); 852 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, 853 struct nvmefc_tgt_fcp_req *fcpreq); 854 855 u32 max_hw_queues;
··· 346 * indicating an FC transport Aborted status. 347 * Entrypoint is Mandatory. 348 * 349 + * @defer_rcv: Called by the transport to signal the LLDD that it has 350 + * begun processing of a previously received NVME CMD IU. The LLDD 351 + * is now free to re-use the rcv buffer associated with the 352 + * nvmefc_tgt_fcp_req. 353 + * 354 * @max_hw_queues: indicates the maximum number of hw queues the LLDD 355 * supports for cpu affinitization. 356 * Value is Mandatory. Must be at least 1. ··· 845 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport, 846 struct nvmefc_tgt_fcp_req *fcpreq); 847 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, 848 + struct nvmefc_tgt_fcp_req *fcpreq); 849 + void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, 850 + struct nvmefc_tgt_fcp_req *fcpreq); 851 852 u32 max_hw_queues;
+1
include/linux/pci.h
··· 1067 int __pci_reset_function(struct pci_dev *dev); 1068 int __pci_reset_function_locked(struct pci_dev *dev); 1069 int pci_reset_function(struct pci_dev *dev); 1070 int pci_try_reset_function(struct pci_dev *dev); 1071 int pci_probe_reset_slot(struct pci_slot *slot); 1072 int pci_reset_slot(struct pci_slot *slot);
··· 1067 int __pci_reset_function(struct pci_dev *dev); 1068 int __pci_reset_function_locked(struct pci_dev *dev); 1069 int pci_reset_function(struct pci_dev *dev); 1070 + int pci_reset_function_locked(struct pci_dev *dev); 1071 int pci_try_reset_function(struct pci_dev *dev); 1072 int pci_probe_reset_slot(struct pci_slot *slot); 1073 int pci_reset_slot(struct pci_slot *slot);
+2 -2
include/linux/pinctrl/pinconf-generic.h
··· 81 * it. 82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a 83 * value on the line. Use argument 1 to indicate high level, argument 0 to 84 - * indicate low level. (Please see Documentation/pinctrl.txt, section 85 - * "GPIO mode pitfalls" for a discussion around this parameter.) 86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power 87 * supplies, the argument to this parameter (on a custom format) tells 88 * the driver which alternative power source to use.
··· 81 * it. 82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a 83 * value on the line. Use argument 1 to indicate high level, argument 0 to 84 + * indicate low level. (Please see Documentation/driver-api/pinctl.rst, 85 + * section "GPIO mode pitfalls" for a discussion around this parameter.) 86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power 87 * supplies, the argument to this parameter (on a custom format) tells 88 * the driver which alternative power source to use.
+2
include/linux/platform_data/st_sensors_pdata.h
··· 17 * Available only for accelerometer and pressure sensors. 18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). 19 * @open_drain: set the interrupt line to be open drain if possible. 20 */ 21 struct st_sensors_platform_data { 22 u8 drdy_int_pin; 23 bool open_drain; 24 }; 25 26 #endif /* ST_SENSORS_PDATA_H */
··· 17 * Available only for accelerometer and pressure sensors. 18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). 19 * @open_drain: set the interrupt line to be open drain if possible. 20 + * @spi_3wire: enable spi-3wire mode. 21 */ 22 struct st_sensors_platform_data { 23 u8 drdy_int_pin; 24 bool open_drain; 25 + bool spi_3wire; 26 }; 27 28 #endif /* ST_SENSORS_PDATA_H */
+20
include/linux/ptp_clock_kernel.h
··· 99 * parameter func: the desired function to use. 100 * parameter chan: the function channel index to use. 101 * 102 * Drivers should embed their ptp_clock_info within a private 103 * structure, obtaining a reference to it using container_of(). 104 * ··· 131 struct ptp_clock_request *request, int on); 132 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, 133 enum ptp_pin_function func, unsigned int chan); 134 }; 135 136 struct ptp_clock; ··· 217 int ptp_find_pin(struct ptp_clock *ptp, 218 enum ptp_pin_function func, unsigned int chan); 219 220 #else 221 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 222 struct device *parent) ··· 241 static inline int ptp_find_pin(struct ptp_clock *ptp, 242 enum ptp_pin_function func, unsigned int chan) 243 { return -1; } 244 #endif 245 246 #endif
··· 99 * parameter func: the desired function to use. 100 * parameter chan: the function channel index to use. 101 * 102 + * @do_work: Request driver to perform auxiliary (periodic) operations 103 + * Driver should return delay of the next auxiliary work scheduling 104 + * time (>=0) or negative value in case further scheduling 105 + * is not required. 106 + * 107 * Drivers should embed their ptp_clock_info within a private 108 * structure, obtaining a reference to it using container_of(). 109 * ··· 126 struct ptp_clock_request *request, int on); 127 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, 128 enum ptp_pin_function func, unsigned int chan); 129 + long (*do_aux_work)(struct ptp_clock_info *ptp); 130 }; 131 132 struct ptp_clock; ··· 211 int ptp_find_pin(struct ptp_clock *ptp, 212 enum ptp_pin_function func, unsigned int chan); 213 214 + /** 215 + * ptp_schedule_worker() - schedule ptp auxiliary work 216 + * 217 + * @ptp: The clock obtained from ptp_clock_register(). 218 + * @delay: number of jiffies to wait before queuing 219 + * See kthread_queue_delayed_work() for more info. 220 + */ 221 + 222 + int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); 223 + 224 #else 225 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 226 struct device *parent) ··· 225 static inline int ptp_find_pin(struct ptp_clock *ptp, 226 enum ptp_pin_function func, unsigned int chan) 227 { return -1; } 228 + static inline int ptp_schedule_worker(struct ptp_clock *ptp, 229 + unsigned long delay) 230 + { return -EOPNOTSUPP; } 231 + 232 #endif 233 234 #endif
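The new do_aux_work callback follows a simple contract: return the delay until the next run, or a negative value to stop rescheduling, with ptp_schedule_worker() used to kick the work off initially. A userspace model of that contract under assumed semantics (the worker name and the plain loop standing in for the kthread worker are illustrative only):

    #include <stdio.h>

    /* Model of the do_aux_work return-value contract described above: the
     * worker returns the delay until its next invocation, or a negative
     * value when no further scheduling is required. */
    static long demo_aux_work(int *runs_left)
    {
        printf("aux work ran, %d runs left\n", *runs_left);
        return --(*runs_left) > 0 ? 10 : -1;   /* re-run in 10 "jiffies" or stop */
    }

    int main(void)
    {
        int runs_left = 3;
        long delay;

        do {
            delay = demo_aux_work(&runs_left);   /* the kernel would queue delayed work here */
        } while (delay >= 0);
        return 0;
    }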
+2 -1
include/linux/sync_file.h
··· 43 #endif 44 45 wait_queue_head_t wq; 46 47 struct dma_fence *fence; 48 struct dma_fence_cb cb; 49 }; 50 51 - #define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS 52 53 struct sync_file *sync_file_create(struct dma_fence *fence); 54 struct dma_fence *sync_file_get_fence(int fd);
··· 43 #endif 44 45 wait_queue_head_t wq; 46 + unsigned long flags; 47 48 struct dma_fence *fence; 49 struct dma_fence_cb cb; 50 }; 51 52 + #define POLL_ENABLED 0 53 54 struct sync_file *sync_file_create(struct dma_fence *fence); 55 struct dma_fence *sync_file_get_fence(int fd);
+10
include/net/tcp.h
··· 1916 u64 xmit_time); 1917 extern void tcp_rack_reo_timeout(struct sock *sk); 1918 1919 /* 1920 * Save and compile IPv4 options, return a pointer to it 1921 */
··· 1916 u64 xmit_time); 1917 extern void tcp_rack_reo_timeout(struct sock *sk); 1918 1919 + /* At how many usecs into the future should the RTO fire? */ 1920 + static inline s64 tcp_rto_delta_us(const struct sock *sk) 1921 + { 1922 + const struct sk_buff *skb = tcp_write_queue_head(sk); 1923 + u32 rto = inet_csk(sk)->icsk_rto; 1924 + u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); 1925 + 1926 + return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 1927 + } 1928 + 1929 /* 1930 * Save and compile IPv4 options, return a pointer to it 1931 */
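tcp_rto_delta_us() above is pure arithmetic: take the send timestamp of the head of the write queue, add the RTO converted to microseconds, and subtract the current timestamp. A standalone model of that calculation, with an assumed HZ of 1000 for the jiffies conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000U                       /* assumed tick rate for this example */

    static uint64_t jiffies_to_usecs_model(uint32_t j)
    {
        return (uint64_t)j * (1000000U / HZ);
    }

    /* delta = (head skb send time + RTO) - now, all in microseconds. */
    static int64_t rto_delta_us_model(uint64_t head_tx_us, uint32_t rto_jiffies, uint64_t now_us)
    {
        return (int64_t)(head_tx_us + jiffies_to_usecs_model(rto_jiffies) - now_us);
    }

    int main(void)
    {
        /* Head sent at t=1,000,000 us, RTO of 200 jiffies (200 ms here), now=1,150,000 us. */
        int64_t delta = rto_delta_us_model(1000000, 200, 1150000);

        printf("RTO fires in %lld us\n", (long long)delta);   /* 50,000 us in this example */
        return 0;
    }

A negative result means the RTO is already overdue, which is why the callers above guard the value before converting it back to jiffies.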
+1
include/target/iscsi/iscsi_target_core.h
··· 786 int np_sock_type; 787 enum np_thread_state_table np_thread_state; 788 bool enabled; 789 enum iscsi_timer_flags_table np_login_timer_flags; 790 u32 np_exports; 791 enum np_flags_table np_flags;
··· 786 int np_sock_type; 787 enum np_thread_state_table np_thread_state; 788 bool enabled; 789 + atomic_t np_reset_count; 790 enum iscsi_timer_flags_table np_login_timer_flags; 791 u32 np_exports; 792 enum np_flags_table np_flags;
+3 -3
include/uapi/drm/msm_drm.h
··· 171 __u32 size; /* in, cmdstream size */ 172 __u32 pad; 173 __u32 nr_relocs; /* in, number of submit_reloc's */ 174 - __u64 __user relocs; /* in, ptr to array of submit_reloc's */ 175 }; 176 177 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the ··· 215 __u32 fence; /* out */ 216 __u32 nr_bos; /* in, number of submit_bo's */ 217 __u32 nr_cmds; /* in, number of submit_cmd's */ 218 - __u64 __user bos; /* in, ptr to array of submit_bo's */ 219 - __u64 __user cmds; /* in, ptr to array of submit_cmd's */ 220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ 221 }; 222
··· 171 __u32 size; /* in, cmdstream size */ 172 __u32 pad; 173 __u32 nr_relocs; /* in, number of submit_reloc's */ 174 + __u64 relocs; /* in, ptr to array of submit_reloc's */ 175 }; 176 177 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the ··· 215 __u32 fence; /* out */ 216 __u32 nr_bos; /* in, number of submit_bo's */ 217 __u32 nr_cmds; /* in, number of submit_cmd's */ 218 + __u64 bos; /* in, ptr to array of submit_bo's */ 219 + __u64 cmds; /* in, ptr to array of submit_cmd's */ 220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ 221 }; 222
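The msm_drm.h hunk drops the __user annotation from the UAPI structures, since userspace only ever sees plain __u64 fields that happen to carry pointers. A hedged userspace-side sketch of how such a field is typically filled (the struct and field names here are stand-ins, not the real UAPI):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: a userspace struct with a u64 field that carries a
     * pointer across an ioctl ABI, as the submit structures above do. */
    struct demo_submit {
        uint64_t bos;        /* userspace pointer, stored as a plain 64-bit value */
        uint32_t nr_bos;
    };

    int main(void)
    {
        uint32_t bo_handles[4] = { 1, 2, 3, 4 };
        struct demo_submit req;

        /* The usual idiom: cast through uintptr_t so 32-bit builds stay clean. */
        req.bos = (uint64_t)(uintptr_t)bo_handles;
        req.nr_bos = 4;

        printf("passing array at 0x%llx, %u entries\n",
               (unsigned long long)req.bos, req.nr_bos);
        return 0;
    }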
+1 -1
kernel/fork.c
··· 807 mm_init_aio(mm); 808 mm_init_owner(mm, p); 809 mmu_notifier_mm_init(mm); 810 - clear_tlb_flush_pending(mm); 811 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 812 mm->pmd_huge_pte = NULL; 813 #endif
··· 807 mm_init_aio(mm); 808 mm_init_owner(mm, p); 809 mmu_notifier_mm_init(mm); 810 + init_tlb_flush_pending(mm); 811 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 812 mm->pmd_huge_pte = NULL; 813 #endif
+3 -2
kernel/futex.c
··· 670 * this reference was taken by ihold under the page lock 671 * pinning the inode in place so i_lock was unnecessary. The 672 * only way for this check to fail is if the inode was 673 - * truncated in parallel so warn for now if this happens. 674 * 675 * We are not calling into get_futex_key_refs() in file-backed 676 * cases, therefore a successful atomic_inc return below will 677 * guarantee that get_futex_key() will still imply smp_mb(); (B). 678 */ 679 - if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { 680 rcu_read_unlock(); 681 put_page(page); 682
··· 670 * this reference was taken by ihold under the page lock 671 * pinning the inode in place so i_lock was unnecessary. The 672 * only way for this check to fail is if the inode was 673 + * truncated in parallel which is almost certainly an 674 + * application bug. In such a case, just retry. 675 * 676 * We are not calling into get_futex_key_refs() in file-backed 677 * cases, therefore a successful atomic_inc return below will 678 * guarantee that get_futex_key() will still imply smp_mb(); (B). 679 */ 680 + if (!atomic_inc_not_zero(&inode->i_count)) { 681 rcu_read_unlock(); 682 put_page(page); 683
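The futex change above stops warning when atomic_inc_not_zero() fails and simply retries, treating a concurrently truncated inode as an application-level race rather than a kernel bug. A userspace model of the increment-if-not-zero primitive itself, built from a C11 compare-and-swap loop:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of atomic_inc_not_zero(): take a reference only if the
     * count has not already dropped to zero. */
    static bool inc_not_zero(atomic_int *count)
    {
        int old = atomic_load(count);

        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;       /* reference taken */
            /* old was reloaded by the failed CAS; loop and retry */
        }
        return false;              /* object already dying, caller must retry the lookup */
    }

    int main(void)
    {
        atomic_int live = 1, dead = 0;

        printf("live object: %s\n", inc_not_zero(&live) ? "got ref" : "retry lookup");
        printf("dead object: %s\n", inc_not_zero(&dead) ? "got ref" : "retry lookup");
        return 0;
    }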
+1 -1
kernel/power/snapshot.c
··· 1650 { 1651 unsigned long size; 1652 1653 - size = global_page_state(NR_SLAB_RECLAIMABLE) 1654 + global_node_page_state(NR_ACTIVE_ANON) 1655 + global_node_page_state(NR_INACTIVE_ANON) 1656 + global_node_page_state(NR_ACTIVE_FILE)
··· 1650 { 1651 unsigned long size; 1652 1653 + size = global_node_page_state(NR_SLAB_RECLAIMABLE) 1654 + global_node_page_state(NR_ACTIVE_ANON) 1655 + global_node_page_state(NR_INACTIVE_ANON) 1656 + global_node_page_state(NR_ACTIVE_FILE)
+5 -3
lib/fault-inject.c
··· 110 if (in_task()) { 111 unsigned int fail_nth = READ_ONCE(current->fail_nth); 112 113 - if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1)) 114 - goto fail; 115 116 - return false; 117 } 118 119 /* No need to check any other properties if the probability is 0 */
··· 110 if (in_task()) { 111 unsigned int fail_nth = READ_ONCE(current->fail_nth); 112 113 + if (fail_nth) { 114 + if (!WRITE_ONCE(current->fail_nth, fail_nth - 1)) 115 + goto fail; 116 117 + return false; 118 + } 119 } 120 121 /* No need to check any other properties if the probability is 0 */
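The fault-injection fix restructures the fail_nth handling so that an armed countdown always short-circuits the probability checks: the counter is decremented on every call and the fault fires only on the call that brings it to zero. A small userspace model of that corrected flow:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the corrected fail_nth logic above: a non-zero counter arms a
     * countdown, the fault fires when it reaches zero, and the probability
     * based checks are skipped entirely while the countdown is armed. */
    static bool should_fail_model(unsigned int *fail_nth)
    {
        if (*fail_nth) {
            if (--*fail_nth == 0)
                return true;    /* this is the Nth call: inject the fault */
            return false;       /* countdown armed but not yet expired */
        }
        return false;           /* the probability checks would run here */
    }

    int main(void)
    {
        unsigned int fail_nth = 3;

        for (int call = 1; call <= 4; call++)
            printf("call %d: %s\n", call,
                   should_fail_model(&fail_nth) ? "FAIL" : "ok");
        return 0;
    }

With fail_nth set to 3, only the third call fails and the fourth falls back to the normal (probability-driven) path, which is the behaviour the original nested condition broke.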
+8 -8
lib/test_kmod.c
··· 485 config->test_driver); 486 else 487 len += snprintf(buf+len, PAGE_SIZE - len, 488 - "driver:\tEMTPY\n"); 489 490 if (config->test_fs) 491 len += snprintf(buf+len, PAGE_SIZE - len, ··· 493 config->test_fs); 494 else 495 len += snprintf(buf+len, PAGE_SIZE - len, 496 - "fs:\tEMTPY\n"); 497 498 mutex_unlock(&test_dev->config_mutex); 499 ··· 746 strlen(test_str)); 747 break; 748 case TEST_KMOD_FS_TYPE: 749 - break; 750 kfree_const(config->test_fs); 751 config->test_driver = NULL; 752 copied = config_copy_test_fs(config, test_str, 753 strlen(test_str)); 754 default: 755 mutex_unlock(&test_dev->config_mutex); 756 return -EINVAL; ··· 880 int (*test_sync)(struct kmod_test_device *test_dev)) 881 { 882 int ret; 883 - long new; 884 unsigned int old_val; 885 886 - ret = kstrtol(buf, 10, &new); 887 if (ret) 888 return ret; 889 ··· 918 unsigned int max) 919 { 920 int ret; 921 - long new; 922 923 - ret = kstrtol(buf, 10, &new); 924 if (ret) 925 return ret; 926 ··· 1146 struct kmod_test_device *test_dev = NULL; 1147 int ret; 1148 1149 - mutex_unlock(&reg_dev_mutex); 1150 1151 /* int should suffice for number of devices, test for wrap */ 1152 if (unlikely(num_test_devs + 1) < 0) {
··· 485 config->test_driver); 486 else 487 len += snprintf(buf+len, PAGE_SIZE - len, 488 + "driver:\tEMPTY\n"); 489 490 if (config->test_fs) 491 len += snprintf(buf+len, PAGE_SIZE - len, ··· 493 config->test_fs); 494 else 495 len += snprintf(buf+len, PAGE_SIZE - len, 496 + "fs:\tEMPTY\n"); 497 498 mutex_unlock(&test_dev->config_mutex); 499 ··· 746 strlen(test_str)); 747 break; 748 case TEST_KMOD_FS_TYPE: 749 kfree_const(config->test_fs); 750 config->test_driver = NULL; 751 copied = config_copy_test_fs(config, test_str, 752 strlen(test_str)); 753 + break; 754 default: 755 mutex_unlock(&test_dev->config_mutex); 756 return -EINVAL; ··· 880 int (*test_sync)(struct kmod_test_device *test_dev)) 881 { 882 int ret; 883 + unsigned long new; 884 unsigned int old_val; 885 886 + ret = kstrtoul(buf, 10, &new); 887 if (ret) 888 return ret; 889 ··· 918 unsigned int max) 919 { 920 int ret; 921 + unsigned long new; 922 923 + ret = kstrtoul(buf, 10, &new); 924 if (ret) 925 return ret; 926 ··· 1146 struct kmod_test_device *test_dev = NULL; 1147 int ret; 1148 1149 + mutex_lock(&reg_dev_mutex); 1150 1151 /* int should suffice for number of devices, test for wrap */ 1152 if (unlikely(num_test_devs + 1) < 0) {
+1 -1
mm/balloon_compaction.c
··· 24 { 25 unsigned long flags; 26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | 27 - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO); 28 if (!page) 29 return NULL; 30
··· 24 { 25 unsigned long flags; 26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | 27 + __GFP_NOMEMALLOC | __GFP_NORETRY); 28 if (!page) 29 return NULL; 30
+1 -5
mm/debug.c
··· 124 #ifdef CONFIG_NUMA_BALANCING 125 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 126 #endif 127 - #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 128 "tlb_flush_pending %d\n" 129 - #endif 130 "def_flags: %#lx(%pGv)\n", 131 132 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, ··· 156 #ifdef CONFIG_NUMA_BALANCING 157 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, 158 #endif 159 - #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 160 - mm->tlb_flush_pending, 161 - #endif 162 mm->def_flags, &mm->def_flags 163 ); 164 }
··· 124 #ifdef CONFIG_NUMA_BALANCING 125 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 126 #endif 127 "tlb_flush_pending %d\n" 128 "def_flags: %#lx(%pGv)\n", 129 130 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, ··· 158 #ifdef CONFIG_NUMA_BALANCING 159 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, 160 #endif 161 + atomic_read(&mm->tlb_flush_pending), 162 mm->def_flags, &mm->def_flags 163 ); 164 }
+7
mm/huge_memory.c
··· 1496 } 1497 1498 /* 1499 * Migrate the THP to the requested node, returns with page unlocked 1500 * and access rights restored. 1501 */
··· 1496 } 1497 1498 /* 1499 + * The page_table_lock above provides a memory barrier 1500 + * with change_protection_range. 1501 + */ 1502 + if (mm_tlb_flush_pending(vma->vm_mm)) 1503 + flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); 1504 + 1505 + /* 1506 * Migrate the THP to the requested node, returns with page unlocked 1507 * and access rights restored. 1508 */
+1 -1
mm/hugetlb.c
··· 4062 return ret; 4063 out_release_unlock: 4064 spin_unlock(ptl); 4065 - out_release_nounlock: 4066 if (vm_shared) 4067 unlock_page(page); 4068 put_page(page); 4069 goto out; 4070 }
··· 4062 return ret; 4063 out_release_unlock: 4064 spin_unlock(ptl); 4065 if (vm_shared) 4066 unlock_page(page); 4067 + out_release_nounlock: 4068 put_page(page); 4069 goto out; 4070 }
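The hugetlb fix is purely about label placement: unlock_page() is moved above the out_release_nounlock label so that jumping there really does skip only the unlock. A generic sketch of the same goto-unwind discipline, where each label releases strictly less state than the one before it (the helpers here are hypothetical, not hugetlb code):

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(int fail_mid)
    {
        char *a, *b;
        int ret = -1;

        a = malloc(16);
        if (!a)
            goto out;
        b = malloc(16);
        if (!b)
            goto out_free_a;    /* b was never allocated: skip its cleanup */
        if (fail_mid)
            goto out_free_b;    /* both held: unwind both, in reverse order */

        ret = 0;                /* success path falls through the full unwind */
    out_free_b:
        free(b);
    out_free_a:
        free(a);
    out:
        return ret;
    }

    int main(void)
    {
        printf("normal run : %d\n", do_work(0));
        printf("forced fail: %d\n", do_work(1));
        return 0;
    }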
+2 -1
mm/ksm.c
··· 1038 goto out_unlock; 1039 1040 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || 1041 - (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) { 1042 pte_t entry; 1043 1044 swapped = PageSwapCache(page);
··· 1038 goto out_unlock; 1039 1040 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || 1041 + (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || 1042 + mm_tlb_flush_pending(mm)) { 1043 pte_t entry; 1044 1045 swapped = PageSwapCache(page);
+35 -7
mm/memory.c
··· 215 return true; 216 } 217 218 - /* tlb_gather_mmu 219 - * Called to initialize an (on-stack) mmu_gather structure for page-table 220 - * tear-down from @mm. The @fullmm argument is used when @mm is without 221 - * users and we're going to destroy the full address space (exit/execve). 222 - */ 223 - void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 224 { 225 tlb->mm = mm; 226 ··· 271 * Called at the end of the shootdown operation to free up any resources 272 * that were required. 273 */ 274 - void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 275 { 276 struct mmu_gather_batch *batch, *next; 277 278 tlb_flush_mmu(tlb); 279 ··· 397 } 398 399 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ 400 401 /* 402 * Note: this doesn't free the actual pages themselves. That
··· 215 return true; 216 } 217 218 + void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 219 + unsigned long start, unsigned long end) 220 { 221 tlb->mm = mm; 222 ··· 275 * Called at the end of the shootdown operation to free up any resources 276 * that were required. 277 */ 278 + void arch_tlb_finish_mmu(struct mmu_gather *tlb, 279 + unsigned long start, unsigned long end, bool force) 280 { 281 struct mmu_gather_batch *batch, *next; 282 + 283 + if (force) 284 + __tlb_adjust_range(tlb, start, end - start); 285 286 tlb_flush_mmu(tlb); 287 ··· 397 } 398 399 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ 400 + 401 + /* tlb_gather_mmu 402 + * Called to initialize an (on-stack) mmu_gather structure for page-table 403 + * tear-down from @mm. The @fullmm argument is used when @mm is without 404 + * users and we're going to destroy the full address space (exit/execve). 405 + */ 406 + void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 407 + unsigned long start, unsigned long end) 408 + { 409 + arch_tlb_gather_mmu(tlb, mm, start, end); 410 + inc_tlb_flush_pending(tlb->mm); 411 + } 412 + 413 + void tlb_finish_mmu(struct mmu_gather *tlb, 414 + unsigned long start, unsigned long end) 415 + { 416 + /* 417 + * If parallel threads are doing PTE changes on the same range under a 418 + * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB flush 419 + * by batching, a thread holding a stale TLB entry can fail to flush it 420 + * after observing pte_none|!pte_dirty, so flush the TLB forcefully if 421 + * we detect parallel PTE batching threads. 422 + */ 423 + bool force = mm_tlb_flush_nested(tlb->mm); 424 + 425 + arch_tlb_finish_mmu(tlb, start, end, force); 426 + dec_tlb_flush_pending(tlb->mm); 427 + } 428 429 /* 430 * Note: this doesn't free the actual pages themselves. That
-6
mm/migrate.c
··· 1937 put_page(new_page); 1938 goto out_fail; 1939 } 1940 - /* 1941 - * We are not sure a pending tlb flush here is for a huge page 1942 - * mapping or not. Hence use the tlb range variant 1943 - */ 1944 - if (mm_tlb_flush_pending(mm)) 1945 - flush_tlb_range(vma, mmun_start, mmun_end); 1946 1947 /* Prepare a page as a migration target */ 1948 __SetPageLocked(new_page);
··· 1937 put_page(new_page); 1938 goto out_fail; 1939 } 1940 1941 /* Prepare a page as a migration target */ 1942 __SetPageLocked(new_page);
+2 -2
mm/mprotect.c
··· 244 BUG_ON(addr >= end); 245 pgd = pgd_offset(mm, addr); 246 flush_cache_range(vma, addr, end); 247 - set_tlb_flush_pending(mm); 248 do { 249 next = pgd_addr_end(addr, end); 250 if (pgd_none_or_clear_bad(pgd)) ··· 256 /* Only flush the TLB if we actually modified any entries: */ 257 if (pages) 258 flush_tlb_range(vma, start, end); 259 - clear_tlb_flush_pending(mm); 260 261 return pages; 262 }
··· 244 BUG_ON(addr >= end); 245 pgd = pgd_offset(mm, addr); 246 flush_cache_range(vma, addr, end); 247 + inc_tlb_flush_pending(mm); 248 do { 249 next = pgd_addr_end(addr, end); 250 if (pgd_none_or_clear_bad(pgd)) ··· 256 /* Only flush the TLB if we actually modified any entries: */ 257 if (pages) 258 flush_tlb_range(vma, start, end); 259 + dec_tlb_flush_pending(mm); 260 261 return pages; 262 }
+6 -5
mm/page_alloc.c
··· 4458 * Part of the reclaimable slab consists of items that are in use, 4459 * and cannot be freed. Cap this estimate at the low watermark. 4460 */ 4461 - available += global_page_state(NR_SLAB_RECLAIMABLE) - 4462 - min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low); 4463 4464 if (available < 0) 4465 available = 0; ··· 4603 global_node_page_state(NR_FILE_DIRTY), 4604 global_node_page_state(NR_WRITEBACK), 4605 global_node_page_state(NR_UNSTABLE_NFS), 4606 - global_page_state(NR_SLAB_RECLAIMABLE), 4607 - global_page_state(NR_SLAB_UNRECLAIMABLE), 4608 global_node_page_state(NR_FILE_MAPPED), 4609 global_node_page_state(NR_SHMEM), 4610 global_page_state(NR_PAGETABLE), ··· 7669 7670 /* Make sure the range is really isolated. */ 7671 if (test_pages_isolated(outer_start, end, false)) { 7672 - pr_info("%s: [%lx, %lx) PFNs busy\n", 7673 __func__, outer_start, end); 7674 ret = -EBUSY; 7675 goto done;
··· 4458 * Part of the reclaimable slab consists of items that are in use, 4459 * and cannot be freed. Cap this estimate at the low watermark. 4460 */ 4461 + available += global_node_page_state(NR_SLAB_RECLAIMABLE) - 4462 + min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2, 4463 + wmark_low); 4464 4465 if (available < 0) 4466 available = 0; ··· 4602 global_node_page_state(NR_FILE_DIRTY), 4603 global_node_page_state(NR_WRITEBACK), 4604 global_node_page_state(NR_UNSTABLE_NFS), 4605 + global_node_page_state(NR_SLAB_RECLAIMABLE), 4606 + global_node_page_state(NR_SLAB_UNRECLAIMABLE), 4607 global_node_page_state(NR_FILE_MAPPED), 4608 global_node_page_state(NR_SHMEM), 4609 global_page_state(NR_PAGETABLE), ··· 7668 7669 /* Make sure the range is really isolated. */ 7670 if (test_pages_isolated(outer_start, end, false)) { 7671 + pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", 7672 __func__, outer_start, end); 7673 ret = -EBUSY; 7674 goto done;
+30 -22
mm/rmap.c
··· 888 .flags = PVMW_SYNC, 889 }; 890 int *cleaned = arg; 891 892 while (page_vma_mapped_walk(&pvmw)) { 893 int ret = 0; 894 - address = pvmw.address; 895 if (pvmw.pte) { 896 pte_t entry; 897 pte_t *pte = pvmw.pte; ··· 899 if (!pte_dirty(*pte) && !pte_write(*pte)) 900 continue; 901 902 - flush_cache_page(vma, address, pte_pfn(*pte)); 903 - entry = ptep_clear_flush(vma, address, pte); 904 entry = pte_wrprotect(entry); 905 entry = pte_mkclean(entry); 906 - set_pte_at(vma->vm_mm, address, pte, entry); 907 ret = 1; 908 } else { 909 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE ··· 913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 914 continue; 915 916 - flush_cache_page(vma, address, page_to_pfn(page)); 917 - entry = pmdp_huge_clear_flush(vma, address, pmd); 918 entry = pmd_wrprotect(entry); 919 entry = pmd_mkclean(entry); 920 - set_pmd_at(vma->vm_mm, address, pmd, entry); 921 ret = 1; 922 #else 923 /* unexpected pmd-mapped page? */ ··· 926 } 927 928 if (ret) { 929 - mmu_notifier_invalidate_page(vma->vm_mm, address); 930 (*cleaned)++; 931 } 932 } 933 934 return true; ··· 1328 }; 1329 pte_t pteval; 1330 struct page *subpage; 1331 - bool ret = true; 1332 enum ttu_flags flags = (enum ttu_flags)arg; 1333 1334 /* munlock has nothing to gain from examining un-locked vmas */ ··· 1368 VM_BUG_ON_PAGE(!pvmw.pte, page); 1369 1370 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1371 - address = pvmw.address; 1372 - 1373 1374 if (!(flags & TTU_IGNORE_ACCESS)) { 1375 - if (ptep_clear_flush_young_notify(vma, address, 1376 pvmw.pte)) { 1377 ret = false; 1378 page_vma_mapped_walk_done(&pvmw); ··· 1379 } 1380 1381 /* Nuke the page table entry. */ 1382 - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1383 if (should_defer_flush(mm, flags)) { 1384 /* 1385 * We clear the PTE but do not flush so potentially ··· 1389 * transition on a cached TLB entry is written through 1390 * and traps if the PTE is unmapped. 1391 */ 1392 - pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1393 1394 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1395 } else { 1396 - pteval = ptep_clear_flush(vma, address, pvmw.pte); 1397 } 1398 1399 /* Move the dirty bit to the page. Now the pte is gone. */ ··· 1409 if (PageHuge(page)) { 1410 int nr = 1 << compound_order(page); 1411 hugetlb_count_sub(nr, mm); 1412 - set_huge_swap_pte_at(mm, address, 1413 pvmw.pte, pteval, 1414 vma_mmu_pagesize(vma)); 1415 } else { 1416 dec_mm_counter(mm, mm_counter(page)); 1417 - set_pte_at(mm, address, pvmw.pte, pteval); 1418 } 1419 1420 } else if (pte_unused(pteval)) { ··· 1438 swp_pte = swp_entry_to_pte(entry); 1439 if (pte_soft_dirty(pteval)) 1440 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1441 - set_pte_at(mm, address, pvmw.pte, swp_pte); 1442 } else if (PageAnon(page)) { 1443 swp_entry_t entry = { .val = page_private(subpage) }; 1444 pte_t swp_pte; ··· 1464 * If the page was redirtied, it cannot be 1465 * discarded. Remap the page to page table. 
1466 */ 1467 - set_pte_at(mm, address, pvmw.pte, pteval); 1468 SetPageSwapBacked(page); 1469 ret = false; 1470 page_vma_mapped_walk_done(&pvmw); ··· 1472 } 1473 1474 if (swap_duplicate(entry) < 0) { 1475 - set_pte_at(mm, address, pvmw.pte, pteval); 1476 ret = false; 1477 page_vma_mapped_walk_done(&pvmw); 1478 break; ··· 1488 swp_pte = swp_entry_to_pte(entry); 1489 if (pte_soft_dirty(pteval)) 1490 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1491 - set_pte_at(mm, address, pvmw.pte, swp_pte); 1492 } else 1493 dec_mm_counter(mm, mm_counter_file(page)); 1494 discard: 1495 page_remove_rmap(subpage, PageHuge(page)); 1496 put_page(page); 1497 - mmu_notifier_invalidate_page(mm, address); 1498 } 1499 return ret; 1500 } 1501
··· 888 .flags = PVMW_SYNC, 889 }; 890 int *cleaned = arg; 891 + bool invalidation_needed = false; 892 893 while (page_vma_mapped_walk(&pvmw)) { 894 int ret = 0; 895 if (pvmw.pte) { 896 pte_t entry; 897 pte_t *pte = pvmw.pte; ··· 899 if (!pte_dirty(*pte) && !pte_write(*pte)) 900 continue; 901 902 + flush_cache_page(vma, pvmw.address, pte_pfn(*pte)); 903 + entry = ptep_clear_flush(vma, pvmw.address, pte); 904 entry = pte_wrprotect(entry); 905 entry = pte_mkclean(entry); 906 + set_pte_at(vma->vm_mm, pvmw.address, pte, entry); 907 ret = 1; 908 } else { 909 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE ··· 913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 914 continue; 915 916 + flush_cache_page(vma, pvmw.address, page_to_pfn(page)); 917 + entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd); 918 entry = pmd_wrprotect(entry); 919 entry = pmd_mkclean(entry); 920 + set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry); 921 ret = 1; 922 #else 923 /* unexpected pmd-mapped page? */ ··· 926 } 927 928 if (ret) { 929 (*cleaned)++; 930 + invalidation_needed = true; 931 } 932 + } 933 + 934 + if (invalidation_needed) { 935 + mmu_notifier_invalidate_range(vma->vm_mm, address, 936 + address + (1UL << compound_order(page))); 937 } 938 939 return true; ··· 1323 }; 1324 pte_t pteval; 1325 struct page *subpage; 1326 + bool ret = true, invalidation_needed = false; 1327 enum ttu_flags flags = (enum ttu_flags)arg; 1328 1329 /* munlock has nothing to gain from examining un-locked vmas */ ··· 1363 VM_BUG_ON_PAGE(!pvmw.pte, page); 1364 1365 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1366 1367 if (!(flags & TTU_IGNORE_ACCESS)) { 1368 + if (ptep_clear_flush_young_notify(vma, pvmw.address, 1369 pvmw.pte)) { 1370 ret = false; 1371 page_vma_mapped_walk_done(&pvmw); ··· 1376 } 1377 1378 /* Nuke the page table entry. */ 1379 + flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte)); 1380 if (should_defer_flush(mm, flags)) { 1381 /* 1382 * We clear the PTE but do not flush so potentially ··· 1386 * transition on a cached TLB entry is written through 1387 * and traps if the PTE is unmapped. 1388 */ 1389 + pteval = ptep_get_and_clear(mm, pvmw.address, 1390 + pvmw.pte); 1391 1392 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1393 } else { 1394 + pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte); 1395 } 1396 1397 /* Move the dirty bit to the page. Now the pte is gone. */ ··· 1405 if (PageHuge(page)) { 1406 int nr = 1 << compound_order(page); 1407 hugetlb_count_sub(nr, mm); 1408 + set_huge_swap_pte_at(mm, pvmw.address, 1409 pvmw.pte, pteval, 1410 vma_mmu_pagesize(vma)); 1411 } else { 1412 dec_mm_counter(mm, mm_counter(page)); 1413 + set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1414 } 1415 1416 } else if (pte_unused(pteval)) { ··· 1434 swp_pte = swp_entry_to_pte(entry); 1435 if (pte_soft_dirty(pteval)) 1436 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1437 + set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1438 } else if (PageAnon(page)) { 1439 swp_entry_t entry = { .val = page_private(subpage) }; 1440 pte_t swp_pte; ··· 1460 * If the page was redirtied, it cannot be 1461 * discarded. Remap the page to page table. 
1462 */ 1463 + set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1464 SetPageSwapBacked(page); 1465 ret = false; 1466 page_vma_mapped_walk_done(&pvmw); ··· 1468 } 1469 1470 if (swap_duplicate(entry) < 0) { 1471 + set_pte_at(mm, pvmw.address, pvmw.pte, pteval); 1472 ret = false; 1473 page_vma_mapped_walk_done(&pvmw); 1474 break; ··· 1484 swp_pte = swp_entry_to_pte(entry); 1485 if (pte_soft_dirty(pteval)) 1486 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1487 + set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 1488 } else 1489 dec_mm_counter(mm, mm_counter_file(page)); 1490 discard: 1491 page_remove_rmap(subpage, PageHuge(page)); 1492 put_page(page); 1493 + invalidation_needed = true; 1494 } 1495 + 1496 + if (invalidation_needed) 1497 + mmu_notifier_invalidate_range(mm, address, 1498 + address + (1UL << compound_order(page))); 1499 return ret; 1500 } 1501
+10 -2
mm/shmem.c
··· 1022 */ 1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1024 spin_lock(&sbinfo->shrinklist_lock); 1025 - if (list_empty(&info->shrinklist)) { 1026 list_add_tail(&info->shrinklist, 1027 &sbinfo->shrinklist); 1028 sbinfo->shrinklist_len++; ··· 1821 * to shrink under memory pressure. 1822 */ 1823 spin_lock(&sbinfo->shrinklist_lock); 1824 - if (list_empty(&info->shrinklist)) { 1825 list_add_tail(&info->shrinklist, 1826 &sbinfo->shrinklist); 1827 sbinfo->shrinklist_len++;
··· 1022 */ 1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1024 spin_lock(&sbinfo->shrinklist_lock); 1025 + /* 1026 + * _careful to defend against unlocked access to 1027 + * ->shrink_list in shmem_unused_huge_shrink() 1028 + */ 1029 + if (list_empty_careful(&info->shrinklist)) { 1030 list_add_tail(&info->shrinklist, 1031 &sbinfo->shrinklist); 1032 sbinfo->shrinklist_len++; ··· 1817 * to shrink under memory pressure. 1818 */ 1819 spin_lock(&sbinfo->shrinklist_lock); 1820 + /* 1821 + * _careful to defend against unlocked access to 1822 + * ->shrink_list in shmem_unused_huge_shrink() 1823 + */ 1824 + if (list_empty_careful(&info->shrinklist)) { 1825 list_add_tail(&info->shrinklist, 1826 &sbinfo->shrinklist); 1827 sbinfo->shrinklist_len++;
+1 -1
mm/util.c
··· 633 * which are reclaimable, under pressure. The dentry 634 * cache and most inode caches should fall into this 635 */ 636 - free += global_page_state(NR_SLAB_RECLAIMABLE); 637 638 /* 639 * Leave reserved pages. The pages are not for anonymous pages.
··· 633 * which are reclaimable, under pressure. The dentry 634 * cache and most inode caches should fall into this 635 */ 636 + free += global_node_page_state(NR_SLAB_RECLAIMABLE); 637 638 /* 639 * Leave reserved pages. The pages are not for anonymous pages.
+51 -9
net/batman-adv/translation-table.c
··· 1549 return found; 1550 } 1551 1552 static void 1553 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 1554 - struct batadv_orig_node *orig_node, int ttvn) 1555 { 1556 struct batadv_tt_orig_list_entry *orig_entry; 1557 ··· 1593 * was added during a "temporary client detection" 1594 */ 1595 orig_entry->ttvn = ttvn; 1596 - goto out; 1597 } 1598 1599 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); ··· 1606 batadv_tt_global_size_inc(orig_node, tt_global->common.vid); 1607 orig_entry->orig_node = orig_node; 1608 orig_entry->ttvn = ttvn; 1609 kref_init(&orig_entry->refcount); 1610 1611 spin_lock_bh(&tt_global->list_lock); ··· 1616 spin_unlock_bh(&tt_global->list_lock); 1617 atomic_inc(&tt_global->orig_list_count); 1618 1619 out: 1620 if (orig_entry) 1621 batadv_tt_orig_list_entry_put(orig_entry); ··· 1739 } 1740 1741 /* the change can carry possible "attribute" flags like the 1742 - * TT_CLIENT_WIFI, therefore they have to be copied in the 1743 * client entry 1744 */ 1745 - common->flags |= flags; 1746 1747 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 1748 * one originator left in the list and we previously received a ··· 1759 } 1760 add_orig_entry: 1761 /* add the new orig_entry (if needed) or update it */ 1762 - batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); 1763 1764 batadv_dbg(BATADV_DBG_TT, bat_priv, 1765 "Creating new global tt entry: %pM (vid: %d, via %pM)\n", ··· 1983 struct batadv_tt_orig_list_entry *orig, 1984 bool best) 1985 { 1986 void *hdr; 1987 struct batadv_orig_node_vlan *vlan; 1988 u8 last_ttvn; ··· 2013 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || 2014 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || 2015 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || 2016 - nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) 2017 goto nla_put_failure; 2018 2019 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) ··· 2627 unsigned short vid) 2628 { 2629 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 2630 struct batadv_tt_common_entry *tt_common; 2631 struct batadv_tt_global_entry *tt_global; 2632 struct hlist_head *head; ··· 2666 /* find out if this global entry is announced by this 2667 * originator 2668 */ 2669 - if (!batadv_tt_global_entry_has_orig(tt_global, 2670 - orig_node)) 2671 continue; 2672 2673 /* use network order to read the VID: this ensures that ··· 2680 /* compute the CRC on flags that have to be kept in sync 2681 * among nodes 2682 */ 2683 - flags = tt_common->flags & BATADV_TT_SYNC_MASK; 2684 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); 2685 2686 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); 2687 } 2688 rcu_read_unlock(); 2689 }
··· 1549 return found; 1550 } 1551 1552 + /** 1553 + * batadv_tt_global_sync_flags - update TT sync flags 1554 + * @tt_global: the TT global entry to update sync flags in 1555 + * 1556 + * Updates the sync flag bits in the tt_global flag attribute with a logical 1557 + * OR of all sync flags from any of its TT orig entries. 1558 + */ 1559 + static void 1560 + batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global) 1561 + { 1562 + struct batadv_tt_orig_list_entry *orig_entry; 1563 + const struct hlist_head *head; 1564 + u16 flags = BATADV_NO_FLAGS; 1565 + 1566 + rcu_read_lock(); 1567 + head = &tt_global->orig_list; 1568 + hlist_for_each_entry_rcu(orig_entry, head, list) 1569 + flags |= orig_entry->flags; 1570 + rcu_read_unlock(); 1571 + 1572 + flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK); 1573 + tt_global->common.flags = flags; 1574 + } 1575 + 1576 + /** 1577 + * batadv_tt_global_orig_entry_add - add or update a TT orig entry 1578 + * @tt_global: the TT global entry to add an orig entry in 1579 + * @orig_node: the originator to add an orig entry for 1580 + * @ttvn: translation table version number of this changeset 1581 + * @flags: TT sync flags 1582 + */ 1583 static void 1584 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 1585 + struct batadv_orig_node *orig_node, int ttvn, 1586 + u8 flags) 1587 { 1588 struct batadv_tt_orig_list_entry *orig_entry; 1589 ··· 1561 * was added during a "temporary client detection" 1562 */ 1563 orig_entry->ttvn = ttvn; 1564 + orig_entry->flags = flags; 1565 + goto sync_flags; 1566 } 1567 1568 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); ··· 1573 batadv_tt_global_size_inc(orig_node, tt_global->common.vid); 1574 orig_entry->orig_node = orig_node; 1575 orig_entry->ttvn = ttvn; 1576 + orig_entry->flags = flags; 1577 kref_init(&orig_entry->refcount); 1578 1579 spin_lock_bh(&tt_global->list_lock); ··· 1582 spin_unlock_bh(&tt_global->list_lock); 1583 atomic_inc(&tt_global->orig_list_count); 1584 1585 + sync_flags: 1586 + batadv_tt_global_sync_flags(tt_global); 1587 out: 1588 if (orig_entry) 1589 batadv_tt_orig_list_entry_put(orig_entry); ··· 1703 } 1704 1705 /* the change can carry possible "attribute" flags like the 1706 + * TT_CLIENT_TEMP, therefore they have to be copied in the 1707 * client entry 1708 */ 1709 + common->flags |= flags & (~BATADV_TT_SYNC_MASK); 1710 1711 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 1712 * one originator left in the list and we previously received a ··· 1723 } 1724 add_orig_entry: 1725 /* add the new orig_entry (if needed) or update it */ 1726 + batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn, 1727 + flags & BATADV_TT_SYNC_MASK); 1728 1729 batadv_dbg(BATADV_DBG_TT, bat_priv, 1730 "Creating new global tt entry: %pM (vid: %d, via %pM)\n", ··· 1946 struct batadv_tt_orig_list_entry *orig, 1947 bool best) 1948 { 1949 + u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags; 1950 void *hdr; 1951 struct batadv_orig_node_vlan *vlan; 1952 u8 last_ttvn; ··· 1975 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || 1976 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || 1977 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || 1978 + nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags)) 1979 goto nla_put_failure; 1980 1981 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) ··· 2589 unsigned short vid) 2590 { 2591 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 2592 + struct batadv_tt_orig_list_entry *tt_orig; 2593 
struct batadv_tt_common_entry *tt_common; 2594 struct batadv_tt_global_entry *tt_global; 2595 struct hlist_head *head; ··· 2627 /* find out if this global entry is announced by this 2628 * originator 2629 */ 2630 + tt_orig = batadv_tt_global_orig_entry_find(tt_global, 2631 + orig_node); 2632 + if (!tt_orig) 2633 continue; 2634 2635 /* use network order to read the VID: this ensures that ··· 2640 /* compute the CRC on flags that have to be kept in sync 2641 * among nodes 2642 */ 2643 + flags = tt_orig->flags; 2644 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); 2645 2646 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); 2647 + 2648 + batadv_tt_orig_list_entry_put(tt_orig); 2649 } 2650 rcu_read_unlock(); 2651 }
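The translation-table rework keeps the sync flags per originator entry and recomputes the global entry's sync bits as the OR across all of its orig entries, leaving the non-sync bits alone. A userspace sketch of that recomputation (the flag values and names are illustrative, not batman-adv's real bit layout):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_TT_SYNC_MASK    0x00f0u   /* illustrative sync-bit mask */
    #define DEMO_TT_CLIENT_WIFI  0x0010u
    #define DEMO_TT_CLIENT_ISOLA 0x0020u

    struct demo_orig_entry { uint16_t flags; };

    /* Model of batadv_tt_global_sync_flags(): OR the sync bits of every orig
     * entry, keep the global entry's non-sync bits untouched. */
    static uint16_t recompute_flags(uint16_t global_flags,
                                    const struct demo_orig_entry *orig, int n)
    {
        uint16_t flags = 0;

        for (int i = 0; i < n; i++)
            flags |= orig[i].flags & DEMO_TT_SYNC_MASK;

        return flags | (global_flags & ~DEMO_TT_SYNC_MASK);
    }

    int main(void)
    {
        struct demo_orig_entry origs[] = {
            { .flags = DEMO_TT_CLIENT_WIFI },
            { .flags = DEMO_TT_CLIENT_ISOLA },
        };
        uint16_t global = 0x0001;          /* some non-sync bit already set */

        printf("global flags: 0x%04x\n", recompute_flags(global, origs, 2));
        return 0;
    }

The same per-orig flags are what the CRC calculation now reads, so two nodes that disagree only on another originator's sync bits no longer compute different checksums.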
+2
net/batman-adv/types.h
··· 1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client 1261 * @orig_node: pointer to orig node announcing this non-mesh client 1262 * @ttvn: translation table version number which added the non-mesh client 1263 * @list: list node for batadv_tt_global_entry::orig_list 1264 * @refcount: number of contexts the object is used 1265 * @rcu: struct used for freeing in an RCU-safe manner ··· 1268 struct batadv_tt_orig_list_entry { 1269 struct batadv_orig_node *orig_node; 1270 u8 ttvn; 1271 struct hlist_node list; 1272 struct kref refcount; 1273 struct rcu_head rcu;
··· 1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client 1261 * @orig_node: pointer to orig node announcing this non-mesh client 1262 * @ttvn: translation table version number which added the non-mesh client 1263 + * @flags: per orig entry TT sync flags 1264 * @list: list node for batadv_tt_global_entry::orig_list 1265 * @refcount: number of contexts the object is used 1266 * @rcu: struct used for freeing in an RCU-safe manner ··· 1267 struct batadv_tt_orig_list_entry { 1268 struct batadv_orig_node *orig_node; 1269 u8 ttvn; 1270 + u8 flags; 1271 struct hlist_node list; 1272 struct kref refcount; 1273 struct rcu_head rcu;
+1 -1
net/core/dev.c
··· 2739 { 2740 if (tx_path) 2741 return skb->ip_summed != CHECKSUM_PARTIAL && 2742 - skb->ip_summed != CHECKSUM_NONE; 2743 2744 return skb->ip_summed == CHECKSUM_NONE; 2745 }
··· 2739 { 2740 if (tx_path) 2741 return skb->ip_summed != CHECKSUM_PARTIAL && 2742 + skb->ip_summed != CHECKSUM_UNNECESSARY; 2743 2744 return skb->ip_summed == CHECKSUM_NONE; 2745 }
+7
net/ipv4/af_inet.c
··· 1731 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; 1732 #endif 1733 1734 return 0; 1735 } 1736
··· 1731 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; 1732 #endif 1733 1734 + /* Some IGMP sysctls whose values are always used */ 1735 + net->ipv4.sysctl_igmp_max_memberships = 20; 1736 + net->ipv4.sysctl_igmp_max_msf = 10; 1737 + /* IGMP reports for link-local multicast groups are enabled by default */ 1738 + net->ipv4.sysctl_igmp_llm_reports = 1; 1739 + net->ipv4.sysctl_igmp_qrv = 2; 1740 + 1741 return 0; 1742 } 1743
+10 -2
net/ipv4/cipso_ipv4.c
··· 1523 int taglen; 1524 1525 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { 1526 - if (optptr[0] == IPOPT_CIPSO) 1527 return optptr; 1528 - taglen = optptr[1]; 1529 optlen -= taglen; 1530 optptr += taglen; 1531 }
··· 1523 int taglen; 1524 1525 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { 1526 + switch (optptr[0]) { 1527 + case IPOPT_CIPSO: 1528 return optptr; 1529 + case IPOPT_END: 1530 + return NULL; 1531 + case IPOPT_NOOP: 1532 + taglen = 1; 1533 + break; 1534 + default: 1535 + taglen = optptr[1]; 1536 + } 1537 optlen -= taglen; 1538 optptr += taglen; 1539 }
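The cipso_ipv4 fix teaches the option scanner that IPOPT_END terminates the option list and IPOPT_NOOP is a single byte with no length octet, so the walker no longer misreads padding as a type/length pair. A standalone userspace version of the corrected walk over a raw options buffer:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPOPT_END   0
    #define IPOPT_NOOP  1
    #define IPOPT_CIPSO 134

    /* Sketch of the corrected option walk: END stops the scan, NOOP is one
     * byte with no length octet, everything else is type/len/value. */
    static const uint8_t *find_cipso(const uint8_t *opt, int optlen)
    {
        while (optlen > 0) {
            int taglen;

            switch (opt[0]) {
            case IPOPT_CIPSO:
                return opt;
            case IPOPT_END:
                return NULL;
            case IPOPT_NOOP:
                taglen = 1;
                break;
            default:
                taglen = opt[1];
            }
            optlen -= taglen;
            opt += taglen;
        }
        return NULL;
    }

    int main(void)
    {
        /* Two NOOP pad bytes, then a 6-byte CIPSO option (type, len, 4 payload bytes). */
        const uint8_t opts[] = { IPOPT_NOOP, IPOPT_NOOP,
                                 IPOPT_CIPSO, 6, 0, 0, 0, 0 };

        printf("CIPSO option %sfound\n", find_cipso(opts, sizeof(opts)) ? "" : "not ");
        return 0;
    }

With the old code, the second byte of a NOOP pair would have been read as a length, so the walk could step past the real CIPSO option or off the end of the header.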
+1
net/ipv4/fou.c
··· 450 out: 451 NAPI_GRO_CB(skb)->flush |= flush; 452 skb_gro_remcsum_cleanup(skb, &grc); 453 454 return pp; 455 }
··· 450 out: 451 NAPI_GRO_CB(skb)->flush |= flush; 452 skb_gro_remcsum_cleanup(skb, &grc); 453 + skb->remcsum_offload = 0; 454 455 return pp; 456 }
-6
net/ipv4/igmp.c
··· 2974 goto out_sock; 2975 } 2976 2977 - /* Sysctl initialization */ 2978 - net->ipv4.sysctl_igmp_max_memberships = 20; 2979 - net->ipv4.sysctl_igmp_max_msf = 10; 2980 - /* IGMP reports for link-local multicast groups are enabled by default */ 2981 - net->ipv4.sysctl_igmp_llm_reports = 1; 2982 - net->ipv4.sysctl_igmp_qrv = 2; 2983 return 0; 2984 2985 out_sock:
··· 2974 goto out_sock; 2975 } 2976 2977 return 0; 2978 2979 out_sock:
+5 -3
net/ipv4/ip_output.c
··· 965 csummode = CHECKSUM_PARTIAL; 966 967 cork->length += length; 968 - if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || 969 - (skb && skb_is_gso(skb))) && 970 (sk->sk_protocol == IPPROTO_UDP) && 971 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 972 - (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { 973 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 974 hh_len, fragheaderlen, transhdrlen, 975 maxfraglen, flags); ··· 1289 return -EINVAL; 1290 1291 if ((size + skb->len > mtu) && 1292 (sk->sk_protocol == IPPROTO_UDP) && 1293 (rt->dst.dev->features & NETIF_F_UFO)) { 1294 if (skb->ip_summed != CHECKSUM_PARTIAL)
··· 965 csummode = CHECKSUM_PARTIAL; 966 967 cork->length += length; 968 + if ((skb && skb_is_gso(skb)) || 969 + (((length + (skb ? skb->len : fragheaderlen)) > mtu) && 970 + (skb_queue_len(queue) <= 1) && 971 (sk->sk_protocol == IPPROTO_UDP) && 972 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 973 + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) { 974 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 975 hh_len, fragheaderlen, transhdrlen, 976 maxfraglen, flags); ··· 1288 return -EINVAL; 1289 1290 if ((size + skb->len > mtu) && 1291 + (skb_queue_len(&sk->sk_write_queue) == 1) && 1292 (sk->sk_protocol == IPPROTO_UDP) && 1293 (rt->dst.dev->features & NETIF_F_UFO)) { 1294 if (skb->ip_summed != CHECKSUM_PARTIAL)
+19 -15
net/ipv4/tcp_input.c
··· 107 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ 108 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 109 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 110 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 111 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ 112 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ ··· 2521 return; 2522 2523 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2524 - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || 2525 - (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { 2526 tp->snd_cwnd = tp->snd_ssthresh; 2527 tp->snd_cwnd_stamp = tcp_jiffies32; 2528 } ··· 3005 /* Offset the time elapsed after installing regular RTO */ 3006 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || 3007 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 3008 - struct sk_buff *skb = tcp_write_queue_head(sk); 3009 - u64 rto_time_stamp = skb->skb_mstamp + 3010 - jiffies_to_usecs(rto); 3011 - s64 delta_us = rto_time_stamp - tp->tcp_mstamp; 3012 /* delta_us may not be positive if the socket is locked 3013 * when the retrans timer fires and is rescheduled. 3014 */ ··· 3015 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 3016 TCP_RTO_MAX); 3017 } 3018 } 3019 3020 /* If we get here, the whole TSO packet has not been acked. */ ··· 3185 ca_rtt_us, sack->rate); 3186 3187 if (flag & FLAG_ACKED) { 3188 - tcp_rearm_rto(sk); 3189 if (unlikely(icsk->icsk_mtup.probe_size && 3190 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3191 tcp_mtup_probe_success(sk); ··· 3213 * after when the head was last (re)transmitted. Otherwise the 3214 * timeout may continue to extend in loss recovery. 3215 */ 3216 - tcp_rearm_rto(sk); 3217 } 3218 3219 if (icsk->icsk_ca_ops->pkts_acked) { ··· 3585 if (after(ack, tp->snd_nxt)) 3586 goto invalid_ack; 3587 3588 - if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) 3589 - tcp_rearm_rto(sk); 3590 - 3591 if (after(ack, prior_snd_una)) { 3592 flag |= FLAG_SND_UNA_ADVANCED; 3593 icsk->icsk_retransmits = 0; ··· 3649 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, 3650 &sack_state); 3651 3652 if (tcp_ack_is_dubious(sk, flag)) { 3653 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3654 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); 3655 } 3656 - if (tp->tlp_high_seq) 3657 - tcp_process_tlp_ack(sk, ack, flag); 3658 3659 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3660 sk_dst_confirm(sk); 3661 3662 - if (icsk->icsk_pending == ICSK_TIME_RETRANS) 3663 - tcp_schedule_loss_probe(sk); 3664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3665 lost = tp->lost - lost; /* freshly marked lost */ 3666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
··· 107 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ 108 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 109 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 110 + #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ 111 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 112 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ 113 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ ··· 2520 return; 2521 2522 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2523 + if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && 2524 + (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { 2525 tp->snd_cwnd = tp->snd_ssthresh; 2526 tp->snd_cwnd_stamp = tcp_jiffies32; 2527 } ··· 3004 /* Offset the time elapsed after installing regular RTO */ 3005 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || 3006 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 3007 + s64 delta_us = tcp_rto_delta_us(sk); 3008 /* delta_us may not be positive if the socket is locked 3009 * when the retrans timer fires and is rescheduled. 3010 */ ··· 3017 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 3018 TCP_RTO_MAX); 3019 } 3020 + } 3021 + 3022 + /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ 3023 + static void tcp_set_xmit_timer(struct sock *sk) 3024 + { 3025 + if (!tcp_schedule_loss_probe(sk)) 3026 + tcp_rearm_rto(sk); 3027 } 3028 3029 /* If we get here, the whole TSO packet has not been acked. */ ··· 3180 ca_rtt_us, sack->rate); 3181 3182 if (flag & FLAG_ACKED) { 3183 + flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3184 if (unlikely(icsk->icsk_mtup.probe_size && 3185 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3186 tcp_mtup_probe_success(sk); ··· 3208 * after when the head was last (re)transmitted. Otherwise the 3209 * timeout may continue to extend in loss recovery. 3210 */ 3211 + flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3212 } 3213 3214 if (icsk->icsk_ca_ops->pkts_acked) { ··· 3580 if (after(ack, tp->snd_nxt)) 3581 goto invalid_ack; 3582 3583 if (after(ack, prior_snd_una)) { 3584 flag |= FLAG_SND_UNA_ADVANCED; 3585 icsk->icsk_retransmits = 0; ··· 3647 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, 3648 &sack_state); 3649 3650 + if (tp->tlp_high_seq) 3651 + tcp_process_tlp_ack(sk, ack, flag); 3652 + /* If needed, reset TLP/RTO timer; RACK may later override this. */ 3653 + if (flag & FLAG_SET_XMIT_TIMER) 3654 + tcp_set_xmit_timer(sk); 3655 + 3656 if (tcp_ack_is_dubious(sk, flag)) { 3657 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3658 tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); 3659 } 3660 3661 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3662 sk_dst_confirm(sk); 3663 3664 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3665 lost = tp->lost - lost; /* freshly marked lost */ 3666 tcp_rate_gen(sk, delivered, lost, sack_state.rate);
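The tcp_input rework stops rearming timers from several places during ACK processing; instead a FLAG_SET_XMIT_TIMER bit is collected and a single helper at the end picks between a tail-loss probe and the plain RTO. A much-simplified model of that "record now, arm once" flow (the flag values and helper names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_ACKED          0x01
    #define FLAG_SET_XMIT_TIMER 0x02   /* mirrors the new flag above; values are made up */

    /* ACK processing only records that a timer is needed... */
    static int clean_rtx_queue_model(bool data_acked)
    {
        int flag = 0;

        if (data_acked)
            flag |= FLAG_ACKED | FLAG_SET_XMIT_TIMER;
        return flag;
    }

    /* ...and one helper at the end decides between TLP and RTO. */
    static const char *set_xmit_timer_model(bool tlp_possible)
    {
        return tlp_possible ? "TLP timer" : "RTO timer";
    }

    int main(void)
    {
        int flag = clean_rtx_queue_model(true);

        if (flag & FLAG_SET_XMIT_TIMER)
            printf("arming %s\n", set_xmit_timer_model(true));
        return 0;
    }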
+9 -18
net/ipv4/tcp_output.c
··· 2377 { 2378 struct inet_connection_sock *icsk = inet_csk(sk); 2379 struct tcp_sock *tp = tcp_sk(sk); 2380 - u32 timeout, tlp_time_stamp, rto_time_stamp; 2381 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 2382 2383 - /* No consecutive loss probes. */ 2384 - if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 2385 - tcp_rearm_rto(sk); 2386 - return false; 2387 - } 2388 /* Don't do any loss probe on a Fast Open connection before 3WHS 2389 * finishes. 2390 */ 2391 if (tp->fastopen_rsk) 2392 - return false; 2393 - 2394 - /* TLP is only scheduled when next timer event is RTO. */ 2395 - if (icsk->icsk_pending != ICSK_TIME_RETRANS) 2396 return false; 2397 2398 /* Schedule a loss probe in 2*RTT for SACK capable connections ··· 2408 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 2409 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 2410 2411 - /* If RTO is shorter, just schedule TLP in its place. */ 2412 - tlp_time_stamp = tcp_jiffies32 + timeout; 2413 - rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 2414 - if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 2415 - s32 delta = rto_time_stamp - tcp_jiffies32; 2416 - if (delta > 0) 2417 - timeout = delta; 2418 - } 2419 2420 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 2421 TCP_RTO_MAX); ··· 3436 int err; 3437 3438 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); 3439 tcp_connect_init(sk); 3440 3441 if (unlikely(tp->repair)) {
··· 2377 { 2378 struct inet_connection_sock *icsk = inet_csk(sk); 2379 struct tcp_sock *tp = tcp_sk(sk); 2380 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 2381 + u32 timeout, rto_delta_us; 2382 2383 /* Don't do any loss probe on a Fast Open connection before 3WHS 2384 * finishes. 2385 */ 2386 if (tp->fastopen_rsk) 2387 return false; 2388 2389 /* Schedule a loss probe in 2*RTT for SACK capable connections ··· 2417 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 2418 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 2419 2420 + /* If the RTO formula yields an earlier time, then use that time. */ 2421 + rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */ 2422 + if (rto_delta_us > 0) 2423 + timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); 2424 2425 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 2426 TCP_RTO_MAX); ··· 3449 int err; 3450 3451 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB); 3452 + 3453 + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 3454 + return -EHOSTUNREACH; /* Routing failure or similar. */ 3455 + 3456 tcp_connect_init(sk); 3457 3458 if (unlikely(tp->repair)) {
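On the tcp_output side, the TLP timeout is now computed directly and clamped so the probe never fires later than the pending RTO would. A simplified standalone model of that clamp, with everything expressed in milliseconds for readability (the kernel's exact formula mixes jiffies and microseconds and has more cases than shown here):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }
    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    static uint32_t tlp_timeout_ms(uint32_t rtt_ms, uint32_t rto_delta_ms)
    {
        /* A 2*RTT-style baseline with a 10 ms floor; the kernel's formula has more cases. */
        uint32_t timeout = max_u32(2 * rtt_ms, 10);

        /* If the RTO would fire sooner, schedule the probe no later than that. */
        if (rto_delta_ms > 0)
            timeout = min_u32(timeout, rto_delta_ms);
        return timeout;
    }

    int main(void)
    {
        printf("rtt=30ms, RTO in 200ms -> TLP in %u ms\n", tlp_timeout_ms(30, 200));
        printf("rtt=30ms, RTO in  40ms -> TLP in %u ms\n", tlp_timeout_ms(30, 40));
        return 0;
    }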
+2 -1
net/ipv4/tcp_timer.c
··· 652 goto death; 653 } 654 655 - if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 656 goto out; 657 658 elapsed = keepalive_time_when(tp);
··· 652 goto death; 653 } 654 655 + if (!sock_flag(sk, SOCK_KEEPOPEN) || 656 + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) 657 goto out; 658 659 elapsed = keepalive_time_when(tp);
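The keepalive fix uses the usual "(1 << state) & mask" idiom so a single test covers both TCP_CLOSE and TCP_SYN_SENT. A tiny standalone example of that membership test (the state numbering is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum { ST_ESTABLISHED = 1, ST_SYN_SENT = 2, ST_CLOSE = 7 };

    #define STF(s) (1 << (s))

    /* One bitmask check covers several states at once, like the
     * (1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT) test above. */
    static bool keepalive_should_skip(int state)
    {
        return (STF(state) & (STF(ST_CLOSE) | STF(ST_SYN_SENT))) != 0;
    }

    int main(void)
    {
        printf("ESTABLISHED skipped? %d\n", keepalive_should_skip(ST_ESTABLISHED));
        printf("SYN_SENT    skipped? %d\n", keepalive_should_skip(ST_SYN_SENT));
        printf("CLOSE       skipped? %d\n", keepalive_should_skip(ST_CLOSE));
        return 0;
    }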
+1 -1
net/ipv4/udp.c
··· 802 if (is_udplite) /* UDP-Lite */ 803 csum = udplite_csum(skb); 804 805 - else if (sk->sk_no_check_tx) { /* UDP csum disabled */ 806 807 skb->ip_summed = CHECKSUM_NONE; 808 goto send;
··· 802 if (is_udplite) /* UDP-Lite */ 803 csum = udplite_csum(skb); 804 805 + else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ 806 807 skb->ip_summed = CHECKSUM_NONE; 808 goto send;
+1 -1
net/ipv4/udp_offload.c
··· 235 if (uh->check == 0) 236 uh->check = CSUM_MANGLED_0; 237 238 - skb->ip_summed = CHECKSUM_NONE; 239 240 /* If there is no outer header we can fake a checksum offload 241 * due to the fact that we have already done the checksum in
··· 235 if (uh->check == 0) 236 uh->check = CSUM_MANGLED_0; 237 238 + skb->ip_summed = CHECKSUM_UNNECESSARY; 239 240 /* If there is no outer header we can fake a checksum offload 241 * due to the fact that we have already done the checksum in
+4 -3
net/ipv6/ip6_output.c
··· 1381 */ 1382 1383 cork->length += length; 1384 - if ((((length + (skb ? skb->len : headersize)) > mtu) || 1385 - (skb && skb_is_gso(skb))) && 1386 (sk->sk_protocol == IPPROTO_UDP) && 1387 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 1388 - (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1389 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1390 hh_len, fragheaderlen, exthdrlen, 1391 transhdrlen, mtu, flags, fl6);
··· 1381 */ 1382 1383 cork->length += length; 1384 + if ((skb && skb_is_gso(skb)) || 1385 + (((length + (skb ? skb->len : headersize)) > mtu) && 1386 + (skb_queue_len(queue) <= 1) && 1387 (sk->sk_protocol == IPPROTO_UDP) && 1388 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 1389 + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) { 1390 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1391 hh_len, fragheaderlen, exthdrlen, 1392 transhdrlen, mtu, flags, fl6);
+3 -8
net/ipv6/route.c
··· 2351 if (on_link) 2352 nrt->rt6i_flags &= ~RTF_GATEWAY; 2353 2354 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 2355 2356 if (ip6_ins_rt(nrt)) ··· 2462 .fc_dst_len = prefixlen, 2463 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2464 RTF_UP | RTF_PREF(pref), 2465 .fc_nlinfo.portid = 0, 2466 .fc_nlinfo.nlh = NULL, 2467 .fc_nlinfo.nl_net = net, ··· 2515 .fc_ifindex = dev->ifindex, 2516 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 2517 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 2518 .fc_nlinfo.portid = 0, 2519 .fc_nlinfo.nlh = NULL, 2520 .fc_nlinfo.nl_net = dev_net(dev), ··· 3427 rtm->rtm_flags = 0; 3428 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 3429 rtm->rtm_protocol = rt->rt6i_protocol; 3430 - if (rt->rt6i_flags & RTF_DYNAMIC) 3431 - rtm->rtm_protocol = RTPROT_REDIRECT; 3432 - else if (rt->rt6i_flags & RTF_ADDRCONF) { 3433 - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) 3434 - rtm->rtm_protocol = RTPROT_RA; 3435 - else 3436 - rtm->rtm_protocol = RTPROT_KERNEL; 3437 - } 3438 3439 if (rt->rt6i_flags & RTF_CACHE) 3440 rtm->rtm_flags |= RTM_F_CLONED;
··· 2351 if (on_link) 2352 nrt->rt6i_flags &= ~RTF_GATEWAY; 2353 2354 + nrt->rt6i_protocol = RTPROT_REDIRECT; 2355 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; 2356 2357 if (ip6_ins_rt(nrt)) ··· 2461 .fc_dst_len = prefixlen, 2462 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | 2463 RTF_UP | RTF_PREF(pref), 2464 + .fc_protocol = RTPROT_RA, 2465 .fc_nlinfo.portid = 0, 2466 .fc_nlinfo.nlh = NULL, 2467 .fc_nlinfo.nl_net = net, ··· 2513 .fc_ifindex = dev->ifindex, 2514 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | 2515 RTF_UP | RTF_EXPIRES | RTF_PREF(pref), 2516 + .fc_protocol = RTPROT_RA, 2517 .fc_nlinfo.portid = 0, 2518 .fc_nlinfo.nlh = NULL, 2519 .fc_nlinfo.nl_net = dev_net(dev), ··· 3424 rtm->rtm_flags = 0; 3425 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 3426 rtm->rtm_protocol = rt->rt6i_protocol; 3427 3428 if (rt->rt6i_flags & RTF_CACHE) 3429 rtm->rtm_flags |= RTM_F_CLONED;
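The route.c change records a route's origin in rt6i_protocol / fc_protocol when the redirect or RA route is created, so rt6_fill_node() no longer reconstructs it from RTF_* flags at dump time. A small sketch of that record-at-creation pattern; the RTPROT_REDIRECT value mirrors <linux/rtnetlink.h>, the struct is purely illustrative.

/* Sketch only: record the protocol when the object is created instead of
 * re-deriving it from flag bits when dumping. */
#include <stdio.h>

#define RTPROT_REDIRECT 1

struct demo_route {
    unsigned int  flags;
    unsigned char protocol;     /* set once, at creation */
};

static struct demo_route make_redirect_route(void)
{
    struct demo_route rt = { .flags = 0, .protocol = RTPROT_REDIRECT };
    return rt;
}

static void dump_route(const struct demo_route *rt)
{
    /* No flag decoding is needed here any more. */
    printf("rtm_protocol = %u\n", (unsigned int)rt->protocol);
}

int main(void)
{
    struct demo_route rt = make_redirect_route();

    dump_route(&rt);
    return 0;
}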
+1 -1
net/ipv6/udp_offload.c
··· 72 if (uh->check == 0) 73 uh->check = CSUM_MANGLED_0; 74 75 - skb->ip_summed = CHECKSUM_NONE; 76 77 /* If there is no outer header we can fake a checksum offload 78 * due to the fact that we have already done the checksum in
··· 72 if (uh->check == 0) 73 uh->check = CSUM_MANGLED_0; 74 75 + skb->ip_summed = CHECKSUM_UNNECESSARY; 76 77 /* If there is no outer header we can fake a checksum offload 78 * due to the fact that we have already done the checksum in
+9 -4
net/packet/af_packet.c
··· 3700 3701 if (optlen != sizeof(val)) 3702 return -EINVAL; 3703 - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) 3704 - return -EBUSY; 3705 if (copy_from_user(&val, optval, sizeof(val))) 3706 return -EFAULT; 3707 if (val > INT_MAX) 3708 return -EINVAL; 3709 - po->tp_reserve = val; 3710 - return 0; 3711 } 3712 case PACKET_LOSS: 3713 {
··· 3700 3701 if (optlen != sizeof(val)) 3702 return -EINVAL; 3703 if (copy_from_user(&val, optval, sizeof(val))) 3704 return -EFAULT; 3705 if (val > INT_MAX) 3706 return -EINVAL; 3707 + lock_sock(sk); 3708 + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { 3709 + ret = -EBUSY; 3710 + } else { 3711 + po->tp_reserve = val; 3712 + ret = 0; 3713 + } 3714 + release_sock(sk); 3715 + return ret; 3716 } 3717 case PACKET_LOSS: 3718 {
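The af_packet.c hunk moves both the "is a ring already mapped?" check and the tp_reserve update under lock_sock(), closing the window in which another thread could map a ring between the check and the write. A user-space sketch of the same check-and-set-under-one-lock shape, with a pthread mutex standing in for the socket lock (build with -pthread):

/* Sketch only: the check and the update live under one lock, so no other
 * thread can map a ring in between. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static void *rx_ring_pg_vec;            /* non-NULL once a ring is mapped */
static unsigned int tp_reserve;

static int set_reserve(unsigned int val)
{
    int ret;

    pthread_mutex_lock(&sk_lock);       /* lock_sock(sk) */
    if (rx_ring_pg_vec) {
        ret = -1;                       /* stands in for -EBUSY */
    } else {
        tp_reserve = val;
        ret = 0;
    }
    pthread_mutex_unlock(&sk_lock);     /* release_sock(sk) */
    return ret;
}

int main(void)
{
    int ret = set_reserve(128);

    printf("set_reserve(128) = %d, tp_reserve = %u\n", ret, tp_reserve);
    return 0;
}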
+4 -1
net/rds/ib_recv.c
··· 1015 if (rds_ib_ring_empty(&ic->i_recv_ring)) 1016 rds_ib_stats_inc(s_ib_rx_ring_empty); 1017 1018 - if (rds_ib_ring_low(&ic->i_recv_ring)) 1019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT); 1020 } 1021 1022 int rds_ib_recv_path(struct rds_conn_path *cp) ··· 1031 if (rds_conn_up(conn)) { 1032 rds_ib_attempt_ack(ic); 1033 rds_ib_recv_refill(conn, 0, GFP_KERNEL); 1034 } 1035 1036 return ret;
··· 1015 if (rds_ib_ring_empty(&ic->i_recv_ring)) 1016 rds_ib_stats_inc(s_ib_rx_ring_empty); 1017 1018 + if (rds_ib_ring_low(&ic->i_recv_ring)) { 1019 rds_ib_recv_refill(conn, 0, GFP_NOWAIT); 1020 + rds_ib_stats_inc(s_ib_rx_refill_from_cq); 1021 + } 1022 } 1023 1024 int rds_ib_recv_path(struct rds_conn_path *cp) ··· 1029 if (rds_conn_up(conn)) { 1030 rds_ib_attempt_ack(ic); 1031 rds_ib_recv_refill(conn, 0, GFP_KERNEL); 1032 + rds_ib_stats_inc(s_ib_rx_refill_from_thread); 1033 } 1034 1035 return ret;
+11 -11
net/sched/act_ipt.c
··· 36 static unsigned int xt_net_id; 37 static struct tc_action_ops act_xt_ops; 38 39 - static int ipt_init_target(struct xt_entry_target *t, char *table, 40 - unsigned int hook) 41 { 42 struct xt_tgchk_param par; 43 struct xt_target *target; ··· 49 return PTR_ERR(target); 50 51 t->u.kernel.target = target; 52 par.table = table; 53 - par.entryinfo = NULL; 54 par.target = target; 55 par.targinfo = t->data; 56 par.hook_mask = hook; ··· 92 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, 93 }; 94 95 - static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, 96 struct nlattr *est, struct tc_action **a, 97 const struct tc_action_ops *ops, int ovr, int bind) 98 { 99 struct nlattr *tb[TCA_IPT_MAX + 1]; 100 struct tcf_ipt *ipt; 101 struct xt_entry_target *td, *t; ··· 161 if (unlikely(!t)) 162 goto err2; 163 164 - err = ipt_init_target(t, tname, hook); 165 if (err < 0) 166 goto err3; 167 ··· 195 struct nlattr *est, struct tc_action **a, int ovr, 196 int bind) 197 { 198 - struct tc_action_net *tn = net_generic(net, ipt_net_id); 199 - 200 - return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind); 201 } 202 203 static int tcf_xt_init(struct net *net, struct nlattr *nla, 204 struct nlattr *est, struct tc_action **a, int ovr, 205 int bind) 206 { 207 - struct tc_action_net *tn = net_generic(net, xt_net_id); 208 - 209 - return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind); 210 } 211 212 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
··· 36 static unsigned int xt_net_id; 37 static struct tc_action_ops act_xt_ops; 38 39 + static int ipt_init_target(struct net *net, struct xt_entry_target *t, 40 + char *table, unsigned int hook) 41 { 42 struct xt_tgchk_param par; 43 struct xt_target *target; ··· 49 return PTR_ERR(target); 50 51 t->u.kernel.target = target; 52 + memset(&par, 0, sizeof(par)); 53 + par.net = net; 54 par.table = table; 55 par.target = target; 56 par.targinfo = t->data; 57 par.hook_mask = hook; ··· 91 [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, 92 }; 93 94 + static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, 95 struct nlattr *est, struct tc_action **a, 96 const struct tc_action_ops *ops, int ovr, int bind) 97 { 98 + struct tc_action_net *tn = net_generic(net, id); 99 struct nlattr *tb[TCA_IPT_MAX + 1]; 100 struct tcf_ipt *ipt; 101 struct xt_entry_target *td, *t; ··· 159 if (unlikely(!t)) 160 goto err2; 161 162 + err = ipt_init_target(net, t, tname, hook); 163 if (err < 0) 164 goto err3; 165 ··· 193 struct nlattr *est, struct tc_action **a, int ovr, 194 int bind) 195 { 196 + return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, 197 + bind); 198 } 199 200 static int tcf_xt_init(struct net *net, struct nlattr *nla, 201 struct nlattr *est, struct tc_action **a, int ovr, 202 int bind) 203 { 204 + return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, 205 + bind); 206 } 207 208 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
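Besides passing the struct net through to the target check, the act_ipt.c change zeroes the whole xt_tgchk_param with memset() before filling it, so members it does not explicitly set (previously only entryinfo was NULLed by hand) start out as zero. The zero-then-fill pattern in isolation; the struct below is a stand-in, not the real xt_tgchk_param:

/* Sketch only: zero the whole parameter block, then fill in the members
 * you care about; anything untouched reads as 0/NULL rather than stack
 * garbage. */
#include <stdio.h>
#include <string.h>

struct check_param {
    const void   *net;
    const char   *table;
    const void   *target;
    void         *targinfo;
    unsigned int  hook_mask;
};

static void init_param(struct check_param *par, const char *table,
                       unsigned int hook)
{
    memset(par, 0, sizeof(*par));   /* everything not set below stays 0 */
    par->table = table;
    par->hook_mask = hook;
}

int main(void)
{
    struct check_param par;

    init_param(&par, "mangle", 1u << 0);
    printf("net=%p table=%s hook_mask=%#x\n",
           (void *)par.net, par.table, par.hook_mask);
    return 0;
}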
+1 -3
net/tipc/node.c
··· 1455 /* Initiate synch mode if applicable */ 1456 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { 1457 syncpt = iseqno + exp_pkts - 1; 1458 - if (!tipc_link_is_up(l)) { 1459 - tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); 1460 __tipc_node_link_up(n, bearer_id, xmitq); 1461 - } 1462 if (n->state == SELF_UP_PEER_UP) { 1463 n->sync_point = syncpt; 1464 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
··· 1455 /* Initiate synch mode if applicable */ 1456 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { 1457 syncpt = iseqno + exp_pkts - 1; 1458 + if (!tipc_link_is_up(l)) 1459 __tipc_node_link_up(n, bearer_id, xmitq); 1460 if (n->state == SELF_UP_PEER_UP) { 1461 n->sync_point = syncpt; 1462 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+64 -23
scripts/get_maintainer.pl
··· 18 19 use Getopt::Long qw(:config no_auto_abbrev); 20 use Cwd; 21 22 my $cur_path = fastgetcwd() . '/'; 23 my $lk_path = "./"; ··· 59 my $pattern_depth = 0; 60 my $version = 0; 61 my $help = 0; 62 63 my $vcs_used = 0; 64 ··· 251 'sections!' => \$sections, 252 'fe|file-emails!' => \$file_emails, 253 'f|file' => \$from_filename, 254 'v|version' => \$version, 255 'h|help|usage' => \$help, 256 )) { ··· 310 311 my @typevalue = (); 312 my %keyword_hash; 313 314 - open (my $maint, '<', "${lk_path}MAINTAINERS") 315 - or die "$P: Can't open MAINTAINERS: $!\n"; 316 - while (<$maint>) { 317 - my $line = $_; 318 319 - if ($line =~ m/^([A-Z]):\s*(.*)/) { 320 - my $type = $1; 321 - my $value = $2; 322 323 - ##Filename pattern matching 324 - if ($type eq "F" || $type eq "X") { 325 - $value =~ s@\.@\\\.@g; ##Convert . to \. 326 - $value =~ s/\*/\.\*/g; ##Convert * to .* 327 - $value =~ s/\?/\./g; ##Convert ? to . 328 - ##if pattern is a directory and it lacks a trailing slash, add one 329 - if ((-d $value)) { 330 - $value =~ s@([^/])$@$1/@; 331 } 332 - } elsif ($type eq "K") { 333 - $keyword_hash{@typevalue} = $value; 334 } 335 - push(@typevalue, "$type:$value"); 336 - } elsif (!/^(\s)*$/) { 337 - $line =~ s/\n$//g; 338 - push(@typevalue, $line); 339 } 340 } 341 - close($maint); 342 343 344 # 345 # Read mail address map ··· 914 if ( (-f "${lk_path}COPYING") 915 && (-f "${lk_path}CREDITS") 916 && (-f "${lk_path}Kbuild") 917 - && (-f "${lk_path}MAINTAINERS") 918 && (-f "${lk_path}Makefile") 919 && (-f "${lk_path}README") 920 && (-d "${lk_path}Documentation")
··· 18 19 use Getopt::Long qw(:config no_auto_abbrev); 20 use Cwd; 21 + use File::Find; 22 23 my $cur_path = fastgetcwd() . '/'; 24 my $lk_path = "./"; ··· 58 my $pattern_depth = 0; 59 my $version = 0; 60 my $help = 0; 61 + my $find_maintainer_files = 0; 62 63 my $vcs_used = 0; 64 ··· 249 'sections!' => \$sections, 250 'fe|file-emails!' => \$file_emails, 251 'f|file' => \$from_filename, 252 + 'find-maintainer-files' => \$find_maintainer_files, 253 'v|version' => \$version, 254 'h|help|usage' => \$help, 255 )) { ··· 307 308 my @typevalue = (); 309 my %keyword_hash; 310 + my @mfiles = (); 311 312 + sub read_maintainer_file { 313 + my ($file) = @_; 314 315 + open (my $maint, '<', "$file") 316 + or die "$P: Can't open MAINTAINERS file '$file': $!\n"; 317 + while (<$maint>) { 318 + my $line = $_; 319 320 + if ($line =~ m/^([A-Z]):\s*(.*)/) { 321 + my $type = $1; 322 + my $value = $2; 323 + 324 + ##Filename pattern matching 325 + if ($type eq "F" || $type eq "X") { 326 + $value =~ s@\.@\\\.@g; ##Convert . to \. 327 + $value =~ s/\*/\.\*/g; ##Convert * to .* 328 + $value =~ s/\?/\./g; ##Convert ? to . 329 + ##if pattern is a directory and it lacks a trailing slash, add one 330 + if ((-d $value)) { 331 + $value =~ s@([^/])$@$1/@; 332 + } 333 + } elsif ($type eq "K") { 334 + $keyword_hash{@typevalue} = $value; 335 } 336 + push(@typevalue, "$type:$value"); 337 + } elsif (!(/^\s*$/ || /^\s*\#/)) { 338 + $line =~ s/\n$//g; 339 + push(@typevalue, $line); 340 } 341 + } 342 + close($maint); 343 + } 344 + 345 + sub find_is_maintainer_file { 346 + my ($file) = $_; 347 + return if ($file !~ m@/MAINTAINERS$@); 348 + $file = $File::Find::name; 349 + return if (! -f $file); 350 + push(@mfiles, $file); 351 + } 352 + 353 + sub find_ignore_git { 354 + return grep { $_ !~ /^\.git$/; } @_; 355 + } 356 + 357 + if (-d "${lk_path}MAINTAINERS") { 358 + opendir(DIR, "${lk_path}MAINTAINERS") or die $!; 359 + my @files = readdir(DIR); 360 + closedir(DIR); 361 + foreach my $file (@files) { 362 + push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./); 363 } 364 } 365 366 + if ($find_maintainer_files) { 367 + find( { wanted => \&find_is_maintainer_file, 368 + preprocess => \&find_ignore_git, 369 + no_chdir => 1, 370 + }, "${lk_path}"); 371 + } else { 372 + push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS"; 373 + } 374 + 375 + foreach my $file (@mfiles) { 376 + read_maintainer_file("$file"); 377 + } 378 379 # 380 # Read mail address map ··· 873 if ( (-f "${lk_path}COPYING") 874 && (-f "${lk_path}CREDITS") 875 && (-f "${lk_path}Kbuild") 876 + && (-e "${lk_path}MAINTAINERS") 877 && (-f "${lk_path}Makefile") 878 && (-f "${lk_path}README") 879 && (-d "${lk_path}Documentation")
+73 -22
scripts/parse-maintainers.pl
··· 2 3 use strict; 4 5 - my %map; 6 7 - # sort comparison function 8 sub by_category($$) { 9 my ($a, $b) = @_; 10 ··· 15 $a =~ s/THE REST/ZZZZZZ/g; 16 $b =~ s/THE REST/ZZZZZZ/g; 17 18 - $a cmp $b; 19 } 20 21 - sub alpha_output { 22 - my $key; 23 - my $sort_method = \&by_category; 24 - my $sep = ""; 25 26 - foreach $key (sort $sort_method keys %map) { 27 - if ($key ne " ") { 28 - print $sep . $key . "\n"; 29 - $sep = "\n"; 30 - } 31 - print $map{$key}; 32 } 33 } 34 ··· 52 return $s; 53 } 54 55 sub file_input { 56 my $lastline = ""; 57 my $case = " "; 58 - $map{$case} = ""; 59 60 - while (<>) { 61 my $line = $_; 62 63 # Pattern line? 64 if ($line =~ m/^([A-Z]):\s*(.*)/) { 65 $line = $1 . ":\t" . trim($2) . "\n"; 66 if ($lastline eq "") { 67 - $map{$case} = $map{$case} . $line; 68 next; 69 } 70 $case = trim($lastline); 71 - exists $map{$case} and die "Header '$case' already exists"; 72 - $map{$case} = $line; 73 $lastline = ""; 74 next; 75 } 76 77 if ($case eq " ") { 78 - $map{$case} = $map{$case} . $lastline; 79 $lastline = $line; 80 next; 81 } 82 trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'"); 83 $lastline = $line; 84 } 85 - $map{$case} = $map{$case} . $lastline; 86 } 87 88 - &file_input; 89 - &alpha_output; 90 exit(0);
··· 2 3 use strict; 4 5 + my $P = $0; 6 7 + # sort comparison functions 8 sub by_category($$) { 9 my ($a, $b) = @_; 10 ··· 15 $a =~ s/THE REST/ZZZZZZ/g; 16 $b =~ s/THE REST/ZZZZZZ/g; 17 18 + return $a cmp $b; 19 } 20 21 + sub by_pattern($$) { 22 + my ($a, $b) = @_; 23 + my $preferred_order = 'MRPLSWTQBCFXNK'; 24 25 + my $a1 = uc(substr($a, 0, 1)); 26 + my $b1 = uc(substr($b, 0, 1)); 27 + 28 + my $a_index = index($preferred_order, $a1); 29 + my $b_index = index($preferred_order, $b1); 30 + 31 + $a_index = 1000 if ($a_index == -1); 32 + $b_index = 1000 if ($b_index == -1); 33 + 34 + if (($a1 =~ /^F$/ && $b1 =~ /^F$/) || 35 + ($a1 =~ /^X$/ && $b1 =~ /^X$/)) { 36 + return $a cmp $b; 37 + } 38 + 39 + if ($a_index < $b_index) { 40 + return -1; 41 + } elsif ($a_index == $b_index) { 42 + return 0; 43 + } else { 44 + return 1; 45 } 46 } 47 ··· 39 return $s; 40 } 41 42 + sub alpha_output { 43 + my ($hashref, $filename) = (@_); 44 + 45 + open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n"; 46 + foreach my $key (sort by_category keys %$hashref) { 47 + if ($key eq " ") { 48 + chomp $$hashref{$key}; 49 + print $file $$hashref{$key}; 50 + } else { 51 + print $file "\n" . $key . "\n"; 52 + foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) { 53 + print $file ($pattern . "\n"); 54 + } 55 + } 56 + } 57 + close($file); 58 + } 59 + 60 sub file_input { 61 + my ($hashref, $filename) = (@_); 62 + 63 my $lastline = ""; 64 my $case = " "; 65 + $$hashref{$case} = ""; 66 67 + open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n"; 68 + 69 + while (<$file>) { 70 my $line = $_; 71 72 # Pattern line? 73 if ($line =~ m/^([A-Z]):\s*(.*)/) { 74 $line = $1 . ":\t" . trim($2) . "\n"; 75 if ($lastline eq "") { 76 + $$hashref{$case} = $$hashref{$case} . $line; 77 next; 78 } 79 $case = trim($lastline); 80 + exists $$hashref{$case} and die "Header '$case' already exists"; 81 + $$hashref{$case} = $line; 82 $lastline = ""; 83 next; 84 } 85 86 if ($case eq " ") { 87 + $$hashref{$case} = $$hashref{$case} . $lastline; 88 $lastline = $line; 89 next; 90 } 91 trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'"); 92 $lastline = $line; 93 } 94 + $$hashref{$case} = $$hashref{$case} . $lastline; 95 + close($file); 96 } 97 98 + my %hash; 99 + my %new_hash; 100 + 101 + file_input(\%hash, "MAINTAINERS"); 102 + 103 + foreach my $type (@ARGV) { 104 + foreach my $key (keys %hash) { 105 + if ($key =~ /$type/ || $hash{$key} =~ /$type/) { 106 + $new_hash{$key} = $hash{$key}; 107 + delete $hash{$key}; 108 + } 109 + } 110 + } 111 + 112 + alpha_output(\%hash, "MAINTAINERS.new"); 113 + alpha_output(\%new_hash, "SECTION.new"); 114 + 115 exit(0);
+2
tools/build/feature/test-bpf.c
··· 11 # define __NR_bpf 280 12 # elif defined(__sparc__) 13 # define __NR_bpf 349 14 # else 15 # error __NR_bpf not defined. libbpf does not support your arch. 16 # endif
··· 11 # define __NR_bpf 280 12 # elif defined(__sparc__) 13 # define __NR_bpf 349 14 + # elif defined(__s390__) 15 + # define __NR_bpf 351 16 # else 17 # error __NR_bpf not defined. libbpf does not support your arch. 18 # endif
+2
tools/lib/bpf/bpf.c
··· 39 # define __NR_bpf 280 40 # elif defined(__sparc__) 41 # define __NR_bpf 349 42 # else 43 # error __NR_bpf not defined. libbpf does not support your arch. 44 # endif
··· 39 # define __NR_bpf 280 40 # elif defined(__sparc__) 41 # define __NR_bpf 349 42 + # elif defined(__s390__) 43 + # define __NR_bpf 351 44 # else 45 # error __NR_bpf not defined. libbpf does not support your arch. 46 # endif
+11
tools/testing/selftests/bpf/test_pkt_md_access.c
··· 12 13 int _version SEC("version") = 1; 14 15 #define TEST_FIELD(TYPE, FIELD, MASK) \ 16 { \ 17 TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ 18 if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ 19 return TC_ACT_SHOT; \ 20 } 21 22 SEC("test1") 23 int process(struct __sk_buff *skb)
··· 12 13 int _version SEC("version") = 1; 14 15 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 16 #define TEST_FIELD(TYPE, FIELD, MASK) \ 17 { \ 18 TYPE tmp = *(volatile TYPE *)&skb->FIELD; \ 19 if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ 20 return TC_ACT_SHOT; \ 21 } 22 + #else 23 + #define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b)) 24 + #define TEST_FIELD(TYPE, FIELD, MASK) \ 25 + { \ 26 + TYPE tmp = *((volatile TYPE *)&skb->FIELD + \ 27 + TEST_FIELD_OFFSET(skb->FIELD, TYPE)); \ 28 + if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK)) \ 29 + return TC_ACT_SHOT; \ 30 + } 31 + #endif 32 33 SEC("test1") 34 int process(struct __sk_buff *skb)
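The test_pkt_md_access.c fix adds a big-endian variant of TEST_FIELD(): when a field narrower than the underlying __u32 is read through a pointer cast, its least significant part is element 0 only on little-endian machines and the last element on big-endian ones. The selftest keys off the compiler's __BYTE_ORDER__ builtin; the host-side demo below shows the same offset arithmetic using glibc's <endian.h>:

/* Sketch only: locate the least significant element of a wider field. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD_OFFSET(full, part) ((sizeof(full) - sizeof(part)) / sizeof(part))

int main(void)
{
    uint32_t word = 0x11223344;
    uint8_t low;

#if __BYTE_ORDER == __LITTLE_ENDIAN
    low = *(uint8_t *)&word;                             /* element 0 */
#else
    low = *((uint8_t *)&word + FIELD_OFFSET(word, low)); /* last element */
#endif
    printf("low byte = 0x%02x\n", (unsigned int)low);    /* 0x44 either way */
    return 0;
}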
+10 -9
tools/testing/selftests/bpf/test_verifier.c
··· 8 * License as published by the Free Software Foundation. 9 */ 10 11 #include <asm/types.h> 12 #include <linux/types.h> 13 #include <stdint.h> ··· 1099 "check skb->hash byte load permitted", 1100 .insns = { 1101 BPF_MOV64_IMM(BPF_REG_0, 0), 1102 - #ifdef __LITTLE_ENDIAN 1103 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1104 offsetof(struct __sk_buff, hash)), 1105 #else ··· 1136 "check skb->hash byte load not permitted 3", 1137 .insns = { 1138 BPF_MOV64_IMM(BPF_REG_0, 0), 1139 - #ifdef __LITTLE_ENDIAN 1140 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1141 offsetof(struct __sk_buff, hash) + 3), 1142 #else ··· 1245 "check skb->hash half load permitted", 1246 .insns = { 1247 BPF_MOV64_IMM(BPF_REG_0, 0), 1248 - #ifdef __LITTLE_ENDIAN 1249 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1250 offsetof(struct __sk_buff, hash)), 1251 #else ··· 1260 "check skb->hash half load not permitted", 1261 .insns = { 1262 BPF_MOV64_IMM(BPF_REG_0, 0), 1263 - #ifdef __LITTLE_ENDIAN 1264 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1265 offsetof(struct __sk_buff, hash) + 2), 1266 #else ··· 5423 "check bpf_perf_event_data->sample_period byte load permitted", 5424 .insns = { 5425 BPF_MOV64_IMM(BPF_REG_0, 0), 5426 - #ifdef __LITTLE_ENDIAN 5427 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 5428 offsetof(struct bpf_perf_event_data, sample_period)), 5429 #else ··· 5439 "check bpf_perf_event_data->sample_period half load permitted", 5440 .insns = { 5441 BPF_MOV64_IMM(BPF_REG_0, 0), 5442 - #ifdef __LITTLE_ENDIAN 5443 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5444 offsetof(struct bpf_perf_event_data, sample_period)), 5445 #else ··· 5455 "check bpf_perf_event_data->sample_period word load permitted", 5456 .insns = { 5457 BPF_MOV64_IMM(BPF_REG_0, 0), 5458 - #ifdef __LITTLE_ENDIAN 5459 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 5460 offsetof(struct bpf_perf_event_data, sample_period)), 5461 #else ··· 5482 "check skb->data half load not permitted", 5483 .insns = { 5484 BPF_MOV64_IMM(BPF_REG_0, 0), 5485 - #ifdef __LITTLE_ENDIAN 5486 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5487 offsetof(struct __sk_buff, data)), 5488 #else ··· 5498 "check skb->tc_classid half load not permitted for lwt prog", 5499 .insns = { 5500 BPF_MOV64_IMM(BPF_REG_0, 0), 5501 - #ifdef __LITTLE_ENDIAN 5502 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5503 offsetof(struct __sk_buff, tc_classid)), 5504 #else
··· 8 * License as published by the Free Software Foundation. 9 */ 10 11 + #include <endian.h> 12 #include <asm/types.h> 13 #include <linux/types.h> 14 #include <stdint.h> ··· 1098 "check skb->hash byte load permitted", 1099 .insns = { 1100 BPF_MOV64_IMM(BPF_REG_0, 0), 1101 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1102 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1103 offsetof(struct __sk_buff, hash)), 1104 #else ··· 1135 "check skb->hash byte load not permitted 3", 1136 .insns = { 1137 BPF_MOV64_IMM(BPF_REG_0, 0), 1138 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1140 offsetof(struct __sk_buff, hash) + 3), 1141 #else ··· 1244 "check skb->hash half load permitted", 1245 .insns = { 1246 BPF_MOV64_IMM(BPF_REG_0, 0), 1247 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1248 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1249 offsetof(struct __sk_buff, hash)), 1250 #else ··· 1259 "check skb->hash half load not permitted", 1260 .insns = { 1261 BPF_MOV64_IMM(BPF_REG_0, 0), 1262 + #if __BYTE_ORDER == __LITTLE_ENDIAN 1263 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1264 offsetof(struct __sk_buff, hash) + 2), 1265 #else ··· 5422 "check bpf_perf_event_data->sample_period byte load permitted", 5423 .insns = { 5424 BPF_MOV64_IMM(BPF_REG_0, 0), 5425 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5426 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 5427 offsetof(struct bpf_perf_event_data, sample_period)), 5428 #else ··· 5438 "check bpf_perf_event_data->sample_period half load permitted", 5439 .insns = { 5440 BPF_MOV64_IMM(BPF_REG_0, 0), 5441 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5442 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5443 offsetof(struct bpf_perf_event_data, sample_period)), 5444 #else ··· 5454 "check bpf_perf_event_data->sample_period word load permitted", 5455 .insns = { 5456 BPF_MOV64_IMM(BPF_REG_0, 0), 5457 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5458 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 5459 offsetof(struct bpf_perf_event_data, sample_period)), 5460 #else ··· 5481 "check skb->data half load not permitted", 5482 .insns = { 5483 BPF_MOV64_IMM(BPF_REG_0, 0), 5484 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5485 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5486 offsetof(struct __sk_buff, data)), 5487 #else ··· 5497 "check skb->tc_classid half load not permitted for lwt prog", 5498 .insns = { 5499 BPF_MOV64_IMM(BPF_REG_0, 0), 5500 + #if __BYTE_ORDER == __LITTLE_ENDIAN 5501 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 5502 offsetof(struct __sk_buff, tc_classid)), 5503 #else
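test_verifier.c switches from "#ifdef __LITTLE_ENDIAN" to comparing __BYTE_ORDER because userspace <endian.h> defines both __LITTLE_ENDIAN and __BIG_ENDIAN as numeric constants on every host, so the #ifdef form is always true. A short demo of the difference, assuming a glibc-style <endian.h>:

/* Sketch only: only the __BYTE_ORDER comparison selects the right branch. */
#include <endian.h>
#include <stdio.h>

int main(void)
{
#ifdef __LITTLE_ENDIAN
    puts("#ifdef __LITTLE_ENDIAN: taken even on big-endian hosts");
#endif
#if __BYTE_ORDER == __LITTLE_ENDIAN
    puts("__BYTE_ORDER test: this host is little-endian");
#else
    puts("__BYTE_ORDER test: this host is big-endian");
#endif
    return 0;
}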