-1 Documentation/filesystems/btrfs.rst
+1 Documentation/process/embargoed-hardware-issues.rst
···
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
+ RISC-V Palmer Dabbelt <palmer@dabbelt.com>
Samsung Javier González <javier.gonz@samsung.com>

Microsoft James Morris <jamorris@linux.microsoft.com>
+11 -12 MAINTAINERS
···
F: arch/arm/boot/dts/amlogic/
F: arch/arm/mach-meson/
F: arch/arm64/boot/dts/amlogic/
- F: drivers/genpd/amlogic/
+ F: drivers/pmdomain/amlogic/
F: drivers/mmc/host/meson*
F: drivers/phy/amlogic/
F: drivers/pinctrl/meson/
···
F: drivers/clk/clk-apple-nco.c
F: drivers/cpufreq/apple-soc-cpufreq.c
F: drivers/dma/apple-admac.c
- F: drivers/genpd/apple/
+ F: drivers/pmdomain/apple/
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
···
F: drivers/clk/clk-nomadik.c
F: drivers/clocksource/clksrc-dbx500-prcmu.c
F: drivers/dma/ste_dma40*
- F: drivers/genpd/st/ste-ux500-pm-domain.c
+ F: drivers/pmdomain/st/ste-ux500-pm-domain.c
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
F: drivers/iio/adc/ab8500-gpadc.c
···
F: arch/arm/mach-shmobile/
F: arch/arm64/boot/dts/renesas/
F: arch/riscv/boot/dts/renesas/
- F: drivers/genpd/renesas/
+ F: drivers/pmdomain/renesas/
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
K: \brenesas,
···
F: drivers/irqchip/irq-bcm63*
F: drivers/irqchip/irq-bcm7*
F: drivers/irqchip/irq-brcmstb*
- F: drivers/genpd/bcm/bcm63xx-power.c
+ F: drivers/pmdomain/bcm/bcm63xx-power.c
F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
···
L: linux-pm@vger.kernel.org
S: Maintained
T: git https://github.com/broadcom/stblinux.git
- F: drivers/genpd/bcm/bcm-pmb.c
+ F: drivers/pmdomain/bcm/bcm-pmb.c
F: include/dt-bindings/soc/bcm-pmb.h

BROADCOM SPECIFIC AMBA DRIVER (BCMA)
···
L: linux-btrfs@vger.kernel.org
S: Maintained
W: https://btrfs.readthedocs.io
- W: https://btrfs.wiki.kernel.org/
Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
···
L: linux-pm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
- F: drivers/genpd/
+ F: drivers/pmdomain/

GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
···
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
- F: drivers/genpd/qcom/cpr.c
+ F: drivers/pmdomain/qcom/cpr.c

QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@kernel.org>
···
M: Walker Chen <walker.chen@starfivetech.com>
S: Supported
F: Documentation/devicetree/bindings/power/starfive*
- F: drivers/genpd/starfive/jh71xx-pmu.c
+ F: drivers/pmdomain/starfive/jh71xx-pmu.c
F: include/dt-bindings/power/starfive,jh7110-pmu.h

STARFIVE SOC DRIVERS
···
F: drivers/irqchip/irq-ti-sci-intr.c
F: drivers/reset/reset-ti-sci.c
F: drivers/soc/ti/ti_sci_inta_msi.c
- F: drivers/genpd/ti/ti_sci_pm_domains.c
+ F: drivers/pmdomain/ti/ti_sci_pm_domains.c
F: include/dt-bindings/soc/ti,sci_pm_domain.h
F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
···
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
- F: drivers/genpd/ti/omap_prm.c
+ F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*

TI LM49xxx FAMILY ASoC CODEC DRIVERS
+1 -1 Makefile
+1 arch/parisc/include/asm/cache.h
-8 arch/parisc/include/asm/mckinley.h
+3 -2 arch/parisc/include/asm/pdc.h
···
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
- #ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
+ int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot);
+ int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
- #endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
+1 arch/parisc/include/asm/processor.h
+5 -2 arch/parisc/include/asm/ropes.h
···
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
- u64 *pdir_base; /* physical base address */
+ __le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
···
struct ioc ioc[MAX_IOC];
};

+ /* list of SBA's in system, see drivers/parisc/sba_iommu.c */
+ extern struct sba_device *sba_list;
+
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
···

#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL

- #define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
+ #define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL

#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
+15 arch/parisc/include/asm/shmparam.h
···
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H

+ /*
+ * PA-RISC uses virtually indexed & physically tagged (VIPT) caches
+ * which has strict requirements when two pages to the same physical
+ * address are accessed through different mappings. Read the section
+ * "Address Aliasing" in the arch docs for more detail:
+ * PA-RISC 1.1 (page 3-6):
+ * https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
+ * PA-RISC 2.0 (page F-5):
+ * https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
+ *
+ * For Linux we allow kernel and userspace to map pages on page size
+ * granularity (SHMLBA) but have to ensure that, if two pages are
+ * mapped to the same physical address, the virtual and physical
+ * addresses modulo SHM_COLOUR are identical.
+ */
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
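The aliasing rule documented in that new comment is plain modular arithmetic. The sketch below is a hypothetical userspace illustration (the SHM_COLOUR value mirrors the header; mapping_is_coherent() is not a kernel function): two virtual mappings of the same physical page are only cache-coherent on PA-RISC if their addresses are congruent modulo SHM_COLOUR.

#include <stdio.h>
#include <stdint.h>

#define SHM_COLOUR 0x00400000UL   /* from asm/shmparam.h: 4 MiB colouring */

/* Hypothetical helper: two virtual addresses may map the same physical
 * page only if they are congruent modulo SHM_COLOUR. */
static int mapping_is_coherent(uintptr_t va1, uintptr_t va2)
{
    return (va1 % SHM_COLOUR) == (va2 % SHM_COLOUR);
}

int main(void)
{
    uintptr_t a = 0x40000000, b = 0x40400000, c = 0x40500000;

    printf("0x%lx vs 0x%lx: %s\n", (unsigned long)a, (unsigned long)b,
           mapping_is_coherent(a, b) ? "coherent" : "would alias badly");
    printf("0x%lx vs 0x%lx: %s\n", (unsigned long)a, (unsigned long)c,
           mapping_is_coherent(a, c) ? "coherent" : "would alias badly");
    return 0;
}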
+2 arch/parisc/kernel/asm-offsets.c
+1 -7 arch/parisc/kernel/cache.c
···

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
- static struct pdc_btlb_info btlb_info __ro_after_init;
+ struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
···
dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
-
- #ifndef CONFIG_PA20
- if (pdc_btlb_info(&btlb_info) < 0) {
- memset(&btlb_info, 0, sizeof btlb_info);
- }
- #endif

if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
+1 -1 arch/parisc/kernel/drivers.c
···
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);

+ #define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
- #define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
+45 -11 arch/parisc/kernel/firmware.c
···
return retval;
}

- #ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
···
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
- int retval;
+ int retval;
unsigned long flags;

- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
- memcpy(btlb, pdc_result, sizeof(*btlb));
- spin_unlock_irqrestore(&pdc_lock, flags);
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;

- if(retval < 0) {
- btlb->max_size = 0;
- }
- return retval;
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+ }
+
+ int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
+ unsigned long entry_info, unsigned long slot)
+ {
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
+ (unsigned long) vpage, physpage, len, entry_info, slot);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+ }
+
+ int pdc_btlb_purge_all(void)
+ {
+ int retval;
+ unsigned long flags;
+
+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
}

/**
···
int retval;
unsigned long flags;

+ if (IS_ENABLED(CONFIG_PA20))
+ return PDC_BAD_PROC;
+
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
···

return retval;
}
- #endif /* !CONFIG_PA20 */

/**
* pdc_lan_station_id - Get the LAN address.
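The hunk above swaps preprocessor exclusion (#ifndef CONFIG_PA20) for a runtime-looking IS_ENABLED() guard, so the PDC BTLB helpers stay visible to every caller while their bodies are still dead code on PA2.0 builds. The standalone sketch below only illustrates that pattern under stated assumptions: CONFIG_PA20_ENABLED and the simplified constant stand in for the kernel's Kconfig/IS_ENABLED machinery, and pdc_btlb_purge_all_sketch() is hypothetical, not the kernel function.

#include <stdio.h>

/* Hypothetical stand-ins: in a real kernel build the value comes from
 * Kconfig and IS_ENABLED() expands to a compile-time 0 or 1. */
#define CONFIG_PA20_ENABLED 1
#define PDC_BAD_PROC (-3)

static int pdc_btlb_purge_all_sketch(void)
{
    /* Compile-time-constant guard: when the constant is 1 the rest of
     * the function is dead code the compiler can drop, but the symbol
     * still exists, so callers need no #ifdef of their own. */
    if (CONFIG_PA20_ENABLED)
        return PDC_BAD_PROC;

    /* the firmware call would go here */
    return 0;
}

int main(void)
{
    printf("pdc_btlb_purge_all_sketch() = %d\n", pdc_btlb_purge_all_sketch());
    return 0;
}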
+13 -3 arch/parisc/kernel/head.S
···
std %dp,0x18(%r10)
#endif

- #ifdef CONFIG_64BIT
- /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
+ #ifdef CONFIG_64BIT
+ /* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
···
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
-
+
+ #ifndef CONFIG_64BIT
+ /* clear all BTLBs */
+ ldi PDC_BLOCK_TLB,%arg0
+ load32 PA(stext_pdc_btlb_ret), %rp
+ ldw MEM_PDC_LO(%r0),%r3
+ bv (%r3)
+ ldi PDC_BTLB_PURGE_ALL,%arg1
+ stext_pdc_btlb_ret:
+ #endif
+
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
+1 -1 arch/parisc/kernel/irq.c
+2 arch/parisc/kernel/processor.c
+1 arch/parisc/kernel/vmlinux.lds.S
+72 arch/parisc/mm/init.c
···
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+ #include <asm/asm-offsets.h>

extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
···

sparse_init();
parisc_bootmem_free();
+ }
+
+ static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+ {
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+ size /= 2;
+ continue;
+ }
+ }
+
+ void btlb_init_per_cpu(void)
+ {
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+ else if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+
+ /* insert BLTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
}

#ifdef CONFIG_PA20
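The splitting loop in the new alloc_btlb() above is the interesting part: it grows the block while the start stays naturally aligned and the range still covers twice the size, shrinks it when alignment or the remaining length demands, and inserts each naturally aligned block it lands on. The short userspace sketch below re-implements only that loop with assumed 4 KiB/4 MiB constants and prints the blocks instead of calling firmware; the min_num_pages filter is omitted for brevity.

#include <stdio.h>

#define PAGE_SIZE     0x1000UL    /* assumed 4 KiB pages */
#define HUGEPAGE_SIZE 0x400000UL  /* assumed 4 MiB starting block */

/* Hypothetical re-implementation of the block-splitting loop. */
static void alloc_btlb_sketch(unsigned long start, unsigned long end,
                              int *slot, int slot_max)
{
    unsigned long size = HUGEPAGE_SIZE;

    while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
        /* grow while start is aligned to 2*size and 2*size still fits */
        if (((start & (2 * size - 1)) == 0) && (end - start) >= (2 * size)) {
            size <<= 1;
            continue;
        }
        /* shrink if start is not aligned to the current size */
        if ((start & (size - 1)) != 0) {
            size >>= 1;
            continue;
        }
        if ((end - start) >= size) {
            printf("slot %d: map 0x%lx..0x%lx (size 0x%lx)\n",
                   *slot, start, start + size, size);
            (*slot)++;
            start += size;
            continue;
        }
        size >>= 1;   /* remaining range is smaller than the block */
    }
}

int main(void)
{
    int slot = 0;

    /* e.g. a 9 MiB text range starting on a 4 MiB boundary */
    alloc_btlb_sketch(0x00400000UL, 0x00D00000UL, &slot, 8);
    return 0;
}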
+2 -2 arch/riscv/include/asm/errata_list.h
···
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01001 rs1 000 00000 0001011
* dcache.cva rs1 (clean, virtual address)
- * 0000001 00100 rs1 000 00000 0001011
+ * 0000001 00101 rs1 000 00000 0001011
*
* dcache.cipa rs1 (clean then invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
···
* 0000000 11001 00000 000 00000 0001011
*/
#define THEAD_inval_A0 ".long 0x0265000b"
- #define THEAD_clean_A0 ".long 0x0245000b"
+ #define THEAD_clean_A0 ".long 0x0255000b"
#define THEAD_flush_A0 ".long 0x0275000b"
#define THEAD_SYNC_S ".long 0x0190000b"
-1
arch/riscv/kernel/elf_kexec.c
+7
-1
arch/riscv/kernel/elf_kexec.c
···
kbuf.image = image;
kbuf.buf_min = lowest_paddr;
kbuf.buf_max = ULONG_MAX;
- kbuf.buf_align = PAGE_SIZE;
+
+ /*
+ * Current riscv boot protocol requires 2MB alignment for
+ * RV64 and 4MB alignment for RV32
+ *
+ */
+ kbuf.buf_align = PMD_SIZE;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
kbuf.top_down = false;
+5 -2 arch/riscv/kvm/vcpu_onereg.c
···
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;

- *reg_val = 0;
host_isa_ext = kvm_isa_ext_arr[reg_num];
+ if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+ return -ENOENT;
+
+ *reg_val = 0;
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
*reg_val = 1; /* Mark the given extension as available */
···
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

isa_ext = kvm_isa_ext_arr[i];
- if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+ if (!__riscv_isa_extension_available(NULL, isa_ext))
continue;

if (uindices) {
+1 -1 arch/x86/Kconfig
···
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
select ARCH_USE_MEMREMAP_PROT
+ select EFI_RUNTIME_MAP if KEXEC_CORE
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
···
config EFI_RUNTIME_MAP
bool "Export EFI runtime maps to sysfs" if EXPERT
depends on EFI
- default KEXEC_CORE
help
Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
That memory map is required by the 2nd kernel to set up EFI virtual
+8 arch/x86/boot/compressed/ident_map_64.c
···
return NULL;
}

+ /* Consumed more tables than expected? */
+ if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
+ debug_putstr("pgt_buf running low in " __FILE__ "\n");
+ debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
+ debug_putaddr(pages->pgt_buf_offset);
+ debug_putaddr(pages->pgt_buf_size);
+ }
+
entry = pages->pgt_buf + pages->pgt_buf_offset;
pages->pgt_buf_offset += PAGE_SIZE;
+32 -15 arch/x86/include/asm/boot.h
···
#ifdef CONFIG_X86_64
# define BOOT_STACK_SIZE 0x4000

- # define BOOT_INIT_PGT_SIZE (6*4096)
- # ifdef CONFIG_RANDOMIZE_BASE
/*
- * Assuming all cross the 512GB boundary:
- * 1 page for level4
- * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
- * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
- * Total is 19 pages.
+ * Used by decompressor's startup_32() to allocate page tables for identity
+ * mapping of the 4G of RAM in 4-level paging mode:
+ * - 1 level4 table;
+ * - 1 level3 table;
+ * - 4 level2 table that maps everything with 2M pages;
+ *
+ * The additional level5 table needed for 5-level paging is allocated from
+ * trampoline_32bit memory.
*/
- # ifdef CONFIG_X86_VERBOSE_BOOTUP
- # define BOOT_PGT_SIZE (19*4096)
- # else /* !CONFIG_X86_VERBOSE_BOOTUP */
- # define BOOT_PGT_SIZE (17*4096)
- # endif
- # else /* !CONFIG_RANDOMIZE_BASE */
- # define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE
- # endif
+ # define BOOT_INIT_PGT_SIZE (6*4096)
+
+ /*
+ * Total number of page tables kernel_add_identity_map() can allocate,
+ * including page tables consumed by startup_32().
+ *
+ * Worst-case scenario:
+ * - 5-level paging needs 1 level5 table;
+ * - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
+ * assuming all of them cross 256T boundary:
+ * + 4*2 level4 table;
+ * + 4*2 level3 table;
+ * + 4*2 level2 table;
+ * - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
+ * + 1 level4 table;
+ * + 1 level3 table;
+ * + 1 level2 table;
+ * Total: 28 tables
+ *
+ * Add 4 spare table in case decompressor touches anything beyond what is
+ * accounted above. Warn if it happens.
+ */
+ # define BOOT_PGT_SIZE_WARN (28*4096)
+ # define BOOT_PGT_SIZE (32*4096)

#else /* !CONFIG_X86_64 */
# define BOOT_STACK_SIZE 0x1000
-30
arch/x86/include/asm/efi.h
+2
-30
arch/x86/include/asm/efi.h
···
91
91
92
92
#ifdef CONFIG_X86_32
93
93
#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
94
-
95
-
#define arch_efi_call_virt_setup() \
96
-
({ \
97
-
efi_fpu_begin(); \
98
-
firmware_restrict_branch_speculation_start(); \
99
-
})
100
-
101
-
#define arch_efi_call_virt_teardown() \
102
-
({ \
103
-
firmware_restrict_branch_speculation_end(); \
104
-
efi_fpu_end(); \
105
-
})
106
-
107
94
#else /* !CONFIG_X86_32 */
108
95
#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
109
96
···
103
116
__efi_call(__VA_ARGS__); \
104
117
})
105
118
106
-
#define arch_efi_call_virt_setup() \
107
-
({ \
108
-
efi_sync_low_kernel_mappings(); \
109
-
efi_fpu_begin(); \
110
-
firmware_restrict_branch_speculation_start(); \
111
-
efi_enter_mm(); \
112
-
})
113
-
114
119
#undef arch_efi_call_virt
115
120
#define arch_efi_call_virt(p, f, args...) ({ \
116
121
u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime); \
117
122
ret = efi_call((void *)p->f, args); \
118
123
ibt_restore(ibt); \
119
124
ret; \
120
-
})
121
-
122
-
#define arch_efi_call_virt_teardown() \
123
-
({ \
124
-
efi_leave_mm(); \
125
-
firmware_restrict_branch_speculation_end(); \
126
-
efi_fpu_end(); \
127
125
})
128
126
129
127
#ifdef CONFIG_KASAN
···
140
168
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
141
169
extern void efi_free_boot_services(void);
142
170
143
-
void efi_enter_mm(void);
144
-
void efi_leave_mm(void);
171
+
void arch_efi_call_virt_setup(void);
172
+
void arch_efi_call_virt_teardown(void);
145
173
146
174
/* kexec external ABI */
147
175
struct efi_setup_data {
+8
arch/x86/include/asm/linkage.h
+8
arch/x86/include/asm/linkage.h
···
8
8
#undef notrace
9
9
#define notrace __attribute__((no_instrument_function))
10
10
11
+
#ifdef CONFIG_64BIT
12
+
/*
13
+
* The generic version tends to create spurious ENDBR instructions under
14
+
* certain conditions.
15
+
*/
16
+
#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
17
+
#endif
18
+
11
19
#ifdef CONFIG_X86_32
12
20
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
13
21
#endif /* CONFIG_X86_32 */
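The x86-64 _THIS_IP_ above relies on a RIP-relative LEA to read "the address of this instruction" without the label-based trick that can make the compiler emit spurious ENDBR landing pads. The sketch below is a userspace approximation of the same idea; it assumes an x86-64 target and GNU-style inline asm and is not the kernel macro itself.

#include <stdio.h>

/* RIP-relative LEA yields the address of the instruction itself. */
#define THIS_IP ({ unsigned long __here; \
                   asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })

int main(void)
{
    printf("executing near address 0x%lx\n", THIS_IP);
    printf("and slightly later at  0x%lx\n", THIS_IP);
    return 0;
}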
+5 -6 arch/x86/kernel/apic/x2apic_uv_x.c
···
{
struct uv_gam_range_entry *gre = uv_gre_table;
int nums, numn, nump;
- int cpu, i, lnid;
+ int i, lnid, apicid;
int minsock = _min_socket;
int maxsock = _max_socket;
int minpnode = _min_pnode;
···

/* Set socket -> node values: */
lnid = NUMA_NO_NODE;
- for_each_possible_cpu(cpu) {
- int nid = cpu_to_node(cpu);
- int apicid, sockid;
+ for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
+ int nid = __apicid_to_node[apicid];
+ int sockid;

- if (lnid == nid)
+ if ((nid == NUMA_NO_NODE) || (lnid == nid))
continue;
lnid = nid;

- apicid = per_cpu(x86_cpu_to_apicid, cpu);
sockid = apicid >> uv_cpuid.socketid_shift;

if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)
+9 -3 arch/x86/kernel/smpboot.c
···
}


- #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
···
return cpu_cluster_flags() | x86_sched_itmt_flags();
}
#endif
- #endif
+
+ static int x86_die_flags(void)
+ {
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ return x86_sched_itmt_flags();
+
+ return 0;
+ }

/*
* Set if a package/die has multiple NUMA nodes inside.
···
*/
if (!x86_has_numa_in_package) {
x86_topology[i++] = (struct sched_domain_topology_level){
- cpu_cpu_mask, SD_INIT_NAME(DIE)
+ cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
};
}
-4 arch/x86/lib/putuser.S
···
EXPORT_SYMBOL(__put_user_1)

SYM_FUNC_START(__put_user_nocheck_1)
- ENDBR
ASM_STAC
2: movb %al,(%_ASM_CX)
xor %ecx,%ecx
···
EXPORT_SYMBOL(__put_user_2)

SYM_FUNC_START(__put_user_nocheck_2)
- ENDBR
ASM_STAC
4: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
···
EXPORT_SYMBOL(__put_user_4)

SYM_FUNC_START(__put_user_nocheck_4)
- ENDBR
ASM_STAC
6: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
···
EXPORT_SYMBOL(__put_user_8)

SYM_FUNC_START(__put_user_nocheck_8)
- ENDBR
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
+12 arch/x86/platform/efi/efi_32.c
···
}
}
}
+
+ void arch_efi_call_virt_setup(void)
+ {
+ efi_fpu_begin();
+ firmware_restrict_branch_speculation_start();
+ }
+
+ void arch_efi_call_virt_teardown(void)
+ {
+ firmware_restrict_branch_speculation_end();
+ efi_fpu_end();
+ }
+17 -2 arch/x86/platform/efi/efi_64.c
···
* can not change under us.
* It should be ensured that there are no concurrent calls to this function.
*/
- void efi_enter_mm(void)
+ static void efi_enter_mm(void)
{
efi_prev_mm = current->active_mm;
current->active_mm = &efi_mm;
switch_mm(efi_prev_mm, &efi_mm, NULL);
}

- void efi_leave_mm(void)
+ static void efi_leave_mm(void)
{
current->active_mm = efi_prev_mm;
switch_mm(&efi_mm, efi_prev_mm, NULL);
+ }
+
+ void arch_efi_call_virt_setup(void)
+ {
+ efi_sync_low_kernel_mappings();
+ efi_fpu_begin();
+ firmware_restrict_branch_speculation_start();
+ efi_enter_mm();
+ }
+
+ void arch_efi_call_virt_teardown(void)
+ {
+ efi_leave_mm();
+ firmware_restrict_branch_speculation_end();
+ efi_fpu_end();
}

static DEFINE_SPINLOCK(efi_runtime_lock);
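These hunks turn the arch_efi_call_virt_setup()/teardown() macros into out-of-line functions; the generic runtime-wrapper code brackets each firmware call between them. The sketch below only illustrates that bracketing shape with made-up stubs under stated assumptions: the function bodies and fake_efi_get_time() are placeholders, not the kernel's EFI code.

#include <stdio.h>

/* Stubs standing in for the real x86 implementations shown above. */
static void arch_efi_call_virt_setup(void)    { puts("enter EFI mm, begin FPU, restrict speculation"); }
static void arch_efi_call_virt_teardown(void) { puts("leave EFI mm, end FPU, lift speculation restriction"); }

static long fake_efi_get_time(void)
{
    puts("  ...firmware service runs here...");
    return 0;
}

/* The caller always wraps the service invocation with setup/teardown. */
static long efi_call_one_service(long (*service)(void))
{
    long status;

    arch_efi_call_virt_setup();
    status = service();
    arch_efi_call_virt_teardown();
    return status;
}

int main(void)
{
    return (int)efi_call_one_service(fake_efi_get_time);
}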
+4 arch/x86/purgatory/Makefile
···
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

+ # When LTO is enabled, llvm emits many text sections, which is not supported
+ # by kexec. Remove -flto=* flags.
+ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
+7 -6 block/blk-mq.c
···
struct blk_mq_tags **new_tags;
int i;

- if (set->nr_hw_queues >= new_nr_hw_queues) {
- for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
- __blk_mq_free_map_and_rqs(set, i);
+ if (set->nr_hw_queues >= new_nr_hw_queues)
goto done;
- }

new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
···
{
struct request_queue *q;
LIST_HEAD(head);
- int prev_nr_hw_queues;
+ int prev_nr_hw_queues = set->nr_hw_queues;
+ int i;

lockdep_assert_held(&set->tag_list_lock);

···
blk_mq_sysfs_unregister_hctxs(q);
}

- prev_nr_hw_queues = set->nr_hw_queues;
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
goto reregister;

···

list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
+
+ /* Free the excess tags when nr_hw_queues shrink. */
+ for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+ __blk_mq_free_map_and_rqs(set, i);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
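The point of the reordering above is that when the hardware-queue count shrinks, the per-queue tag resources are freed only after the new, smaller count is fully in effect and the queues are unfrozen, rather than up front. The toy sketch below shows that general "commit the new size first, free the excess last" ordering with invented names; it is not the blk-mq API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tagset { int nr; char **tags; };   /* hypothetical structure */

static void shrink_tagset(struct tagset *set, int new_nr)
{
    int prev_nr = set->nr;
    int i;

    if (new_nr >= prev_nr)
        return;
    set->nr = new_nr;                 /* commit the smaller size first */
    /* ...switch all users over to queues 0..new_nr-1 here... */
    for (i = new_nr; i < prev_nr; i++) {   /* then free the excess */
        printf("freeing resources of queue %d (%s)\n", i, set->tags[i]);
        free(set->tags[i]);
        set->tags[i] = NULL;
    }
}

int main(void)
{
    struct tagset set = { .nr = 4 };
    char name[16];
    int i;

    set.tags = calloc(set.nr, sizeof(*set.tags));
    for (i = 0; i < set.nr; i++) {
        snprintf(name, sizeof(name), "hctx%d", i);
        set.tags[i] = strdup(name);
    }
    shrink_tagset(&set, 2);
    for (i = 0; i < 2; i++)
        free(set.tags[i]);
    free(set.tags);
    return 0;
}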
+1 -1 drivers/Makefile
+1 -1 drivers/acpi/thermal.c
+9 drivers/ata/ahci.c
···
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");

+ if (!(hpriv->cap & HOST_CAP_PART))
+ host->flags |= ATA_HOST_NO_PART;
+
+ if (!(hpriv->cap & HOST_CAP_SSC))
+ host->flags |= ATA_HOST_NO_SSC;
+
+ if (!(hpriv->cap2 & HOST_CAP2_SDS))
+ host->flags |= ATA_HOST_NO_DEVSLP;
+
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
+23 -12 drivers/ata/libahci.c
···
return sprintf(buf, "%d\n", emp->blink_policy);
}

+ static void ahci_port_clear_pending_irq(struct ata_port *ap)
+ {
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
+ }
+
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
···
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);

- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
+ ahci_port_clear_pending_irq(ap);

/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
···
ata_tf_init(link->device, &tf);
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ ahci_port_clear_pending_irq(ap);

rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);
+2 -5 drivers/ata/libata-core.c
···
* been aborted by the device due to a limit timeout using the policy
* 0xD. For these commands, invoke EH to get the command sense data.
*/
- if (qc->result_tf.status & ATA_SENSE &&
- ((ata_is_ncq(qc->tf.protocol) &&
- dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id)))) {
+ if (qc->flags & ATA_QCFLAG_HAS_CDL &&
+ qc->result_tf.status & ATA_SENSE) {
/*
* Tell SCSI EH to not overwrite scmd->result even if this
* command is finished with result SAM_STAT_GOOD.
+3 -13 drivers/ata/libata-eh.c
···
}
}

- /*
- * Some controllers can't be frozen very well and may set spurious
- * error conditions during reset. Clear accumulated error
- * information and re-thaw the port if frozen. As reset is the
- * final recovery action and we cross check link onlineness against
- * device classification later, no hotplug event is lost by this.
- */
+ /* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ link->eh_info.serror = 0;
if (slave)
- memset(&slave->eh_info, 0, sizeof(link->eh_info));
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
-
- if (ata_port_is_frozen(ap))
- ata_eh_thaw_port(ap);

/*
* Make sure onlineness and classification result correspond.
+16 -3 drivers/ata/libata-sata.c
···
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
- if (ata_link_nr_enabled(link) > 0)
- /* no restrictions on LPM transitions */
+ if (ata_link_nr_enabled(link) > 0) {
+ /* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
- else {
+
+ /*
+ * If the controller does not support partial, slumber,
+ * or devsleep, then disallow these transitions.
+ */
+ if (link->ap->host->flags & ATA_HOST_NO_PART)
+ scontrol |= (0x1 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_SSC)
+ scontrol |= (0x2 << 8);
+
+ if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
+ scontrol |= (0x4 << 8);
+ } else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
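The hunk above clears the three "transitions disallowed" bits in SControl (bits 8..10) and then re-sets them individually for power states the controller does not support. The sketch below recomputes those bits from a flag word; the HOST_NO_* constants are arbitrary illustrative values, not the new ATA_HOST_NO_* flags, and lpm_scontrol() is not libata code.

#include <stdio.h>
#include <stdint.h>

#define HOST_NO_PART   (1u << 0)   /* hypothetical flag values */
#define HOST_NO_SSC    (1u << 1)
#define HOST_NO_DEVSLP (1u << 2)

static uint32_t lpm_scontrol(uint32_t scontrol, unsigned int host_flags)
{
    scontrol &= ~(0x7u << 8);            /* start with no restrictions */
    if (host_flags & HOST_NO_PART)
        scontrol |= (0x1u << 8);         /* disallow transitions to partial */
    if (host_flags & HOST_NO_SSC)
        scontrol |= (0x2u << 8);         /* disallow transitions to slumber */
    if (host_flags & HOST_NO_DEVSLP)
        scontrol |= (0x4u << 8);         /* disallow transitions to devsleep */
    return scontrol;
}

int main(void)
{
    printf("no restrictions:     0x%08x\n", lpm_scontrol(0, 0));
    printf("no partial/devsleep: 0x%08x\n",
           lpm_scontrol(0, HOST_NO_PART | HOST_NO_DEVSLP));
    return 0;
}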
+4 -5 drivers/ata/pata_parport/comm.c
···
{
int l, h, r;

- r = regr + cont_map[cont];
+ r = regr + cont_map[cont];

switch (pi->mode) {
case 0:
···
}

static void comm_disconnect(struct pi_adapter *pi)
-
{
w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
···
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
- }
+ }
}

static void comm_log_adapter(struct pi_adapter *pi)
-
- { char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
+ {
+ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };

dev_info(&pi->dev,
"DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",
+2 -2 drivers/ata/sata_mv.c
···

for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
- o += snprintf(linebuf + o, sizeof(linebuf) - o,
- "%08x ", readl(start + b));
+ o += scnprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",
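The reason for the snprintf()/scnprintf() swap above: snprintf() returns the length that would have been written, so accumulating its return value can push the offset past the end of the buffer, while the kernel's scnprintf() returns what was actually written. The sketch below emulates scnprintf() in userspace (my_scnprintf() is a simplified stand-in, not the kernel helper) to show the offset staying bounded.

#include <stdio.h>
#include <stddef.h>

/* Simplified emulation of kernel scnprintf() semantics. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, unsigned int v)
{
    int ret;

    if (size == 0)
        return 0;
    ret = snprintf(buf, size, fmt, v);
    if (ret < 0)
        return 0;
    return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
    char linebuf[16];
    int o = 0, i;

    for (i = 0; i < 4; i++)
        o += my_scnprintf(linebuf + o, sizeof(linebuf) - o, "%08x ", 0xdeadbeefu);

    printf("offset stays bounded: o = %d, buffer = \"%s\"\n", o, linebuf);
    return 0;
}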
+2 drivers/base/core.c
-2 drivers/char/agp/parisc-agp.c
+1 -1 drivers/char/tpm/tpm-chip.c
+35 -68 drivers/comedi/Kconfig
···

config COMEDI_PARPORT
tristate "Parallel port support"
- depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
···
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
- depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC

···

menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- depends on ISA
help
Enable comedi ISA and PC/104 drivers to be built

···

config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112

···

config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
···

config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-814 and PCL-816 ISA cards

···

config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
···

config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
···

config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
···

config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
···

config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
···

config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
···

config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
···

config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
···

config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
···

config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
- depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-A2150 cards

···

config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for National Instruments AT-AO-6/10 cards

···

config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
···

menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
- depends on PCI && HAS_IOPORT
+ depends on PCI
help
Enable support for comedi PCI drivers.

···

config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI9111 cards

···
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards

···

config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
···

config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Advantech PCI DIO cards
···

config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
- depends on COMEDI_AMPLC_DIO200
+ select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
···

config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards

···

config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
···

config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.

···

config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
···

config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
···

config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
···

config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC PCI-1200.

···
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA
- depends on HAS_IOPORT
select COMEDI_NI_TIOCMD
select COMEDI_8255
help
···

config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for Real Time Devices PCI4520/DM7520

···

config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
···

config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
- depends on COMEDI_DAS08
+ select COMEDI_DAS08
help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
···

config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
- depends on HAS_IOPORT
help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO

···

config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
···

config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
- depends on COMEDI_NI_LABPC
+ select COMEDI_NI_LABPC
help
Enable support for the National Instruments PCMCIA DAQCard-1200

···

config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
- depends on HAS_IOPORT
select COMEDI_NI_TIO
select COMEDI_8255
help
···

config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
- depends on HAS_IOPORT
help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
···

config COMEDI_8254
tristate
- depends on HAS_IOPORT

config COMEDI_8255
tristate

config COMEDI_8255_SA
tristate "Standalone 8255 support"
- depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
···
called kcomedilib.

config COMEDI_AMPLC_DIO200
- depends on COMEDI_8254
+ select COMEDI_8254
tristate

config COMEDI_AMPLC_PC236
···

config COMEDI_DAS08
tristate
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255

config COMEDI_ISADMA
···

config COMEDI_NI_LABPC
tristate
- depends on HAS_IOPORT
- depends on COMEDI_8254
+ select COMEDI_8254
select COMEDI_8255

config COMEDI_NI_LABPC_ISADMA
+1 -1 drivers/firewire/core-device.c
+1 -1 drivers/firewire/core-topology.c
+1 -1 drivers/firmware/efi/libstub/unaccepted_memory.c
···
bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);

- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*unaccepted_table) + bitmap_size,
(void **)&unaccepted_table);
if (status != EFI_SUCCESS) {
drivers/genpd/Makefile → drivers/pmdomain/Makefile
drivers/genpd/actions/Makefile → drivers/pmdomain/actions/Makefile
drivers/genpd/actions/owl-sps-helper.c → drivers/pmdomain/actions/owl-sps-helper.c
drivers/genpd/actions/owl-sps.c → drivers/pmdomain/actions/owl-sps.c
drivers/genpd/amlogic/Makefile → drivers/pmdomain/amlogic/Makefile
drivers/genpd/amlogic/meson-ee-pwrc.c → drivers/pmdomain/amlogic/meson-ee-pwrc.c
drivers/genpd/amlogic/meson-gx-pwrc-vpu.c → drivers/pmdomain/amlogic/meson-gx-pwrc-vpu.c
drivers/genpd/amlogic/meson-secure-pwrc.c → drivers/pmdomain/amlogic/meson-secure-pwrc.c
drivers/genpd/apple/Makefile → drivers/pmdomain/apple/Makefile
drivers/genpd/apple/pmgr-pwrstate.c → drivers/pmdomain/apple/pmgr-pwrstate.c
drivers/genpd/bcm/Makefile → drivers/pmdomain/bcm/Makefile
drivers/genpd/bcm/bcm-pmb.c → drivers/pmdomain/bcm/bcm-pmb.c
drivers/genpd/bcm/bcm2835-power.c → drivers/pmdomain/bcm/bcm2835-power.c
drivers/genpd/bcm/bcm63xx-power.c → drivers/pmdomain/bcm/bcm63xx-power.c
drivers/genpd/bcm/raspberrypi-power.c → drivers/pmdomain/bcm/raspberrypi-power.c
drivers/genpd/imx/Makefile → drivers/pmdomain/imx/Makefile
drivers/genpd/imx/gpc.c → drivers/pmdomain/imx/gpc.c
drivers/genpd/imx/gpcv2.c → drivers/pmdomain/imx/gpcv2.c
drivers/genpd/imx/imx8m-blk-ctrl.c → drivers/pmdomain/imx/imx8m-blk-ctrl.c
drivers/genpd/imx/imx8mp-blk-ctrl.c → drivers/pmdomain/imx/imx8mp-blk-ctrl.c
drivers/genpd/imx/imx93-blk-ctrl.c → drivers/pmdomain/imx/imx93-blk-ctrl.c
drivers/genpd/imx/imx93-pd.c → drivers/pmdomain/imx/imx93-pd.c
drivers/genpd/imx/scu-pd.c → drivers/pmdomain/imx/scu-pd.c
drivers/genpd/mediatek/Makefile → drivers/pmdomain/mediatek/Makefile
drivers/genpd/mediatek/mt6795-pm-domains.h → drivers/pmdomain/mediatek/mt6795-pm-domains.h
drivers/genpd/mediatek/mt8167-pm-domains.h → drivers/pmdomain/mediatek/mt8167-pm-domains.h
drivers/genpd/mediatek/mt8173-pm-domains.h → drivers/pmdomain/mediatek/mt8173-pm-domains.h
drivers/genpd/mediatek/mt8183-pm-domains.h → drivers/pmdomain/mediatek/mt8183-pm-domains.h
drivers/genpd/mediatek/mt8186-pm-domains.h → drivers/pmdomain/mediatek/mt8186-pm-domains.h
drivers/genpd/mediatek/mt8188-pm-domains.h → drivers/pmdomain/mediatek/mt8188-pm-domains.h
drivers/genpd/mediatek/mt8192-pm-domains.h → drivers/pmdomain/mediatek/mt8192-pm-domains.h
drivers/genpd/mediatek/mt8195-pm-domains.h → drivers/pmdomain/mediatek/mt8195-pm-domains.h
drivers/genpd/mediatek/mtk-pm-domains.c → drivers/pmdomain/mediatek/mtk-pm-domains.c
drivers/genpd/mediatek/mtk-pm-domains.h → drivers/pmdomain/mediatek/mtk-pm-domains.h
drivers/genpd/mediatek/mtk-scpsys.c → drivers/pmdomain/mediatek/mtk-scpsys.c
drivers/genpd/qcom/Makefile → drivers/pmdomain/qcom/Makefile
drivers/genpd/qcom/cpr.c → drivers/pmdomain/qcom/cpr.c
drivers/genpd/qcom/rpmhpd.c → drivers/pmdomain/qcom/rpmhpd.c
drivers/genpd/qcom/rpmpd.c → drivers/pmdomain/qcom/rpmpd.c
drivers/genpd/renesas/Makefile → drivers/pmdomain/renesas/Makefile
drivers/genpd/renesas/r8a7742-sysc.c → drivers/pmdomain/renesas/r8a7742-sysc.c
drivers/genpd/renesas/r8a7743-sysc.c → drivers/pmdomain/renesas/r8a7743-sysc.c
drivers/genpd/renesas/r8a7745-sysc.c → drivers/pmdomain/renesas/r8a7745-sysc.c
drivers/genpd/renesas/r8a77470-sysc.c → drivers/pmdomain/renesas/r8a77470-sysc.c
drivers/genpd/renesas/r8a774a1-sysc.c → drivers/pmdomain/renesas/r8a774a1-sysc.c
drivers/genpd/renesas/r8a774b1-sysc.c → drivers/pmdomain/renesas/r8a774b1-sysc.c
drivers/genpd/renesas/r8a774c0-sysc.c → drivers/pmdomain/renesas/r8a774c0-sysc.c
drivers/genpd/renesas/r8a774e1-sysc.c → drivers/pmdomain/renesas/r8a774e1-sysc.c
drivers/genpd/renesas/r8a7779-sysc.c → drivers/pmdomain/renesas/r8a7779-sysc.c
drivers/genpd/renesas/r8a7790-sysc.c → drivers/pmdomain/renesas/r8a7790-sysc.c
drivers/genpd/renesas/r8a7791-sysc.c → drivers/pmdomain/renesas/r8a7791-sysc.c
drivers/genpd/renesas/r8a7792-sysc.c → drivers/pmdomain/renesas/r8a7792-sysc.c
drivers/genpd/renesas/r8a7794-sysc.c → drivers/pmdomain/renesas/r8a7794-sysc.c
drivers/genpd/renesas/r8a7795-sysc.c → drivers/pmdomain/renesas/r8a7795-sysc.c
drivers/genpd/renesas/r8a7796-sysc.c → drivers/pmdomain/renesas/r8a7796-sysc.c
drivers/genpd/renesas/r8a77965-sysc.c → drivers/pmdomain/renesas/r8a77965-sysc.c
drivers/genpd/renesas/r8a77970-sysc.c → drivers/pmdomain/renesas/r8a77970-sysc.c
drivers/genpd/renesas/r8a77980-sysc.c → drivers/pmdomain/renesas/r8a77980-sysc.c
drivers/genpd/renesas/r8a77990-sysc.c → drivers/pmdomain/renesas/r8a77990-sysc.c
drivers/genpd/renesas/r8a77995-sysc.c → drivers/pmdomain/renesas/r8a77995-sysc.c
drivers/genpd/renesas/r8a779a0-sysc.c → drivers/pmdomain/renesas/r8a779a0-sysc.c
drivers/genpd/renesas/r8a779f0-sysc.c → drivers/pmdomain/renesas/r8a779f0-sysc.c
drivers/genpd/renesas/r8a779g0-sysc.c → drivers/pmdomain/renesas/r8a779g0-sysc.c
drivers/genpd/renesas/rcar-gen4-sysc.c → drivers/pmdomain/renesas/rcar-gen4-sysc.c
drivers/genpd/renesas/rcar-gen4-sysc.h → drivers/pmdomain/renesas/rcar-gen4-sysc.h
drivers/genpd/renesas/rcar-sysc.c → drivers/pmdomain/renesas/rcar-sysc.c
drivers/genpd/renesas/rcar-sysc.h → drivers/pmdomain/renesas/rcar-sysc.h
drivers/genpd/renesas/rmobile-sysc.c → drivers/pmdomain/renesas/rmobile-sysc.c
drivers/genpd/rockchip/Makefile → drivers/pmdomain/rockchip/Makefile
drivers/genpd/rockchip/pm-domains.c → drivers/pmdomain/rockchip/pm-domains.c
drivers/genpd/samsung/Makefile → drivers/pmdomain/samsung/Makefile
drivers/genpd/samsung/exynos-pm-domains.c → drivers/pmdomain/samsung/exynos-pm-domains.c
drivers/genpd/st/Makefile → drivers/pmdomain/st/Makefile
drivers/genpd/st/ste-ux500-pm-domain.c → drivers/pmdomain/st/ste-ux500-pm-domain.c
drivers/genpd/starfive/Makefile → drivers/pmdomain/starfive/Makefile
drivers/genpd/starfive/jh71xx-pmu.c → drivers/pmdomain/starfive/jh71xx-pmu.c
drivers/genpd/sunxi/Makefile → drivers/pmdomain/sunxi/Makefile
drivers/genpd/sunxi/sun20i-ppu.c → drivers/pmdomain/sunxi/sun20i-ppu.c
drivers/genpd/tegra/Makefile → drivers/pmdomain/tegra/Makefile
drivers/genpd/tegra/powergate-bpmp.c → drivers/pmdomain/tegra/powergate-bpmp.c
drivers/genpd/ti/Makefile → drivers/pmdomain/ti/Makefile
drivers/genpd/ti/omap_prm.c → drivers/pmdomain/ti/omap_prm.c
drivers/pmdomain/ti/omap_prm.c
drivers/genpd/ti/omap_prm.c
drivers/pmdomain/ti/omap_prm.c
drivers/genpd/ti/ti_sci_pm_domains.c
drivers/pmdomain/ti/ti_sci_pm_domains.c
drivers/genpd/ti/ti_sci_pm_domains.c
drivers/pmdomain/ti/ti_sci_pm_domains.c
drivers/genpd/xilinx/Makefile
drivers/pmdomain/xilinx/Makefile
drivers/genpd/xilinx/Makefile
drivers/pmdomain/xilinx/Makefile
drivers/genpd/xilinx/zynqmp-pm-domains.c
drivers/pmdomain/xilinx/zynqmp-pm-domains.c
drivers/genpd/xilinx/zynqmp-pm-domains.c
drivers/pmdomain/xilinx/zynqmp-pm-domains.c
-1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
···
1293
1293
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
1294
1294
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
1295
1295
bool amdgpu_device_need_post(struct amdgpu_device *adev);
1296
-
bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
1297
1296
bool amdgpu_device_pcie_dynamic_switching_supported(void);
1298
1297
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
1299
1298
bool amdgpu_device_aspm_support_quirk(void);
+1
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
···
478
478
cu_info->cu_active_number = acu_info.number;
479
479
cu_info->cu_ao_mask = acu_info.ao_cu_mask;
480
480
memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
481
-
sizeof(acu_info.bitmap));
481
+
sizeof(cu_info->cu_bitmap));
482
482
cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
483
483
cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
484
484
cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+1
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+1
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
+2
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
···
1103
1103
uint32_t wait_times,
1104
1104
uint32_t grace_period,
1105
1105
uint32_t *reg_offset,
1106
-
uint32_t *reg_data,
1107
-
uint32_t inst)
1106
+
uint32_t *reg_data)
1108
1107
{
1109
1108
*reg_data = wait_times;
1110
1109
···
1119
1120
SCH_WAVE,
1120
1121
grace_period);
1121
1122
1122
-
*reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
1123
-
mmCP_IQ_WAIT_TIME2);
1123
+
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
1124
1124
}
1125
1125
1126
1126
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
+1
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
-26
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
1245
1245
}
1246
1246
1247
1247
/*
1248
-
* On APUs with >= 64GB white flickering has been observed w/ SG enabled.
1249
-
* Disable S/G on such systems until we have a proper fix.
1250
-
* https://gitlab.freedesktop.org/drm/amd/-/issues/2354
1251
-
* https://gitlab.freedesktop.org/drm/amd/-/issues/2735
1252
-
*/
1253
-
bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
1254
-
{
1255
-
switch (amdgpu_sg_display) {
1256
-
case -1:
1257
-
break;
1258
-
case 0:
1259
-
return false;
1260
-
case 1:
1261
-
return true;
1262
-
default:
1263
-
return false;
1264
-
}
1265
-
if ((totalram_pages() << (PAGE_SHIFT - 10)) +
1266
-
(adev->gmc.real_vram_size / 1024) >= 64000000) {
1267
-
DRM_WARN("Disabling S/G due to >=64GB RAM\n");
1268
-
return false;
1269
-
}
1270
-
return true;
1271
-
}
1272
-
1273
-
/*
1274
1248
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
1275
1249
* speed switching. Until we have confirmation from Intel that a specific host
1276
1250
* supports it, it's safer that we keep it disabled for all.
+2
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
···
43
43
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
44
44
45
45
#define AMDGPU_MAX_GC_INSTANCES 8
46
+
#define KGD_MAX_QUEUES 128
46
47
47
48
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
48
49
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
···
258
257
uint32_t number;
259
258
uint32_t ao_cu_mask;
260
259
uint32_t ao_cu_bitmap[4][4];
261
-
uint32_t bitmap[4][4];
260
+
uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
262
261
};
263
262
264
263
struct amdgpu_gfx_ras {
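With cu_info bitmaps now carrying one 4x4 plane per GC instance, the related memcpy() fixes in amdgpu_amdkfd.c above and amdgpu_kms.c below size the copy by the destination buffer. A minimal, self-contained sketch of that defensive pattern follows; the struct and constant names are illustrative stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GC_INSTANCES 8                  /* stands in for AMDGPU_MAX_GC_INSTANCES */

struct internal_cu_info {               /* driver-internal: grew a per-instance dimension */
    uint32_t bitmap[GC_INSTANCES][4][4];
};

struct exported_cu_info {               /* user-visible: still one 4x4 plane */
    uint32_t cu_bitmap[4][4];
};

int main(void)
{
    struct internal_cu_info src = { .bitmap[0][0][0] = 0xff };
    struct exported_cu_info dst;

    /* bound the copy by the destination so the larger source cannot overflow it */
    memcpy(dst.cu_bitmap, src.bitmap, sizeof(dst.cu_bitmap));

    printf("copied %zu of %zu source bytes\n",
           sizeof(dst.cu_bitmap), sizeof(src.bitmap));
    return 0;
}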
+12
-7
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
···
839
839
memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
840
840
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
841
841
memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
842
-
sizeof(adev->gfx.cu_info.bitmap));
842
+
sizeof(dev_info->cu_bitmap));
843
843
dev_info->vram_type = adev->gmc.vram_type;
844
844
dev_info->vram_bit_width = adev->gmc.vram_width;
845
845
dev_info->vce_harvest_config = adev->vce.harvest_config;
···
940
940
struct atom_context *atom_context;
941
941
942
942
atom_context = adev->mode_info.atom_context;
943
-
memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
944
-
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
945
-
vbios_info.version = atom_context->version;
946
-
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
947
-
sizeof(atom_context->vbios_ver_str));
948
-
memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
943
+
if (atom_context) {
944
+
memcpy(vbios_info.name, atom_context->name,
945
+
sizeof(atom_context->name));
946
+
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
947
+
sizeof(atom_context->vbios_pn));
948
+
vbios_info.version = atom_context->version;
949
+
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
950
+
sizeof(atom_context->vbios_ver_str));
951
+
memcpy(vbios_info.date, atom_context->date,
952
+
sizeof(atom_context->date));
953
+
}
949
954
950
955
return copy_to_user(out, &vbios_info,
951
956
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+4
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
···
1052
1052
info->ce_count = obj->err_data.ce_count;
1053
1053
1054
1054
if (err_data.ce_count) {
1055
-
if (adev->smuio.funcs &&
1055
+
if (!adev->aid_mask &&
1056
+
adev->smuio.funcs &&
1056
1057
adev->smuio.funcs->get_socket_id &&
1057
1058
adev->smuio.funcs->get_die_id) {
1058
1059
dev_info(adev->dev, "socket: %d, die: %d "
···
1073
1072
}
1074
1073
}
1075
1074
if (err_data.ue_count) {
1076
-
if (adev->smuio.funcs &&
1075
+
if (!adev->aid_mask &&
1076
+
adev->smuio.funcs &&
1077
1077
adev->smuio.funcs->get_socket_id &&
1078
1078
adev->smuio.funcs->get_die_id) {
1079
1079
dev_info(adev->dev, "socket: %d, die: %d "
+1
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+1
-1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
···
9449
9449
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
9450
9450
adev, disable_masks[i * 2 + j]);
9451
9451
bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
9452
-
cu_info->bitmap[i][j] = bitmap;
9452
+
cu_info->bitmap[0][i][j] = bitmap;
9453
9453
9454
9454
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
9455
9455
if (bitmap & mask) {
+1
-1
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
···
6368
6368
* SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
6369
6369
* SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
6370
6370
*/
6371
-
cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
6371
+
cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
6372
6372
6373
6373
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6374
6374
if (bitmap & mask)
+1
-1
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
···
3577
3577
gfx_v6_0_set_user_cu_inactive_bitmap(
3578
3578
adev, disable_masks[i * 2 + j]);
3579
3579
bitmap = gfx_v6_0_get_cu_enabled(adev);
3580
-
cu_info->bitmap[i][j] = bitmap;
3580
+
cu_info->bitmap[0][i][j] = bitmap;
3581
3581
3582
3582
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
3583
3583
if (bitmap & mask) {
+1
-1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
···
5119
5119
gfx_v7_0_set_user_cu_inactive_bitmap(
5120
5120
adev, disable_masks[i * 2 + j]);
5121
5121
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5122
-
cu_info->bitmap[i][j] = bitmap;
5122
+
cu_info->bitmap[0][i][j] = bitmap;
5123
5123
5124
5124
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5125
5125
if (bitmap & mask) {
+1
-1
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
···
7121
7121
gfx_v8_0_set_user_cu_inactive_bitmap(
7122
7122
adev, disable_masks[i * 2 + j]);
7123
7123
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
7124
-
cu_info->bitmap[i][j] = bitmap;
7124
+
cu_info->bitmap[0][i][j] = bitmap;
7125
7125
7126
7126
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
7127
7127
if (bitmap & mask) {
+2
-2
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
···
1499
1499
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
1500
1500
1501
1501
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
1502
-
if (cu_info->bitmap[i][j] & mask) {
1502
+
if (cu_info->bitmap[0][i][j] & mask) {
1503
1503
if (counter == pg_always_on_cu_num)
1504
1504
WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1505
1505
if (counter < always_on_cu_num)
···
7233
7233
* SE6,SH0 --> bitmap[2][1]
7234
7234
* SE7,SH0 --> bitmap[3][1]
7235
7235
*/
7236
-
cu_info->bitmap[i % 4][j + i / 4] = bitmap;
7236
+
cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
7237
7237
7238
7238
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
7239
7239
if (bitmap & mask) {
+32
-40
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
···
4259
4259
}
4260
4260
4261
4261
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4262
-
u32 bitmap)
4262
+
u32 bitmap, int xcc_id)
4263
4263
{
4264
4264
u32 data;
4265
4265
···
4269
4269
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4270
4270
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4271
4271
4272
-
WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
4272
+
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4273
4273
}
4274
4274
4275
-
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
4275
+
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4276
4276
{
4277
4277
u32 data, mask;
4278
4278
4279
-
data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
4280
-
data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
4279
+
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4280
+
data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4281
4281
4282
4282
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4283
4283
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
···
4290
4290
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4291
4291
struct amdgpu_cu_info *cu_info)
4292
4292
{
4293
-
int i, j, k, counter, active_cu_number = 0;
4293
+
int i, j, k, counter, xcc_id, active_cu_number = 0;
4294
4294
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4295
4295
unsigned disable_masks[4 * 4];
4296
4296
···
4309
4309
adev->gfx.config.max_sh_per_se);
4310
4310
4311
4311
mutex_lock(&adev->grbm_idx_mutex);
4312
-
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4313
-
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4314
-
mask = 1;
4315
-
ao_bitmap = 0;
4316
-
counter = 0;
4317
-
gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
4318
-
gfx_v9_4_3_set_user_cu_inactive_bitmap(
4319
-
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
4320
-
bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
4312
+
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4313
+
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4314
+
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4315
+
mask = 1;
4316
+
ao_bitmap = 0;
4317
+
counter = 0;
4318
+
gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4319
+
gfx_v9_4_3_set_user_cu_inactive_bitmap(
4320
+
adev,
4321
+
disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4322
+
xcc_id);
4323
+
bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4321
4324
4322
-
/*
4323
-
* The bitmap(and ao_cu_bitmap) in cu_info structure is
4324
-
* 4x4 size array, and it's usually suitable for Vega
4325
-
* ASICs which has 4*2 SE/SH layout.
4326
-
* But for Arcturus, SE/SH layout is changed to 8*1.
4327
-
* To mostly reduce the impact, we make it compatible
4328
-
* with current bitmap array as below:
4329
-
* SE4,SH0 --> bitmap[0][1]
4330
-
* SE5,SH0 --> bitmap[1][1]
4331
-
* SE6,SH0 --> bitmap[2][1]
4332
-
* SE7,SH0 --> bitmap[3][1]
4333
-
*/
4334
-
cu_info->bitmap[i % 4][j + i / 4] = bitmap;
4325
+
cu_info->bitmap[xcc_id][i][j] = bitmap;
4335
4326
4336
-
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4337
-
if (bitmap & mask) {
4338
-
if (counter < adev->gfx.config.max_cu_per_sh)
4339
-
ao_bitmap |= mask;
4340
-
counter++;
4327
+
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4328
+
if (bitmap & mask) {
4329
+
if (counter < adev->gfx.config.max_cu_per_sh)
4330
+
ao_bitmap |= mask;
4331
+
counter++;
4332
+
}
4333
+
mask <<= 1;
4341
4334
}
4342
-
mask <<= 1;
4335
+
active_cu_number += counter;
4336
+
if (i < 2 && j < 2)
4337
+
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4338
+
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4343
4339
}
4344
-
active_cu_number += counter;
4345
-
if (i < 2 && j < 2)
4346
-
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4347
-
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
4348
4340
}
4341
+
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4342
+
xcc_id);
4349
4343
}
4350
-
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4351
-
0);
4352
4344
mutex_unlock(&adev->grbm_idx_mutex);
4353
4345
4354
4346
cu_info->number = active_cu_number;
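The gfx_v9_4_3_get_cu_info() rework above adds an outer loop over XCC instances and indexes cu_info->bitmap by that instance. A standalone sketch of the same accumulation pattern; the instance count and seed bitmaps are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define NUM_XCC_MODEL 4
#define MAX_SE        4
#define MAX_SH        4

static unsigned int popcount32(uint32_t v)
{
    unsigned int n = 0;

    for (; v; v &= v - 1)       /* clear the lowest set bit each round */
        n++;
    return n;
}

int main(void)
{
    uint32_t bitmap[NUM_XCC_MODEL][MAX_SE][MAX_SH] = {
        [0][0][0] = 0xf, [1][0][0] = 0x3,
    };
    unsigned int active = 0;

    /* every XCC contributes its own SE/SH planes to the total CU count */
    for (int xcc = 0; xcc < NUM_XCC_MODEL; xcc++)
        for (int se = 0; se < MAX_SE; se++)
            for (int sh = 0; sh < MAX_SH; sh++)
                active += popcount32(bitmap[xcc][se][sh]);

    printf("active CUs: %u\n", active);
    return 0;
}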
+3
drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
···
345
345
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
346
346
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
347
347
}
348
+
if (amdgpu_sriov_vf(adev))
349
+
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
350
+
regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
348
351
}
349
352
350
353
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
+1
-1
drivers/gpu/drm/amd/amdgpu/soc21.c
···
766
766
* for the purpose of expose those registers
767
767
* to process space
768
768
*/
769
-
if (adev->nbio.funcs->remap_hdp_registers)
769
+
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
770
770
adev->nbio.funcs->remap_hdp_registers(adev);
771
771
/* enable the doorbell aperture */
772
772
adev->nbio.funcs->enable_doorbell_aperture(adev, true);
+2
-1
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
···
2087
2087
2088
2088
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
2089
2089
cu->num_simd_per_cu = cu_info.simd_per_cu;
2090
-
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
2090
+
cu->num_simd_cores = cu_info.simd_per_cu *
2091
+
(cu_info.cu_active_number / kdev->kfd->num_nodes);
2091
2092
cu->max_waves_simd = cu_info.max_waves_per_simd;
2092
2093
2093
2094
cu->wave_front_size = cu_info.wave_front_size;
+4
drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+1
-2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+2
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
···
162
162
return NULL;
163
163
164
164
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
165
+
inx *= 2;
165
166
166
167
pr_debug("Get kernel queue doorbell\n"
167
168
" doorbell offset == 0x%08X\n"
···
177
176
unsigned int inx;
178
177
179
178
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
179
+
inx /= 2;
180
180
181
181
mutex_lock(&kfd->doorbell_mutex);
182
182
__clear_bit(inx, kfd->doorbell_bitmap);
+26
-8
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
···
97
97
98
98
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
99
99
const uint32_t *cu_mask, uint32_t cu_mask_count,
100
-
uint32_t *se_mask)
100
+
uint32_t *se_mask, uint32_t inst)
101
101
{
102
102
struct kfd_cu_info cu_info;
103
103
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
104
104
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
105
105
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
106
-
int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
106
+
int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
107
+
uint32_t cu_active_per_node;
108
+
int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
109
+
int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
107
110
108
111
amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
109
112
110
-
if (cu_mask_count > cu_info.cu_active_number)
111
-
cu_mask_count = cu_info.cu_active_number;
113
+
cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
114
+
if (cu_mask_count > cu_active_per_node)
115
+
cu_mask_count = cu_active_per_node;
112
116
113
117
/* Exceeding these bounds corrupts the stack and indicates a coding error.
114
118
* Returning with no CU's enabled will hang the queue, which should be
···
145
141
for (se = 0; se < cu_info.num_shader_engines; se++)
146
142
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
147
143
cu_per_sh[se][sh] = hweight32(
148
-
cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
144
+
cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
145
+
cu_bitmap_sh_mul]);
149
146
150
147
/* Symmetrically map cu_mask to all SEs & SHs:
151
148
* se_mask programs up to 2 SH in the upper and lower 16 bits.
···
169
164
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
170
165
* ...
171
166
*
167
+
* For GFX 9.4.3, the following code only looks at a
168
+
* subset of the cu_mask corresponding to the inst parameter.
169
+
* If we have n XCCs under one GPU node
170
+
* cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
171
+
* cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
172
+
* ..
173
+
* cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
174
+
* cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
175
+
*
176
+
* For example, if there are 6 XCCs under 1 KFD node, this code
177
+
* running for each inst, will look at the bits as:
178
+
* inst, inst + 6, inst + 12...
179
+
*
172
180
* First ensure all CUs are disabled, then enable user specified CUs.
173
181
*/
174
182
for (i = 0; i < cu_info.num_shader_engines; i++)
175
183
se_mask[i] = 0;
176
184
177
-
i = 0;
178
-
for (cu = 0; cu < 16; cu += inc) {
185
+
i = inst;
186
+
for (cu = 0; cu < 16; cu += cu_inc) {
179
187
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
180
188
for (se = 0; se < cu_info.num_shader_engines; se++) {
181
189
if (cu_per_sh[se][sh] > cu) {
182
190
if (cu_mask[i / 32] & (en_mask << (i % 32)))
183
191
se_mask[se] |= en_mask << (cu + sh * 16);
184
192
i += inc;
185
-
if (i == cu_mask_count)
193
+
if (i >= cu_mask_count)
186
194
return;
187
195
}
188
196
}
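The comment added above explains that on GFX 9.4.3 each invocation only looks at the cu_mask bits belonging to its instance, striding by the number of XCCs (in CU mode, one bit per CU). A small standalone program that prints which bit indices each instance would consume; the XCC count and mask width are invented for the example:

#include <stdio.h>

int main(void)
{
    const int num_xcc = 6;          /* e.g. 6 XCCs under one KFD node */
    const int cu_mask_bits = 32;    /* only look at the first dword here */

    for (int inst = 0; inst < num_xcc; inst++) {
        printf("inst %d uses bits:", inst);
        for (int i = inst; i < cu_mask_bits; i += num_xcc)
            printf(" %d", i);
        printf("\n");
    }
    return 0;
}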
+1
-1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
···
138
138
139
139
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
140
140
const uint32_t *cu_mask, uint32_t cu_mask_count,
141
-
uint32_t *se_mask);
141
+
uint32_t *se_mask, uint32_t inst);
142
142
143
143
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
144
144
uint32_t pipe_id, uint32_t queue_id,
+1
-1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+1
-1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+42
-1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
···
71
71
}
72
72
73
73
mqd_symmetrically_map_cu_mask(mm,
74
-
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
74
+
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
75
75
76
76
m->compute_static_thread_mgmt_se0 = se_mask[0];
77
77
m->compute_static_thread_mgmt_se1 = se_mask[1];
···
321
321
return 0;
322
322
}
323
323
324
+
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
325
+
{
326
+
struct v11_compute_mqd *m;
327
+
328
+
m = get_mqd(mqd);
329
+
330
+
memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
331
+
}
332
+
333
+
static void restore_mqd(struct mqd_manager *mm, void **mqd,
334
+
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
335
+
struct queue_properties *qp,
336
+
const void *mqd_src,
337
+
const void *ctl_stack_src, const u32 ctl_stack_size)
338
+
{
339
+
uint64_t addr;
340
+
struct v11_compute_mqd *m;
341
+
342
+
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
343
+
addr = mqd_mem_obj->gpu_addr;
344
+
345
+
memcpy(m, mqd_src, sizeof(*m));
346
+
347
+
*mqd = m;
348
+
if (gart_addr)
349
+
*gart_addr = addr;
350
+
351
+
m->cp_hqd_pq_doorbell_control =
352
+
qp->doorbell_off <<
353
+
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
354
+
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
355
+
m->cp_hqd_pq_doorbell_control);
356
+
357
+
qp->is_active = 0;
358
+
}
359
+
360
+
324
361
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
325
362
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
326
363
struct queue_properties *q)
···
495
458
mqd->mqd_size = sizeof(struct v11_compute_mqd);
496
459
mqd->get_wave_state = get_wave_state;
497
460
mqd->mqd_stride = kfd_mqd_stride;
461
+
mqd->checkpoint_mqd = checkpoint_mqd;
462
+
mqd->restore_mqd = restore_mqd;
498
463
#if defined(CONFIG_DEBUG_FS)
499
464
mqd->debugfs_show_mqd = debugfs_show_mqd;
500
465
#endif
···
541
502
mqd->update_mqd = update_mqd_sdma;
542
503
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
543
504
mqd->is_occupied = kfd_is_occupied_sdma;
505
+
mqd->checkpoint_mqd = checkpoint_mqd;
506
+
mqd->restore_mqd = restore_mqd;
544
507
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
545
508
mqd->mqd_stride = kfd_mqd_stride;
546
509
#if defined(CONFIG_DEBUG_FS)
+28
-16
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
···
60
60
}
61
61
62
62
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
63
-
struct mqd_update_info *minfo)
63
+
struct mqd_update_info *minfo, uint32_t inst)
64
64
{
65
65
struct v9_mqd *m;
66
66
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
···
69
69
return;
70
70
71
71
mqd_symmetrically_map_cu_mask(mm,
72
-
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
72
+
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
73
73
74
74
m = get_mqd(mqd);
75
+
75
76
m->compute_static_thread_mgmt_se0 = se_mask[0];
76
77
m->compute_static_thread_mgmt_se1 = se_mask[1];
77
78
m->compute_static_thread_mgmt_se2 = se_mask[2];
78
79
m->compute_static_thread_mgmt_se3 = se_mask[3];
79
-
m->compute_static_thread_mgmt_se4 = se_mask[4];
80
-
m->compute_static_thread_mgmt_se5 = se_mask[5];
81
-
m->compute_static_thread_mgmt_se6 = se_mask[6];
82
-
m->compute_static_thread_mgmt_se7 = se_mask[7];
80
+
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
81
+
m->compute_static_thread_mgmt_se4 = se_mask[4];
82
+
m->compute_static_thread_mgmt_se5 = se_mask[5];
83
+
m->compute_static_thread_mgmt_se6 = se_mask[6];
84
+
m->compute_static_thread_mgmt_se7 = se_mask[7];
83
85
84
-
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
85
-
m->compute_static_thread_mgmt_se0,
86
-
m->compute_static_thread_mgmt_se1,
87
-
m->compute_static_thread_mgmt_se2,
88
-
m->compute_static_thread_mgmt_se3,
89
-
m->compute_static_thread_mgmt_se4,
90
-
m->compute_static_thread_mgmt_se5,
91
-
m->compute_static_thread_mgmt_se6,
92
-
m->compute_static_thread_mgmt_se7);
86
+
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
87
+
m->compute_static_thread_mgmt_se0,
88
+
m->compute_static_thread_mgmt_se1,
89
+
m->compute_static_thread_mgmt_se2,
90
+
m->compute_static_thread_mgmt_se3,
91
+
m->compute_static_thread_mgmt_se4,
92
+
m->compute_static_thread_mgmt_se5,
93
+
m->compute_static_thread_mgmt_se6,
94
+
m->compute_static_thread_mgmt_se7);
95
+
} else {
96
+
pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
97
+
inst, m->compute_static_thread_mgmt_se0,
98
+
m->compute_static_thread_mgmt_se1,
99
+
m->compute_static_thread_mgmt_se2,
100
+
m->compute_static_thread_mgmt_se3);
101
+
}
93
102
}
94
103
95
104
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
···
299
290
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
300
291
m->cp_hqd_ctx_save_control = 0;
301
292
302
-
update_cu_mask(mm, mqd, minfo);
293
+
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
294
+
update_cu_mask(mm, mqd, minfo, 0);
303
295
set_priority(m, q);
304
296
305
297
q->is_active = QUEUE_IS_ACTIVE(*q);
···
685
675
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
686
676
m = get_mqd(mqd + size * xcc);
687
677
update_mqd(mm, m, q, minfo);
678
+
679
+
update_cu_mask(mm, mqd, minfo, xcc);
688
680
689
681
if (q->format == KFD_QUEUE_FORMAT_AQL) {
690
682
switch (xcc) {
+1
-1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+1
-2
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+1
-2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
···
1466
1466
1467
1467
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
1468
1468
{
1469
-
return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
1470
-
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
1469
+
return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
1471
1470
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
1472
1471
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
1473
1472
}
+42
-33
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
···
450
450
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
451
451
dev->node_props.cpu_cores_count);
452
452
sysfs_show_32bit_prop(buffer, offs, "simd_count",
453
-
dev->gpu ? (dev->node_props.simd_count *
454
-
NUM_XCC(dev->gpu->xcc_mask)) : 0);
453
+
dev->gpu ? dev->node_props.simd_count : 0);
455
454
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
456
455
dev->node_props.mem_banks_count);
457
456
sysfs_show_32bit_prop(buffer, offs, "caches_count",
···
1596
1597
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
1597
1598
struct kfd_gpu_cache_info *pcache_info,
1598
1599
struct kfd_cu_info *cu_info,
1599
-
int cache_type, unsigned int cu_processor_id)
1600
+
int cache_type, unsigned int cu_processor_id,
1601
+
struct kfd_node *knode)
1600
1602
{
1601
1603
unsigned int cu_sibling_map_mask;
1602
1604
int first_active_cu;
1603
-
int i, j, k;
1605
+
int i, j, k, xcc, start, end;
1604
1606
struct kfd_cache_properties *pcache = NULL;
1605
1607
1606
-
cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1608
+
start = ffs(knode->xcc_mask) - 1;
1609
+
end = start + NUM_XCC(knode->xcc_mask);
1610
+
cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
1607
1611
cu_sibling_map_mask &=
1608
1612
((1 << pcache_info[cache_type].num_cu_shared) - 1);
1609
1613
first_active_cu = ffs(cu_sibling_map_mask);
···
1641
1639
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
1642
1640
k = 0;
1643
1641
1644
-
for (i = 0; i < cu_info->num_shader_engines; i++) {
1645
-
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1646
-
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1647
-
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1648
-
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1649
-
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1650
-
k += 4;
1642
+
for (xcc = start; xcc < end; xcc++) {
1643
+
for (i = 0; i < cu_info->num_shader_engines; i++) {
1644
+
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1645
+
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1646
+
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1647
+
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1648
+
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1649
+
k += 4;
1651
1650
1652
-
cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
1653
-
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1651
+
cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
1652
+
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1653
+
}
1654
1654
}
1655
1655
}
1656
1656
pcache->sibling_map_size = k;
···
1670
1666
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
1671
1667
{
1672
1668
struct kfd_gpu_cache_info *pcache_info = NULL;
1673
-
int i, j, k;
1669
+
int i, j, k, xcc, start, end;
1674
1670
int ct = 0;
1675
1671
unsigned int cu_processor_id;
1676
1672
int ret;
···
1704
1700
* then it will consider only one CU from
1705
1701
* the shared unit
1706
1702
*/
1703
+
start = ffs(kdev->xcc_mask) - 1;
1704
+
end = start + NUM_XCC(kdev->xcc_mask);
1705
+
1707
1706
for (ct = 0; ct < num_of_cache_types; ct++) {
1708
1707
cu_processor_id = gpu_processor_id;
1709
1708
if (pcache_info[ct].cache_level == 1) {
1710
-
for (i = 0; i < pcu_info->num_shader_engines; i++) {
1711
-
for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
1712
-
for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
1709
+
for (xcc = start; xcc < end; xcc++) {
1710
+
for (i = 0; i < pcu_info->num_shader_engines; i++) {
1711
+
for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
1712
+
for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
1713
1713
1714
-
ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
1715
-
pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
1714
+
ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
1715
+
pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
1716
1716
cu_processor_id, k);
1717
1717
1718
-
if (ret < 0)
1719
-
break;
1718
+
if (ret < 0)
1719
+
break;
1720
1720
1721
-
if (!ret) {
1722
-
num_of_entries++;
1723
-
list_add_tail(&props_ext->list, &dev->cache_props);
1721
+
if (!ret) {
1722
+
num_of_entries++;
1723
+
list_add_tail(&props_ext->list, &dev->cache_props);
1724
+
}
1725
+
1726
+
/* Move to next CU block */
1727
+
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1728
+
pcu_info->num_cu_per_sh) ?
1729
+
pcache_info[ct].num_cu_shared :
1730
+
(pcu_info->num_cu_per_sh - k);
1731
+
cu_processor_id += num_cu_shared;
1724
1732
}
1725
-
1726
-
/* Move to next CU block */
1727
-
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1728
-
pcu_info->num_cu_per_sh) ?
1729
-
pcache_info[ct].num_cu_shared :
1730
-
(pcu_info->num_cu_per_sh - k);
1731
-
cu_processor_id += num_cu_shared;
1732
1733
}
1733
1734
}
1734
1735
}
1735
1736
} else {
1736
1737
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
1737
-
pcu_info, ct, cu_processor_id);
1738
+
pcu_info, ct, cu_processor_id, kdev);
1738
1739
1739
1740
if (ret < 0)
1740
1741
break;
+1
-1
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+94
-21
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
1274
1274
1275
1275
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1276
1276
1277
-
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1278
-
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1279
-
page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1280
-
page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1281
-
page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1277
+
page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1278
+
AMDGPU_GPU_PAGE_SHIFT);
1279
+
page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1280
+
AMDGPU_GPU_PAGE_SHIFT);
1281
+
page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1282
+
AMDGPU_GPU_PAGE_SHIFT);
1283
+
page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1284
+
AMDGPU_GPU_PAGE_SHIFT);
1285
+
page_table_base.high_part = upper_32_bits(pt_base);
1282
1286
page_table_base.low_part = lower_32_bits(pt_base);
1283
1287
1284
1288
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
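The hunk above drops the open-coded shift-and-mask arithmetic in favour of shifting the GART addresses by the GPU page shift and then taking the 32-bit halves. A self-contained model of that split; the page shift, helpers and address are stand-ins for AMDGPU_GPU_PAGE_SHIFT, lower_32_bits()/upper_32_bits() and the real GART range:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t gart_start = 0x0000123456789000ULL;    /* made-up address */
    uint64_t pfn = gart_start >> GPU_PAGE_SHIFT;    /* page-shifted value */

    printf("low  part = 0x%08" PRIx32 "\n", lower_32(pfn));
    printf("high part = 0x%08" PRIx32 "\n", upper_32(pfn));
    return 0;
}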
···
1644
1640
}
1645
1641
break;
1646
1642
}
1647
-
if (init_data.flags.gpu_vm_support)
1648
-
init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
1643
+
if (init_data.flags.gpu_vm_support &&
1644
+
(amdgpu_sg_display == 0))
1645
+
init_data.flags.gpu_vm_support = false;
1649
1646
1650
1647
if (init_data.flags.gpu_vm_support)
1651
1648
adev->mode_info.gpu_vm_support = true;
···
2340
2335
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2341
2336
}
2342
2337
2338
+
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2339
+
{
2340
+
int ret;
2341
+
u8 guid[16];
2342
+
u64 tmp64;
2343
+
2344
+
mutex_lock(&mgr->lock);
2345
+
if (!mgr->mst_primary)
2346
+
goto out_fail;
2347
+
2348
+
if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2349
+
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2350
+
goto out_fail;
2351
+
}
2352
+
2353
+
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2354
+
DP_MST_EN |
2355
+
DP_UP_REQ_EN |
2356
+
DP_UPSTREAM_IS_SRC);
2357
+
if (ret < 0) {
2358
+
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2359
+
goto out_fail;
2360
+
}
2361
+
2362
+
/* Some hubs forget their guids after they resume */
2363
+
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2364
+
if (ret != 16) {
2365
+
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2366
+
goto out_fail;
2367
+
}
2368
+
2369
+
if (memchr_inv(guid, 0, 16) == NULL) {
2370
+
tmp64 = get_jiffies_64();
2371
+
memcpy(&guid[0], &tmp64, sizeof(u64));
2372
+
memcpy(&guid[8], &tmp64, sizeof(u64));
2373
+
2374
+
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2375
+
2376
+
if (ret != 16) {
2377
+
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2378
+
goto out_fail;
2379
+
}
2380
+
}
2381
+
2382
+
memcpy(mgr->mst_primary->guid, guid, 16);
2383
+
2384
+
out_fail:
2385
+
mutex_unlock(&mgr->lock);
2386
+
}
2387
+
2343
2388
static void s3_handle_mst(struct drm_device *dev, bool suspend)
2344
2389
{
2345
2390
struct amdgpu_dm_connector *aconnector;
2346
2391
struct drm_connector *connector;
2347
2392
struct drm_connector_list_iter iter;
2348
2393
struct drm_dp_mst_topology_mgr *mgr;
2349
-
int ret;
2350
-
bool need_hotplug = false;
2351
2394
2352
2395
drm_connector_list_iter_begin(dev, &iter);
2353
2396
drm_for_each_connector_iter(connector, &iter) {
···
2417
2364
if (!dp_is_lttpr_present(aconnector->dc_link))
2418
2365
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2419
2366
2420
-
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2421
-
if (ret < 0) {
2422
-
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2423
-
aconnector->dc_link);
2424
-
need_hotplug = true;
2425
-
}
2367
+
/* TODO: move resume_mst_branch_status() into drm mst resume again
2368
+
* once topology probing work is pulled out from mst resume into mst
2369
+
* resume 2nd step. mst resume 2nd step should be called after old
2370
+
* state getting restored (i.e. drm_atomic_helper_resume()).
2371
+
*/
2372
+
resume_mst_branch_status(mgr);
2426
2373
}
2427
2374
}
2428
2375
drm_connector_list_iter_end(&iter);
2429
-
2430
-
if (need_hotplug)
2431
-
drm_kms_helper_hotplug_event(dev);
2432
2376
}
2433
2377
2434
2378
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
···
2819
2769
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2820
2770
enum dc_connection_type new_connection_type = dc_connection_none;
2821
2771
struct dc_state *dc_state;
2822
-
int i, r, j;
2772
+
int i, r, j, ret;
2773
+
bool need_hotplug = false;
2823
2774
2824
2775
if (amdgpu_in_reset(adev)) {
2825
2776
dc_state = dm->cached_dc_state;
···
2918
2867
continue;
2919
2868
2920
2869
/*
2921
-
* this is the case when traversing through already created
2870
+
* this is the case when traversing through already created end sink
2922
2871
* MST connectors, should be skipped
2923
2872
*/
2924
2873
if (aconnector && aconnector->mst_root)
···
2977
2926
drm_atomic_helper_resume(ddev, dm->cached_state);
2978
2927
2979
2928
dm->cached_state = NULL;
2929
+
2930
+
/* Do mst topology probing after resuming cached state*/
2931
+
drm_connector_list_iter_begin(ddev, &iter);
2932
+
drm_for_each_connector_iter(connector, &iter) {
2933
+
aconnector = to_amdgpu_dm_connector(connector);
2934
+
if (aconnector->dc_link->type != dc_connection_mst_branch ||
2935
+
aconnector->mst_root)
2936
+
continue;
2937
+
2938
+
ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
2939
+
2940
+
if (ret < 0) {
2941
+
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2942
+
aconnector->dc_link);
2943
+
need_hotplug = true;
2944
+
}
2945
+
}
2946
+
drm_connector_list_iter_end(&iter);
2947
+
2948
+
if (need_hotplug)
2949
+
drm_kms_helper_hotplug_event(ddev);
2980
2950
2981
2951
amdgpu_dm_irq_resume_late(adev);
2982
2952
···
8145
8073
bundle->surface_updates[planes_count].plane_info =
8146
8074
&bundle->plane_infos[planes_count];
8147
8075
8148
-
if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8076
+
if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
8077
+
acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
8149
8078
fill_dc_dirty_rects(plane, old_plane_state,
8150
8079
new_plane_state, new_crtc_state,
8151
8080
&bundle->flip_addrs[planes_count],
+1
-1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+28
-7
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
···
169
169
/* Return first available DIG link encoder. */
170
170
static enum engine_id find_first_avail_link_enc(
171
171
const struct dc_context *ctx,
172
-
const struct dc_state *state)
172
+
const struct dc_state *state,
173
+
enum engine_id eng_id_requested)
173
174
{
174
175
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
175
176
int i;
177
+
178
+
if (eng_id_requested != ENGINE_ID_UNKNOWN) {
179
+
180
+
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
181
+
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
182
+
if (eng_id == eng_id_requested)
183
+
return eng_id;
184
+
}
185
+
}
186
+
187
+
eng_id = ENGINE_ID_UNKNOWN;
176
188
177
189
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
178
190
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
···
299
287
struct dc_stream_state *streams[],
300
288
uint8_t stream_count)
301
289
{
302
-
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
290
+
enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
303
291
int i;
304
292
int j;
305
293
···
389
377
* assigned to that endpoint.
390
378
*/
391
379
link_enc = get_link_enc_used_by_link(state, stream->link);
392
-
if (link_enc == NULL)
393
-
eng_id = find_first_avail_link_enc(stream->ctx, state);
380
+
if (link_enc == NULL) {
381
+
382
+
if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
383
+
stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
384
+
eng_id_req = stream->link->dpia_preferred_eng_id;
385
+
386
+
eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
387
+
}
394
388
else
395
389
eng_id = link_enc->preferred_engine;
396
390
···
420
402
DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
421
403
__func__,
422
404
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
423
-
assignment.ep_id.link_id.enum_id - 1,
405
+
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
406
+
assignment.ep_id.link_id.enum_id :
407
+
assignment.ep_id.link_id.enum_id - 1,
424
408
assignment.eng_id);
425
409
}
426
410
for (i = 0; i < MAX_PIPES; i++) {
···
433
413
DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
434
414
__func__,
435
415
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
436
-
assignment.ep_id.link_id.enum_id - 1,
416
+
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
417
+
assignment.ep_id.link_id.enum_id :
418
+
assignment.ep_id.link_id.enum_id - 1,
437
419
assignment.eng_id);
438
420
}
439
421
···
500
478
if (stream)
501
479
link = stream->link;
502
480
503
-
// dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
504
481
return link;
505
482
}
506
483
+1
drivers/gpu/drm/amd/display/dc/dc.h
+3
-1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
···
964
964
return;
965
965
}
966
966
967
-
if (link->panel_cntl) {
967
+
if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
968
+
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
969
+
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
968
970
bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
969
971
970
972
if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {
+23
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
···
1032
1032
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
1033
1033
};
1034
1034
1035
+
/* ========================================================== */
1036
+
1037
+
/*
1038
+
* DPIA index | Preferred Encoder | Host Router
1039
+
* 0 | C | 0
1040
+
* 1 | First Available | 0
1041
+
* 2 | D | 1
1042
+
* 3 | First Available | 1
1043
+
*/
1044
+
/* ========================================================== */
1045
+
static const enum engine_id dpia_to_preferred_enc_id_table[] = {
1046
+
ENGINE_ID_DIGC,
1047
+
ENGINE_ID_DIGC,
1048
+
ENGINE_ID_DIGD,
1049
+
ENGINE_ID_DIGD
1050
+
};
1051
+
1052
+
static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
1053
+
{
1054
+
return dpia_to_preferred_enc_id_table[dpia_index];
1055
+
}
1056
+
1035
1057
static struct dce_i2c_hw *dcn31_i2c_hw_create(
1036
1058
struct dc_context *ctx,
1037
1059
uint32_t inst)
···
1807
1785
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
1808
1786
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
1809
1787
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
1788
+
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
1810
1789
};
1811
1790
1812
1791
static struct clock_source *dcn30_clock_source_create(
+1
drivers/gpu/drm/amd/display/dc/inc/core_types.h
+4
drivers/gpu/drm/amd/display/dc/link/link_factory.c
···
791
791
/* Set dpia port index : 0 to number of dpia ports */
792
792
link->ddc_hw_inst = init_params->connector_index;
793
793
794
+
// Assign Dpia preferred eng_id
795
+
if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
796
+
link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
797
+
794
798
/* TODO: Create link encoder */
795
799
796
800
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
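Together with the dc_link_enc_cfg.c and dcn314_resource.c hunks earlier, this lets a resource pool publish a preferred DIG encoder per DPIA index, which encoder assignment then tries before falling back to the first available engine. A reduced, compilable model of that selection order; the enum values and availability table are illustrative only:

#include <stdio.h>

enum eng_id { ENG_UNKNOWN = -1, ENG_A, ENG_B, ENG_C, ENG_D };

static enum eng_id pick_engine(const enum eng_id *avail, int n,
                               enum eng_id requested)
{
    /* honour the preferred engine if it is still available */
    if (requested != ENG_UNKNOWN)
        for (int i = 0; i < n; i++)
            if (avail[i] == requested)
                return requested;

    /* otherwise fall back to the first available one */
    for (int i = 0; i < n; i++)
        if (avail[i] != ENG_UNKNOWN)
            return avail[i];

    return ENG_UNKNOWN;
}

int main(void)
{
    enum eng_id avail[] = { ENG_A, ENG_UNKNOWN, ENG_C, ENG_D };

    printf("%d\n", pick_engine(avail, 4, ENG_D));   /* preferred hit */
    printf("%d\n", pick_engine(avail, 4, ENG_B));   /* falls back to ENG_A */
    return 0;
}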
+4
-5
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
···
31
31
#include <linux/types.h>
32
32
#include <linux/bitmap.h>
33
33
#include <linux/dma-fence.h>
34
+
#include "amdgpu_irq.h"
35
+
#include "amdgpu_gfx.h"
34
36
35
37
struct pci_dev;
36
38
struct amdgpu_device;
37
-
38
-
#define KGD_MAX_QUEUES 128
39
39
40
40
struct kfd_dev;
41
41
struct kgd_mem;
···
68
68
uint32_t wave_front_size;
69
69
uint32_t max_scratch_slots_per_cu;
70
70
uint32_t lds_size;
71
-
uint32_t cu_bitmap[4][4];
71
+
uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
72
72
};
73
73
74
74
/* For getting GPU local memory information from KGD */
···
326
326
uint32_t wait_times,
327
327
uint32_t grace_period,
328
328
uint32_t *reg_offset,
329
-
uint32_t *reg_data,
330
-
uint32_t inst);
329
+
uint32_t *reg_data);
331
330
void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
332
331
int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
333
332
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
+1
-1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+2
drivers/gpu/drm/drm_connector.c
···
2203
2203
/**
2204
2204
* drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
2205
2205
* @connector: connector to create the Colorspace property on.
2206
+
* @supported_colorspaces: bitmap of supported color spaces
2206
2207
*
2207
2208
* Called by a driver the first time it's needed, must be attached to desired
2208
2209
* HDMI connectors.
···
2228
2227
/**
2229
2228
* drm_mode_create_dp_colorspace_property - create dp colorspace property
2230
2229
* @connector: connector to create the Colorspace property on.
2230
+
* @supported_colorspaces: bitmap of supported color spaces
2231
2231
*
2232
2232
* Called by a driver the first time it's needed, must be attached to desired
2233
2233
* DP connectors.
+1
-1
drivers/gpu/drm/drm_exec.c
+21
drivers/gpu/drm/i915/display/intel_bios.c
···
3540
3540
return map_aux_ch(devdata->i915, devdata->child.aux_channel);
3541
3541
}
3542
3542
3543
+
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
3544
+
{
3545
+
struct drm_i915_private *i915;
3546
+
u8 aux_channel;
3547
+
int count = 0;
3548
+
3549
+
if (!devdata || !devdata->child.aux_channel)
3550
+
return false;
3551
+
3552
+
i915 = devdata->i915;
3553
+
aux_channel = devdata->child.aux_channel;
3554
+
3555
+
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
3556
+
if (intel_bios_encoder_supports_dp(devdata) &&
3557
+
aux_channel == devdata->child.aux_channel)
3558
+
count++;
3559
+
}
3560
+
3561
+
return count > 1;
3562
+
}
3563
+
3543
3564
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
3544
3565
{
3545
3566
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+1
drivers/gpu/drm/i915/display/intel_bios.h
···
273
273
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
274
274
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
275
275
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
276
+
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
276
277
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
277
278
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
278
279
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
+6
-1
drivers/gpu/drm/i915/display/intel_dp.c
···
5512
5512
/*
5513
5513
* VBT and straps are liars. Also check HPD as that seems
5514
5514
* to be the most reliable piece of information available.
5515
+
*
5516
+
* ... expect on devices that forgot to hook HPD up for eDP
5517
+
* (eg. Acer Chromebook C710), so we'll check it only if multiple
5518
+
* ports are attempting to use the same AUX CH, according to VBT.
5515
5519
*/
5516
-
if (!intel_digital_port_connected(encoder)) {
5520
+
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
5521
+
!intel_digital_port_connected(encoder)) {
5517
5522
/*
5518
5523
* If this fails, presume the DPCD answer came
5519
5524
* from some other port using the same AUX CH.
+1
-1
drivers/gpu/drm/radeon/radeon_sa.c
+5
-5
drivers/gpu/drm/tiny/gm12u320.c
···
70
70
#define READ_STATUS_SIZE 13
71
71
#define MISC_VALUE_SIZE 4
72
72
73
-
#define CMD_TIMEOUT msecs_to_jiffies(200)
74
-
#define DATA_TIMEOUT msecs_to_jiffies(1000)
75
-
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
76
-
#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000)
73
+
#define CMD_TIMEOUT 200
74
+
#define DATA_TIMEOUT 1000
75
+
#define IDLE_TIMEOUT 2000
76
+
#define FIRST_FRAME_TIMEOUT 2000
77
77
78
78
#define MISC_REQ_GET_SET_ECO_A 0xff
79
79
#define MISC_REQ_GET_SET_ECO_B 0x35
···
389
389
* switches back to showing its logo.
390
390
*/
391
391
queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
392
-
IDLE_TIMEOUT);
392
+
msecs_to_jiffies(IDLE_TIMEOUT));
393
393
394
394
return;
395
395
err:
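The gm12u320 change above keeps the timeout constants in plain milliseconds and converts with msecs_to_jiffies() only where a jiffies value is actually consumed, presumably because some of the same constants are also handed to helpers that expect milliseconds. A trivial userspace sketch of the convert-at-the-call-site idea; the tick rate below is invented, not the kernel's HZ:

#include <stdio.h>

#define IDLE_TIMEOUT_MS 2000
#define TICKS_PER_SEC   250     /* illustrative stand-in for HZ */

static unsigned long ms_to_ticks(unsigned int ms)
{
    return (unsigned long)ms * TICKS_PER_SEC / 1000;
}

int main(void)
{
    /* the constant stays in ms; conversion happens at the point of use */
    printf("%lu ticks\n", ms_to_ticks(IDLE_TIMEOUT_MS));
    return 0;
}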
+2
-7
drivers/gpu/drm/vkms/vkms_composer.c
···
408
408
if (enabled)
409
409
drm_crtc_vblank_get(&out->crtc);
410
410
411
-
mutex_lock(&out->enabled_lock);
411
+
spin_lock_irq(&out->lock);
412
412
old_enabled = out->composer_enabled;
413
413
out->composer_enabled = enabled;
414
-
415
-
/* the composition wasn't enabled, so unlock the lock to make sure the lock
416
-
* will be balanced even if we have a failed commit
417
-
*/
418
-
if (!out->composer_enabled)
419
-
mutex_unlock(&out->enabled_lock);
414
+
spin_unlock_irq(&out->lock);
420
415
421
416
if (old_enabled)
422
417
drm_crtc_vblank_put(&out->crtc);
+4
-5
drivers/gpu/drm/vkms/vkms_crtc.c
···
16
16
struct drm_crtc *crtc = &output->crtc;
17
17
struct vkms_crtc_state *state;
18
18
u64 ret_overrun;
19
-
bool ret, fence_cookie, composer_enabled;
19
+
bool ret, fence_cookie;
20
20
21
21
fence_cookie = dma_fence_begin_signalling();
22
22
···
25
25
if (ret_overrun != 1)
26
26
pr_warn("%s: vblank timer overrun\n", __func__);
27
27
28
+
spin_lock(&output->lock);
28
29
ret = drm_crtc_handle_vblank(crtc);
29
30
if (!ret)
30
31
DRM_ERROR("vkms failure on handling vblank");
31
32
32
33
state = output->composer_state;
33
-
composer_enabled = output->composer_enabled;
34
-
mutex_unlock(&output->enabled_lock);
34
+
spin_unlock(&output->lock);
35
35
36
-
if (state && composer_enabled) {
36
+
if (state && output->composer_enabled) {
37
37
u64 frame = drm_crtc_accurate_vblank_count(crtc);
38
38
39
39
/* update frame_start only if a queued vkms_composer_worker()
···
295
295
296
296
spin_lock_init(&vkms_out->lock);
297
297
spin_lock_init(&vkms_out->composer_lock);
298
-
mutex_init(&vkms_out->enabled_lock);
299
298
300
299
vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
301
300
if (!vkms_out->composer_workq)
+1
-3
drivers/gpu/drm/vkms/vkms_drv.h
···
108
108
struct workqueue_struct *composer_workq;
109
109
/* protects concurrent access to composer */
110
110
spinlock_t lock;
111
-
/* guarantees that if the composer is enabled, a job will be queued */
112
-
struct mutex enabled_lock;
113
111
114
-
/* protected by @enabled_lock */
112
+
/* protected by @lock */
115
113
bool composer_enabled;
116
114
struct vkms_crtc_state *composer_state;
117
115
+1
-1
drivers/i2c/Kconfig
+2
-2
drivers/i2c/busses/Kconfig
···
1384
1384
1385
1385
config I2C_MLXCPLD
1386
1386
tristate "Mellanox I2C driver"
1387
-
depends on X86_64 || ARM64 || COMPILE_TEST
1387
+
depends on X86_64 || (ARM64 && ACPI) || COMPILE_TEST
1388
1388
help
1389
1389
This exposes the Mellanox platform I2C busses to the linux I2C layer
1390
-
for X86 based systems.
1390
+
for X86 and ARM64/ACPI based systems.
1391
1391
Controller is implemented as CPLD logic.
1392
1392
1393
1393
This driver can also be built as a module. If so, the module will be
+5
-2
drivers/i2c/busses/i2c-aspeed.c
···
698
698
699
699
if (time_left == 0) {
700
700
/*
701
-
* If timed out and bus is still busy in a multi master
702
-
* environment, attempt recovery at here.
701
+
* In a multi-master setup, if a timeout occurs, attempt
702
+
* recovery. But if the bus is idle, we still need to reset the
703
+
* i2c controller to clear the remaining interrupts.
703
704
*/
704
705
if (bus->multi_master &&
705
706
(readl(bus->base + ASPEED_I2C_CMD_REG) &
706
707
ASPEED_I2CD_BUS_BUSY_STS))
707
708
aspeed_i2c_recover_bus(bus);
709
+
else
710
+
aspeed_i2c_reset(bus);
708
711
709
712
/*
710
713
* If timed out and the state is still pending, drop the pending
+1
drivers/i2c/busses/i2c-cadence.c
···
182
182
* @reset: Reset control for the device
183
183
* @quirks: flag for broken hold bit usage in r1p10
184
184
* @ctrl_reg: Cached value of the control register.
185
+
* @rinfo: I2C GPIO recovery information
185
186
* @ctrl_reg_diva_divb: value of fields DIV_A and DIV_B from CR register
186
187
* @slave: Registered slave instance.
187
188
* @dev_mode: I2C operating role(master/slave).
+1
drivers/md/dm-core.h
+6
-1
drivers/md/dm-ioctl.c
···
1630
1630
struct dm_dev_internal *dd;
1631
1631
struct dm_target_deps *deps;
1632
1632
1633
+
down_read(&table->devices_lock);
1634
+
1633
1635
deps = get_result_buffer(param, param_size, &len);
1634
1636
1635
1637
/*
···
1646
1644
needed = struct_size(deps, dev, count);
1647
1645
if (len < needed) {
1648
1646
param->flags |= DM_BUFFER_FULL_FLAG;
1649
-
return;
1647
+
goto out;
1650
1648
}
1651
1649
1652
1650
/*
···
1658
1656
deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
1659
1657
1660
1658
param->data_size = param->data_start + needed;
1659
+
1660
+
out:
1661
+
up_read(&table->devices_lock);
1661
1662
}
1662
1663
1663
1664
static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
+24
-8
drivers/md/dm-table.c
···
135
135
return -ENOMEM;
136
136
137
137
INIT_LIST_HEAD(&t->devices);
138
+
init_rwsem(&t->devices_lock);
138
139
139
140
if (!num_targets)
140
141
num_targets = KEYS_PER_NODE;
···
360
359
if (dev == disk_devt(t->md->disk))
361
360
return -EINVAL;
362
361
362
+
down_write(&t->devices_lock);
363
+
363
364
dd = find_device(&t->devices, dev);
364
365
if (!dd) {
365
366
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
366
-
if (!dd)
367
-
return -ENOMEM;
367
+
if (!dd) {
368
+
r = -ENOMEM;
369
+
goto unlock_ret_r;
370
+
}
368
371
369
372
r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
370
373
if (r) {
371
374
kfree(dd);
372
-
return r;
375
+
goto unlock_ret_r;
373
376
}
374
377
375
378
refcount_set(&dd->count, 1);
···
383
378
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
384
379
r = upgrade_mode(dd, mode, t->md);
385
380
if (r)
386
-
return r;
381
+
goto unlock_ret_r;
387
382
}
388
383
refcount_inc(&dd->count);
389
384
out:
385
+
up_write(&t->devices_lock);
390
386
*result = dd->dm_dev;
391
387
return 0;
388
+
389
+
unlock_ret_r:
390
+
up_write(&t->devices_lock);
391
+
return r;
392
392
}
393
393
EXPORT_SYMBOL(dm_get_device);
394
394
···
429
419
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
430
420
{
431
421
int found = 0;
432
-
struct list_head *devices = &ti->table->devices;
422
+
struct dm_table *t = ti->table;
423
+
struct list_head *devices = &t->devices;
433
424
struct dm_dev_internal *dd;
425
+
426
+
down_write(&t->devices_lock);
434
427
435
428
list_for_each_entry(dd, devices, list) {
436
429
if (dd->dm_dev == d) {
···
443
430
}
444
431
if (!found) {
445
432
DMERR("%s: device %s not in table devices list",
446
-
dm_device_name(ti->table->md), d->name);
447
-
return;
433
+
dm_device_name(t->md), d->name);
434
+
goto unlock_ret;
448
435
}
449
436
if (refcount_dec_and_test(&dd->count)) {
450
-
dm_put_table_device(ti->table->md, d);
437
+
dm_put_table_device(t->md, d);
451
438
list_del(&dd->list);
452
439
kfree(dd);
453
440
}
441
+
442
+
unlock_ret:
443
+
up_write(&t->devices_lock);
454
444
}
455
445
EXPORT_SYMBOL(dm_put_device);
456
446
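A small, self-contained sketch of the rw_semaphore pattern the dm-table and dm-ioctl hunks above introduce: paths that add or remove devices take the semaphore for writing, while the ioctl that only walks the list takes it for reading. The names here are invented for illustration and are not the device-mapper API.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_table {
        struct list_head devices;
        struct rw_semaphore devices_lock;       /* protects @devices */
};

struct demo_dev {
        struct list_head list;
        dev_t dev;
};

static int demo_add_device(struct demo_table *t, dev_t dev)
{
        struct demo_dev *dd = kmalloc(sizeof(*dd), GFP_KERNEL);

        if (!dd)
                return -ENOMEM;
        dd->dev = dev;

        down_write(&t->devices_lock);           /* writer: list is modified */
        list_add_tail(&dd->list, &t->devices);
        up_write(&t->devices_lock);

        return 0;
}

static unsigned int demo_count_devices(struct demo_table *t)
{
        struct demo_dev *dd;
        unsigned int n = 0;

        down_read(&t->devices_lock);            /* reader: list is only walked */
        list_for_each_entry(dd, &t->devices, list)
                n++;
        up_read(&t->devices_lock);

        return n;
}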
+2
-21
drivers/md/dm.c
···
715
715
rcu_read_unlock();
716
716
}
717
717
718
-
static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md,
719
-
int *srcu_idx, blk_opf_t bio_opf)
720
-
{
721
-
if (bio_opf & REQ_NOWAIT)
722
-
return dm_get_live_table_fast(md);
723
-
else
724
-
return dm_get_live_table(md, srcu_idx);
725
-
}
726
-
727
-
static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx,
728
-
blk_opf_t bio_opf)
729
-
{
730
-
if (bio_opf & REQ_NOWAIT)
731
-
dm_put_live_table_fast(md);
732
-
else
733
-
dm_put_live_table(md, srcu_idx);
734
-
}
735
-
736
718
static char *_dm_claim_ptr = "I belong to device-mapper";
737
719
738
720
/*
···
1815
1833
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1816
1834
int srcu_idx;
1817
1835
struct dm_table *map;
1818
-
blk_opf_t bio_opf = bio->bi_opf;
1819
1836
1820
-
map = dm_get_live_table_bio(md, &srcu_idx, bio_opf);
1837
+
map = dm_get_live_table(md, &srcu_idx);
1821
1838
1822
1839
/* If suspended, or map not yet available, queue this IO for later */
1823
1840
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
···
1832
1851
1833
1852
dm_split_and_process_bio(md, map, bio);
1834
1853
out:
1835
-
dm_put_live_table_bio(md, srcu_idx, bio_opf);
1854
+
dm_put_live_table(md, srcu_idx);
1836
1855
}
1837
1856
1838
1857
static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+16
-7
drivers/md/md.c
···
798
798
} else
799
799
mutex_unlock(&mddev->reconfig_mutex);
800
800
801
+
md_wakeup_thread(mddev->thread);
802
+
wake_up(&mddev->sb_wait);
803
+
801
804
list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
802
805
list_del_init(&rdev->same_set);
803
806
kobject_del(&rdev->kobj);
804
807
export_rdev(rdev, mddev);
805
808
}
806
-
807
-
md_wakeup_thread(mddev->thread);
808
-
wake_up(&mddev->sb_wait);
809
809
}
810
810
EXPORT_SYMBOL_GPL(mddev_unlock);
811
811
···
2452
2452
if (test_bit(AutoDetected, &rdev->flags))
2453
2453
md_autodetect_dev(rdev->bdev->bd_dev);
2454
2454
#endif
2455
-
blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
2455
+
blkdev_put(rdev->bdev,
2456
+
test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
2456
2457
rdev->bdev = NULL;
2457
2458
kobject_put(&rdev->kobj);
2458
2459
}
···
3633
3632
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3634
3633
{
3635
3634
struct md_rdev *rdev;
3635
+
struct md_rdev *holder;
3636
3636
sector_t size;
3637
3637
int err;
3638
3638
···
3648
3646
if (err)
3649
3647
goto out_clear_rdev;
3650
3648
3649
+
if (super_format == -2) {
3650
+
holder = &claim_rdev;
3651
+
} else {
3652
+
holder = rdev;
3653
+
set_bit(Holder, &rdev->flags);
3654
+
}
3655
+
3651
3656
rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
3652
-
super_format == -2 ? &claim_rdev : rdev, NULL);
3657
+
holder, NULL);
3653
3658
if (IS_ERR(rdev->bdev)) {
3654
3659
pr_warn("md: could not open device unknown-block(%u,%u).\n",
3655
3660
MAJOR(newdev), MINOR(newdev));
···
3693
3684
return rdev;
3694
3685
3695
3686
out_blkdev_put:
3696
-
blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
3687
+
blkdev_put(rdev->bdev, holder);
3697
3688
out_clear_rdev:
3698
3689
md_rdev_clear(rdev);
3699
3690
out_free_rdev:
···
8265
8256
spin_unlock(&all_mddevs_lock);
8266
8257
8267
8258
if (to_put)
8268
-
mddev_put(mddev);
8259
+
mddev_put(to_put);
8269
8260
return next_mddev;
8270
8261
8271
8262
}
+3
drivers/md/md.h
+1
-2
drivers/md/raid1.c
···
1837
1837
struct r1conf *conf = mddev->private;
1838
1838
int err = 0;
1839
1839
int number = rdev->raid_disk;
1840
+
struct raid1_info *p = conf->mirrors + number;
1840
1841
1841
1842
if (unlikely(number >= conf->raid_disks))
1842
1843
goto abort;
1843
-
1844
-
struct raid1_info *p = conf->mirrors + number;
1845
1844
1846
1845
if (rdev != p->rdev)
1847
1846
p = conf->mirrors + conf->raid_disks + number;
+2
drivers/net/dsa/sja1105/sja1105.h
+43
-48
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
···
1175
1175
1176
1176
static int
1177
1177
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
1178
-
struct sja1105_dyn_cmd *cmd,
1179
-
const struct sja1105_dynamic_table_ops *ops)
1178
+
const struct sja1105_dynamic_table_ops *ops,
1179
+
void *entry, bool check_valident,
1180
+
bool check_errors)
1180
1181
{
1181
1182
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
1183
+
struct sja1105_dyn_cmd cmd = {};
1182
1184
int rc;
1183
1185
1184
-
/* We don't _need_ to read the full entry, just the command area which
1185
-
* is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
1186
-
* buffer that contains the full entry too. Additionally, our API
1187
-
* doesn't really know how many bytes into the buffer does the command
1188
-
* area really begin. So just read back the whole entry.
1189
-
*/
1186
+
/* Read back the whole entry + command structure. */
1190
1187
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
1191
1188
ops->packed_size);
1192
1189
if (rc)
···
1192
1195
/* Unpack the command structure, and return it to the caller in case it
1193
1196
* needs to perform further checks on it (VALIDENT).
1194
1197
*/
1195
-
memset(cmd, 0, sizeof(*cmd));
1196
-
ops->cmd_packing(packed_buf, cmd, UNPACK);
1198
+
ops->cmd_packing(packed_buf, &cmd, UNPACK);
1197
1199
1198
1200
/* Hardware hasn't cleared VALID => still working on it */
1199
-
return cmd->valid ? -EAGAIN : 0;
1201
+
if (cmd.valid)
1202
+
return -EAGAIN;
1203
+
1204
+
if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
1205
+
return -ENOENT;
1206
+
1207
+
if (check_errors && cmd.errors)
1208
+
return -EINVAL;
1209
+
1210
+
/* Don't dereference possibly NULL pointer - maybe caller
1211
+
* only wanted to see whether the entry existed or not.
1212
+
*/
1213
+
if (entry)
1214
+
ops->entry_packing(packed_buf, entry, UNPACK);
1215
+
1216
+
return 0;
1200
1217
}
1201
1218
1202
1219
/* Poll the dynamic config entry's control area until the hardware has
···
1219
1208
*/
1220
1209
static int
1221
1210
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
1222
-
struct sja1105_dyn_cmd *cmd,
1223
-
const struct sja1105_dynamic_table_ops *ops)
1211
+
const struct sja1105_dynamic_table_ops *ops,
1212
+
void *entry, bool check_valident,
1213
+
bool check_errors)
1224
1214
{
1225
-
int rc;
1215
+
int err, rc;
1226
1216
1227
-
return read_poll_timeout(sja1105_dynamic_config_poll_valid,
1228
-
rc, rc != -EAGAIN,
1229
-
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
1230
-
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
1231
-
false, priv, cmd, ops);
1217
+
err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
1218
+
rc, rc != -EAGAIN,
1219
+
SJA1105_DYNAMIC_CONFIG_SLEEP_US,
1220
+
SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
1221
+
false, priv, ops, entry, check_valident,
1222
+
check_errors);
1223
+
return err < 0 ? err : rc;
1232
1224
}
1233
1225
1234
1226
/* Provides read access to the settings through the dynamic interface
···
1300
1286
mutex_lock(&priv->dynamic_config_lock);
1301
1287
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
1302
1288
ops->packed_size);
1303
-
if (rc < 0) {
1304
-
mutex_unlock(&priv->dynamic_config_lock);
1305
-
return rc;
1306
-
}
1307
-
1308
-
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
1309
-
mutex_unlock(&priv->dynamic_config_lock);
1310
1289
if (rc < 0)
1311
-
return rc;
1290
+
goto out;
1312
1291
1313
-
if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
1314
-
return -ENOENT;
1292
+
rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
1293
+
out:
1294
+
mutex_unlock(&priv->dynamic_config_lock);
1315
1295
1316
-
/* Don't dereference possibly NULL pointer - maybe caller
1317
-
* only wanted to see whether the entry existed or not.
1318
-
*/
1319
-
if (entry)
1320
-
ops->entry_packing(packed_buf, entry, UNPACK);
1321
-
return 0;
1296
+
return rc;
1322
1297
}
1323
1298
1324
1299
int sja1105_dynamic_config_write(struct sja1105_private *priv,
···
1359
1356
mutex_lock(&priv->dynamic_config_lock);
1360
1357
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
1361
1358
ops->packed_size);
1362
-
if (rc < 0) {
1363
-
mutex_unlock(&priv->dynamic_config_lock);
1364
-
return rc;
1365
-
}
1366
-
1367
-
rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
1368
-
mutex_unlock(&priv->dynamic_config_lock);
1369
1359
if (rc < 0)
1370
-
return rc;
1360
+
goto out;
1371
1361
1372
-
cmd = (struct sja1105_dyn_cmd) {0};
1373
-
ops->cmd_packing(packed_buf, &cmd, UNPACK);
1374
-
if (cmd.errors)
1375
-
return -EINVAL;
1362
+
rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
1363
+
out:
1364
+
mutex_unlock(&priv->dynamic_config_lock);
1376
1365
1377
-
return 0;
1366
+
return rc;
1378
1367
}
1379
1368
1380
1369
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
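A hedged sketch of the polling idiom the sja1105 rework above relies on: the poll callback returns -EAGAIN while the hardware is still busy and a final verdict once it is done, and read_poll_timeout() keeps calling it until the verdict changes or the timeout expires. The hardware-state struct and names below are made up for the example.

#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/types.h>

struct demo_hw {
        bool busy;      /* stand-in for the VALID bit still being set */
        bool error;     /* stand-in for the ERRORS field */
};

static int demo_poll_done(struct demo_hw *hw)
{
        if (hw->busy)
                return -EAGAIN;                 /* keep polling */

        return hw->error ? -EINVAL : 0;         /* final verdict */
}

static int demo_wait_complete(struct demo_hw *hw)
{
        int err, rc;

        err = read_poll_timeout(demo_poll_done, rc, rc != -EAGAIN,
                                10, 100000, false, hw);

        /* err reports the timeout, rc carries the callback's verdict */
        return err < 0 ? err : rc;
}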
+51
-18
drivers/net/dsa/sja1105/sja1105_main.c
···
1798
1798
struct dsa_db db)
1799
1799
{
1800
1800
struct sja1105_private *priv = ds->priv;
1801
+
int rc;
1801
1802
1802
1803
if (!vid) {
1803
1804
switch (db.type) {
···
1813
1812
}
1814
1813
}
1815
1814
1816
-
return priv->info->fdb_add_cmd(ds, port, addr, vid);
1815
+
mutex_lock(&priv->fdb_lock);
1816
+
rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
1817
+
mutex_unlock(&priv->fdb_lock);
1818
+
1819
+
return rc;
1817
1820
}
1818
1821
1819
-
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1820
-
const unsigned char *addr, u16 vid,
1821
-
struct dsa_db db)
1822
+
static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
1823
+
const unsigned char *addr, u16 vid,
1824
+
struct dsa_db db)
1822
1825
{
1823
1826
struct sja1105_private *priv = ds->priv;
1824
1827
···
1840
1835
}
1841
1836
1842
1837
return priv->info->fdb_del_cmd(ds, port, addr, vid);
1838
+
}
1839
+
1840
+
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1841
+
const unsigned char *addr, u16 vid,
1842
+
struct dsa_db db)
1843
+
{
1844
+
struct sja1105_private *priv = ds->priv;
1845
+
int rc;
1846
+
1847
+
mutex_lock(&priv->fdb_lock);
1848
+
rc = __sja1105_fdb_del(ds, port, addr, vid, db);
1849
+
mutex_unlock(&priv->fdb_lock);
1850
+
1851
+
return rc;
1843
1852
}
1844
1853
1845
1854
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
···
1887
1868
if (!(l2_lookup.destports & BIT(port)))
1888
1869
continue;
1889
1870
1890
-
/* We need to hide the FDB entry for unknown multicast */
1891
-
if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
1892
-
l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
1893
-
continue;
1894
-
1895
1871
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1872
+
1873
+
/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
1874
+
* only wants to see unicast
1875
+
*/
1876
+
if (is_multicast_ether_addr(macaddr))
1877
+
continue;
1896
1878
1897
1879
/* We need to hide the dsa_8021q VLANs from the user. */
1898
1880
if (vid_is_dsa_8021q(l2_lookup.vlanid))
···
1918
1898
};
1919
1899
int i;
1920
1900
1901
+
mutex_lock(&priv->fdb_lock);
1902
+
1921
1903
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1922
1904
struct sja1105_l2_lookup_entry l2_lookup = {0};
1923
1905
u8 macaddr[ETH_ALEN];
···
1933
1911
if (rc) {
1934
1912
dev_err(ds->dev, "Failed to read FDB: %pe\n",
1935
1913
ERR_PTR(rc));
1936
-
return;
1914
+
break;
1937
1915
}
1938
1916
1939
1917
if (!(l2_lookup.destports & BIT(port)))
···
1945
1923
1946
1924
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1947
1925
1948
-
rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
1926
+
rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
1949
1927
if (rc) {
1950
1928
dev_err(ds->dev,
1951
1929
"Failed to delete FDB entry %pM vid %lld: %pe\n",
1952
1930
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
1953
-
return;
1931
+
break;
1954
1932
}
1955
1933
}
1934
+
1935
+
mutex_unlock(&priv->fdb_lock);
1956
1936
}
1957
1937
1958
1938
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
···
2297
2273
int rc, i;
2298
2274
s64 now;
2299
2275
2276
+
mutex_lock(&priv->fdb_lock);
2300
2277
mutex_lock(&priv->mgmt_lock);
2301
2278
2302
2279
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
···
2410
2385
goto out;
2411
2386
out:
2412
2387
mutex_unlock(&priv->mgmt_lock);
2388
+
mutex_unlock(&priv->fdb_lock);
2413
2389
2414
2390
return rc;
2415
2391
}
···
2980
2954
{
2981
2955
struct sja1105_l2_lookup_entry *l2_lookup;
2982
2956
struct sja1105_table *table;
2983
-
int match;
2957
+
int match, rc;
2958
+
2959
+
mutex_lock(&priv->fdb_lock);
2984
2960
2985
2961
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
2986
2962
l2_lookup = table->entries;
···
2995
2967
if (match == table->entry_count) {
2996
2968
NL_SET_ERR_MSG_MOD(extack,
2997
2969
"Could not find FDB entry for unknown multicast");
2998
-
return -ENOSPC;
2970
+
rc = -ENOSPC;
2971
+
goto out;
2999
2972
}
3000
2973
3001
2974
if (flags.val & BR_MCAST_FLOOD)
···
3004
2975
else
3005
2976
l2_lookup[match].destports &= ~BIT(to);
3006
2977
3007
-
return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3008
-
l2_lookup[match].index,
3009
-
&l2_lookup[match],
3010
-
true);
2978
+
rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
2979
+
l2_lookup[match].index,
2980
+
&l2_lookup[match], true);
2981
+
out:
2982
+
mutex_unlock(&priv->fdb_lock);
2983
+
2984
+
return rc;
3011
2985
}
3012
2986
3013
2987
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
···
3380
3348
mutex_init(&priv->ptp_data.lock);
3381
3349
mutex_init(&priv->dynamic_config_lock);
3382
3350
mutex_init(&priv->mgmt_lock);
3351
+
mutex_init(&priv->fdb_lock);
3383
3352
spin_lock_init(&priv->ts_id_lock);
3384
3353
3385
3354
rc = sja1105_parse_dt(priv);
+1
-1
drivers/net/ethernet/adi/adin1110.c
···
1385
1385
return -ENOMEM;
1386
1386
1387
1387
other_port = priv->ports[!port_priv->nr];
1388
-
port_rules = adin1110_port_rules(port_priv, false, true);
1388
+
port_rules = adin1110_port_rules(other_port, false, true);
1389
1389
eth_broadcast_addr(mask);
1390
1390
1391
1391
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+8
-2
drivers/net/ethernet/broadcom/asp2/bcmasp.c
···
528
528
ASP_RX_FILTER_BLK_CTRL);
529
529
}
530
530
531
-
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
532
-
u32 *rule_cnt)
531
+
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
532
+
u32 *rule_cnt)
533
533
{
534
534
struct bcmasp_priv *priv = intf->parent;
535
535
int j = 0, i;
536
536
537
537
for (i = 0; i < NUM_NET_FILTERS; i++) {
538
+
if (j == *rule_cnt)
539
+
return -EMSGSIZE;
540
+
538
541
if (!priv->net_filters[i].claimed ||
539
542
priv->net_filters[i].port != intf->port)
540
543
continue;
···
551
548
}
552
549
553
550
*rule_cnt = j;
551
+
552
+
return 0;
554
553
}
555
554
556
555
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
···
1305
1300
if (!intf) {
1306
1301
dev_err(dev, "Cannot create eth interface %d\n", i);
1307
1302
bcmasp_remove_intfs(priv);
1303
+
of_node_put(intf_node);
1308
1304
goto of_put_exit;
1309
1305
}
1310
1306
list_add_tail(&intf->list, &priv->intfs);
+2
-2
drivers/net/ethernet/broadcom/asp2/bcmasp.h
···
577
577
578
578
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
579
579
580
-
void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
581
-
u32 *rule_cnt);
580
+
int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
581
+
u32 *rule_cnt);
582
582
583
583
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
584
584
+1
-1
drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
···
335
335
err = bcmasp_flow_get(intf, cmd);
336
336
break;
337
337
case ETHTOOL_GRXCLSRLALL:
338
-
bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
338
+
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
339
339
cmd->data = NUM_NET_FILTERS;
340
340
break;
341
341
default:
+3
-2
drivers/net/ethernet/cadence/macb_main.c
···
756
756
if (rx_pause)
757
757
ctrl |= MACB_BIT(PAE);
758
758
759
-
macb_set_tx_clk(bp, speed);
760
-
761
759
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
762
760
* cleared the pipeline and control registers.
763
761
*/
···
774
776
gem_readl(bp, HS_MAC_CONFIG)));
775
777
776
778
spin_unlock_irqrestore(&bp->lock, flags);
779
+
780
+
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
781
+
macb_set_tx_clk(bp, speed);
777
782
778
783
/* Enable Rx and Tx; Enable PTP unicast */
779
784
ctrl = macb_readl(bp, NCR);
+4
-1
drivers/net/ethernet/intel/igb/igb_main.c
···
3827
3827
}
3828
3828
3829
3829
/* only call pci_enable_sriov() if no VFs are allocated already */
3830
-
if (!old_vfs)
3830
+
if (!old_vfs) {
3831
3831
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3832
+
if (err)
3833
+
goto err_out;
3834
+
}
3832
3835
3833
3836
goto out;
3834
3837
+15
-13
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
···
979
979
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
980
980
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
981
981
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
982
+
u32 aflags = adapter->flags;
982
983
bool is_l2 = false;
983
984
u32 regval;
984
985
···
997
996
case HWTSTAMP_FILTER_NONE:
998
997
tsync_rx_ctl = 0;
999
998
tsync_rx_mtrl = 0;
1000
-
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1001
-
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
999
+
aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1000
+
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1002
1001
break;
1003
1002
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1004
1003
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
1005
1004
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
1006
-
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1007
-
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1005
+
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1006
+
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1008
1007
break;
1009
1008
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1010
1009
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
1011
1010
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
1012
-
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1013
-
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1011
+
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1012
+
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1014
1013
break;
1015
1014
case HWTSTAMP_FILTER_PTP_V2_EVENT:
1016
1015
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
···
1024
1023
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
1025
1024
is_l2 = true;
1026
1025
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1027
-
adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1028
-
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1026
+
aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1027
+
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1029
1028
break;
1030
1029
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1031
1030
case HWTSTAMP_FILTER_NTP_ALL:
···
1036
1035
if (hw->mac.type >= ixgbe_mac_X550) {
1037
1036
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
1038
1037
config->rx_filter = HWTSTAMP_FILTER_ALL;
1039
-
adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
1038
+
aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
1040
1039
break;
1041
1040
}
1042
1041
fallthrough;
···
1047
1046
* Delay_Req messages and hardware does not support
1048
1047
* timestamping all packets => return error
1049
1048
*/
1050
-
adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
1051
-
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
1052
1049
config->rx_filter = HWTSTAMP_FILTER_NONE;
1053
1050
return -ERANGE;
1054
1051
}
···
1078
1079
IXGBE_TSYNCRXCTL_TYPE_ALL |
1079
1080
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
1080
1081
config->rx_filter = HWTSTAMP_FILTER_ALL;
1081
-
adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
1082
-
adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
1082
+
aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
1083
+
aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
1083
1084
is_l2 = true;
1084
1085
break;
1085
1086
default:
···
1111
1112
IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);
1112
1113
1113
1114
IXGBE_WRITE_FLUSH(hw);
1115
+
1116
+
/* configure adapter flags only when HW is actually configured */
1117
+
adapter->flags = aflags;
1114
1118
1115
1119
/* clear TX/RX time stamp registers, just to be sure */
1116
1120
ixgbe_ptp_clear_tx_timestamp(adapter);
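A compact sketch of the "stage, then commit" flag handling the ixgbe_ptp hunks above move to: flag changes are accumulated in a local copy and written back to the adapter only after every step that can still fail has succeeded. The macros and struct below are invented for the example.

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_FLAG_RX_TSTAMP     BIT(0)
#define DEMO_FLAG_TSTAMP_IN_REG BIT(1)

struct demo_adapter {
        u32 flags;
};

/* pretend hardware programming step that may reject the filter */
static int demo_hw_apply(struct demo_adapter *ad, int filter)
{
        return filter < 0 ? -ERANGE : 0;
}

static int demo_set_hwtstamp(struct demo_adapter *ad, int filter)
{
        u32 aflags = ad->flags;                 /* work on a copy */
        int err;

        if (filter)
                aflags |= DEMO_FLAG_RX_TSTAMP | DEMO_FLAG_TSTAMP_IN_REG;
        else
                aflags &= ~(DEMO_FLAG_RX_TSTAMP | DEMO_FLAG_TSTAMP_IN_REG);

        err = demo_hw_apply(ad, filter);
        if (err)
                return err;                     /* ad->flags left untouched */

        ad->flags = aflags;                     /* commit only on success */
        return 0;
}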
+5
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+4
-2
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
···
107
107
}
108
108
109
109
#define NPA_MAX_BURST 16
110
-
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
110
+
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
111
111
{
112
112
struct otx2_nic *pfvf = dev;
113
+
int cnt = cq->pool_ptrs;
113
114
u64 ptrs[NPA_MAX_BURST];
114
-
int num_ptrs = 1;
115
115
dma_addr_t bufptr;
116
+
int num_ptrs = 1;
116
117
117
118
/* Refill pool with new buffers */
118
119
while (cq->pool_ptrs) {
···
132
131
num_ptrs = 1;
133
132
}
134
133
}
134
+
return cnt - cq->pool_ptrs;
135
135
}
136
136
137
137
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+1
-1
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
···
24
24
return weight;
25
25
}
26
26
27
-
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
27
+
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
28
28
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
29
29
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
30
30
int cn10k_lmtst_init(struct otx2_nic *pfvf);
+6
-37
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
···
574
574
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
575
575
dma_addr_t *dma)
576
576
{
577
-
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
578
-
struct refill_work *work;
579
-
struct delayed_work *dwork;
580
-
581
-
work = &pfvf->refill_wrk[cq->cq_idx];
582
-
dwork = &work->pool_refill_work;
583
-
/* Schedule a task if no other task is running */
584
-
if (!cq->refill_task_sched) {
585
-
cq->refill_task_sched = true;
586
-
schedule_delayed_work(dwork,
587
-
msecs_to_jiffies(100));
588
-
}
577
+
if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
589
578
return -ENOMEM;
590
-
}
591
579
return 0;
592
580
}
593
581
···
1070
1082
static void otx2_pool_refill_task(struct work_struct *work)
1071
1083
{
1072
1084
struct otx2_cq_queue *cq;
1073
-
struct otx2_pool *rbpool;
1074
1085
struct refill_work *wrk;
1075
-
int qidx, free_ptrs = 0;
1076
1086
struct otx2_nic *pfvf;
1077
-
dma_addr_t bufptr;
1087
+
int qidx;
1078
1088
1079
1089
wrk = container_of(work, struct refill_work, pool_refill_work.work);
1080
1090
pfvf = wrk->pf;
1081
1091
qidx = wrk - pfvf->refill_wrk;
1082
1092
cq = &pfvf->qset.cq[qidx];
1083
-
rbpool = cq->rbpool;
1084
-
free_ptrs = cq->pool_ptrs;
1085
1093
1086
-
while (cq->pool_ptrs) {
1087
-
if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
1088
-
/* Schedule a WQ if we fails to free atleast half of the
1089
-
* pointers else enable napi for this RQ.
1090
-
*/
1091
-
if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
1092
-
struct delayed_work *dwork;
1093
-
1094
-
dwork = &wrk->pool_refill_work;
1095
-
schedule_delayed_work(dwork,
1096
-
msecs_to_jiffies(100));
1097
-
} else {
1098
-
cq->refill_task_sched = false;
1099
-
}
1100
-
return;
1101
-
}
1102
-
pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
1103
-
cq->pool_ptrs--;
1104
-
}
1105
1094
cq->refill_task_sched = false;
1095
+
1096
+
local_bh_disable();
1097
+
napi_schedule(wrk->napi);
1098
+
local_bh_enable();
1106
1099
}
1107
1100
1108
1101
int otx2_config_nix_queues(struct otx2_nic *pfvf)
+2
-1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
···
302
302
struct refill_work {
303
303
struct delayed_work pool_refill_work;
304
304
struct otx2_nic *pf;
305
+
struct napi_struct *napi;
305
306
};
306
307
307
308
/* PTPv2 originTimestamp structure */
···
371
370
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
372
371
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
373
372
int size, int qidx);
374
-
void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
373
+
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
375
374
void (*aura_freeptr)(void *dev, int aura, u64 buf);
376
375
};
377
376
+4
-3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
···
1943
1943
1944
1944
netif_tx_disable(netdev);
1945
1945
1946
+
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1947
+
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1948
+
devm_kfree(pf->dev, pf->refill_wrk);
1949
+
1946
1950
otx2_free_hw_resources(pf);
1947
1951
otx2_free_cints(pf, pf->hw.cint_cnt);
1948
1952
otx2_disable_napi(pf);
···
1954
1950
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1955
1951
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1956
1952
1957
-
for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1958
-
cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1959
-
devm_kfree(pf->dev, pf->refill_wrk);
1960
1953
1961
1954
kfree(qset->sq);
1962
1955
kfree(qset->cq);
+25
-5
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
···
424
424
return processed_cqe;
425
425
}
426
426
427
-
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
427
+
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
428
428
{
429
429
struct otx2_nic *pfvf = dev;
430
+
int cnt = cq->pool_ptrs;
430
431
dma_addr_t bufptr;
431
432
432
433
while (cq->pool_ptrs) {
···
436
435
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
437
436
cq->pool_ptrs--;
438
437
}
438
+
439
+
return cnt - cq->pool_ptrs;
439
440
}
440
441
441
442
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
···
524
521
struct otx2_cq_queue *cq;
525
522
struct otx2_qset *qset;
526
523
struct otx2_nic *pfvf;
524
+
int filled_cnt = -1;
527
525
528
526
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
529
527
pfvf = (struct otx2_nic *)cq_poll->dev;
···
545
541
}
546
542
547
543
if (rx_cq && rx_cq->pool_ptrs)
548
-
pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
544
+
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
549
545
/* Clear the IRQ */
550
546
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
551
547
···
565
561
otx2_config_irq_coalescing(pfvf, i);
566
562
}
567
563
568
-
/* Re-enable interrupts */
569
-
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
570
-
BIT_ULL(0));
564
+
if (unlikely(!filled_cnt)) {
565
+
struct refill_work *work;
566
+
struct delayed_work *dwork;
567
+
568
+
work = &pfvf->refill_wrk[cq->cq_idx];
569
+
dwork = &work->pool_refill_work;
570
+
/* Schedule a task if no other task is running */
571
+
if (!cq->refill_task_sched) {
572
+
work->napi = napi;
573
+
cq->refill_task_sched = true;
574
+
schedule_delayed_work(dwork,
575
+
msecs_to_jiffies(100));
576
+
}
577
+
} else {
578
+
/* Re-enable interrupts */
579
+
otx2_write64(pfvf,
580
+
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
581
+
BIT_ULL(0));
582
+
}
571
583
}
572
584
return workdone;
573
585
}
+2
-2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
···
170
170
int size, int qidx);
171
171
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
172
172
int size, int qidx);
173
-
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
174
-
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
173
+
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
174
+
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
175
175
#endif /* OTX2_TXRX_H */
+6
-2
drivers/net/ethernet/mediatek/mtk_eth_soc.c
···
2005
2005
u8 *data, *new_data;
2006
2006
struct mtk_rx_dma_v2 *rxd, trxd;
2007
2007
int done = 0, bytes = 0;
2008
+
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2008
2009
2009
2010
while (done < budget) {
2010
2011
unsigned int pktlen, *rxdcsum;
2011
2012
struct net_device *netdev;
2012
-
dma_addr_t dma_addr;
2013
2013
u32 hash, reason;
2014
2014
int mac = 0;
2015
2015
···
2186
2186
else
2187
2187
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2188
2188
2189
-
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2189
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2190
+
likely(dma_addr != DMA_MAPPING_ERROR))
2190
2191
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2191
2192
2192
2193
ring->calc_idx = idx;
···
2995
2994
int i;
2996
2995
2997
2996
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2997
+
if (cnt == cmd->rule_cnt)
2998
+
return -EMSGSIZE;
2999
+
2998
3000
if (mac->hwlro_ip[i]) {
2999
3001
rule_locs[cnt] = i;
3000
3002
cnt++;
+4
-2
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
···
214
214
dsa_port = mtk_flow_get_dsa_port(&dev);
215
215
216
216
if (dev == eth->netdev[0])
217
-
pse_port = 1;
217
+
pse_port = PSE_GDM1_PORT;
218
218
else if (dev == eth->netdev[1])
219
-
pse_port = 2;
219
+
pse_port = PSE_GDM2_PORT;
220
+
else if (dev == eth->netdev[2])
221
+
pse_port = PSE_GDM3_PORT;
220
222
else
221
223
return -EOPNOTSUPP;
222
224
+16
-2
drivers/net/ethernet/microchip/vcap/vcap_api.c
···
1021
1021
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
1022
1022
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
1023
1023
if (!newckf)
1024
-
return ERR_PTR(-ENOMEM);
1024
+
goto err;
1025
1025
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
1026
1026
}
1027
1027
1028
1028
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
1029
1029
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
1030
1030
if (!newcaf)
1031
-
return ERR_PTR(-ENOMEM);
1031
+
goto err;
1032
1032
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
1033
1033
}
1034
1034
1035
1035
return duprule;
1036
+
1037
+
err:
1038
+
list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
1039
+
list_del(&ckf->ctrl.list);
1040
+
kfree(ckf);
1041
+
}
1042
+
1043
+
list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
1044
+
list_del(&caf->ctrl.list);
1045
+
kfree(caf);
1046
+
}
1047
+
1048
+
kfree(duprule);
1049
+
return ERR_PTR(-ENOMEM);
1036
1050
}
1037
1051
1038
1052
static void vcap_apply_width(u8 *dst, int width, int bytes)
+16
-4
drivers/net/ethernet/renesas/rswitch.c
···
799
799
struct net_device *ndev = napi->dev;
800
800
struct rswitch_private *priv;
801
801
struct rswitch_device *rdev;
802
+
unsigned long flags;
802
803
int quota = budget;
803
804
804
805
rdev = netdev_priv(ndev);
···
817
816
818
817
netif_wake_subqueue(ndev, 0);
819
818
820
-
napi_complete(napi);
821
-
822
-
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
823
-
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
819
+
if (napi_complete_done(napi, budget - quota)) {
820
+
spin_lock_irqsave(&priv->lock, flags);
821
+
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
822
+
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
823
+
spin_unlock_irqrestore(&priv->lock, flags);
824
+
}
824
825
825
826
out:
826
827
return budget - quota;
···
838
835
struct rswitch_device *rdev = netdev_priv(ndev);
839
836
840
837
if (napi_schedule_prep(&rdev->napi)) {
838
+
spin_lock(&rdev->priv->lock);
841
839
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
842
840
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
841
+
spin_unlock(&rdev->priv->lock);
843
842
__napi_schedule(&rdev->napi);
844
843
}
845
844
}
···
1445
1440
static int rswitch_open(struct net_device *ndev)
1446
1441
{
1447
1442
struct rswitch_device *rdev = netdev_priv(ndev);
1443
+
unsigned long flags;
1448
1444
1449
1445
phy_start(ndev->phydev);
1450
1446
1451
1447
napi_enable(&rdev->napi);
1452
1448
netif_start_queue(ndev);
1453
1449
1450
+
spin_lock_irqsave(&rdev->priv->lock, flags);
1454
1451
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1455
1452
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1453
+
spin_unlock_irqrestore(&rdev->priv->lock, flags);
1456
1454
1457
1455
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1458
1456
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
···
1469
1461
{
1470
1462
struct rswitch_device *rdev = netdev_priv(ndev);
1471
1463
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
1464
+
unsigned long flags;
1472
1465
1473
1466
netif_tx_stop_all_queues(ndev);
1474
1467
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
···
1485
1476
kfree(ts_info);
1486
1477
}
1487
1478
1479
+
spin_lock_irqsave(&rdev->priv->lock, flags);
1488
1480
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1489
1481
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1482
+
spin_unlock_irqrestore(&rdev->priv->lock, flags);
1490
1483
1491
1484
phy_stop(ndev->phydev);
1492
1485
napi_disable(&rdev->napi);
···
1898
1887
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1899
1888
if (!priv)
1900
1889
return -ENOMEM;
1890
+
spin_lock_init(&priv->lock);
1901
1891
1902
1892
attr = soc_device_match(rswitch_soc_no_speed_change);
1903
1893
if (attr)
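A rough sketch of the NAPI/IRQ handshake the rswitch hunks above adopt: the data interrupts are masked and unmasked only under a driver spinlock, and they are re-armed only when napi_complete_done() confirms polling has really finished. All names below are placeholders, not the rswitch driver's own.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
        spinlock_t lock;        /* serialises access to the IRQ enable register */
        struct napi_struct napi;
};

/* stand-in for writing the controller's data-interrupt enable register */
static void demo_enadis_data_irq(struct demo_priv *priv, bool enable)
{
}

static void demo_isr(struct demo_priv *priv)
{
        if (napi_schedule_prep(&priv->napi)) {
                spin_lock(&priv->lock);         /* already in hardirq context */
                demo_enadis_data_irq(priv, false);
                spin_unlock(&priv->lock);
                __napi_schedule(&priv->napi);
        }
}

static int demo_poll(struct napi_struct *napi, int budget)
{
        struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
        unsigned long flags;
        int done = 0;                           /* RX/TX work elided */

        if (napi_complete_done(napi, done)) {
                spin_lock_irqsave(&priv->lock, flags);
                demo_enadis_data_irq(priv, true);       /* re-arm only when done */
                spin_unlock_irqrestore(&priv->lock, flags);
        }

        return done;
}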
+2
drivers/net/ethernet/renesas/rswitch.h
+6
-4
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
···
2704
2704
2705
2705
/* We still have pending packets, let's call for a new scheduling */
2706
2706
if (tx_q->dirty_tx != tx_q->cur_tx)
2707
-
hrtimer_start(&tx_q->txtimer,
2708
-
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2709
-
HRTIMER_MODE_REL);
2707
+
stmmac_tx_timer_arm(priv, queue);
2710
2708
2711
2709
flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
2712
2710
tx_q->txq_stats.tx_packets += tx_packets;
···
2993
2995
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2994
2996
{
2995
2997
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2998
+
u32 tx_coal_timer = priv->tx_coal_timer[queue];
2999
+
3000
+
if (!tx_coal_timer)
3001
+
return;
2996
3002
2997
3003
hrtimer_start(&tx_q->txtimer,
2998
-
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
3004
+
STMMAC_COAL_TIMER(tx_coal_timer),
2999
3005
HRTIMER_MODE_REL);
3000
3006
}
3001
3007
+3
drivers/net/usb/r8152.c
+2
drivers/net/veth.c
+35
-19
drivers/nvme/host/core.c
···
2245
2245
else
2246
2246
ctrl->ctrl_config = NVME_CC_CSS_NVM;
2247
2247
2248
-
if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2249
-
u32 crto;
2250
-
2251
-
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2252
-
if (ret) {
2253
-
dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2254
-
ret);
2255
-
return ret;
2256
-
}
2257
-
2258
-
if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
2259
-
ctrl->ctrl_config |= NVME_CC_CRIME;
2260
-
timeout = NVME_CRTO_CRIMT(crto);
2261
-
} else {
2262
-
timeout = NVME_CRTO_CRWMT(crto);
2263
-
}
2264
-
} else {
2265
-
timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2266
-
}
2248
+
if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
2249
+
ctrl->ctrl_config |= NVME_CC_CRIME;
2267
2250
2268
2251
ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2269
2252
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
···
2259
2276
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
2260
2277
if (ret)
2261
2278
return ret;
2279
+
2280
+
/* CAP value may change after initial CC write */
2281
+
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2282
+
if (ret)
2283
+
return ret;
2284
+
2285
+
timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2286
+
if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2287
+
u32 crto, ready_timeout;
2288
+
2289
+
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2290
+
if (ret) {
2291
+
dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2292
+
ret);
2293
+
return ret;
2294
+
}
2295
+
2296
+
/*
2297
+
* CRTO should always be greater or equal to CAP.TO, but some
2298
+
* devices are known to get this wrong. Use the larger of the
2299
+
* two values.
2300
+
*/
2301
+
if (ctrl->ctrl_config & NVME_CC_CRIME)
2302
+
ready_timeout = NVME_CRTO_CRIMT(crto);
2303
+
else
2304
+
ready_timeout = NVME_CRTO_CRWMT(crto);
2305
+
2306
+
if (ready_timeout < timeout)
2307
+
dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2308
+
crto, ctrl->cap);
2309
+
else
2310
+
timeout = ready_timeout;
2311
+
}
2262
2312
2263
2313
ctrl->ctrl_config |= NVME_CC_ENABLE;
2264
2314
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+1
-1
drivers/nvme/host/fc.c
···
1924
1924
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1925
1925
struct request *rq = op->rq;
1926
1926
1927
-
if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
1927
+
if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
1928
1928
return NULL;
1929
1929
return blkcg_get_fc_appid(rq->bio);
1930
1930
}
+1
-1
drivers/nvme/host/hwmon.c
···
187
187
return 0;
188
188
}
189
189
190
-
static const struct hwmon_channel_info *nvme_hwmon_info[] = {
190
+
static const struct hwmon_channel_info *const nvme_hwmon_info[] = {
191
191
HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
192
192
HWMON_CHANNEL_INFO(temp,
193
193
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
-3
drivers/nvme/host/pci.c
+1
-1
drivers/nvme/target/tcp.c
+9
-9
drivers/parisc/ccio-dma.c
···
214
214
struct ioc {
215
215
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
216
216
u8 *res_map; /* resource map, bit == pdir entry */
217
-
u64 *pdir_base; /* physical base address */
217
+
__le64 *pdir_base; /* physical base address */
218
218
u32 pdir_size; /* bytes, function of IOV Space size */
219
219
u32 res_hint; /* next available IOVP -
220
220
circular search */
···
339
339
BUG_ON(pages_needed == 0);
340
340
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
341
341
342
-
DBG_RES("%s() size: %d pages_needed %d\n",
342
+
DBG_RES("%s() size: %zu pages_needed %d\n",
343
343
__func__, size, pages_needed);
344
344
345
345
/*
···
427
427
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
428
428
BUG_ON(pages_mapped > BITS_PER_LONG);
429
429
430
-
DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
430
+
DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
431
431
__func__, res_idx, pages_mapped);
432
432
433
433
#ifdef CCIO_COLLECT_STATS
···
543
543
* index are bits 12:19 of the value returned by LCI.
544
544
*/
545
545
static void
546
-
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
546
+
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
547
547
unsigned long hints)
548
548
{
549
549
register unsigned long pa;
···
719
719
unsigned long flags;
720
720
dma_addr_t iovp;
721
721
dma_addr_t offset;
722
-
u64 *pdir_start;
722
+
__le64 *pdir_start;
723
723
unsigned long hint = hint_lookup[(int)direction];
724
724
725
725
BUG_ON(!dev);
···
746
746
747
747
pdir_start = &(ioc->pdir_base[idx]);
748
748
749
-
DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
750
-
__func__, addr, (long)iovp | offset, size);
749
+
DBG_RUN("%s() %px -> %#lx size: %zu\n",
750
+
__func__, addr, (long)(iovp | offset), size);
751
751
752
752
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
753
753
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
···
805
805
return;
806
806
}
807
807
808
-
DBG_RUN("%s() iovp 0x%lx/%x\n",
808
+
DBG_RUN("%s() iovp %#lx/%zx\n",
809
809
__func__, (long)iova, size);
810
810
811
811
iova ^= offset; /* clear offset bits */
···
1283
1283
iova_space_size>>20,
1284
1284
iov_order + PAGE_SHIFT);
1285
1285
1286
-
ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
1286
+
ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
1287
1287
get_order(ioc->pdir_size));
1288
1288
if(NULL == ioc->pdir_base) {
1289
1289
panic("%s() could not allocate I/O Page Table\n", __func__);
+4
-4
drivers/parisc/iommu-helpers.h
···
14
14
static inline unsigned int
15
15
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
16
16
unsigned long hint,
17
-
void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
17
+
void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
18
18
unsigned long))
19
19
{
20
20
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
21
21
unsigned int n_mappings = 0;
22
22
unsigned long dma_offset = 0, dma_len = 0;
23
-
u64 *pdirp = NULL;
23
+
__le64 *pdirp = NULL;
24
24
25
25
/* Horrible hack. For efficiency's sake, dma_sg starts one
26
26
* entry below the true start (it is immediately incremented
···
31
31
unsigned long vaddr;
32
32
long size;
33
33
34
-
DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
35
-
(unsigned long)sg_dma_address(startsg), cnt,
34
+
DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
35
+
(unsigned long)sg_dma_address(startsg),
36
36
sg_virt(startsg), startsg->length
37
37
);
38
38
+2
-2
drivers/parisc/iosapic.c
···
202
202
203
203
static DEFINE_SPINLOCK(iosapic_lock);
204
204
205
-
static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
205
+
static inline void iosapic_eoi(__le32 __iomem *addr, __le32 data)
206
206
{
207
-
__raw_writel(data, addr);
207
+
__raw_writel((__force u32)data, addr);
208
208
}
209
209
210
210
/*
+2
-2
drivers/parisc/iosapic_private.h
···
118
118
struct vector_info {
119
119
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
120
120
struct irt_entry *irte; /* IRT entry */
121
-
u32 __iomem *eoi_addr; /* precalculate EOI reg address */
122
-
u32 eoi_data; /* IA64: ? PA: swapped txn_data */
121
+
__le32 __iomem *eoi_addr; /* precalculate EOI reg address */
122
+
__le32 eoi_data; /* IA64: ? PA: swapped txn_data */
123
123
int txn_irq; /* virtual IRQ number for processor */
124
124
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
125
125
u32 txn_data; /* CPU interrupt bit */
+16
-22
drivers/parisc/sba_iommu.c
···
46
46
#include <linux/module.h>
47
47
48
48
#include <asm/ropes.h>
49
-
#include <asm/mckinley.h> /* for proc_mckinley_root */
50
-
#include <asm/runway.h> /* for proc_runway_root */
51
49
#include <asm/page.h> /* for PAGE0 */
52
50
#include <asm/pdc.h> /* for PDC_MODEL_* */
53
51
#include <asm/pdcpat.h> /* for is_pdc_pat() */
···
120
122
#endif
121
123
122
124
static struct proc_dir_entry *proc_runway_root __ro_after_init;
123
-
struct proc_dir_entry *proc_mckinley_root __ro_after_init;
125
+
static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
124
126
125
127
/************************************
126
128
** SBA register read and write support
···
202
204
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
203
205
{
204
206
/* start printing from lowest pde in rval */
205
-
u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
207
+
__le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
206
208
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
207
209
uint rcnt;
208
210
···
569
571
*/
570
572
571
573
static void
572
-
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
574
+
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
573
575
unsigned long hint)
574
576
{
575
577
u64 pa; /* physical address */
···
613
615
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
614
616
{
615
617
u32 iovp = (u32) SBA_IOVP(ioc,iova);
616
-
u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
618
+
__le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
617
619
618
620
#ifdef ASSERT_PDIR_SANITY
619
621
/* Assert first pdir entry is set.
···
714
716
unsigned long flags;
715
717
dma_addr_t iovp;
716
718
dma_addr_t offset;
717
-
u64 *pdir_start;
719
+
__le64 *pdir_start;
718
720
int pide;
719
721
720
722
ioc = GET_IOC(dev);
···
1432
1434
1433
1435
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1434
1436
1435
-
DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1437
+
DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
1436
1438
__func__,
1437
1439
ioc->ioc_hpa,
1438
1440
(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
···
1469
1471
ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1470
1472
#endif
1471
1473
1472
-
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1474
+
DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
1473
1475
__func__, ioc->ibase, ioc->imask);
1474
1476
1475
1477
/*
···
1581
1583
1582
1584
if (!IS_PLUTO(sba_dev->dev)) {
1583
1585
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1584
-
DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1586
+
DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
1585
1587
__func__, sba_dev->sba_hpa, ioc_ctl);
1586
1588
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1587
1589
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
···
1666
1668
/* flush out the last writes */
1667
1669
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1668
1670
1669
-
DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1671
+
DBG_INIT(" ioc[%d] ROPE_CFG %#lx ROPE_DBG %lx\n",
1670
1672
i,
1671
-
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1672
-
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1673
+
(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1674
+
(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1673
1675
);
1674
-
DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1675
-
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1676
-
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1676
+
DBG_INIT(" STATUS_CONTROL %#lx FLUSH_CTRL %#lx\n",
1677
+
(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1678
+
(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1677
1679
);
1678
1680
1679
1681
if (IS_PLUTO(sba_dev->dev)) {
···
1737
1739
#ifdef ASSERT_PDIR_SANITY
1738
1740
/* Mark first bit busy - ie no IOVA 0 */
1739
1741
sba_dev->ioc[i].res_map[0] = 0x80;
1740
-
sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1742
+
sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
1741
1743
#endif
1742
1744
1743
1745
/* Third (and last) part of PIRANHA BUG */
···
1897
1899
int i;
1898
1900
char *version;
1899
1901
void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
1900
-
#ifdef CONFIG_PROC_FS
1901
-
struct proc_dir_entry *root;
1902
-
#endif
1902
+
struct proc_dir_entry *root __maybe_unused;
1903
1903
1904
1904
sba_dump_ranges(sba_addr);
1905
1905
···
1963
1967
1964
1968
hppa_dma_ops = &sba_ops;
1965
1969
1966
-
#ifdef CONFIG_PROC_FS
1967
1970
switch (dev->id.hversion) {
1968
1971
case PLUTO_MCKINLEY_PORT:
1969
1972
if (!proc_mckinley_root)
···
1980
1985
1981
1986
proc_create_single("sba_iommu", 0, root, sba_proc_info);
1982
1987
proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
1983
-
#endif
1984
1988
return 0;
1985
1989
}
1986
1990
+3
-2
drivers/platform/mellanox/Kconfig
···
60
60
tristate "Mellanox BlueField Firmware Boot Control driver"
61
61
depends on ARM64
62
62
depends on ACPI
63
+
depends on NET
63
64
help
64
65
The Mellanox BlueField firmware implements functionality to
65
66
request swapping the primary and alternate eMMC boot partition,
···
81
80
82
81
config NVSW_SN2201
83
82
tristate "Nvidia SN2201 platform driver support"
84
-
depends on HWMON
85
-
depends on I2C
83
+
depends on HWMON && I2C
84
+
depends on ACPI || COMPILE_TEST
86
85
select REGMAP_I2C
87
86
help
88
87
This driver provides support for the Nvidia SN2201 platform.
+14
-27
drivers/platform/mellanox/mlxbf-pmc.c
···
191
191
};
192
192
193
193
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
194
+
{ 0x0, "DISABLE" },
194
195
{ 0xa0, "TPIO_DATA_BEAT" },
195
196
{ 0xa1, "TDMA_DATA_BEAT" },
196
197
{ 0xa2, "MAP_DATA_BEAT" },
···
215
214
};
216
215
217
216
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
217
+
{ 0x0, "DISABLE" },
218
218
{ 0xa0, "TPIO_DATA_BEAT" },
219
219
{ 0xa1, "TDMA_DATA_BEAT" },
220
220
{ 0xa2, "MAP_DATA_BEAT" },
···
248
246
};
249
247
250
248
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
249
+
{ 0x0, "DISABLE" },
251
250
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
252
251
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
253
252
{ 0x114, "SERR_INJ" },
···
261
258
};
262
259
263
260
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
261
+
{ 0x0, "DISABLE" },
264
262
{ 0xc0, "RXREQ_MSS" },
265
263
{ 0xc1, "RXDAT_MSS" },
266
264
{ 0xc2, "TXRSP_MSS" },
···
269
265
};
270
266
271
267
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
268
+
{ 0x0, "DISABLE" },
272
269
{ 0x45, "HNF_REQUESTS" },
273
270
{ 0x46, "HNF_REJECTS" },
274
271
{ 0x47, "ALL_BUSY" },
···
328
323
};
329
324
330
325
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
326
+
{ 0x0, "DISABLE" },
331
327
{ 0x12, "CDN_REQ" },
332
328
{ 0x13, "DDN_REQ" },
333
329
{ 0x14, "NDN_REQ" },
···
898
892
uint64_t *result)
899
893
{
900
894
uint32_t perfcfg_offset, perfval_offset;
901
-
uint64_t perfmon_cfg, perfevt, perfctl;
895
+
uint64_t perfmon_cfg, perfevt;
902
896
903
897
if (cnt_num >= pmc->block[blk_num].counters)
904
898
return -EINVAL;
···
909
903
perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
910
904
perfval_offset = perfcfg_offset +
911
905
pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
912
-
913
-
/* Set counter in "read" mode */
914
-
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
915
-
MLXBF_PMC_PERFCTL);
916
-
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
917
-
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
918
-
919
-
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
920
-
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
921
-
return -EFAULT;
922
-
923
-
/* Check if the counter is enabled */
924
-
925
-
if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
926
-
MLXBF_PMC_READ_REG_64, &perfctl))
927
-
return -EFAULT;
928
-
929
-
if (!FIELD_GET(MLXBF_PMC_PERFCTL_EN0, perfctl))
930
-
return -EINVAL;
931
906
932
907
/* Set counter in "read" mode */
933
908
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
···
995
1008
} else
996
1009
return -EINVAL;
997
1010
998
-
return sprintf(buf, "0x%llx\n", value);
1011
+
return sysfs_emit(buf, "0x%llx\n", value);
999
1012
}
1000
1013
1001
1014
/* Store function for "counter" sysfs files */
···
1065
1078
1066
1079
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
1067
1080
if (err)
1068
-
return sprintf(buf, "No event being monitored\n");
1081
+
return sysfs_emit(buf, "No event being monitored\n");
1069
1082
1070
1083
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
1071
1084
if (!evt_name)
1072
1085
return -EINVAL;
1073
1086
1074
-
return sprintf(buf, "0x%llx: %s\n", evt_num, evt_name);
1087
+
return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
1075
1088
}
1076
1089
1077
1090
/* Store function for "event" sysfs files */
···
1126
1139
return -EINVAL;
1127
1140
1128
1141
for (i = 0, buf[0] = '\0'; i < size; ++i) {
1129
-
len += sprintf(e_info, "0x%x: %s\n", events[i].evt_num,
1130
-
events[i].evt_name);
1131
-
if (len > PAGE_SIZE)
1142
+
len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
1143
+
events[i].evt_num, events[i].evt_name);
1144
+
if (len >= PAGE_SIZE)
1132
1145
break;
1133
1146
strcat(buf, e_info);
1134
1147
ret = len;
···
1155
1168
1156
1169
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
1157
1170
1158
-
return sprintf(buf, "%d\n", value);
1171
+
return sysfs_emit(buf, "%d\n", value);
1159
1172
}
1160
1173
1161
1174
/* Store function for "enable" sysfs files - only for l3cache */
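A minimal sketch of the sysfs show-function convention the mlxbf-pmc hunks above switch to: sysfs_emit() bounds output to the PAGE_SIZE buffer sysfs hands in, where a raw sprintf() would not. The attribute and variable below are invented for the example.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static u64 demo_counter_value;          /* stand-in for a hardware counter */

static ssize_t demo_counter_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "0x%llx\n", demo_counter_value);
}
static DEVICE_ATTR_RO(demo_counter);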
+66
-24
drivers/platform/mellanox/mlxbf-tmfifo.c
···
59
59
* @vq: pointer to the virtio virtqueue
60
60
* @desc: current descriptor of the pending packet
61
61
* @desc_head: head descriptor of the pending packet
62
+
* @drop_desc: dummy desc for packet dropping
62
63
* @cur_len: processed length of the current descriptor
63
64
* @rem_len: remaining length of the pending packet
64
65
* @pkt_len: total length of the pending packet
···
76
75
struct virtqueue *vq;
77
76
struct vring_desc *desc;
78
77
struct vring_desc *desc_head;
78
+
struct vring_desc drop_desc;
79
79
int cur_len;
80
80
int rem_len;
81
81
u32 pkt_len;
···
87
85
int vdev_id;
88
86
struct mlxbf_tmfifo *fifo;
89
87
};
88
+
89
+
/* Check whether vring is in drop mode. */
90
+
#define IS_VRING_DROP(_r) ({ \
91
+
typeof(_r) (r) = (_r); \
92
+
(r->desc_head == &r->drop_desc ? true : false); })
93
+
94
+
/* A stub length to drop maximum length packet. */
95
+
#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
90
96
91
97
/* Interrupt types. */
92
98
enum {
···
224
214
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
225
215
226
216
/* Maximum L2 header length. */
227
-
#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
217
+
#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
228
218
229
219
/* Supported virtio-net features. */
230
220
#define MLXBF_TMFIFO_NET_FEATURES \
···
272
262
vring->align = SMP_CACHE_BYTES;
273
263
vring->index = i;
274
264
vring->vdev_id = tm_vdev->vdev.id.device;
265
+
vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
275
266
dev = &tm_vdev->vdev.dev;
276
267
277
268
size = vring_size(vring->num, vring->align);
···
378
367
return len;
379
368
}
380
369
381
-
static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
370
+
static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
382
371
{
383
372
struct vring_desc *desc_head;
384
373
u32 len = 0;
···
607
596
608
597
if (vring->cur_len + sizeof(u64) <= len) {
609
598
/* The whole word. */
610
-
if (is_rx)
611
-
memcpy(addr + vring->cur_len, &data, sizeof(u64));
612
-
else
613
-
memcpy(&data, addr + vring->cur_len, sizeof(u64));
599
+
if (!IS_VRING_DROP(vring)) {
600
+
if (is_rx)
601
+
memcpy(addr + vring->cur_len, &data,
602
+
sizeof(u64));
603
+
else
604
+
memcpy(&data, addr + vring->cur_len,
605
+
sizeof(u64));
606
+
}
614
607
vring->cur_len += sizeof(u64);
615
608
} else {
616
609
/* Leftover bytes. */
617
-
if (is_rx)
618
-
memcpy(addr + vring->cur_len, &data,
619
-
len - vring->cur_len);
620
-
else
621
-
memcpy(&data, addr + vring->cur_len,
622
-
len - vring->cur_len);
610
+
if (!IS_VRING_DROP(vring)) {
611
+
if (is_rx)
612
+
memcpy(addr + vring->cur_len, &data,
613
+
len - vring->cur_len);
614
+
else
615
+
memcpy(&data, addr + vring->cur_len,
616
+
len - vring->cur_len);
617
+
}
623
618
vring->cur_len = len;
624
619
}
625
620
···
642
625
* flag is set.
643
626
*/
644
627
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
645
-
struct vring_desc *desc,
628
+
struct vring_desc **desc,
646
629
bool is_rx, bool *vring_change)
647
630
{
648
631
struct mlxbf_tmfifo *fifo = vring->fifo;
649
632
struct virtio_net_config *config;
650
633
struct mlxbf_tmfifo_msg_hdr hdr;
651
634
int vdev_id, hdr_len;
635
+
bool drop_rx = false;
652
636
653
637
/* Read/Write packet header. */
654
638
if (is_rx) {
···
669
651
if (ntohs(hdr.len) >
670
652
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
671
653
config->mtu) +
672
-
MLXBF_TMFIFO_NET_L2_OVERHEAD)
673
-
return;
654
+
MLXBF_TMFIFO_NET_L2_OVERHEAD)
655
+
drop_rx = true;
674
656
} else {
675
657
vdev_id = VIRTIO_ID_CONSOLE;
676
658
hdr_len = 0;
···
685
667
686
668
if (!tm_dev2)
687
669
return;
688
-
vring->desc = desc;
670
+
vring->desc = *desc;
689
671
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
690
672
*vring_change = true;
691
673
}
674
+
675
+
if (drop_rx && !IS_VRING_DROP(vring)) {
676
+
if (vring->desc_head)
677
+
mlxbf_tmfifo_release_pkt(vring);
678
+
*desc = &vring->drop_desc;
679
+
vring->desc_head = *desc;
680
+
vring->desc = *desc;
681
+
}
682
+
692
683
vring->pkt_len = ntohs(hdr.len) + hdr_len;
693
684
} else {
694
685
/* Network virtio has an extra header. */
695
686
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
696
687
sizeof(struct virtio_net_hdr) : 0;
697
-
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
688
+
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
698
689
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
699
690
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
700
691
hdr.len = htons(vring->pkt_len - hdr_len);
···
736
709
/* Get the descriptor of the next packet. */
737
710
if (!vring->desc) {
738
711
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
739
-
if (!desc)
740
-
return false;
712
+
if (!desc) {
713
+
/* Drop next Rx packet to avoid stuck. */
714
+
if (is_rx) {
715
+
desc = &vring->drop_desc;
716
+
vring->desc_head = desc;
717
+
vring->desc = desc;
718
+
} else {
719
+
return false;
720
+
}
721
+
}
741
722
} else {
742
723
desc = vring->desc;
743
724
}
744
725
745
726
/* Beginning of a packet. Start to Rx/Tx packet header. */
746
727
if (vring->pkt_len == 0) {
747
-
mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
728
+
mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
748
729
(*avail)--;
749
730
750
731
/* Return if new packet is for another ring. */
···
778
743
vring->rem_len -= len;
779
744
780
745
/* Get the next desc on the chain. */
781
-
if (vring->rem_len > 0 &&
746
+
if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
782
747
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
783
748
idx = virtio16_to_cpu(vdev, desc->next);
784
749
desc = &vr->desc[idx];
785
750
goto mlxbf_tmfifo_desc_done;
786
751
}
787
752
788
-
/* Done and release the pending packet. */
789
-
mlxbf_tmfifo_release_pending_pkt(vring);
753
+
/* Done and release the packet. */
790
754
desc = NULL;
791
755
fifo->vring[is_rx] = NULL;
756
+
if (!IS_VRING_DROP(vring)) {
757
+
mlxbf_tmfifo_release_pkt(vring);
758
+
} else {
759
+
vring->pkt_len = 0;
760
+
vring->desc_head = NULL;
761
+
vring->desc = NULL;
762
+
return false;
763
+
}
792
764
793
765
/*
794
766
* Make sure the load/store are in order before
···
975
933
976
934
/* Release the pending packet. */
977
935
if (vring->desc)
978
-
mlxbf_tmfifo_release_pending_pkt(vring);
936
+
mlxbf_tmfifo_release_pkt(vring);
979
937
vq = vring->vq;
980
938
if (vq) {
981
939
vring->vq = NULL;
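The mlxbf-tmfifo changes above add a per-vring dummy drop_desc and an IS_VRING_DROP() helper so that oversized or unserviceable Rx packets are drained and discarded instead of stalling the FIFO. A minimal sketch of that "drop descriptor" idea with hypothetical demo_* types (not the driver's code):

/* When no real buffer is available, point the head at a local dummy
 * descriptor so incoming data can still be consumed and dropped.
 */
struct demo_desc {
        unsigned int len;
};

struct demo_ring {
        struct demo_desc *desc_head;    /* head of the packet in flight */
        struct demo_desc drop_desc;     /* dummy target for dropped packets */
};

static bool demo_ring_is_dropping(const struct demo_ring *r)
{
        return r->desc_head == &r->drop_desc;
}

static void demo_ring_start_drop(struct demo_ring *r, unsigned int max_len)
{
        r->drop_desc.len = max_len;     /* large enough for any packet */
        r->desc_head = &r->drop_desc;
}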
+9
drivers/platform/x86/asus-nb-wmi.c
···
480
480
},
481
481
{
482
482
.callback = dmi_matched,
483
+
.ident = "ASUS ROG FLOW X16",
484
+
.matches = {
485
+
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
486
+
DMI_MATCH(DMI_PRODUCT_NAME, "GV601V"),
487
+
},
488
+
.driver_data = &quirk_asus_tablet_mode,
489
+
},
490
+
{
491
+
.callback = dmi_matched,
483
492
.ident = "ASUS VivoBook E410MA",
484
493
.matches = {
485
494
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
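The asus-nb-wmi hunk above adds a DMI quirk entry keyed on the system vendor and product name strings. The general shape of such a table entry is sketched below with hypothetical names; the example_* quirk structure and callback stand in for whatever the driver defines:

#include <linux/dmi.h>

struct example_quirk {
        bool tablet_mode;
};

static struct example_quirk quirk_example_tablet_mode = {
        .tablet_mode = true,
};

static int example_dmi_matched(const struct dmi_system_id *dmi)
{
        /* A real driver would stash dmi->driver_data for later use. */
        return 1;
}

static const struct dmi_system_id example_quirk_table[] = {
        {
                .callback = example_dmi_matched,
                .ident = "Example convertible laptop",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Model123"),
                },
                .driver_data = &quirk_example_tablet_mode,
        },
        { }     /* terminator */
};

A driver would typically walk such a table once at init time with dmi_check_system(example_quirk_table).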
+7
-7
drivers/scsi/lpfc/lpfc_debugfs.c
···
6073
6073
phba->hba_debugfs_root,
6074
6074
phba,
6075
6075
&lpfc_debugfs_op_multixripools);
6076
-
if (!phba->debug_multixri_pools) {
6076
+
if (IS_ERR(phba->debug_multixri_pools)) {
6077
6077
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6078
6078
"0527 Cannot create debugfs multixripools\n");
6079
6079
goto debug_failed;
···
6085
6085
debugfs_create_file(name, S_IFREG | 0644,
6086
6086
phba->hba_debugfs_root,
6087
6087
phba, &lpfc_cgn_buffer_op);
6088
-
if (!phba->debug_cgn_buffer) {
6088
+
if (IS_ERR(phba->debug_cgn_buffer)) {
6089
6089
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6090
6090
"6527 Cannot create debugfs "
6091
6091
"cgn_buffer\n");
···
6098
6098
debugfs_create_file(name, S_IFREG | 0644,
6099
6099
phba->hba_debugfs_root,
6100
6100
phba, &lpfc_rx_monitor_op);
6101
-
if (!phba->debug_rx_monitor) {
6101
+
if (IS_ERR(phba->debug_rx_monitor)) {
6102
6102
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6103
6103
"6528 Cannot create debugfs "
6104
6104
"rx_monitor\n");
···
6111
6111
debugfs_create_file(name, 0644,
6112
6112
phba->hba_debugfs_root,
6113
6113
phba, &lpfc_debugfs_ras_log);
6114
-
if (!phba->debug_ras_log) {
6114
+
if (IS_ERR(phba->debug_ras_log)) {
6115
6115
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6116
6116
"6148 Cannot create debugfs"
6117
6117
" ras_log\n");
···
6132
6132
debugfs_create_file(name, S_IFREG | 0644,
6133
6133
phba->hba_debugfs_root,
6134
6134
phba, &lpfc_debugfs_op_lockstat);
6135
-
if (!phba->debug_lockstat) {
6135
+
if (IS_ERR(phba->debug_lockstat)) {
6136
6136
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6137
6137
"4610 Can't create debugfs lockstat\n");
6138
6138
goto debug_failed;
···
6358
6358
debugfs_create_file(name, 0644,
6359
6359
vport->vport_debugfs_root,
6360
6360
vport, &lpfc_debugfs_op_scsistat);
6361
-
if (!vport->debug_scsistat) {
6361
+
if (IS_ERR(vport->debug_scsistat)) {
6362
6362
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6363
6363
"4611 Cannot create debugfs scsistat\n");
6364
6364
goto debug_failed;
···
6369
6369
debugfs_create_file(name, 0644,
6370
6370
vport->vport_debugfs_root,
6371
6371
vport, &lpfc_debugfs_op_ioktime);
6372
-
if (!vport->debug_ioktime) {
6372
+
if (IS_ERR(vport->debug_ioktime)) {
6373
6373
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
6374
6374
"0815 Cannot create debugfs ioktime\n");
6375
6375
goto debug_failed;
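The lpfc_debugfs hunks above change the failure checks after debugfs_create_file() from !ptr to IS_ERR(ptr): the debugfs creation helpers report failure with an ERR_PTR-encoded pointer (for example when debugfs is disabled), not with NULL. A small sketch of that check with hypothetical names:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical helper: create one node and treat debugfs as best-effort. */
static struct dentry *example_debugfs_node(struct dentry *parent, void *priv,
                                           const struct file_operations *fops)
{
        struct dentry *d;

        d = debugfs_create_file("example_node", 0644, parent, priv, fops);
        if (IS_ERR(d)) {        /* not NULL: debugfs returns ERR_PTR on failure */
                pr_warn("cannot create debugfs node\n");
                return NULL;
        }
        return d;
}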
+3
-2
drivers/scsi/lpfc/lpfc_hbadisc.c
···
199
199
/* Only 1 thread can drop the initial node reference. If
200
200
* another thread has set NLP_DROPPED, this thread is done.
201
201
*/
202
-
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
202
+
if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
203
+
!(ndlp->nlp_flag & NLP_DROPPED)) {
203
204
ndlp->nlp_flag |= NLP_DROPPED;
204
205
spin_unlock_irqrestore(&ndlp->lock, iflags);
205
206
lpfc_nlp_put(ndlp);
206
-
spin_lock_irqsave(&ndlp->lock, iflags);
207
+
return;
207
208
}
208
209
209
210
spin_unlock_irqrestore(&ndlp->lock, iflags);
+17
-7
drivers/scsi/lpfc/lpfc_nvme.c
···
228
228
spin_unlock_irq(&ndlp->lock);
229
229
230
230
/* On a devloss timeout event, one more put is executed provided the
231
-
* NVME and SCSI rport unregister requests are complete. If the vport
232
-
* is unloading, this extra put is executed by lpfc_drop_node.
231
+
* NVME and SCSI rport unregister requests are complete.
233
232
*/
234
233
if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
235
234
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
···
2566
2567
* nvme_transport perspective. Loss of an rport just means IO cannot
2567
2568
* be sent and recovery is completely up to the initator.
2568
2569
* For now, the driver just unbinds the DID and port_role so that
2569
-
* no further IO can be issued. Changes are planned for later.
2570
-
*
2571
-
* Notes - the ndlp reference count is not decremented here since
2572
-
* since there is no nvme_transport api for devloss. Node ref count
2573
-
* is only adjusted in driver unload.
2570
+
* no further IO can be issued.
2574
2571
*/
2575
2572
void
2576
2573
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
···
2641
2646
"6167 NVME unregister failed %d "
2642
2647
"port_state x%x\n",
2643
2648
ret, remoteport->port_state);
2649
+
2650
+
if (vport->load_flag & FC_UNLOADING) {
2651
+
/* Only 1 thread can drop the initial node
2652
+
* reference. Check if another thread has set
2653
+
* NLP_DROPPED.
2654
+
*/
2655
+
spin_lock_irq(&ndlp->lock);
2656
+
if (!(ndlp->nlp_flag & NLP_DROPPED)) {
2657
+
ndlp->nlp_flag |= NLP_DROPPED;
2658
+
spin_unlock_irq(&ndlp->lock);
2659
+
lpfc_nlp_put(ndlp);
2660
+
return;
2661
+
}
2662
+
spin_unlock_irq(&ndlp->lock);
2663
+
}
2644
2664
}
2645
2665
}
2646
2666
return;
+1
-1
drivers/scsi/megaraid/megaraid_sas.h
···
2332
2332
u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
2333
2333
bool use_seqnum_jbod_fp; /* Added for PD sequence */
2334
2334
bool smp_affinity_enable;
2335
-
spinlock_t crashdump_lock;
2335
+
struct mutex crashdump_lock;
2336
2336
2337
2337
struct megasas_register_set __iomem *reg_set;
2338
2338
u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+9
-12
drivers/scsi/megaraid/megaraid_sas_base.c
···
3271
3271
struct megasas_instance *instance =
3272
3272
(struct megasas_instance *) shost->hostdata;
3273
3273
int val = 0;
3274
-
unsigned long flags;
3275
3274
3276
3275
if (kstrtoint(buf, 0, &val) != 0)
3277
3276
return -EINVAL;
3278
3277
3279
-
spin_lock_irqsave(&instance->crashdump_lock, flags);
3278
+
mutex_lock(&instance->crashdump_lock);
3280
3279
instance->fw_crash_buffer_offset = val;
3281
-
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3280
+
mutex_unlock(&instance->crashdump_lock);
3282
3281
return strlen(buf);
3283
3282
}
3284
3283
···
3292
3293
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3293
3294
unsigned long chunk_left_bytes;
3294
3295
unsigned long src_addr;
3295
-
unsigned long flags;
3296
3296
u32 buff_offset;
3297
3297
3298
-
spin_lock_irqsave(&instance->crashdump_lock, flags);
3298
+
mutex_lock(&instance->crashdump_lock);
3299
3299
buff_offset = instance->fw_crash_buffer_offset;
3300
3300
if (!instance->crash_dump_buf ||
3301
3301
!((instance->fw_crash_state == AVAILABLE) ||
3302
3302
(instance->fw_crash_state == COPYING))) {
3303
3303
dev_err(&instance->pdev->dev,
3304
3304
"Firmware crash dump is not available\n");
3305
-
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3305
+
mutex_unlock(&instance->crashdump_lock);
3306
3306
return -EINVAL;
3307
3307
}
3308
3308
3309
3309
if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3310
3310
dev_err(&instance->pdev->dev,
3311
3311
"Firmware crash dump offset is out of range\n");
3312
-
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3312
+
mutex_unlock(&instance->crashdump_lock);
3313
3313
return 0;
3314
3314
}
3315
3315
···
3320
3322
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3321
3323
(buff_offset % dmachunk);
3322
3324
memcpy(buf, (void *)src_addr, size);
3323
-
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3325
+
mutex_unlock(&instance->crashdump_lock);
3324
3326
3325
3327
return size;
3326
3328
}
···
3345
3347
struct megasas_instance *instance =
3346
3348
(struct megasas_instance *) shost->hostdata;
3347
3349
int val = 0;
3348
-
unsigned long flags;
3349
3350
3350
3351
if (kstrtoint(buf, 0, &val) != 0)
3351
3352
return -EINVAL;
···
3358
3361
instance->fw_crash_state = val;
3359
3362
3360
3363
if ((val == COPIED) || (val == COPY_ERROR)) {
3361
-
spin_lock_irqsave(&instance->crashdump_lock, flags);
3364
+
mutex_lock(&instance->crashdump_lock);
3362
3365
megasas_free_host_crash_buffer(instance);
3363
-
spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3366
+
mutex_unlock(&instance->crashdump_lock);
3364
3367
if (val == COPY_ERROR)
3365
3368
dev_info(&instance->pdev->dev, "application failed to "
3366
3369
"copy Firmware crash dump\n");
···
7419
7422
init_waitqueue_head(&instance->int_cmd_wait_q);
7420
7423
init_waitqueue_head(&instance->abort_cmd_wait_q);
7421
7424
7422
-
spin_lock_init(&instance->crashdump_lock);
7425
+
mutex_init(&instance->crashdump_lock);
7423
7426
spin_lock_init(&instance->mfi_pool_lock);
7424
7427
spin_lock_init(&instance->hba_lock);
7425
7428
spin_lock_init(&instance->stream_lock);
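The megaraid_sas hunks above convert crashdump_lock from a spinlock into a mutex, which suits the sysfs read/write paths that hold it across operations allowed to sleep. A minimal sketch of the replacement pattern, using hypothetical example_* names:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_instance {
        struct mutex crashdump_lock;    /* serializes crash-dump sysfs access */
        u32 fw_crash_buffer_offset;
};

static void example_instance_init(struct example_instance *inst)
{
        mutex_init(&inst->crashdump_lock);
}

static void example_set_offset(struct example_instance *inst, u32 offset)
{
        /* mutex_lock() may sleep, which is fine in sysfs store() context. */
        mutex_lock(&inst->crashdump_lock);
        inst->fw_crash_buffer_offset = offset;
        mutex_unlock(&inst->crashdump_lock);
}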
+1
-1
drivers/scsi/pm8001/pm8001_hwi.c
···
4180
4180
payload.sas_identify.dev_type = SAS_END_DEVICE;
4181
4181
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4182
4182
memcpy(payload.sas_identify.sas_addr,
4183
-
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
4183
+
&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
4184
4184
payload.sas_identify.phy_id = phy_id;
4185
4185
4186
4186
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+17
-34
drivers/scsi/pm8001/pm8001_init.c
···
273
273
return ret;
274
274
}
275
275
276
-
static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
277
276
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
278
277
279
278
/**
···
293
294
pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
294
295
pm8001_ha->chip->n_phy);
295
296
296
-
/* Setup Interrupt */
297
-
rc = pm8001_setup_irq(pm8001_ha);
298
-
if (rc) {
299
-
pm8001_dbg(pm8001_ha, FAIL,
300
-
"pm8001_setup_irq failed [ret: %d]\n", rc);
301
-
goto err_out;
302
-
}
303
297
/* Request Interrupt */
304
298
rc = pm8001_request_irq(pm8001_ha);
305
299
if (rc)
···
1023
1031
}
1024
1032
#endif
1025
1033
1026
-
static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
1027
-
{
1028
-
struct pci_dev *pdev;
1029
-
1030
-
pdev = pm8001_ha->pdev;
1031
-
1032
-
#ifdef PM8001_USE_MSIX
1033
-
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
1034
-
return pm8001_setup_msix(pm8001_ha);
1035
-
pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
1036
-
#endif
1037
-
return 0;
1038
-
}
1039
-
1040
1034
/**
1041
1035
* pm8001_request_irq - register interrupt
1042
1036
* @pm8001_ha: our ha struct.
1043
1037
*/
1044
1038
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
1045
1039
{
1046
-
struct pci_dev *pdev;
1040
+
struct pci_dev *pdev = pm8001_ha->pdev;
1041
+
#ifdef PM8001_USE_MSIX
1047
1042
int rc;
1048
1043
1049
-
pdev = pm8001_ha->pdev;
1044
+
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
1045
+
rc = pm8001_setup_msix(pm8001_ha);
1046
+
if (rc) {
1047
+
pm8001_dbg(pm8001_ha, FAIL,
1048
+
"pm8001_setup_irq failed [ret: %d]\n", rc);
1049
+
return rc;
1050
+
}
1050
1051
1051
-
#ifdef PM8001_USE_MSIX
1052
-
if (pdev->msix_cap && pci_msi_enabled())
1053
-
return pm8001_request_msix(pm8001_ha);
1054
-
else {
1055
-
pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
1056
-
goto intx;
1052
+
if (pdev->msix_cap && pci_msi_enabled())
1053
+
return pm8001_request_msix(pm8001_ha);
1057
1054
}
1055
+
1056
+
pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
1058
1057
#endif
1059
1058
1060
-
intx:
1061
1059
/* initialize the INT-X interrupt */
1062
1060
pm8001_ha->irq_vector[0].irq_id = 0;
1063
1061
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
1064
-
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
1065
-
pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
1066
-
return rc;
1062
+
1063
+
return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
1064
+
IRQF_SHARED, pm8001_ha->name,
1065
+
SHOST_TO_SAS_HA(pm8001_ha->shost));
1067
1066
}
1068
1067
1069
1068
/**
+3
-1
drivers/scsi/pm8001/pm80xx_hwi.c
···
3671
3671
(struct set_ctrl_cfg_resp *)(piomb + 4);
3672
3672
u32 status = le32_to_cpu(pPayload->status);
3673
3673
u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
3674
+
u32 tag = le32_to_cpu(pPayload->tag);
3674
3675
3675
3676
pm8001_dbg(pm8001_ha, MSG,
3676
3677
"SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
3677
3678
status, err_qlfr_pgcd);
3679
+
pm8001_tag_free(pm8001_ha, tag);
3678
3680
3679
3681
return 0;
3680
3682
}
···
4673
4671
payload.sas_identify.dev_type = SAS_END_DEVICE;
4674
4672
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4675
4673
memcpy(payload.sas_identify.sas_addr,
4676
-
&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
4674
+
&pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
4677
4675
payload.sas_identify.phy_id = phy_id;
4678
4676
4679
4677
return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+2
-2
drivers/scsi/ppa.c
···
307
307
case PPA_EPP_8:
308
308
epp_reset(ppb);
309
309
w_ctr(ppb, 0x4);
310
-
if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
310
+
if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
311
311
outsl(ppb + 4, buffer, len >> 2);
312
-
else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
312
+
else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
313
313
outsw(ppb + 4, buffer, len >> 1);
314
314
else
315
315
outsb(ppb + 4, buffer, len);
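The ppa.c fix above swaps the two alignment masks: a 32-bit outsl() transfer needs the buffer address and length 4-byte aligned (low two bits clear, mask 0x03), while a 16-bit outsw() transfer only needs 2-byte alignment (mask 0x01). The corrected selection logic looks like this sketch (port and buffer handling are illustrative):

#include <linux/io.h>

static void example_epp_write(unsigned int port, const void *buf, size_t len)
{
        if (!(((unsigned long)buf | len) & 0x03))       /* 4-byte aligned */
                outsl(port, buf, len >> 2);
        else if (!(((unsigned long)buf | len) & 0x01))  /* 2-byte aligned */
                outsw(port, buf, len >> 1);
        else
                outsb(port, buf, len);
}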
+8
-2
drivers/scsi/qedf/qedf_io.c
···
1904
1904
goto drop_rdata_kref;
1905
1905
}
1906
1906
1907
+
spin_lock_irqsave(&fcport->rport_lock, flags);
1907
1908
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1908
1909
test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1909
1910
test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
···
1912
1911
"io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1913
1912
io_req->xid, io_req->sc_cmd);
1914
1913
rc = 1;
1914
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
1915
1915
goto drop_rdata_kref;
1916
1916
}
1917
+
1918
+
/* Set the command type to abort */
1919
+
io_req->cmd_type = QEDF_ABTS;
1920
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
1917
1921
1918
1922
kref_get(&io_req->refcount);
1919
1923
···
1926
1920
qedf->control_requests++;
1927
1921
qedf->packet_aborts++;
1928
1922
1929
-
/* Set the command type to abort */
1930
-
io_req->cmd_type = QEDF_ABTS;
1931
1923
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1932
1924
1933
1925
set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
···
2214
2210
refcount, fcport, fcport->rdata->ids.port_id);
2215
2211
2216
2212
/* Cleanup cmds re-use the same TID as the original I/O */
2213
+
spin_lock_irqsave(&fcport->rport_lock, flags);
2217
2214
io_req->cmd_type = QEDF_CLEANUP;
2215
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
2218
2216
io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2219
2217
2220
2218
init_completion(&io_req->cleanup_done);
+6
-1
drivers/scsi/qedf/qedf_main.c
···
2805
2805
struct qedf_ioreq *io_req;
2806
2806
struct qedf_rport *fcport;
2807
2807
u32 comp_type;
2808
+
u8 io_comp_type;
2809
+
unsigned long flags;
2808
2810
2809
2811
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2810
2812
FCOE_CQE_CQE_TYPE_MASK;
···
2840
2838
return;
2841
2839
}
2842
2840
2841
+
spin_lock_irqsave(&fcport->rport_lock, flags);
2842
+
io_comp_type = io_req->cmd_type;
2843
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
2843
2844
2844
2845
switch (comp_type) {
2845
2846
case FCOE_GOOD_COMPLETION_CQE_TYPE:
2846
2847
atomic_inc(&fcport->free_sqes);
2847
-
switch (io_req->cmd_type) {
2848
+
switch (io_comp_type) {
2848
2849
case QEDF_SCSI_CMD:
2849
2850
qedf_scsi_completion(qedf, cqe, io_req);
2850
2851
break;
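The qedf hunks above take fcport->rport_lock both where io_req->cmd_type is changed and where the completion path reads it, copying the value into a local io_comp_type before acting on it. That snapshot-under-lock pattern, with hypothetical example_* types, is roughly:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_port {
        spinlock_t lock;        /* protects per-request command type */
};

struct example_req {
        u8 cmd_type;
};

static u8 example_read_cmd_type(struct example_port *port,
                                struct example_req *req)
{
        unsigned long flags;
        u8 type;

        spin_lock_irqsave(&port->lock, flags);
        type = req->cmd_type;   /* stable snapshot for the rest of the path */
        spin_unlock_irqrestore(&port->lock, flags);

        return type;
}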
+3
-3
drivers/scsi/qla2xxx/qla_dfs.c
···
116
116
117
117
sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
118
118
fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
119
-
if (!fp->dfs_rport_dir)
119
+
if (IS_ERR(fp->dfs_rport_dir))
120
120
return;
121
121
if (NVME_TARGET(vha->hw, fp))
122
122
debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
···
708
708
if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
709
709
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
710
710
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
711
-
if (!ha->tgt.dfs_naqp) {
711
+
if (IS_ERR(ha->tgt.dfs_naqp)) {
712
712
ql_log(ql_log_warn, vha, 0xd011,
713
713
"Unable to create debugFS naqp node.\n");
714
714
goto out;
715
715
}
716
716
}
717
717
vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
718
-
if (!vha->dfs_rport_root) {
718
+
if (IS_ERR(vha->dfs_rport_root)) {
719
719
ql_log(ql_log_warn, vha, 0xd012,
720
720
"Unable to create debugFS rports node.\n");
721
721
goto out;
+1
-1
drivers/scsi/qla2xxx/qla_inline.h
+3
-3
drivers/scsi/qla2xxx/qla_isr.c
···
3965
3965
if (!ha->flags.fw_started)
3966
3966
return;
3967
3967
3968
-
if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3968
+
if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
3969
3969
rsp->qpair->rcv_intr = 1;
3970
3970
3971
3971
if (!rsp->qpair->cpu_mapped)
···
4468
4468
}
4469
4469
ha = qpair->hw;
4470
4470
4471
-
queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4471
+
queue_work(ha->wq, &qpair->q_work);
4472
4472
4473
4473
return IRQ_HANDLED;
4474
4474
}
···
4494
4494
wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4495
4495
spin_unlock_irqrestore(&ha->hardware_lock, flags);
4496
4496
4497
-
queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4497
+
queue_work(ha->wq, &qpair->q_work);
4498
4498
4499
4499
return IRQ_HANDLED;
4500
4500
}
+5
-5
drivers/scsi/qla2xxx/qla_nvme.c
···
399
399
nvme->u.nvme.dl = 0;
400
400
nvme->u.nvme.timeout_sec = 0;
401
401
nvme->u.nvme.cmd_dma = fd_resp->rspdma;
402
-
nvme->u.nvme.cmd_len = fd_resp->rsplen;
402
+
nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
403
403
nvme->u.nvme.rsp_len = 0;
404
404
nvme->u.nvme.rsp_dma = 0;
405
405
nvme->u.nvme.exchange_address = uctx->exchange_address;
406
406
nvme->u.nvme.nport_handle = uctx->nport_handle;
407
407
nvme->u.nvme.ox_id = uctx->ox_id;
408
408
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
409
-
le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
409
+
fd_resp->rsplen, DMA_TO_DEVICE);
410
410
411
411
ql_dbg(ql_dbg_unsol, vha, 0x2122,
412
412
"Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
···
504
504
nvme->u.nvme.desc = fd;
505
505
nvme->u.nvme.dir = 0;
506
506
nvme->u.nvme.dl = 0;
507
-
nvme->u.nvme.cmd_len = fd->rqstlen;
508
-
nvme->u.nvme.rsp_len = fd->rsplen;
507
+
nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
508
+
nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
509
509
nvme->u.nvme.rsp_dma = fd->rspdma;
510
510
nvme->u.nvme.timeout_sec = fd->timeout;
511
511
nvme->u.nvme.cmd_dma = fd->rqstdma;
512
512
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
513
-
le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
513
+
fd->rqstlen, DMA_TO_DEVICE);
514
514
515
515
rval = qla2x00_start_sp(sp);
516
516
if (rval != QLA_SUCCESS) {
+1
-2
drivers/scsi/qla2xxx/qla_target.c
···
4425
4425
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4426
4426
} else if (ha->msix_count) {
4427
4427
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4428
-
queue_work_on(smp_processor_id(), qla_tgt_wq,
4429
-
&cmd->work);
4428
+
queue_work(qla_tgt_wq, &cmd->work);
4430
4429
else
4431
4430
queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4432
4431
&cmd->work);
+2
-2
drivers/scsi/qla2xxx/tcm_qla2xxx.c
···
310
310
cmd->trc_flags |= TRC_CMD_DONE;
311
311
312
312
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
313
-
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
313
+
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
314
314
}
315
315
316
316
/*
···
547
547
cmd->trc_flags |= TRC_DATA_IN;
548
548
cmd->cmd_in_wq = 1;
549
549
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
550
-
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
550
+
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
551
551
}
552
552
553
553
static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
+12
-12
drivers/target/target_core_configfs.c
···
1392
1392
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1393
1393
unsigned char buf[INQUIRY_VENDOR_LEN + 2];
1394
1394
char *stripped = NULL;
1395
-
size_t len;
1395
+
ssize_t len;
1396
1396
ssize_t ret;
1397
1397
1398
-
len = strlcpy(buf, page, sizeof(buf));
1399
-
if (len < sizeof(buf)) {
1398
+
len = strscpy(buf, page, sizeof(buf));
1399
+
if (len > 0) {
1400
1400
/* Strip any newline added from userspace. */
1401
1401
stripped = strstrip(buf);
1402
1402
len = strlen(stripped);
1403
1403
}
1404
-
if (len > INQUIRY_VENDOR_LEN) {
1404
+
if (len < 0 || len > INQUIRY_VENDOR_LEN) {
1405
1405
pr_err("Emulated T10 Vendor Identification exceeds"
1406
1406
" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
1407
1407
"\n");
···
1448
1448
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1449
1449
unsigned char buf[INQUIRY_MODEL_LEN + 2];
1450
1450
char *stripped = NULL;
1451
-
size_t len;
1451
+
ssize_t len;
1452
1452
ssize_t ret;
1453
1453
1454
-
len = strlcpy(buf, page, sizeof(buf));
1455
-
if (len < sizeof(buf)) {
1454
+
len = strscpy(buf, page, sizeof(buf));
1455
+
if (len > 0) {
1456
1456
/* Strip any newline added from userspace. */
1457
1457
stripped = strstrip(buf);
1458
1458
len = strlen(stripped);
1459
1459
}
1460
-
if (len > INQUIRY_MODEL_LEN) {
1460
+
if (len < 0 || len > INQUIRY_MODEL_LEN) {
1461
1461
pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
1462
1462
__stringify(INQUIRY_MODEL_LEN)
1463
1463
"\n");
···
1504
1504
/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1505
1505
unsigned char buf[INQUIRY_REVISION_LEN + 2];
1506
1506
char *stripped = NULL;
1507
-
size_t len;
1507
+
ssize_t len;
1508
1508
ssize_t ret;
1509
1509
1510
-
len = strlcpy(buf, page, sizeof(buf));
1511
-
if (len < sizeof(buf)) {
1510
+
len = strscpy(buf, page, sizeof(buf));
1511
+
if (len > 0) {
1512
1512
/* Strip any newline added from userspace. */
1513
1513
stripped = strstrip(buf);
1514
1514
len = strlen(stripped);
1515
1515
}
1516
-
if (len > INQUIRY_REVISION_LEN) {
1516
+
if (len < 0 || len > INQUIRY_REVISION_LEN) {
1517
1517
pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
1518
1518
__stringify(INQUIRY_REVISION_LEN)
1519
1519
"\n");
+1
drivers/target/target_core_transport.c
+4
-2
drivers/thermal/thermal_core.c
···
348
348
struct thermal_trip trip;
349
349
350
350
/* Ignore disabled trip points */
351
-
if (test_bit(trip_id, &tz->trips_disabled) ||
352
-
trip.temperature == THERMAL_TEMP_INVALID)
351
+
if (test_bit(trip_id, &tz->trips_disabled))
353
352
return;
354
353
355
354
__thermal_zone_get_trip(tz, trip_id, &trip);
355
+
356
+
if (trip.temperature == THERMAL_TEMP_INVALID)
357
+
return;
356
358
357
359
if (tz->last_temperature != THERMAL_TEMP_INVALID) {
358
360
if (tz->last_temperature < trip.temperature &&
+6
-2
drivers/thermal/thermal_of.c
···
37
37
*/
38
38
for_each_child_of_node(trips, t) {
39
39
40
-
if (t == trip)
40
+
if (t == trip) {
41
+
of_node_put(t);
41
42
goto out;
43
+
}
42
44
i++;
43
45
}
44
46
···
403
401
404
402
for_each_child_of_node(cm_np, child) {
405
403
ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
406
-
if (ret)
404
+
if (ret) {
405
+
of_node_put(child);
407
406
break;
407
+
}
408
408
}
409
409
410
410
of_node_put(cm_np);
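Both thermal_of hunks above add an of_node_put() before leaving a for_each_child_of_node() loop early. The iterator holds a reference on the current child and only drops it when advancing, so a break or goto out of the loop must drop that reference by hand. A minimal sketch with an illustrative child name:

#include <linux/of.h>

/* Count children until one named "stop" is found (illustrative). */
static int example_count_until_stop(struct device_node *parent)
{
        struct device_node *child;
        int n = 0;

        for_each_child_of_node(parent, child) {
                if (of_node_name_eq(child, "stop")) {
                        of_node_put(child);     /* drop the iterator's reference */
                        break;
                }
                n++;
        }
        return n;
}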
+2
-1
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
···
110
110
}
111
111
112
112
static int __ti_thermal_get_trend(struct thermal_zone_device *tz,
113
-
struct thermal_trip *trip, enum thermal_trend *trend)
113
+
const struct thermal_trip *trip,
114
+
enum thermal_trend *trend)
114
115
{
115
116
struct ti_thermal_data *data = thermal_zone_device_priv(tz);
116
117
struct ti_bandgap *bgp;
+7
-6
drivers/ufs/core/ufshcd.c
···
22
22
#include <linux/module.h>
23
23
#include <linux/regulator/consumer.h>
24
24
#include <linux/sched/clock.h>
25
+
#include <linux/iopoll.h>
25
26
#include <scsi/scsi_cmnd.h>
26
27
#include <scsi/scsi_dbg.h>
27
28
#include <scsi/scsi_driver.h>
···
2300
2299
*/
2301
2300
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2302
2301
{
2303
-
return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
2302
+
u32 val;
2303
+
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2304
+
500, UIC_CMD_TIMEOUT * 1000, false, hba,
2305
+
REG_CONTROLLER_STATUS);
2306
+
return ret == 0 ? true : false;
2304
2307
}
2305
2308
2306
2309
/**
···
2397
2392
bool completion)
2398
2393
{
2399
2394
lockdep_assert_held(&hba->uic_cmd_mutex);
2400
-
lockdep_assert_held(hba->host->host_lock);
2401
2395
2402
2396
if (!ufshcd_ready_for_uic_cmd(hba)) {
2403
2397
dev_err(hba->dev,
···
2423
2419
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2424
2420
{
2425
2421
int ret;
2426
-
unsigned long flags;
2427
2422
2428
2423
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2429
2424
return 0;
···
2431
2428
mutex_lock(&hba->uic_cmd_mutex);
2432
2429
ufshcd_add_delay_before_dme_cmd(hba);
2433
2430
2434
-
spin_lock_irqsave(hba->host->host_lock, flags);
2435
2431
ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2436
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
2437
2432
if (!ret)
2438
2433
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2439
2434
···
4134
4133
wmb();
4135
4134
reenable_intr = true;
4136
4135
}
4137
-
ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4138
4136
spin_unlock_irqrestore(hba->host->host_lock, flags);
4137
+
ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4139
4138
if (ret) {
4140
4139
dev_err(hba->dev,
4141
4140
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+3
drivers/usb/typec/ucsi/debugfs.c
+1
-1
drivers/w1/masters/ds2482.c
+1
-1
fs/btrfs/Kconfig
···
31
31
continue to be mountable and usable by newer kernels.
32
32
33
33
For more information, please see the web pages at
34
-
http://btrfs.wiki.kernel.org.
34
+
https://btrfs.readthedocs.io
35
35
36
36
To compile this file system support as a module, choose M here. The
37
37
module will be called btrfs.
+10
-2
fs/btrfs/block-group.c
···
3028
3028
btrfs_mark_buffer_dirty(leaf);
3029
3029
fail:
3030
3030
btrfs_release_path(path);
3031
-
/* We didn't update the block group item, need to revert @commit_used. */
3032
-
if (ret < 0) {
3031
+
/*
3032
+
* We didn't update the block group item, need to revert commit_used
3033
+
* unless the block group item didn't exist yet - this is to prevent a
3034
+
* race with a concurrent insertion of the block group item, with
3035
+
* insert_block_group_item(), that happened just after we attempted to
3036
+
* update. In that case we would reset commit_used to 0 just after the
3037
+
* insertion set it to a value greater than 0 - if the block group later
3038
+
* becomes with 0 used bytes, we would incorrectly skip its update.
3039
+
*/
3040
+
if (ret < 0 && ret != -ENOENT) {
3033
3041
spin_lock(&cache->lock);
3034
3042
cache->commit_used = old_commit_used;
3035
3043
spin_unlock(&cache->lock);
+71
-33
fs/btrfs/delayed-inode.c
···
412
412
413
413
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
414
414
{
415
+
struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
415
416
struct rb_root_cached *root;
416
417
struct btrfs_delayed_root *delayed_root;
417
418
···
420
419
if (RB_EMPTY_NODE(&delayed_item->rb_node))
421
420
return;
422
421
423
-
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
422
+
/* If it's in a rbtree, then we need to have delayed node locked. */
423
+
lockdep_assert_held(&delayed_node->mutex);
424
+
425
+
delayed_root = delayed_node->root->fs_info->delayed_root;
424
426
425
427
BUG_ON(!delayed_root);
426
428
427
429
if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
428
-
root = &delayed_item->delayed_node->ins_root;
430
+
root = &delayed_node->ins_root;
429
431
else
430
-
root = &delayed_item->delayed_node->del_root;
432
+
root = &delayed_node->del_root;
431
433
432
434
rb_erase_cached(&delayed_item->rb_node, root);
433
435
RB_CLEAR_NODE(&delayed_item->rb_node);
434
-
delayed_item->delayed_node->count--;
436
+
delayed_node->count--;
435
437
436
438
finish_one_item(delayed_root);
437
439
}
···
1157
1153
ret = __btrfs_commit_inode_delayed_items(trans, path,
1158
1154
curr_node);
1159
1155
if (ret) {
1160
-
btrfs_release_delayed_node(curr_node);
1161
-
curr_node = NULL;
1162
1156
btrfs_abort_transaction(trans, ret);
1163
1157
break;
1164
1158
}
1165
1159
1166
1160
prev_node = curr_node;
1167
1161
curr_node = btrfs_next_delayed_node(curr_node);
1162
+
/*
1163
+
* See the comment below about releasing path before releasing
1164
+
* node. If the commit of delayed items was successful the path
1165
+
* should always be released, but in case of an error, it may
1166
+
* point to locked extent buffers (a leaf at the very least).
1167
+
*/
1168
+
ASSERT(path->nodes[0] == NULL);
1168
1169
btrfs_release_delayed_node(prev_node);
1169
1170
}
1170
1171
1172
+
/*
1173
+
* Release the path to avoid a potential deadlock and lockdep splat when
1174
+
* releasing the delayed node, as that requires taking the delayed node's
1175
+
* mutex. If another task starts running delayed items before we take
1176
+
* the mutex, it will first lock the mutex and then it may try to lock
1177
+
* the same btree path (leaf).
1178
+
*/
1179
+
btrfs_free_path(path);
1180
+
1171
1181
if (curr_node)
1172
1182
btrfs_release_delayed_node(curr_node);
1173
-
btrfs_free_path(path);
1174
1183
trans->block_rsv = block_rsv;
1175
1184
1176
1185
return ret;
···
1430
1413
btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1431
1414
}
1432
1415
1433
-
/* Will return 0 or -ENOMEM */
1416
+
static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1417
+
{
1418
+
struct btrfs_fs_info *fs_info = trans->fs_info;
1419
+
const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1420
+
1421
+
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1422
+
return;
1423
+
1424
+
/*
1425
+
* Adding the new dir index item does not require touching another
1426
+
* leaf, so we can release 1 unit of metadata that was previously
1427
+
* reserved when starting the transaction. This applies only to
1428
+
* the case where we had a transaction start and excludes the
1429
+
* transaction join case (when replaying log trees).
1430
+
*/
1431
+
trace_btrfs_space_reservation(fs_info, "transaction",
1432
+
trans->transid, bytes, 0);
1433
+
btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1434
+
ASSERT(trans->bytes_reserved >= bytes);
1435
+
trans->bytes_reserved -= bytes;
1436
+
}
1437
+
1438
+
/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1434
1439
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1435
1440
const char *name, int name_len,
1436
1441
struct btrfs_inode *dir,
···
1494
1455
1495
1456
mutex_lock(&delayed_node->mutex);
1496
1457
1458
+
/*
1459
+
* First attempt to insert the delayed item. This is to make the error
1460
+
* handling path simpler in case we fail (-EEXIST). There's no risk of
1461
+
* any other task coming in and running the delayed item before we do
1462
+
* the metadata space reservation below, because we are holding the
1463
+
* delayed node's mutex and that mutex must also be locked before the
1464
+
* node's delayed items can be run.
1465
+
*/
1466
+
ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1467
+
if (unlikely(ret)) {
1468
+
btrfs_err(trans->fs_info,
1469
+
"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1470
+
name_len, name, index, btrfs_root_id(delayed_node->root),
1471
+
delayed_node->inode_id, dir->index_cnt,
1472
+
delayed_node->index_cnt, ret);
1473
+
btrfs_release_delayed_item(delayed_item);
1474
+
btrfs_release_dir_index_item_space(trans);
1475
+
mutex_unlock(&delayed_node->mutex);
1476
+
goto release_node;
1477
+
}
1478
+
1497
1479
if (delayed_node->index_item_leaves == 0 ||
1498
1480
delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1499
1481
delayed_node->curr_index_batch_size = data_len;
···
1532
1472
* impossible.
1533
1473
*/
1534
1474
if (WARN_ON(ret)) {
1535
-
mutex_unlock(&delayed_node->mutex);
1536
1475
btrfs_release_delayed_item(delayed_item);
1476
+
mutex_unlock(&delayed_node->mutex);
1537
1477
goto release_node;
1538
1478
}
1539
1479
1540
1480
delayed_node->index_item_leaves++;
1541
-
} else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
1542
-
const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1543
-
1544
-
/*
1545
-
* Adding the new dir index item does not require touching another
1546
-
* leaf, so we can release 1 unit of metadata that was previously
1547
-
* reserved when starting the transaction. This applies only to
1548
-
* the case where we had a transaction start and excludes the
1549
-
* transaction join case (when replaying log trees).
1550
-
*/
1551
-
trace_btrfs_space_reservation(fs_info, "transaction",
1552
-
trans->transid, bytes, 0);
1553
-
btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1554
-
ASSERT(trans->bytes_reserved >= bytes);
1555
-
trans->bytes_reserved -= bytes;
1556
-
}
1557
-
1558
-
ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1559
-
if (unlikely(ret)) {
1560
-
btrfs_err(trans->fs_info,
1561
-
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1562
-
name_len, name, delayed_node->root->root_key.objectid,
1563
-
delayed_node->inode_id, ret);
1564
-
BUG();
1481
+
} else {
1482
+
btrfs_release_dir_index_item_space(trans);
1565
1483
}
1566
1484
mutex_unlock(&delayed_node->mutex);
1567
1485
+12
-10
fs/btrfs/disk-io.c
···
520
520
struct folio *folio)
521
521
{
522
522
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
523
+
struct btrfs_subpage_info *spi = fs_info->subpage_info;
523
524
struct btrfs_subpage *subpage;
524
525
struct extent_buffer *eb;
525
526
int cur_bit = 0;
···
534
533
btrfs_assert_tree_write_locked(eb);
535
534
return filemap_dirty_folio(mapping, folio);
536
535
}
536
+
537
+
ASSERT(spi);
537
538
subpage = folio_get_private(folio);
538
539
539
-
ASSERT(subpage->dirty_bitmap);
540
-
while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
540
+
for (cur_bit = spi->dirty_offset;
541
+
cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
542
+
cur_bit++) {
541
543
unsigned long flags;
542
544
u64 cur;
543
-
u16 tmp = (1 << cur_bit);
544
545
545
546
spin_lock_irqsave(&subpage->lock, flags);
546
-
if (!(tmp & subpage->dirty_bitmap)) {
547
+
if (!test_bit(cur_bit, subpage->bitmaps)) {
547
548
spin_unlock_irqrestore(&subpage->lock, flags);
548
-
cur_bit++;
549
549
continue;
550
550
}
551
551
spin_unlock_irqrestore(&subpage->lock, flags);
···
559
557
btrfs_assert_tree_write_locked(eb);
560
558
free_extent_buffer(eb);
561
559
562
-
cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
560
+
cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
563
561
}
564
562
return filemap_dirty_folio(mapping, folio);
565
563
}
···
1549
1547
1550
1548
delta = ktime_get_seconds() - cur->start_time;
1551
1549
if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
1552
-
cur->state < TRANS_STATE_COMMIT_START &&
1550
+
cur->state < TRANS_STATE_COMMIT_PREP &&
1553
1551
delta < fs_info->commit_interval) {
1554
1552
spin_unlock(&fs_info->trans_lock);
1555
1553
delay -= msecs_to_jiffies((delta - 1) * 1000);
···
2684
2682
btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2685
2683
btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2686
2684
btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2687
-
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
2688
-
BTRFS_LOCKDEP_TRANS_COMMIT_START);
2685
+
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2686
+
BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2689
2687
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2690
2688
BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2691
2689
btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
···
4872
4870
while (!list_empty(&fs_info->trans_list)) {
4873
4871
t = list_first_entry(&fs_info->trans_list,
4874
4872
struct btrfs_transaction, list);
4875
-
if (t->state >= TRANS_STATE_COMMIT_START) {
4873
+
if (t->state >= TRANS_STATE_COMMIT_PREP) {
4876
4874
refcount_inc(&t->use_count);
4877
4875
spin_unlock(&fs_info->trans_lock);
4878
4876
btrfs_wait_for_commit(fs_info, t->transid);
+7
-1
fs/btrfs/ioctl.c
···
1958
1958
goto out_put;
1959
1959
}
1960
1960
1961
+
/*
1962
+
* We don't need the path anymore, so release it and
1963
+
* avoid deadlocks and lockdep warnings in case
1964
+
* btrfs_iget() needs to lookup the inode from its root
1965
+
* btree and lock the same leaf.
1966
+
*/
1967
+
btrfs_release_path(path);
1961
1968
temp_inode = btrfs_iget(sb, key2.objectid, root);
1962
1969
if (IS_ERR(temp_inode)) {
1963
1970
ret = PTR_ERR(temp_inode);
···
1985
1978
goto out_put;
1986
1979
}
1987
1980
1988
-
btrfs_release_path(path);
1989
1981
key.objectid = key.offset;
1990
1982
key.offset = (u64)-1;
1991
1983
dirid = key.objectid;
+1
-1
fs/btrfs/locking.h
+1
-1
fs/btrfs/ordered-data.c
+24
-15
fs/btrfs/transaction.c
···
56
56
* | Call btrfs_commit_transaction() on any trans handle attached to
57
57
* | transaction N
58
58
* V
59
+
* Transaction N [[TRANS_STATE_COMMIT_PREP]]
60
+
* |
61
+
* | If there are simultaneous calls to btrfs_commit_transaction() one will win
62
+
* | the race and the rest will wait for the winner to commit the transaction.
63
+
* |
64
+
* | The winner will wait for previous running transaction to completely finish
65
+
* | if there is one.
66
+
* |
59
67
* Transaction N [[TRANS_STATE_COMMIT_START]]
60
68
* |
61
-
* | Will wait for previous running transaction to completely finish if there
62
-
* | is one
63
-
* |
64
-
* | Then one of the following happes:
69
+
* | Then one of the following happens:
65
70
* | - Wait for all other trans handle holders to release.
66
71
* | The btrfs_commit_transaction() caller will do the commit work.
67
72
* | - Wait for current transaction to be committed by others.
···
117
112
*/
118
113
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
119
114
[TRANS_STATE_RUNNING] = 0U,
115
+
[TRANS_STATE_COMMIT_PREP] = 0U,
120
116
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
121
117
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
122
118
__TRANS_ATTACH |
···
1988
1982
* Wait for the current transaction commit to start and block
1989
1983
* subsequent transaction joins
1990
1984
*/
1991
-
btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
1985
+
btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
1992
1986
wait_event(fs_info->transaction_blocked_wait,
1993
1987
cur_trans->state >= TRANS_STATE_COMMIT_START ||
1994
1988
TRANS_ABORTED(cur_trans));
···
2135
2129
return;
2136
2130
2137
2131
lockdep_assert_held(&trans->fs_info->trans_lock);
2138
-
ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
2132
+
ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP);
2139
2133
2140
2134
list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2141
2135
}
···
2159
2153
ktime_t interval;
2160
2154
2161
2155
ASSERT(refcount_read(&trans->use_count) == 1);
2162
-
btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2156
+
btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2163
2157
2164
2158
clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
2165
2159
···
2219
2213
}
2220
2214
2221
2215
spin_lock(&fs_info->trans_lock);
2222
-
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
2216
+
if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) {
2223
2217
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
2224
2218
2225
2219
add_pending_snapshot(trans);
···
2231
2225
want_state = TRANS_STATE_SUPER_COMMITTED;
2232
2226
2233
2227
btrfs_trans_state_lockdep_release(fs_info,
2234
-
BTRFS_LOCKDEP_TRANS_COMMIT_START);
2228
+
BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2235
2229
ret = btrfs_end_transaction(trans);
2236
2230
wait_for_commit(cur_trans, want_state);
2237
2231
···
2243
2237
return ret;
2244
2238
}
2245
2239
2246
-
cur_trans->state = TRANS_STATE_COMMIT_START;
2240
+
cur_trans->state = TRANS_STATE_COMMIT_PREP;
2247
2241
wake_up(&fs_info->transaction_blocked_wait);
2248
-
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2242
+
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2249
2243
2250
2244
if (cur_trans->list.prev != &fs_info->trans_list) {
2251
2245
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
···
2266
2260
btrfs_put_transaction(prev_trans);
2267
2261
if (ret)
2268
2262
goto lockdep_release;
2269
-
} else {
2270
-
spin_unlock(&fs_info->trans_lock);
2263
+
spin_lock(&fs_info->trans_lock);
2271
2264
}
2272
2265
} else {
2273
-
spin_unlock(&fs_info->trans_lock);
2274
2266
/*
2275
2267
* The previous transaction was aborted and was already removed
2276
2268
* from the list of transactions at fs_info->trans_list. So we
···
2276
2272
* corrupt state (pointing to trees with unwritten nodes/leafs).
2277
2273
*/
2278
2274
if (BTRFS_FS_ERROR(fs_info)) {
2275
+
spin_unlock(&fs_info->trans_lock);
2279
2276
ret = -EROFS;
2280
2277
goto lockdep_release;
2281
2278
}
2282
2279
}
2280
+
2281
+
cur_trans->state = TRANS_STATE_COMMIT_START;
2282
+
wake_up(&fs_info->transaction_blocked_wait);
2283
+
spin_unlock(&fs_info->trans_lock);
2283
2284
2284
2285
/*
2285
2286
* Get the time spent on the work done by the commit thread and not
···
2595
2586
goto cleanup_transaction;
2596
2587
2597
2588
lockdep_trans_commit_start_release:
2598
-
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_START);
2589
+
btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2599
2590
btrfs_end_transaction(trans);
2600
2591
return ret;
2601
2592
}
+1
fs/btrfs/transaction.h
+10
-4
fs/efivarfs/super.c
···
32
32
u64 storage_space, remaining_space, max_variable_size;
33
33
efi_status_t status;
34
34
35
-
status = efivar_query_variable_info(attr, &storage_space, &remaining_space,
36
-
&max_variable_size);
37
-
if (status != EFI_SUCCESS)
38
-
return efi_status_to_err(status);
35
+
/* Some UEFI firmware does not implement QueryVariableInfo() */
36
+
storage_space = remaining_space = 0;
37
+
if (efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO)) {
38
+
status = efivar_query_variable_info(attr, &storage_space,
39
+
&remaining_space,
40
+
&max_variable_size);
41
+
if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED)
42
+
pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n",
43
+
status);
44
+
}
39
45
40
46
/*
41
47
* This is not a normal filesystem, so no point in pretending it has a block
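The efivarfs hunk above stops treating a missing QueryVariableInfo() runtime service as a hard error: it defaults the sizes to zero, asks only when efi_rt_services_supported() says the service exists, and merely warns (ratelimited) on unexpected failures. Roughly, as a hedged sketch with simplified arguments:

#include <linux/efi.h>
#include <linux/printk.h>

static void example_query_var_space(u32 attr, u64 *storage, u64 *remaining)
{
        u64 max_variable_size;
        efi_status_t status;

        *storage = *remaining = 0;      /* sane defaults if firmware can't say */

        if (!efi_rt_services_supported(EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO))
                return;

        status = efivar_query_variable_info(attr, storage, remaining,
                                            &max_variable_size);
        if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED)
                pr_warn_ratelimited("query_variable_info() failed: 0x%lx\n",
                                    status);
}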
+33
-21
fs/ext4/mballoc.c
···
16
16
#include <linux/slab.h>
17
17
#include <linux/nospec.h>
18
18
#include <linux/backing-dev.h>
19
+
#include <linux/freezer.h>
19
20
#include <trace/events/ext4.h>
20
21
21
22
/*
···
6907
6906
return ret;
6908
6907
}
6909
6908
6909
+
static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6910
+
ext4_group_t grp)
6911
+
{
6912
+
if (grp < ext4_get_groups_count(sb))
6913
+
return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6914
+
return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6915
+
ext4_group_first_block_no(sb, grp) - 1) >>
6916
+
EXT4_CLUSTER_BITS(sb);
6917
+
}
6918
+
6919
+
static bool ext4_trim_interrupted(void)
6920
+
{
6921
+
return fatal_signal_pending(current) || freezing(current);
6922
+
}
6923
+
6910
6924
static int ext4_try_to_trim_range(struct super_block *sb,
6911
6925
struct ext4_buddy *e4b, ext4_grpblk_t start,
6912
6926
ext4_grpblk_t max, ext4_grpblk_t minblocks)
···
6929
6913
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6930
6914
{
6931
6915
ext4_grpblk_t next, count, free_count;
6916
+
bool set_trimmed = false;
6932
6917
void *bitmap;
6933
6918
6934
6919
bitmap = e4b->bd_bitmap;
6920
+
if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
6921
+
set_trimmed = true;
6935
6922
start = max(e4b->bd_info->bb_first_free, start);
6936
6923
count = 0;
6937
6924
free_count = 0;
···
6949
6930
int ret = ext4_trim_extent(sb, start, next - start, e4b);
6950
6931
6951
6932
if (ret && ret != -EOPNOTSUPP)
6952
-
break;
6933
+
return count;
6953
6934
count += next - start;
6954
6935
}
6955
6936
free_count += next - start;
6956
6937
start = next + 1;
6957
6938
6958
-
if (fatal_signal_pending(current)) {
6959
-
count = -ERESTARTSYS;
6960
-
break;
6961
-
}
6939
+
if (ext4_trim_interrupted())
6940
+
return count;
6962
6941
6963
6942
if (need_resched()) {
6964
6943
ext4_unlock_group(sb, e4b->bd_group);
···
6968
6951
break;
6969
6952
}
6970
6953
6954
+
if (set_trimmed)
6955
+
EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6956
+
6971
6957
return count;
6972
6958
}
6973
6959
···
6981
6961
* @start: first group block to examine
6982
6962
* @max: last group block to examine
6983
6963
* @minblocks: minimum extent block count
6984
-
* @set_trimmed: set the trimmed flag if at least one block is trimmed
6985
6964
*
6986
6965
* ext4_trim_all_free walks through group's block bitmap searching for free
6987
6966
* extents. When the free extent is found, mark it as used in group buddy
···
6990
6971
static ext4_grpblk_t
6991
6972
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6992
6973
ext4_grpblk_t start, ext4_grpblk_t max,
6993
-
ext4_grpblk_t minblocks, bool set_trimmed)
6974
+
ext4_grpblk_t minblocks)
6994
6975
{
6995
6976
struct ext4_buddy e4b;
6996
6977
int ret;
···
7007
6988
ext4_lock_group(sb, group);
7008
6989
7009
6990
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
7010
-
minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
6991
+
minblocks < EXT4_SB(sb)->s_last_trim_minblks)
7011
6992
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
7012
-
if (ret >= 0 && set_trimmed)
7013
-
EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
7014
-
} else {
6993
+
else
7015
6994
ret = 0;
7016
-
}
7017
6995
7018
6996
ext4_unlock_group(sb, group);
7019
6997
ext4_mb_unload_buddy(&e4b);
···
7043
7027
ext4_fsblk_t first_data_blk =
7044
7028
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
7045
7029
ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
7046
-
bool whole_group, eof = false;
7047
7030
int ret = 0;
7048
7031
7049
7032
start = range->start >> sb->s_blocksize_bits;
···
7061
7046
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
7062
7047
goto out;
7063
7048
}
7064
-
if (end >= max_blks - 1) {
7049
+
if (end >= max_blks - 1)
7065
7050
end = max_blks - 1;
7066
-
eof = true;
7067
-
}
7068
7051
if (end <= first_data_blk)
7069
7052
goto out;
7070
7053
if (start < first_data_blk)
···
7076
7063
7077
7064
/* end now represents the last cluster to discard in this group */
7078
7065
end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
7079
-
whole_group = true;
7080
7066
7081
7067
for (group = first_group; group <= last_group; group++) {
7068
+
if (ext4_trim_interrupted())
7069
+
break;
7082
7070
grp = ext4_get_group_info(sb, group);
7083
7071
if (!grp)
7084
7072
continue;
···
7096
7082
* change it for the last group, note that last_cluster is
7097
7083
* already computed earlier by ext4_get_group_no_and_offset()
7098
7084
*/
7099
-
if (group == last_group) {
7085
+
if (group == last_group)
7100
7086
end = last_cluster;
7101
-
whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
7102
-
}
7103
7087
if (grp->bb_free >= minlen) {
7104
7088
cnt = ext4_trim_all_free(sb, group, first_cluster,
7105
-
end, minlen, whole_group);
7089
+
end, minlen);
7106
7090
if (cnt < 0) {
7107
7091
ret = cnt;
7108
7092
break;
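The mballoc hunks above introduce ext4_trim_interrupted(), which aborts the long trim loops when the task has a fatal signal pending or is being frozen for suspend. The same bail-out pattern for any long loop in process context, sketched with hypothetical work:

#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static bool example_should_stop(void)
{
        return fatal_signal_pending(current) || freezing(current);
}

static int example_long_loop(unsigned long nr_items)
{
        unsigned long i;

        for (i = 0; i < nr_items; i++) {
                if (example_should_stop())
                        return -ERESTARTSYS;    /* stop cleanly, keep partial work */
                cond_resched();                 /* and don't hog the CPU */
                /* ... process item i ... */
        }
        return 0;
}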
+15
-11
fs/ext4/namei.c
···
343
343
struct buffer_head *bh)
344
344
{
345
345
struct ext4_dir_entry_tail *t;
346
+
int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
346
347
347
348
#ifdef PARANOID
348
349
struct ext4_dir_entry *d, *top;
349
350
350
351
d = (struct ext4_dir_entry *)bh->b_data;
351
352
top = (struct ext4_dir_entry *)(bh->b_data +
352
-
(EXT4_BLOCK_SIZE(inode->i_sb) -
353
-
sizeof(struct ext4_dir_entry_tail)));
354
-
while (d < top && d->rec_len)
353
+
(blocksize - sizeof(struct ext4_dir_entry_tail)));
354
+
while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
355
355
d = (struct ext4_dir_entry *)(((void *)d) +
356
-
le16_to_cpu(d->rec_len));
356
+
ext4_rec_len_from_disk(d->rec_len, blocksize));
357
357
358
358
if (d != top)
359
359
return NULL;
···
364
364
#endif
365
365
366
366
if (t->det_reserved_zero1 ||
367
-
le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
367
+
(ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
368
+
sizeof(struct ext4_dir_entry_tail)) ||
368
369
t->det_reserved_zero2 ||
369
370
t->det_reserved_ft != EXT4_FT_DIR_CSUM)
370
371
return NULL;
···
446
445
struct ext4_dir_entry *dp;
447
446
struct dx_root_info *root;
448
447
int count_offset;
448
+
int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
449
+
unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
449
450
450
-
if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
451
+
if (rlen == blocksize)
451
452
count_offset = 8;
452
-
else if (le16_to_cpu(dirent->rec_len) == 12) {
453
+
else if (rlen == 12) {
453
454
dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
454
-
if (le16_to_cpu(dp->rec_len) !=
455
-
EXT4_BLOCK_SIZE(inode->i_sb) - 12)
455
+
if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
456
456
return NULL;
457
457
root = (struct dx_root_info *)(((void *)dp + 12));
458
458
if (root->reserved_zero ||
···
1317
1315
unsigned int buflen = bh->b_size;
1318
1316
char *base = bh->b_data;
1319
1317
struct dx_hash_info h = *hinfo;
1318
+
int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
1320
1319
1321
1320
if (ext4_has_metadata_csum(dir->i_sb))
1322
1321
buflen -= sizeof(struct ext4_dir_entry_tail);
···
1338
1335
map_tail--;
1339
1336
map_tail->hash = h.hash;
1340
1337
map_tail->offs = ((char *) de - base)>>2;
1341
-
map_tail->size = le16_to_cpu(de->rec_len);
1338
+
map_tail->size = ext4_rec_len_from_disk(de->rec_len,
1339
+
blocksize);
1342
1340
count++;
1343
1341
cond_resched();
1344
1342
}
1345
-
de = ext4_next_entry(de, dir->i_sb->s_blocksize);
1343
+
de = ext4_next_entry(de, blocksize);
1346
1344
}
1347
1345
return count;
1348
1346
}
+6 -10 fs/jbd2/commit.c
···
298
298
299
299
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
300
300
{
301
-
struct page *page = bh->b_page;
302
301
char *addr;
303
302
__u32 checksum;
304
303
305
-
addr = kmap_atomic(page);
306
-
checksum = crc32_be(crc32_sum,
307
-
(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
308
-
kunmap_atomic(addr);
304
+
addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
305
+
checksum = crc32_be(crc32_sum, addr, bh->b_size);
306
+
kunmap_local(addr);
309
307
310
308
return checksum;
311
309
}
···
320
322
struct buffer_head *bh, __u32 sequence)
321
323
{
322
324
journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
323
-
struct page *page = bh->b_page;
324
325
__u8 *addr;
325
326
__u32 csum32;
326
327
__be32 seq;
···
328
331
return;
329
332
330
333
seq = cpu_to_be32(sequence);
331
-
addr = kmap_atomic(page);
334
+
addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
332
335
csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
333
-
csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
334
-
bh->b_size);
335
-
kunmap_atomic(addr);
336
+
csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
337
+
kunmap_local(addr);
336
338
337
339
if (jbd2_has_feature_csum3(j))
338
340
tag3->t_checksum = cpu_to_be32(csum32);
+2 fs/jbd2/journal.c
···

err_cleanup:
percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+ if (journal->j_chksum_driver)
+ crypto_free_shash(journal->j_chksum_driver);
kfree(journal->j_wbuf);
jbd2_journal_destroy_revoke(journal);
journal_fail_superblock(journal);
+4 -8 fs/jbd2/transaction.c
···
935
935
/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
936
936
static void jbd2_freeze_jh_data(struct journal_head *jh)
937
937
{
938
-
struct page *page;
939
-
int offset;
940
938
char *source;
941
939
struct buffer_head *bh = jh2bh(jh);
942
940
943
941
J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
944
-
page = bh->b_page;
945
-
offset = offset_in_page(bh->b_data);
946
-
source = kmap_atomic(page);
942
+
source = kmap_local_folio(bh->b_folio, bh_offset(bh));
947
943
/* Fire data frozen trigger just before we copy the data */
948
-
jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
949
-
memcpy(jh->b_frozen_data, source + offset, bh->b_size);
950
-
kunmap_atomic(source);
944
+
jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers);
945
+
memcpy(jh->b_frozen_data, source, bh->b_size);
946
+
kunmap_local(source);
951
947
952
948
/*
953
949
* Now that the frozen data is saved off, we need to store any matching
+2 -2 fs/nfsd/nfs4proc.c
···
rename->rn_tname, rename->rn_tnamelen);
if (status)
return status;
- set_change_info(&rename->rn_sinfo, &cstate->current_fh);
- set_change_info(&rename->rn_tinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_sinfo, &cstate->save_fh);
+ set_change_info(&rename->rn_tinfo, &cstate->current_fh);
return nfs_ok;
}
+3 -2 fs/nfsd/nfssvc.c
···

int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
+ struct seq_file *seq = file->private_data;
+ struct svc_serv *serv = seq->private;
int ret = seq_release(inode, file);
- struct net *net = inode->i_sb->s_fs_info;

mutex_lock(&nfsd_mutex);
- nfsd_put(net);
+ svc_put(serv);
mutex_unlock(&nfsd_mutex);
return ret;
}
+2 -1 fs/overlayfs/copy_up.c
···
if (err)
return err;

- if (inode->i_flags & OVL_COPY_I_FLAGS_MASK) {
+ if (inode->i_flags & OVL_COPY_I_FLAGS_MASK &&
+ (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) {
/*
* Copy the fileattr inode flags that are the source of already
* copied i_flags
+3 -6 fs/overlayfs/file.c
···
19
19
struct kiocb iocb;
20
20
refcount_t ref;
21
21
struct kiocb *orig_iocb;
22
-
struct fd fd;
23
22
};
24
23
25
24
static struct kmem_cache *ovl_aio_request_cachep;
···
279
280
static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
280
281
{
281
282
if (refcount_dec_and_test(&aio_req->ref)) {
282
-
fdput(aio_req->fd);
283
+
fput(aio_req->iocb.ki_filp);
283
284
kmem_cache_free(ovl_aio_request_cachep, aio_req);
284
285
}
285
286
}
···
341
342
if (!aio_req)
342
343
goto out;
343
344
344
-
aio_req->fd = real;
345
345
real.flags = 0;
346
346
aio_req->orig_iocb = iocb;
347
-
kiocb_clone(&aio_req->iocb, iocb, real.file);
347
+
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
348
348
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
349
349
refcount_set(&aio_req->ref, 2);
350
350
ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
···
407
409
if (!aio_req)
408
410
goto out;
409
411
410
-
aio_req->fd = real;
411
412
real.flags = 0;
412
413
aio_req->orig_iocb = iocb;
413
-
kiocb_clone(&aio_req->iocb, iocb, real.file);
414
+
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
414
415
aio_req->iocb.ki_flags = ifl;
415
416
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
416
417
refcount_set(&aio_req->ref, 2);
+1 -1 fs/smb/client/inode.c
+3 -3 fs/smb/client/smb2ops.c
···
297
297
cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
298
298
credits->value, new_val);
299
299
300
-
return -ENOTSUPP;
300
+
return -EOPNOTSUPP;
301
301
}
302
302
303
303
spin_lock(&server->req_lock);
···
1161
1161
/* Use a fudge factor of 256 bytes in case we collide
1162
1162
* with a different set_EAs command.
1163
1163
*/
1164
-
if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1164
+
if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1165
1165
MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1166
1166
used_len + ea_name_len + ea_value_len + 1) {
1167
1167
rc = -ENOSPC;
···
4591
4591
4592
4592
if (shdr->Command != SMB2_READ) {
4593
4593
cifs_server_dbg(VFS, "only big read responses are supported\n");
4594
-
return -ENOTSUPP;
4594
+
return -EOPNOTSUPP;
4595
4595
}
4596
4596
4597
4597
if (server->ops->is_session_expired &&
+19 -12 fs/smb/client/smb2pdu.c
···
89
89
struct TCP_Server_Info *server)
90
90
{
91
91
struct smb3_hdr_req *smb3_hdr;
92
+
92
93
shdr->ProtocolId = SMB2_PROTO_NUMBER;
93
94
shdr->StructureSize = cpu_to_le16(64);
94
95
shdr->Command = smb2_cmd;
95
-
if (server->dialect >= SMB30_PROT_ID) {
96
-
/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
97
-
smb3_hdr = (struct smb3_hdr_req *)shdr;
98
-
/* if primary channel is not set yet, use default channel for chan sequence num */
99
-
if (SERVER_IS_CHAN(server))
100
-
smb3_hdr->ChannelSequence =
101
-
cpu_to_le16(server->primary_server->channel_sequence_num);
102
-
else
103
-
smb3_hdr->ChannelSequence = cpu_to_le16(server->channel_sequence_num);
104
-
}
96
+
105
97
if (server) {
98
+
/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
99
+
if (server->dialect >= SMB30_PROT_ID) {
100
+
smb3_hdr = (struct smb3_hdr_req *)shdr;
101
+
/*
102
+
* if primary channel is not set yet, use default
103
+
* channel for chan sequence num
104
+
*/
105
+
if (SERVER_IS_CHAN(server))
106
+
smb3_hdr->ChannelSequence =
107
+
cpu_to_le16(server->primary_server->channel_sequence_num);
108
+
else
109
+
smb3_hdr->ChannelSequence =
110
+
cpu_to_le16(server->channel_sequence_num);
111
+
}
106
112
spin_lock(&server->req_lock);
107
113
/* Request up to 10 credits but don't go over the limit. */
108
114
if (server->credits >= server->max_credits)
···
2240
2234
* (most servers default to 120 seconds) and most clients default to 0.
2241
2235
* This can be overridden at mount ("handletimeout=") if the user wants
2242
2236
* a different persistent (or resilient) handle timeout for all opens
2243
-
* opens on a particular SMB3 mount.
2237
+
* on a particular SMB3 mount.
2244
2238
*/
2245
2239
buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2246
2240
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
···
2385
2379
return 0;
2386
2380
}
2387
2381
2388
-
/* See See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2382
+
/* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2389
2383
static void setup_owner_group_sids(char *buf)
2390
2384
{
2391
2385
struct owner_group_sids *sids = (struct owner_group_sids *)buf;
···
3130
3124
SMB2_ioctl_free(struct smb_rqst *rqst)
3131
3125
{
3132
3126
int i;
3127
+
3133
3128
if (rqst && rqst->rq_iov) {
3134
3129
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3135
3130
for (i = 1; i < rqst->rq_nvec; i++)
+1 -1 fs/smb/client/transport.c
+1 -1 fs/smb/server/smb2pdu.c
-1 fs/smb/server/smbacl.c
-6 fs/stat.c
···

#ifdef __ARCH_WANT_NEW_STAT

- #if BITS_PER_LONG == 32
- # define choose_32_64(a,b) a
- #else
- # define choose_32_64(a,b) b
- #endif
-
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
+52 -7 fs/tracefs/event_inode.c
···
185
185
186
186
/**
187
187
* eventfs_set_ef_status_free - set the ef->status to free
188
+
* @ti: the tracefs_inode of the dentry
188
189
* @dentry: dentry who's status to be freed
189
190
*
190
191
* eventfs_set_ef_status_free will be called if no more
191
192
* references remain
192
193
*/
193
-
void eventfs_set_ef_status_free(struct dentry *dentry)
194
+
void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry)
194
195
{
195
196
struct tracefs_inode *ti_parent;
196
-
struct eventfs_file *ef;
197
+
struct eventfs_inode *ei;
198
+
struct eventfs_file *ef, *tmp;
199
+
200
+
/* The top level events directory may be freed by this */
201
+
if (unlikely(ti->flags & TRACEFS_EVENT_TOP_INODE)) {
202
+
LIST_HEAD(ef_del_list);
203
+
204
+
mutex_lock(&eventfs_mutex);
205
+
206
+
ei = ti->private;
207
+
208
+
/* Record all the top level files */
209
+
list_for_each_entry_srcu(ef, &ei->e_top_files, list,
210
+
lockdep_is_held(&eventfs_mutex)) {
211
+
list_add_tail(&ef->del_list, &ef_del_list);
212
+
}
213
+
214
+
/* Nothing should access this, but just in case! */
215
+
ti->private = NULL;
216
+
217
+
mutex_unlock(&eventfs_mutex);
218
+
219
+
/* Now safely free the top level files and their children */
220
+
list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
221
+
list_del(&ef->del_list);
222
+
eventfs_remove(ef);
223
+
}
224
+
225
+
kfree(ei);
226
+
return;
227
+
}
197
228
198
229
mutex_lock(&eventfs_mutex);
230
+
199
231
ti_parent = get_tracefs(dentry->d_parent->d_inode);
200
232
if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
201
233
goto out;
···
452
420
453
421
ei = ti->private;
454
422
idx = srcu_read_lock(&eventfs_srcu);
455
-
list_for_each_entry_rcu(ef, &ei->e_top_files, list) {
423
+
list_for_each_entry_srcu(ef, &ei->e_top_files, list,
424
+
srcu_read_lock_held(&eventfs_srcu)) {
456
425
create_dentry(ef, dentry, false);
457
426
}
458
427
srcu_read_unlock(&eventfs_srcu, idx);
···
524
491
struct tracefs_inode *ti;
525
492
struct inode *inode;
526
493
494
+
if (security_locked_down(LOCKDOWN_TRACEFS))
495
+
return NULL;
496
+
527
497
if (IS_ERR(dentry))
528
498
return dentry;
529
499
···
543
507
INIT_LIST_HEAD(&ei->e_top_files);
544
508
545
509
ti = get_tracefs(inode);
546
-
ti->flags |= TRACEFS_EVENT_INODE;
510
+
ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
547
511
ti->private = ei;
548
512
549
513
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
···
573
537
struct tracefs_inode *ti_parent;
574
538
struct eventfs_inode *ei_parent;
575
539
struct eventfs_file *ef;
540
+
541
+
if (security_locked_down(LOCKDOWN_TRACEFS))
542
+
return NULL;
576
543
577
544
if (!parent)
578
545
return ERR_PTR(-EINVAL);
···
607
568
struct eventfs_file *ef_parent)
608
569
{
609
570
struct eventfs_file *ef;
571
+
572
+
if (security_locked_down(LOCKDOWN_TRACEFS))
573
+
return NULL;
610
574
611
575
if (!ef_parent)
612
576
return ERR_PTR(-EINVAL);
···
647
605
struct tracefs_inode *ti;
648
606
struct eventfs_inode *ei;
649
607
struct eventfs_file *ef;
608
+
609
+
if (security_locked_down(LOCKDOWN_TRACEFS))
610
+
return -ENODEV;
650
611
651
612
if (!parent)
652
613
return -EINVAL;
···
698
653
const struct file_operations *fop)
699
654
{
700
655
struct eventfs_file *ef;
656
+
657
+
if (security_locked_down(LOCKDOWN_TRACEFS))
658
+
return -ENODEV;
701
659
702
660
if (!ef_parent)
703
661
return -EINVAL;
···
839
791
void eventfs_remove_events_dir(struct dentry *dentry)
840
792
{
841
793
struct tracefs_inode *ti;
842
-
struct eventfs_inode *ei;
843
794
844
795
if (!dentry || !dentry->d_inode)
845
796
return;
···
847
800
if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
848
801
return;
849
802
850
-
ei = ti->private;
851
803
d_invalidate(dentry);
852
804
dput(dentry);
853
-
kfree(ei);
854
805
}
+4 -1 fs/tracefs/inode.c
···

ti = get_tracefs(inode);
if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ef_status_free(dentry);
+ eventfs_set_ef_status_free(ti, dentry);
iput(inode);
}

···
*/
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
{
+ if (security_locked_down(LOCKDOWN_TRACEFS))
+ return NULL;
+
return __create_dir(name, parent, &simple_dir_inode_operations);
}
+3 -2 fs/tracefs/internal.h
···
#define _TRACEFS_INTERNAL_H

enum {
- TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_INODE = BIT(1),
+ TRACEFS_EVENT_TOP_INODE = BIT(2),
};

struct tracefs_inode {
···
struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
struct dentry *eventfs_failed_creating(struct dentry *dentry);
struct dentry *eventfs_end_creating(struct dentry *dentry);
- void eventfs_set_ef_status_free(struct dentry *dentry);
+ void eventfs_set_ef_status_free(struct tracefs_inode *ti, struct dentry *dentry);

#endif /* _TRACEFS_INTERNAL_H */
+31 -4 include/drm/drm_exec.h
···
52
52
};
53
53
54
54
/**
55
+
* drm_exec_obj() - Return the object for a give drm_exec index
56
+
* @exec: Pointer to the drm_exec context
57
+
* @index: The index.
58
+
*
59
+
* Return: Pointer to the locked object corresponding to @index if
60
+
* index is within the number of locked objects. NULL otherwise.
61
+
*/
62
+
static inline struct drm_gem_object *
63
+
drm_exec_obj(struct drm_exec *exec, unsigned long index)
64
+
{
65
+
return index < exec->num_objects ? exec->objects[index] : NULL;
66
+
}
67
+
68
+
/**
55
69
* drm_exec_for_each_locked_object - iterate over all the locked objects
56
70
* @exec: drm_exec object
57
71
* @index: unsigned long index for the iteration
···
73
59
*
74
60
* Iterate over all the locked GEM objects inside the drm_exec object.
75
61
*/
76
-
#define drm_exec_for_each_locked_object(exec, index, obj) \
77
-
for (index = 0, obj = (exec)->objects[0]; \
78
-
index < (exec)->num_objects; \
79
-
++index, obj = (exec)->objects[index])
62
+
#define drm_exec_for_each_locked_object(exec, index, obj) \
63
+
for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
64
+
65
+
/**
66
+
* drm_exec_for_each_locked_object_reverse - iterate over all the locked
67
+
* objects in reverse locking order
68
+
* @exec: drm_exec object
69
+
* @index: unsigned long index for the iteration
70
+
* @obj: the current GEM object
71
+
*
72
+
* Iterate over all the locked GEM objects inside the drm_exec object in
73
+
* reverse locking order. Note that @index may go below zero and wrap,
74
+
* but that will be caught by drm_exec_obj(), returning a NULL object.
75
+
*/
76
+
#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
77
+
for ((index) = (exec)->num_objects - 1; \
78
+
((obj) = drm_exec_obj(exec, index)); --(index))
80
79
81
80
/**
82
81
* drm_exec_until_all_locked - loop until all GEM objects are locked
+3 -1 include/drm/drm_kunit_helpers.h
···
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_

+ #include <linux/device.h>
+
#include <kunit/test.h>

struct drm_device;
···
{
struct drm_driver *driver;

- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);

driver->driver_features = features;
+4 -1 include/linux/buffer_head.h
···
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

- #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+ static inline unsigned long bh_offset(const struct buffer_head *bh)
+ {
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+ }

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
+2 include/linux/export-internal.h
+1 -10 include/linux/i2c.h
···
237
237
* struct i2c_driver - represent an I2C device driver
238
238
* @class: What kind of i2c device we instantiate (for detect)
239
239
* @probe: Callback for device binding
240
-
* @probe_new: Transitional callback for device binding - do not use
241
240
* @remove: Callback for device unbinding
242
241
* @shutdown: Callback for device shutdown
243
242
* @alert: Alert callback, for example for the SMBus alert protocol
···
271
272
struct i2c_driver {
272
273
unsigned int class;
273
274
274
-
union {
275
275
/* Standard driver model interfaces */
276
-
int (*probe)(struct i2c_client *client);
277
-
/*
278
-
* Legacy callback that was part of a conversion of .probe().
279
-
* Today it has the same semantic as .probe(). Don't use for new
280
-
* code.
281
-
*/
282
-
int (*probe_new)(struct i2c_client *client);
283
-
};
276
+
int (*probe)(struct i2c_client *client);
284
277
void (*remove)(struct i2c_client *client);
285
278
286
279
+5 include/linux/instruction_pointer.h
···
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H

+ #include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+ #ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+ #endif

#endif /* _LINUX_INSTRUCTION_POINTER_H */
+4 include/linux/libata.h
···
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */

+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */

/* various lengths of time */
+3 -3 include/linux/nvme-fc-driver.h
···
53
53
struct nvmefc_ls_req {
54
54
void *rqstaddr;
55
55
dma_addr_t rqstdma;
56
-
__le32 rqstlen;
56
+
u32 rqstlen;
57
57
void *rspaddr;
58
58
dma_addr_t rspdma;
59
-
__le32 rsplen;
59
+
u32 rsplen;
60
60
u32 timeout;
61
61
62
62
void *private;
···
120
120
struct nvmefc_ls_rsp {
121
121
void *rspbuf;
122
122
dma_addr_t rspdma;
123
-
__le32 rsplen;
123
+
u16 rsplen;
124
124
125
125
void (*done)(struct nvmefc_ls_rsp *rsp);
126
126
void *nvme_fc_private; /* LLDD is not to access !! */
+2 -2 include/linux/thermal.h
···
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, struct thermal_trip *,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
+3 -4 include/linux/trace_events.h
···
62
62
/* Used to find the offset and length of dynamic fields in trace events */
63
63
struct trace_dynamic_info {
64
64
#ifdef CONFIG_CPU_BIG_ENDIAN
65
-
u16 offset;
66
65
u16 len;
66
+
u16 offset;
67
67
#else
68
-
u16 len;
69
68
u16 offset;
69
+
u16 len;
70
70
#endif
71
-
};
71
+
} __packed;
72
72
73
73
/*
74
74
* The trace entry - the most basic unit of tracing. This is what
···
650
650
struct trace_event_call *event_call;
651
651
struct event_filter __rcu *filter;
652
652
struct eventfs_file *ef;
653
-
struct dentry *dir;
654
653
struct trace_array *tr;
655
654
struct trace_subsystem_dir *system;
656
655
struct list_head triggers;
+6 -1 include/net/ipv6.h
···
784
784
cpu_to_be32(0x0000ffff))) == 0UL;
785
785
}
786
786
787
+
static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
788
+
{
789
+
return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
790
+
}
791
+
787
792
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
788
793
{
789
794
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
···
1365
1360
return 0;
1366
1361
}
1367
1362
1368
-
static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
1363
+
static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
1369
1364
{
1370
1365
int ret;
1371
1366
+5 io_uring/net.c
···
183
183
memcpy(async_msg, kmsg, sizeof(*kmsg));
184
184
if (async_msg->msg.msg_name)
185
185
async_msg->msg.msg_name = &async_msg->addr;
186
+
187
+
if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
188
+
return -EAGAIN;
189
+
186
190
/* if were using fast_iov, set it to the new one */
187
191
if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
188
192
size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
···
546
542
struct io_async_msghdr *iomsg)
547
543
{
548
544
iomsg->msg.msg_name = &iomsg->addr;
545
+
iomsg->msg.msg_iter.nr_segs = 0;
549
546
550
547
#ifdef CONFIG_COMPAT
551
548
if (req->ctx->compat)
+1 kernel/panic.c
+6 -6 kernel/power/hibernate.c
···
786
786
unlock_device_hotplug();
787
787
if (snapshot_test) {
788
788
pm_pr_dbg("Checking hibernation image\n");
789
-
error = swsusp_check(snapshot_test);
789
+
error = swsusp_check(false);
790
790
if (!error)
791
-
error = load_image_and_restore(snapshot_test);
791
+
error = load_image_and_restore(false);
792
792
}
793
793
thaw_processes();
794
794
···
945
945
pm_pr_dbg("Looking for hibernation image.\n");
946
946
947
947
mutex_lock(&system_transition_mutex);
948
-
error = swsusp_check(false);
948
+
error = swsusp_check(true);
949
949
if (error)
950
950
goto Unlock;
951
951
952
952
/* The snapshot device should not be opened while we're running */
953
953
if (!hibernate_acquire()) {
954
954
error = -EBUSY;
955
-
swsusp_close(false);
955
+
swsusp_close(true);
956
956
goto Unlock;
957
957
}
958
958
···
973
973
goto Close_Finish;
974
974
}
975
975
976
-
error = load_image_and_restore(false);
976
+
error = load_image_and_restore(true);
977
977
thaw_processes();
978
978
Finish:
979
979
pm_notifier_call_chain(PM_POST_RESTORE);
···
987
987
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
988
988
return error;
989
989
Close_Finish:
990
-
swsusp_close(false);
990
+
swsusp_close(true);
991
991
goto Finish;
992
992
}
993
993
+2 -2 kernel/power/power.h
···
#define SF_HW_SIG 8

/* kernel/power/hibernate.c */
- int swsusp_check(bool snapshot_test);
+ int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
- void swsusp_close(bool snapshot_test);
+ void swsusp_close(bool exclusive);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif
+8 -6 kernel/power/swap.c
···
1513
1513
static void *swsusp_holder;
1514
1514
1515
1515
/**
1516
-
* swsusp_check - Check for swsusp signature in the resume device
1516
+
* swsusp_check - Check for swsusp signature in the resume device
1517
+
* @exclusive: Open the resume device exclusively.
1517
1518
*/
1518
1519
1519
-
int swsusp_check(bool snapshot_test)
1520
+
int swsusp_check(bool exclusive)
1520
1521
{
1521
-
void *holder = snapshot_test ? &swsusp_holder : NULL;
1522
+
void *holder = exclusive ? &swsusp_holder : NULL;
1522
1523
int error;
1523
1524
1524
1525
hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
···
1564
1563
}
1565
1564
1566
1565
/**
1567
-
* swsusp_close - close swap device.
1566
+
* swsusp_close - close swap device.
1567
+
* @exclusive: Close the resume device which is exclusively opened.
1568
1568
*/
1569
1569
1570
-
void swsusp_close(bool snapshot_test)
1570
+
void swsusp_close(bool exclusive)
1571
1571
{
1572
1572
if (IS_ERR(hib_resume_bdev)) {
1573
1573
pr_debug("Image device not initialised\n");
1574
1574
return;
1575
1575
}
1576
1576
1577
-
blkdev_put(hib_resume_bdev, snapshot_test ? &swsusp_holder : NULL);
1577
+
blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
1578
1578
}
1579
1579
1580
1580
/**
+25 -2 kernel/sched/fair.c
···
6619
6619
/* Working cpumask for: load_balance, load_balance_newidle. */
6620
6620
static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6621
6621
static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
6622
+
static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
6622
6623
6623
6624
#ifdef CONFIG_NO_HZ_COMMON
6624
6625
···
9580
9579
imbalance /= ncores_local + ncores_busiest;
9581
9580
9582
9581
/* Take advantage of resource in an empty sched group */
9583
-
if (imbalance == 0 && local->sum_nr_running == 0 &&
9582
+
if (imbalance <= 1 && local->sum_nr_running == 0 &&
9584
9583
busiest->sum_nr_running > 1)
9585
9584
imbalance = 2;
9586
9585
···
9768
9767
break;
9769
9768
9770
9769
case group_smt_balance:
9770
+
/*
9771
+
* Check if we have spare CPUs on either SMT group to
9772
+
* choose has spare or fully busy handling.
9773
+
*/
9774
+
if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
9775
+
goto has_spare;
9776
+
9777
+
fallthrough;
9778
+
9771
9779
case group_fully_busy:
9772
9780
/*
9773
9781
* Select the fully busy group with highest avg_load. In
···
9816
9806
else
9817
9807
return true;
9818
9808
}
9809
+
has_spare:
9819
9810
9820
9811
/*
9821
9812
* Select not overloaded group with lowest number of idle cpus
···
10928
10917
10929
10918
static int should_we_balance(struct lb_env *env)
10930
10919
{
10920
+
struct cpumask *swb_cpus = this_cpu_cpumask_var_ptr(should_we_balance_tmpmask);
10931
10921
struct sched_group *sg = env->sd->groups;
10932
10922
int cpu, idle_smt = -1;
10933
10923
···
10952
10940
return 1;
10953
10941
}
10954
10942
10943
+
cpumask_copy(swb_cpus, group_balance_mask(sg));
10955
10944
/* Try to find first idle CPU */
10956
-
for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
10945
+
for_each_cpu_and(cpu, swb_cpus, env->cpus) {
10957
10946
if (!idle_cpu(cpu))
10958
10947
continue;
10959
10948
···
10966
10953
if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
10967
10954
if (idle_smt == -1)
10968
10955
idle_smt = cpu;
10956
+
/*
10957
+
* If the core is not idle, and first SMT sibling which is
10958
+
* idle has been found, then its not needed to check other
10959
+
* SMT siblings for idleness:
10960
+
*/
10961
+
#ifdef CONFIG_SCHED_SMT
10962
+
cpumask_andnot(swb_cpus, swb_cpus, cpu_smt_mask(cpu));
10963
+
#endif
10969
10964
continue;
10970
10965
}
10971
10966
···
12939
12918
for_each_possible_cpu(i) {
12940
12919
zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
12941
12920
zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
12921
+
zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),
12922
+
GFP_KERNEL, cpu_to_node(i));
12942
12923
12943
12924
#ifdef CONFIG_CFS_BANDWIDTH
12944
12925
INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
+7 kernel/trace/ring_buffer.c
···
2198
2198
err = -ENOMEM;
2199
2199
goto out_err;
2200
2200
}
2201
+
2202
+
cond_resched();
2201
2203
}
2202
2204
2203
2205
cpus_read_lock();
···
2390
2388
*/
2391
2389
commit = rb_page_commit(iter_head_page);
2392
2390
smp_rmb();
2391
+
2392
+
/* An event needs to be at least 8 bytes in size */
2393
+
if (iter->head > commit - 8)
2394
+
goto reset;
2395
+
2393
2396
event = __rb_page_index(iter_head_page, iter->head);
2394
2397
length = rb_event_length(event);
2395
2398
+63 -9 kernel/trace/trace.c
···
1772
1772
init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1773
1773
tr->d_max_latency = trace_create_file("tracing_max_latency",
1774
1774
TRACE_MODE_WRITE,
1775
-
d_tracer, &tr->max_latency,
1775
+
d_tracer, tr,
1776
1776
&tracing_max_lat_fops);
1777
1777
}
1778
1778
···
1805
1805
1806
1806
#define trace_create_maxlat_file(tr, d_tracer) \
1807
1807
trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1808
-
d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1808
+
d_tracer, tr, &tracing_max_lat_fops)
1809
1809
1810
1810
#endif
1811
1811
···
4973
4973
return 0;
4974
4974
}
4975
4975
4976
+
/*
4977
+
* The private pointer of the inode is the trace_event_file.
4978
+
* Update the tr ref count associated to it.
4979
+
*/
4980
+
int tracing_open_file_tr(struct inode *inode, struct file *filp)
4981
+
{
4982
+
struct trace_event_file *file = inode->i_private;
4983
+
int ret;
4984
+
4985
+
ret = tracing_check_open_get_tr(file->tr);
4986
+
if (ret)
4987
+
return ret;
4988
+
4989
+
filp->private_data = inode->i_private;
4990
+
4991
+
return 0;
4992
+
}
4993
+
4994
+
int tracing_release_file_tr(struct inode *inode, struct file *filp)
4995
+
{
4996
+
struct trace_event_file *file = inode->i_private;
4997
+
4998
+
trace_array_put(file->tr);
4999
+
5000
+
return 0;
5001
+
}
5002
+
4976
5003
static int tracing_mark_open(struct inode *inode, struct file *filp)
4977
5004
{
4978
5005
stream_open(inode, filp);
···
6718
6691
tracing_max_lat_read(struct file *filp, char __user *ubuf,
6719
6692
size_t cnt, loff_t *ppos)
6720
6693
{
6721
-
return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6694
+
struct trace_array *tr = filp->private_data;
6695
+
6696
+
return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6722
6697
}
6723
6698
6724
6699
static ssize_t
6725
6700
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6726
6701
size_t cnt, loff_t *ppos)
6727
6702
{
6728
-
return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6703
+
struct trace_array *tr = filp->private_data;
6704
+
6705
+
return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6729
6706
}
6730
6707
6731
6708
#endif
···
7783
7752
7784
7753
#ifdef CONFIG_TRACER_MAX_TRACE
7785
7754
static const struct file_operations tracing_max_lat_fops = {
7786
-
.open = tracing_open_generic,
7755
+
.open = tracing_open_generic_tr,
7787
7756
.read = tracing_max_lat_read,
7788
7757
.write = tracing_max_lat_write,
7789
7758
.llseek = generic_file_llseek,
7759
+
.release = tracing_release_generic_tr,
7790
7760
};
7791
7761
#endif
7792
7762
7793
7763
static const struct file_operations set_tracer_fops = {
7794
-
.open = tracing_open_generic,
7764
+
.open = tracing_open_generic_tr,
7795
7765
.read = tracing_set_trace_read,
7796
7766
.write = tracing_set_trace_write,
7797
7767
.llseek = generic_file_llseek,
7768
+
.release = tracing_release_generic_tr,
7798
7769
};
7799
7770
7800
7771
static const struct file_operations tracing_pipe_fops = {
···
8989
8956
return cnt;
8990
8957
}
8991
8958
8959
+
static int tracing_open_options(struct inode *inode, struct file *filp)
8960
+
{
8961
+
struct trace_option_dentry *topt = inode->i_private;
8962
+
int ret;
8963
+
8964
+
ret = tracing_check_open_get_tr(topt->tr);
8965
+
if (ret)
8966
+
return ret;
8967
+
8968
+
filp->private_data = inode->i_private;
8969
+
return 0;
8970
+
}
8971
+
8972
+
static int tracing_release_options(struct inode *inode, struct file *file)
8973
+
{
8974
+
struct trace_option_dentry *topt = file->private_data;
8975
+
8976
+
trace_array_put(topt->tr);
8977
+
return 0;
8978
+
}
8992
8979
8993
8980
static const struct file_operations trace_options_fops = {
8994
-
.open = tracing_open_generic,
8981
+
.open = tracing_open_options,
8995
8982
.read = trace_options_read,
8996
8983
.write = trace_options_write,
8997
8984
.llseek = generic_file_llseek,
8985
+
.release = tracing_release_options,
8998
8986
};
8999
8987
9000
8988
/*
···
9793
9739
tr, &tracing_mark_fops);
9794
9740
9795
9741
file = __find_event_file(tr, "ftrace", "print");
9796
-
if (file && file->dir)
9797
-
trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9742
+
if (file && file->ef)
9743
+
eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
9798
9744
file, &event_trigger_fops);
9799
9745
tr->trace_marker_file = file;
9800
9746
+2 kernel/trace/trace.h
···
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+ int tracing_open_file_tr(struct inode *inode, struct file *filp);
+ int tracing_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
+13 -19 kernel/trace/trace_events.c
···
992
992
993
993
static void remove_event_file_dir(struct trace_event_file *file)
994
994
{
995
-
struct dentry *dir = file->dir;
996
-
struct dentry *child;
997
-
998
-
if (dir) {
999
-
spin_lock(&dir->d_lock); /* probably unneeded */
1000
-
list_for_each_entry(child, &dir->d_subdirs, d_child) {
1001
-
if (d_really_is_positive(child)) /* probably unneeded */
1002
-
d_inode(child)->i_private = NULL;
1003
-
}
1004
-
spin_unlock(&dir->d_lock);
1005
-
1006
-
tracefs_remove(dir);
1007
-
}
1008
995
eventfs_remove(file->ef);
1009
996
list_del(&file->list);
1010
997
remove_subsystem(file->system);
···
2090
2103
};
2091
2104
2092
2105
static const struct file_operations ftrace_enable_fops = {
2093
-
.open = tracing_open_generic,
2106
+
.open = tracing_open_file_tr,
2094
2107
.read = event_enable_read,
2095
2108
.write = event_enable_write,
2109
+
.release = tracing_release_file_tr,
2096
2110
.llseek = default_llseek,
2097
2111
};
2098
2112
···
2110
2122
};
2111
2123
2112
2124
static const struct file_operations ftrace_event_filter_fops = {
2113
-
.open = tracing_open_generic,
2125
+
.open = tracing_open_file_tr,
2114
2126
.read = event_filter_read,
2115
2127
.write = event_filter_write,
2128
+
.release = tracing_release_file_tr,
2116
2129
.llseek = default_llseek,
2117
2130
};
2118
2131
···
2286
2297
{
2287
2298
struct event_subsystem *system, *iter;
2288
2299
struct trace_subsystem_dir *dir;
2300
+
struct eventfs_file *ef;
2289
2301
int res;
2290
2302
2291
2303
/* First see if we did not already create this dir */
···
2319
2329
} else
2320
2330
__get_system(system);
2321
2331
2322
-
dir->ef = eventfs_add_subsystem_dir(name, parent);
2323
-
if (IS_ERR(dir->ef)) {
2332
+
ef = eventfs_add_subsystem_dir(name, parent);
2333
+
if (IS_ERR(ef)) {
2324
2334
pr_warn("Failed to create system directory %s\n", name);
2325
2335
__put_system(system);
2326
2336
goto out_free;
2327
2337
}
2328
2338
2339
+
dir->ef = ef;
2329
2340
dir->tr = tr;
2330
2341
dir->ref_count = 1;
2331
2342
dir->nr_events = 1;
···
2406
2415
struct trace_event_call *call = file->event_call;
2407
2416
struct eventfs_file *ef_subsystem = NULL;
2408
2417
struct trace_array *tr = file->tr;
2418
+
struct eventfs_file *ef;
2409
2419
const char *name;
2410
2420
int ret;
2411
2421
···
2423
2431
return -ENOMEM;
2424
2432
2425
2433
name = trace_event_name(call);
2426
-
file->ef = eventfs_add_dir(name, ef_subsystem);
2427
-
if (IS_ERR(file->ef)) {
2434
+
ef = eventfs_add_dir(name, ef_subsystem);
2435
+
if (IS_ERR(ef)) {
2428
2436
pr_warn("Could not create tracefs '%s' directory\n", name);
2429
2437
return -1;
2430
2438
}
2439
+
2440
+
file->ef = ef;
2431
2441
2432
2442
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2433
2443
eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
+2 -1 kernel/trace/trace_events_inject.c
+1 -1 kernel/trace/trace_events_synth.c
+33 -15 lib/kunit/executor.c
···
65
65
};
66
66
67
67
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
68
-
static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
68
+
static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
69
69
const char *filter_glob)
70
70
{
71
71
const int len = strlen(filter_glob);
···
73
73
74
74
if (!period) {
75
75
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
76
+
if (!parsed->suite_glob)
77
+
return -ENOMEM;
78
+
76
79
parsed->test_glob = NULL;
77
80
strcpy(parsed->suite_glob, filter_glob);
78
-
return;
81
+
return 0;
79
82
}
80
83
81
84
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
85
+
if (!parsed->suite_glob)
86
+
return -ENOMEM;
87
+
82
88
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
89
+
if (!parsed->test_glob) {
90
+
kfree(parsed->suite_glob);
91
+
return -ENOMEM;
92
+
}
83
93
84
94
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
85
95
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
96
+
97
+
return 0;
86
98
}
87
99
88
100
/* Create a copy of suite with only tests that match test_glob. */
···
164
152
}
165
153
copy_start = copy;
166
154
167
-
if (filter_glob)
168
-
kunit_parse_glob_filter(&parsed_glob, filter_glob);
155
+
if (filter_glob) {
156
+
*err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
157
+
if (*err)
158
+
goto free_copy;
159
+
}
169
160
170
161
/* Parse attribute filters */
171
162
if (filters) {
172
163
filter_count = kunit_get_filter_count(filters);
173
164
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
174
165
if (!parsed_filters) {
175
-
kfree(copy);
176
-
return filtered;
166
+
*err = -ENOMEM;
167
+
goto free_parsed_glob;
177
168
}
178
169
for (j = 0; j < filter_count; j++)
179
170
parsed_filters[j] = kunit_next_attr_filter(&filters, err);
180
171
if (*err)
181
-
goto err;
172
+
goto free_parsed_filters;
182
173
}
183
174
184
175
for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
···
193
178
parsed_glob.test_glob);
194
179
if (IS_ERR(filtered_suite)) {
195
180
*err = PTR_ERR(filtered_suite);
196
-
goto err;
181
+
goto free_parsed_filters;
197
182
}
198
183
}
199
184
if (filter_count > 0 && parsed_filters != NULL) {
···
210
195
filtered_suite = new_filtered_suite;
211
196
212
197
if (*err)
213
-
goto err;
198
+
goto free_parsed_filters;
199
+
214
200
if (IS_ERR(filtered_suite)) {
215
201
*err = PTR_ERR(filtered_suite);
216
-
goto err;
202
+
goto free_parsed_filters;
217
203
}
218
204
if (!filtered_suite)
219
205
break;
···
229
213
filtered.start = copy_start;
230
214
filtered.end = copy;
231
215
232
-
err:
233
-
if (*err)
234
-
kfree(copy);
216
+
free_parsed_filters:
217
+
if (filter_count)
218
+
kfree(parsed_filters);
235
219
220
+
free_parsed_glob:
236
221
if (filter_glob) {
237
222
kfree(parsed_glob.suite_glob);
238
223
kfree(parsed_glob.test_glob);
239
224
}
240
225
241
-
if (filter_count)
242
-
kfree(parsed_filters);
226
+
free_copy:
227
+
if (*err)
228
+
kfree(copy);
243
229
244
230
return filtered;
245
231
}
+8 -5 lib/kunit/executor_test.c
···
119
119
{
120
120
int j, filter_count;
121
121
struct kunit_attr_filter *parsed_filters;
122
-
char *filters = "speed>slow, module!=example";
122
+
char filters[] = "speed>slow, module!=example", *filter = filters;
123
123
int err = 0;
124
124
125
125
filter_count = kunit_get_filter_count(filters);
···
128
128
parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
129
129
GFP_KERNEL);
130
130
for (j = 0; j < filter_count; j++) {
131
-
parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
131
+
parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
132
132
KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
133
133
}
134
134
···
154
154
.start = subsuite, .end = &subsuite[2],
155
155
};
156
156
struct kunit_suite_set got;
157
+
char filter[] = "speed>slow";
157
158
int err = 0;
158
159
159
160
subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
···
169
168
* attribute is unset and thus, the filtering is based on the parent attribute
170
169
* of slow.
171
170
*/
172
-
got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
171
+
got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
173
172
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
174
173
KUNIT_ASSERT_EQ(test, err, 0);
175
174
kfree_at_end(test, got.start);
···
192
191
.start = subsuite, .end = &subsuite[2],
193
192
};
194
193
struct kunit_suite_set got;
194
+
char filter[] = "module!=dummy";
195
195
int err = 0;
196
196
197
197
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
198
198
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
199
199
200
-
got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
200
+
got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
201
201
KUNIT_ASSERT_EQ(test, err, 0);
202
202
kfree_at_end(test, got.start); /* just in case */
203
203
···
213
211
.start = subsuite, .end = &subsuite[1],
214
212
};
215
213
struct kunit_suite_set got;
214
+
char filter[] = "speed>slow";
216
215
int err = 0;
217
216
218
217
subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
219
218
220
219
/* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
221
-
got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
220
+
got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
222
221
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
223
222
KUNIT_ASSERT_EQ(test, err, 0);
224
223
kfree_at_end(test, got.start);
+2 -1 lib/kunit/test.c
···

switch (val) {
case MODULE_STATE_LIVE:
- kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
case MODULE_STATE_UNFORMED:
break;
}
+1 -1 mm/mremap.c
···
}

vma_iter_init(&vmi, mm, old_addr);
- if (!do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false)) {
+ if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
+1 net/hsr/hsr_forward.c
+5 -5 net/ipv4/devinet.c
···
355
355
{
356
356
struct in_ifaddr *promote = NULL;
357
357
struct in_ifaddr *ifa, *ifa1;
358
-
struct in_ifaddr *last_prim;
358
+
struct in_ifaddr __rcu **last_prim;
359
359
struct in_ifaddr *prev_prom = NULL;
360
360
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
361
361
362
362
ASSERT_RTNL();
363
363
364
364
ifa1 = rtnl_dereference(*ifap);
365
-
last_prim = rtnl_dereference(in_dev->ifa_list);
365
+
last_prim = ifap;
366
366
if (in_dev->dead)
367
367
goto no_promotions;
368
368
···
376
376
while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
377
377
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
378
378
ifa1->ifa_scope <= ifa->ifa_scope)
379
-
last_prim = ifa;
379
+
last_prim = &ifa->ifa_next;
380
380
381
381
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
382
382
ifa1->ifa_mask != ifa->ifa_mask ||
···
440
440
441
441
rcu_assign_pointer(prev_prom->ifa_next, next_sec);
442
442
443
-
last_sec = rtnl_dereference(last_prim->ifa_next);
443
+
last_sec = rtnl_dereference(*last_prim);
444
444
rcu_assign_pointer(promote->ifa_next, last_sec);
445
-
rcu_assign_pointer(last_prim->ifa_next, promote);
445
+
rcu_assign_pointer(*last_prim, promote);
446
446
}
447
447
448
448
promote->ifa_flags &= ~IFA_F_SECONDARY;
+28 -24 net/ipv4/inet_hashtables.c
···
815
815
const struct net *net, unsigned short port,
816
816
int l3mdev, const struct sock *sk)
817
817
{
818
-
#if IS_ENABLED(CONFIG_IPV6)
819
-
if (sk->sk_family != tb->family)
818
+
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
819
+
tb->l3mdev != l3mdev)
820
820
return false;
821
821
822
-
if (sk->sk_family == AF_INET6)
823
-
return net_eq(ib2_net(tb), net) && tb->port == port &&
824
-
tb->l3mdev == l3mdev &&
825
-
ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
826
-
else
827
-
#endif
828
-
return net_eq(ib2_net(tb), net) && tb->port == port &&
829
-
tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
830
-
}
831
-
832
-
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
833
-
unsigned short port, int l3mdev, const struct sock *sk)
834
-
{
835
822
#if IS_ENABLED(CONFIG_IPV6)
836
823
if (sk->sk_family != tb->family) {
837
824
if (sk->sk_family == AF_INET)
838
-
return net_eq(ib2_net(tb), net) && tb->port == port &&
839
-
tb->l3mdev == l3mdev &&
840
-
ipv6_addr_any(&tb->v6_rcv_saddr);
825
+
return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
826
+
tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
841
827
842
828
return false;
843
829
}
844
830
845
831
if (sk->sk_family == AF_INET6)
846
-
return net_eq(ib2_net(tb), net) && tb->port == port &&
847
-
tb->l3mdev == l3mdev &&
848
-
ipv6_addr_any(&tb->v6_rcv_saddr);
849
-
else
832
+
return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
850
833
#endif
851
-
return net_eq(ib2_net(tb), net) && tb->port == port &&
852
-
tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
834
+
return tb->rcv_saddr == sk->sk_rcv_saddr;
835
+
}
836
+
837
+
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
838
+
unsigned short port, int l3mdev, const struct sock *sk)
839
+
{
840
+
if (!net_eq(ib2_net(tb), net) || tb->port != port ||
841
+
tb->l3mdev != l3mdev)
842
+
return false;
843
+
844
+
#if IS_ENABLED(CONFIG_IPV6)
845
+
if (sk->sk_family != tb->family) {
846
+
if (sk->sk_family == AF_INET)
847
+
return ipv6_addr_any(&tb->v6_rcv_saddr) ||
848
+
ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);
849
+
850
+
return false;
851
+
}
852
+
853
+
if (sk->sk_family == AF_INET6)
854
+
return ipv6_addr_any(&tb->v6_rcv_saddr);
855
+
#endif
856
+
return tb->rcv_saddr == 0;
853
857
}
854
858
855
859
/* The socket's bhash2 hashbucket spinlock must be held when this is called */
+8 -5 net/kcm/kcmsock.c
···
930
930
out_error:
931
931
kcm_push(kcm);
932
932
933
-
if (copied && sock->type == SOCK_SEQPACKET) {
933
+
if (sock->type == SOCK_SEQPACKET) {
934
934
/* Wrote some bytes before encountering an
935
935
* error, return partial success.
936
936
*/
937
-
goto partial_message;
938
-
}
939
-
940
-
if (head != kcm->seq_skb)
937
+
if (copied)
938
+
goto partial_message;
939
+
if (head != kcm->seq_skb)
940
+
kfree_skb(head);
941
+
} else {
941
942
kfree_skb(head);
943
+
kcm->seq_skb = NULL;
944
+
}
942
945
943
946
err = sk_stream_error(sk, msg->msg_flags, err);
944
947
+2 net/smc/smc_core.c
···
1662
1662
{
1663
1663
struct smc_link_group *lgr, *n;
1664
1664
1665
+
spin_lock_bh(&smc_lgr_list.lock);
1665
1666
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1666
1667
struct smc_link *link;
1667
1668
···
1681
1680
if (link)
1682
1681
smc_llc_add_link_local(link);
1683
1682
}
1683
+
spin_unlock_bh(&smc_lgr_list.lock);
1684
1684
}
1685
1685
1686
1686
/* link is down - switch connections to alternate link,
+2 -1 net/smc/smc_stats.h
···
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
typeof(_ini) i = (_ini); \
- bool is_v2 = (i->smcd_version & SMC_V2); \
bool is_smcd = (i->is_smcd); \
+ u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
+ bool is_v2 = (version & SMC_V2); \
typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
if (is_v2 && is_smcd) \
this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
+2 -2 net/tls/tls_sw.c
···
817
817
psock = sk_psock_get(sk);
818
818
if (!psock || !policy) {
819
819
err = tls_push_record(sk, flags, record_type);
820
-
if (err && sk->sk_err == EBADMSG) {
820
+
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
821
821
*copied -= sk_msg_free(sk, msg);
822
822
tls_free_open_rec(sk);
823
823
err = -sk->sk_err;
···
846
846
switch (psock->eval) {
847
847
case __SK_PASS:
848
848
err = tls_push_record(sk, flags, record_type);
849
-
if (err && sk->sk_err == EBADMSG) {
849
+
if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
850
850
*copied -= sk_msg_free(sk, msg);
851
851
tls_free_open_rec(sk);
852
852
err = -sk->sk_err;
+1 -1 scripts/Makefile.modinst
+9 scripts/mod/modpost.c
···
1228
1228
*/
1229
1229
s->is_func = (ELF_ST_TYPE(sym->st_info) == STT_FUNC);
1230
1230
1231
+
/*
1232
+
* For parisc64, symbols prefixed $$ from the library have the symbol type
1233
+
* STT_LOPROC. They should be handled as functions too.
1234
+
*/
1235
+
if (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64 &&
1236
+
elf->hdr->e_machine == EM_PARISC &&
1237
+
ELF_ST_TYPE(sym->st_info) == STT_LOPROC)
1238
+
s->is_func = true;
1239
+
1231
1240
if (match(secname, PATTERNS(INIT_SECTIONS)))
1232
1241
warn("%s: %s: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL.\n",
1233
1242
mod->name, name);
+1 -1 scripts/package/install-extmod-build
···
find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
find include scripts -type f -o -type l
find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
- find "$(find "arch/${SRCARCH}" -name include -o -name scripts -type d)" -type f
+ find "arch/${SRCARCH}" -name include -o -name scripts -type d
) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"

{
+8 -2 security/selinux/hooks.c
···
2775
2775
static int selinux_fs_context_submount(struct fs_context *fc,
2776
2776
struct super_block *reference)
2777
2777
{
2778
-
const struct superblock_security_struct *sbsec;
2778
+
const struct superblock_security_struct *sbsec = selinux_superblock(reference);
2779
2779
struct selinux_mnt_opts *opts;
2780
+
2781
+
/*
2782
+
* Ensure that fc->security remains NULL when no options are set
2783
+
* as expected by selinux_set_mnt_opts().
2784
+
*/
2785
+
if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT)))
2786
+
return 0;
2780
2787
2781
2788
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
2782
2789
if (!opts)
2783
2790
return -ENOMEM;
2784
2791
2785
-
sbsec = selinux_superblock(reference);
2786
2792
if (sbsec->flags & FSCONTEXT_MNT)
2787
2793
opts->fscontext_sid = sbsec->sid;
2788
2794
if (sbsec->flags & CONTEXT_MNT)
+2 -1 tools/objtool/check.c
···
continue;
}

- if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
+ if (insn_func(dest) && insn_func(insn) &&
+ insn_func(dest)->pfunc == insn_func(insn)->pfunc) {
/*
* Anything from->to self is either _THIS_IP_ or
* IRET-to-self.
+17 -1 tools/testing/selftests/ftrace/ftracetest
···
31
31
# kselftest skip code is 4
32
32
err_skip=4
33
33
34
+
# umount required
35
+
UMOUNT_DIR=""
36
+
34
37
# cgroup RT scheduling prevents chrt commands from succeeding, which
35
38
# induces failures in test wakeup tests. Disable for the duration of
36
39
# the tests.
···
48
45
49
46
cleanup() {
50
47
echo $sched_rt_runtime_orig > $sched_rt_runtime
48
+
if [ -n "${UMOUNT_DIR}" ]; then
49
+
umount ${UMOUNT_DIR} ||:
50
+
fi
51
51
}
52
52
53
53
errexit() { # message
···
130
124
;;
131
125
--logdir|-l)
132
126
LOG_DIR=$2
127
+
LINK_PTR=
133
128
shift 2
134
129
;;
135
130
*.tc)
···
167
160
mount -t tracefs nodev /sys/kernel/tracing ||
168
161
errexit "Failed to mount /sys/kernel/tracing"
169
162
TRACING_DIR="/sys/kernel/tracing"
163
+
UMOUNT_DIR=${TRACING_DIR}
170
164
# If debugfs exists, then so does /sys/kernel/debug
171
165
elif [ -d "/sys/kernel/debug" ]; then
172
166
mount -t debugfs nodev /sys/kernel/debug ||
173
167
errexit "Failed to mount /sys/kernel/debug"
174
168
TRACING_DIR="/sys/kernel/debug/tracing"
169
+
UMOUNT_DIR=${TRACING_DIR}
175
170
else
176
171
err_ret=$err_skip
177
172
errexit "debugfs and tracefs are not configured in this kernel"
···
190
181
TOP_DIR=`absdir $0`
191
182
TEST_DIR=$TOP_DIR/test.d
192
183
TEST_CASES=`find_testcases $TEST_DIR`
193
-
LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
184
+
LOG_TOP_DIR=$TOP_DIR/logs
185
+
LOG_DATE=`date +%Y%m%d-%H%M%S`
186
+
LOG_DIR=$LOG_TOP_DIR/$LOG_DATE/
187
+
LINK_PTR=$LOG_TOP_DIR/latest
194
188
KEEP_LOG=0
195
189
KTAP=0
196
190
DEBUG=0
···
219
207
LOG_FILE=$LOG_DIR/ftracetest.log
220
208
mkdir -p $LOG_DIR || errexit "Failed to make a log directory: $LOG_DIR"
221
209
date > $LOG_FILE
210
+
if [ "x-$LINK_PTR" != "x-" ]; then
211
+
unlink $LINK_PTR
212
+
ln -fs $LOG_DATE $LINK_PTR
213
+
fi
222
214
fi
223
215
224
216
# Define text colors
+1 -1 tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+1 -1 tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc
···
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param
- # requires: set_event synthetic_events events/sched/sched_process_exec/hist "char name[]' >> synthetic_events":README ping:program
+ # requires: set_event synthetic_events events/sched/sched_process_exec/hist "' >> synthetic_events":README ping:program

fail() { #msg
echo $1
+1 -1 tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
···
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic_events syntax parser errors
- # requires: synthetic_events error_log "char name[]' >> synthetic_events":README
+ # requires: synthetic_events error_log "' >> synthetic_events":README

check_error() { # command-with-error-pos-by-^
ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
+2 -1 tools/testing/selftests/kselftest/runner.sh
···
{
# Make sure tests will time out if utility is available.
if [ -x /usr/bin/timeout ] ; then
- /usr/bin/timeout --foreground "$kselftest_timeout" $1
+ /usr/bin/timeout --foreground "$kselftest_timeout" \
+ /usr/bin/timeout "$kselftest_timeout" $1
else
$1
fi
+65 -12 tools/testing/selftests/kselftest_deps.sh
···
46
46
print_targets=0
47
47
48
48
while getopts "p" arg; do
49
-
case $arg in
50
-
p)
49
+
case $arg in
50
+
p)
51
51
print_targets=1
52
52
shift;;
53
-
esac
53
+
esac
54
54
done
55
55
56
56
if [ $# -eq 0 ]
···
92
92
# Get all TARGETS from selftests Makefile
93
93
targets=$(grep -E "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
94
94
95
+
# Initially, in LDLIBS related lines, the dep checker needs
96
+
# to ignore lines containing the following strings:
97
+
filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS"
98
+
95
99
# Single test case
96
100
if [ $# -eq 2 ]
97
101
then
···
104
100
l1_test $test
105
101
l2_test $test
106
102
l3_test $test
103
+
l4_test $test
104
+
l5_test $test
107
105
108
106
print_results $1 $2
109
107
exit $?
···
119
113
# Append space at the end of the list to append more tests.
120
114
121
115
l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
122
-
grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
116
+
grep -v "$filter" | awk -F: '{print $1}' | uniq)
123
117
124
118
# Level 2: LDLIBS set dynamically.
125
119
#
···
132
126
# Append space at the end of the list to append more tests.
133
127
134
128
l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
135
-
grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
129
+
grep -v "$filter" | awk -F: '{print $1}' | uniq)
136
130
137
131
# Level 3
138
132
# memfd and others use pkg-config to find mount and fuse libs
···
144
138
# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
145
139
146
140
l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
147
-
grep -v "pkg-config" | awk -F: '{print $1}')
141
+
grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
148
142
149
-
#echo $l1_tests
150
-
#echo $l2_1_tests
151
-
#echo $l3_tests
143
+
# Level 4
144
+
# some tests may fall back to default using `|| echo -l<libname>`
145
+
# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS
146
+
# as per level 3 checks.
147
+
# e.g:
148
+
# netfilter/Makefile
149
+
# LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
150
+
l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
151
+
grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)
152
+
153
+
# Level 5
154
+
# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
155
+
# which in turn may be defined in a sub-Makefile
156
+
# e.g.:
157
+
# mm/Makefile
158
+
# $(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
159
+
l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
160
+
awk -F: '{print $1}' | uniq)
161
+
162
+
#echo l1_tests $l1_tests
163
+
#echo l2_tests $l2_tests
164
+
#echo l3_tests $l3_tests
165
+
#echo l4_tests $l4_tests
166
+
#echo l5_tests $l5_tests
152
167
153
168
all_tests
154
169
print_results $1 $2
···
191
164
for test in $l3_tests; do
192
165
l3_test $test
193
166
done
167
+
168
+
for test in $l4_tests; do
169
+
l4_test $test
170
+
done
171
+
172
+
for test in $l5_tests; do
173
+
l5_test $test
174
+
done
194
175
}
195
176
196
177
# Use same parsing used for l1_tests and pick libraries this time.
197
178
l1_test()
198
179
{
199
180
test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
200
-
grep -v "VAR_LDLIBS" | \
181
+
grep -v "$filter" | \
201
182
sed -e 's/\:/ /' | \
202
183
sed -e 's/+/ /' | cut -d "=" -f 2)
203
184
204
185
check_libs $test $test_libs
205
186
}
206
187
207
-
# Use same parsing used for l2__tests and pick libraries this time.
188
+
# Use same parsing used for l2_tests and pick libraries this time.
208
189
l2_test()
209
190
{
210
191
test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
211
-
grep -v "VAR_LDLIBS" | \
192
+
grep -v "$filter" | \
212
193
sed -e 's/\:/ /' | sed -e 's/+/ /' | \
213
194
cut -d "=" -f 2)
214
195
···
228
193
test_libs=$(grep --include=Makefile "^VAR_LDLIBS" $test | \
229
194
grep -v "pkg-config" | sed -e 's/\:/ /' |
230
195
sed -e 's/+/ /' | cut -d "=" -f 2)
196
+
197
+
check_libs $test $test_libs
198
+
}
199
+
200
+
l4_test()
201
+
{
202
+
test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
203
+
grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
204
+
sed -e 's/.*|| echo //' | sed -e 's/)$//')
205
+
206
+
check_libs $test $test_libs
207
+
}
208
+
209
+
l5_test()
210
+
{
211
+
tests=$(find $(dirname "$test") -type f -name "*.mk")
212
+
test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
213
+
cut -d "=" -f 2)
231
214
232
215
check_libs $test $test_libs
233
216
}
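The new level 4 check keys off the `|| echo -l<libname>` fallback that some Makefiles append to a pkg-config query. A rough stand-alone illustration of what l4_test() extracts, using the netfilter example quoted in the comments above (the pipeline mirrors the function; the variable name is only for the demo):

  line='LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)'
  echo "$line" | grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
      sed -e 's/.*|| echo //' | sed -e 's/)$//'
  # prints "-lmnl", i.e. the library the test links against when pkg-config finds nothing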
+43 -17  tools/testing/selftests/kvm/riscv/get-reg-list.c
···

 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)

+static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
+
 bool filter_reg(__u64 reg)
 {
-    /*
-     * Some ISA extensions are optional and not present on all host,
-     * but they can't be disabled through ISA_EXT registers when present.
-     * So, to make life easy, just filtering out these kind of registers.
-     */
     switch (reg & ~REG_MASK) {
+    /*
+     * Same set of ISA_EXT registers are not present on all host because
+     * ISA_EXT registers are visible to the KVM user space based on the
+     * ISA extensions available on the host. Also, disabling an ISA
+     * extension using corresponding ISA_EXT register does not affect
+     * the visibility of the ISA_EXT register itself.
+     *
+     * Based on above, we should filter-out all ISA_EXT registers.
+     */
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
+    case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
···
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
     case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
         return true;
+    /* AIA registers are always available when Ssaia can't be disabled */
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+    case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+        return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
     default:
         break;
     }
···
     unsigned long value;

     ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
-    if (ret) {
-        printf("Failed to get ext %d", ext);
-        return false;
-    }
-
-    return !!value;
+    return (ret) ? false : !!value;
 }

 void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
 {
+    unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
     struct vcpu_reg_sublist *s;
+    int rc;
+
+    for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+        __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);

     /*
      * Disable all extensions which were enabled by default
      * if they were available in the risc-v host.
      */
-    for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
-        __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+    for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+        rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+        if (rc && isa_ext_state[i])
+            isa_ext_cant_disable[i] = true;
+    }

     for_each_sublist(c, s) {
         if (!s->feature)
···
     KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
     KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
     KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
-    KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
-    KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
-    KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
-    KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
     KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
     KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
     KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+2 -2  tools/testing/selftests/lib.mk
···
 run_tests: all
 ifdef building_out_of_srctree
     @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
-        rsync -aLq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+        rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
     fi
     @if [ "X$(TEST_PROGS)" != "X" ]; then \
         $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
···

 define INSTALL_SINGLE_RULE
     $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
-    $(if $(INSTALL_LIST),rsync -aL $(INSTALL_LIST) $(INSTALL_PATH)/)
+    $(if $(INSTALL_LIST),rsync -a --copy-unsafe-links $(INSTALL_LIST) $(INSTALL_PATH)/)
 endef

 define INSTALL_RULE
+14 -12  tools/testing/selftests/net/bind_bhash.sh
···
 # SPDX-License-Identifier: GPL-2.0

 NR_FILES=32768
-SAVED_NR_FILES=$(ulimit -n)
+readonly NETNS="ns-$(mktemp -u XXXXXX)"

 # default values
 port=443
···
 done

 setup() {
+    ip netns add "${NETNS}"
+    ip -netns "${NETNS}" link add veth0 type veth peer name veth1
+    ip -netns "${NETNS}" link set lo up
+    ip -netns "${NETNS}" link set veth0 up
+    ip -netns "${NETNS}" link set veth1 up
+
     if [[ "$use_v6" == true ]]; then
-        ip addr add $addr_v6 nodad dev eth0
+        ip -netns "${NETNS}" addr add $addr_v6 nodad dev veth0
     else
-        ip addr add $addr_v4 dev lo
+        ip -netns "${NETNS}" addr add $addr_v4 dev lo
     fi
-    ulimit -n $NR_FILES
 }

 cleanup() {
-    if [[ "$use_v6" == true ]]; then
-        ip addr del $addr_v6 dev eth0
-    else
-        ip addr del $addr_v4/32 dev lo
-    fi
-    ulimit -n $SAVED_NR_FILES
+    ip netns del "${NETNS}"
 }

 if [[ "$addr" != "" ]]; then
···
 fi
 setup
 if [[ "$use_v6" == true ]] ; then
-    ./bind_bhash $port "ipv6" $addr_v6
+    ip netns exec "${NETNS}" sh -c \
+        "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv6 ${addr_v6}"
 else
-    ./bind_bhash $port "ipv4" $addr_v4
+    ip netns exec "${NETNS}" sh -c \
+        "ulimit -n ${NR_FILES};./bind_bhash ${port} ipv4 ${addr_v4}"
 fi
 cleanup
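Because the benchmark now runs inside its own namespace, the higher file-descriptor limit is set only for the shell spawned by ip netns exec, so the caller's limit never needs to be saved and restored. A small sketch of that pattern (namespace name and limit are illustrative):

  NETNS="ns-demo"
  ip netns add "${NETNS}"
  ip -netns "${NETNS}" link set lo up
  # ulimit affects only this sh instance and its children inside the namespace
  ip netns exec "${NETNS}" sh -c 'ulimit -n 32768; ulimit -n'
  ip netns del "${NETNS}"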
+57 -11  tools/testing/selftests/net/bind_wildcard.c
···

 #include "../kselftest_harness.h"

+struct in6_addr in6addr_v4mapped_any = {
+    .s6_addr = {
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 255, 255,
+        0, 0, 0, 0
+    }
+};
+
+struct in6_addr in6addr_v4mapped_loopback = {
+    .s6_addr = {
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 255, 255,
+        127, 0, 0, 1
+    }
+};
+
 FIXTURE(bind_wildcard)
 {
     struct sockaddr_in addr4;
     struct sockaddr_in6 addr6;
-    int expected_errno;
 };

 FIXTURE_VARIANT(bind_wildcard)
 {
     const __u32 addr4_const;
     const struct in6_addr *addr6_const;
+    int expected_errno;
 };

 FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
 {
     .addr4_const = INADDR_ANY,
     .addr6_const = &in6addr_any,
+    .expected_errno = EADDRINUSE,
 };

 FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
 {
     .addr4_const = INADDR_ANY,
     .addr6_const = &in6addr_loopback,
+    .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_any)
+{
+    .addr4_const = INADDR_ANY,
+    .addr6_const = &in6addr_v4mapped_any,
+    .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_v4mapped_local)
+{
+    .addr4_const = INADDR_ANY,
+    .addr6_const = &in6addr_v4mapped_loopback,
+    .expected_errno = EADDRINUSE,
 };

 FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
 {
     .addr4_const = INADDR_LOOPBACK,
     .addr6_const = &in6addr_any,
+    .expected_errno = EADDRINUSE,
 };

 FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
 {
     .addr4_const = INADDR_LOOPBACK,
     .addr6_const = &in6addr_loopback,
+    .expected_errno = 0,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_any)
+{
+    .addr4_const = INADDR_LOOPBACK,
+    .addr6_const = &in6addr_v4mapped_any,
+    .expected_errno = EADDRINUSE,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_v4mapped_local)
+{
+    .addr4_const = INADDR_LOOPBACK,
+    .addr6_const = &in6addr_v4mapped_loopback,
+    .expected_errno = EADDRINUSE,
 };

 FIXTURE_SETUP(bind_wildcard)
···
     self->addr6.sin6_family = AF_INET6;
     self->addr6.sin6_port = htons(0);
     self->addr6.sin6_addr = *variant->addr6_const;
-
-    if (variant->addr6_const == &in6addr_any)
-        self->expected_errno = EADDRINUSE;
-    else
-        self->expected_errno = 0;
 }

 FIXTURE_TEARDOWN(bind_wildcard)
···

 void bind_sockets(struct __test_metadata *_metadata,
                   FIXTURE_DATA(bind_wildcard) *self,
+                  int expected_errno,
                   struct sockaddr *addr1, socklen_t addrlen1,
                   struct sockaddr *addr2, socklen_t addrlen2)
 {
···
     ASSERT_GT(fd[1], 0);

     ret = bind(fd[1], addr2, addrlen2);
-    if (self->expected_errno) {
+    if (expected_errno) {
         ASSERT_EQ(ret, -1);
-        ASSERT_EQ(errno, self->expected_errno);
+        ASSERT_EQ(errno, expected_errno);
     } else {
         ASSERT_EQ(ret, 0);
     }
···

 TEST_F(bind_wildcard, v4_v6)
 {
-    bind_sockets(_metadata, self,
-                 (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+    bind_sockets(_metadata, self, variant->expected_errno,
+                 (struct sockaddr *)&self->addr4, sizeof(self->addr4),
                  (struct sockaddr *)&self->addr6, sizeof(self->addr6));
 }

 TEST_F(bind_wildcard, v6_v4)
 {
-    bind_sockets(_metadata, self,
+    bind_sockets(_metadata, self, variant->expected_errno,
                  (struct sockaddr *)&self->addr6, sizeof(self->addr6),
                  (struct sockaddr *)&self->addr4, sizeof(self->addr4));
 }
+3  tools/testing/selftests/user_events/abi_test.c
···
 #include <asm/unistd.h>

 #include "../kselftest_harness.h"
+#include "user_events_selftests.h"

 const char *data_file = "/sys/kernel/tracing/user_events_data";
 const char *enable_file = "/sys/kernel/tracing/events/user_events/__abi_event/enable";
···
 };

 FIXTURE_SETUP(user) {
+    USER_EVENT_FIXTURE_SETUP(return);
+
     change_event(false);
     self->check = 0;
 }
+1  tools/testing/selftests/user_events/config
···
+CONFIG_USER_EVENTS=y
+2  tools/testing/selftests/user_events/dyn_test.c
···
 #include <unistd.h>

 #include "../kselftest_harness.h"
+#include "user_events_selftests.h"

 const char *abi_file = "/sys/kernel/tracing/user_events_data";
 const char *enable_file = "/sys/kernel/tracing/events/user_events/__test_event/enable";
···
 };

 FIXTURE_SETUP(user) {
+    USER_EVENT_FIXTURE_SETUP(return);
 }

 FIXTURE_TEARDOWN(user) {
+3  tools/testing/selftests/user_events/ftrace_test.c
···
 #include <unistd.h>

 #include "../kselftest_harness.h"
+#include "user_events_selftests.h"

 const char *data_file = "/sys/kernel/tracing/user_events_data";
 const char *status_file = "/sys/kernel/tracing/user_events_status";
···
 };

 FIXTURE_SETUP(user) {
+    USER_EVENT_FIXTURE_SETUP(return);
+
     self->status_fd = open(status_file, O_RDONLY);
     ASSERT_NE(-1, self->status_fd);

+3  tools/testing/selftests/user_events/perf_test.c
···
 #include <asm/unistd.h>

 #include "../kselftest_harness.h"
+#include "user_events_selftests.h"

 const char *data_file = "/sys/kernel/tracing/user_events_data";
 const char *id_file = "/sys/kernel/tracing/events/user_events/__test_event/id";
···
 };

 FIXTURE_SETUP(user) {
+    USER_EVENT_FIXTURE_SETUP(return);
+
     self->data_fd = open(data_file, O_RDWR);
     ASSERT_NE(-1, self->data_fd);
 }
+100  tools/testing/selftests/user_events/user_events_selftests.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _USER_EVENTS_SELFTESTS_H
+#define _USER_EVENTS_SELFTESTS_H
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include "../kselftest.h"
+
+static inline bool tracefs_enabled(char **message, bool *fail)
+{
+    struct stat buf;
+    int ret;
+
+    *message = "";
+    *fail = false;
+
+    /* Ensure tracefs is installed */
+    ret = stat("/sys/kernel/tracing", &buf);
+
+    if (ret == -1) {
+        *message = "Tracefs is not installed";
+        return false;
+    }
+
+    /* Ensure mounted tracefs */
+    ret = stat("/sys/kernel/tracing/README", &buf);
+
+    if (ret == -1 && errno == ENOENT) {
+        if (mount(NULL, "/sys/kernel/tracing", "tracefs", 0, NULL) != 0) {
+            *message = "Cannot mount tracefs";
+            *fail = true;
+            return false;
+        }
+
+        ret = stat("/sys/kernel/tracing/README", &buf);
+    }
+
+    if (ret == -1) {
+        *message = "Cannot access tracefs";
+        *fail = true;
+        return false;
+    }
+
+    return true;
+}
+
+static inline bool user_events_enabled(char **message, bool *fail)
+{
+    struct stat buf;
+    int ret;
+
+    *message = "";
+    *fail = false;
+
+    if (getuid() != 0) {
+        *message = "Must be run as root";
+        *fail = true;
+        return false;
+    }
+
+    if (!tracefs_enabled(message, fail))
+        return false;
+
+    /* Ensure user_events is installed */
+    ret = stat("/sys/kernel/tracing/user_events_data", &buf);
+
+    if (ret == -1) {
+        switch (errno) {
+        case ENOENT:
+            *message = "user_events is not installed";
+            return false;
+
+        default:
+            *message = "Cannot access user_events_data";
+            *fail = true;
+            return false;
+        }
+    }
+
+    return true;
+}
+
+#define USER_EVENT_FIXTURE_SETUP(statement) do { \
+    char *message; \
+    bool fail; \
+    if (!user_events_enabled(&message, &fail)) { \
+        if (fail) { \
+            TH_LOG("Setup failed due to: %s", message); \
+            ASSERT_FALSE(fail); \
+        } \
+        SKIP(statement, "Skipping due to: %s", message); \
+    } \
+} while (0)
+
+#endif /* _USER_EVENTS_SELFTESTS_H */
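When one of these tests is skipped, the same preconditions the header checks in C can be probed by hand from a root shell; a rough shell equivalent (messages mirror the header, commands are illustrative):

  [ "$(id -u)" -eq 0 ] || echo "Must be run as root"
  # mount tracefs if it is not already mounted (the same mount the header attempts)
  [ -e /sys/kernel/tracing/README ] || mount -t tracefs nodev /sys/kernel/tracing
  # present only with CONFIG_USER_EVENTS=y, which the new config fragment enables
  [ -e /sys/kernel/tracing/user_events_data ] || echo "user_events is not installed"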