Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'sparc-for-6.10-tag1' of git://git.kernel.org/pub/scm/linux/kernel/git/alarsson/linux-sparc

Pull sparc updates from Andreas Larsson:

- Avoid on-stack cpumask variables in a number of places

- Move struct termio to asm/termios.h, matching other architectures and
allowing certain user space applications to build also for sparc

- Fix missing prototype warnings for sparc64

- Fix version generation warnings for sparc32

- Fix bug where non-consecutive CPU IDs lead to some CPUs not starting

- Simplification using swap() and cleanup using NULL for pointer comparison

- Convert sparc parport and chmc drivers to use remove callbacks
returning void

* tag 'sparc-for-6.10-tag1' of git://git.kernel.org/pub/scm/linux/kernel/git/alarsson/linux-sparc:
sparc/leon: Remove on-stack cpumask var
sparc/pci_msi: Remove on-stack cpumask var
sparc/of: Remove on-stack cpumask var
sparc/irq: Remove on-stack cpumask var
sparc/srmmu: Remove on-stack cpumask var
sparc: chmc: Convert to platform remove callback returning void
sparc: parport: Convert to platform remove callback returning void
sparc: Compare pointers to NULL instead of 0
sparc: Use swap() to fix Coccinelle warning
sparc32: Fix version generation failed warnings
sparc64: Fix number of online CPUs
sparc64: Fix prototype warning for sched_clock
sparc64: Fix prototype warnings in adi_64.c
sparc64: Fix prototype warning for dma_4v_iotsb_bind
sparc64: Fix prototype warning for uprobe_trap
sparc64: Fix prototype warning for alloc_irqstack_bootmem
sparc64: Fix prototype warning for vmemmap_free
sparc64: Fix prototype warnings in traps_64.c
sparc64: Fix prototype warning for init_vdso_image
sparc: move struct termio to asm/termios.h

+79 -117
+15 -8
arch/sparc/include/asm/asm-prototypes.h
··· 3 3 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 4 4 */ 5 5 6 - #include <asm/xor.h> 7 - #include <asm/checksum.h> 8 - #include <asm/trap_block.h> 9 - #include <linux/uaccess.h> 10 - #include <asm/atomic.h> 11 - #include <asm/ftrace.h> 12 - #include <asm/cacheflush.h> 13 - #include <asm/oplib.h> 14 6 #include <linux/atomic.h> 7 + #include <linux/uaccess.h> 8 + 9 + #include <asm/atomic.h> 10 + #include <asm/cacheflush.h> 11 + #include <asm/checksum.h> 12 + #include <asm/delay.h> 13 + #include <asm/ftrace.h> 14 + #include <asm/oplib.h> 15 + #include <asm/pgtable.h> 16 + #include <asm/trap_block.h> 17 + #include <asm/xor.h> 15 18 16 19 void *__memscan_zero(void *, size_t); 17 20 void *__memscan_generic(void *, int, size_t); ··· 26 23 void *memset(void *s, int c, size_t n); 27 24 typedef int TItype __attribute__((mode(TI))); 28 25 TItype __multi3(TItype a, TItype b); 26 + 27 + s64 __ashldi3(s64, int); 28 + s64 __lshrdi3(s64, int); 29 + s64 __ashrdi3(s64, int);
+1 -3
arch/sparc/include/asm/floppy_64.h
··· 704 704 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0); 705 705 ns87303_modify(config, FCR, 0, FCR_LDE); 706 706 707 - config = sun_floppy_types[0]; 708 - sun_floppy_types[0] = sun_floppy_types[1]; 709 - sun_floppy_types[1] = config; 707 + swap(sun_floppy_types[0], sun_floppy_types[1]); 710 708 711 709 if (sun_pci_broken_drive != -1) { 712 710 sun_pci_broken_drive = 1 - sun_pci_broken_drive;
+2 -4
arch/sparc/include/asm/parport_64.h
··· 196 196 return err; 197 197 } 198 198 199 - static int ecpp_remove(struct platform_device *op) 199 + static void ecpp_remove(struct platform_device *op) 200 200 { 201 201 struct parport *p = dev_get_drvdata(&op->dev); 202 202 int slot = p->dma; ··· 216 216 d_len); 217 217 clear_bit(slot, dma_slot_map); 218 218 } 219 - 220 - return 0; 221 219 } 222 220 223 221 static const struct of_device_id ecpp_match[] = { ··· 243 245 .of_match_table = ecpp_match, 244 246 }, 245 247 .probe = ecpp_probe, 246 - .remove = ecpp_remove, 248 + .remove_new = ecpp_remove, 247 249 }; 248 250 249 251 static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
-2
arch/sparc/include/asm/smp_64.h
··· 47 47 int hard_smp_processor_id(void); 48 48 #define raw_smp_processor_id() (current_thread_info()->cpu) 49 49 50 - void smp_fill_in_cpu_possible_map(void); 51 50 void smp_fill_in_sib_core_maps(void); 52 51 void __noreturn cpu_play_dead(void); 53 52 ··· 76 77 #define smp_fill_in_sib_core_maps() do { } while (0) 77 78 #define smp_fetch_global_regs() do { } while (0) 78 79 #define smp_fetch_global_pmu() do { } while (0) 79 - #define smp_fill_in_cpu_possible_map() do { } while (0) 80 80 #define smp_init_cpu_poke() do { } while (0) 81 81 #define scheduler_poke() do { } while (0) 82 82
-10
arch/sparc/include/uapi/asm/termbits.h
··· 10 10 typedef unsigned long tcflag_t; 11 11 #endif 12 12 13 - #define NCC 8 14 - struct termio { 15 - unsigned short c_iflag; /* input mode flags */ 16 - unsigned short c_oflag; /* output mode flags */ 17 - unsigned short c_cflag; /* control mode flags */ 18 - unsigned short c_lflag; /* local mode flags */ 19 - unsigned char c_line; /* line discipline */ 20 - unsigned char c_cc[NCC]; /* control characters */ 21 - }; 22 - 23 13 #define NCCS 17 24 14 struct termios { 25 15 tcflag_t c_iflag; /* input mode flags */
+9
arch/sparc/include/uapi/asm/termios.h
··· 40 40 unsigned short ws_ypixel; 41 41 }; 42 42 43 + #define NCC 8 44 + struct termio { 45 + unsigned short c_iflag; /* input mode flags */ 46 + unsigned short c_oflag; /* output mode flags */ 47 + unsigned short c_cflag; /* control mode flags */ 48 + unsigned short c_lflag; /* local mode flags */ 49 + unsigned char c_line; /* line discipline */ 50 + unsigned char c_cc[NCC]; /* control characters */ 51 + }; 43 52 44 53 #endif /* _UAPI_SPARC_TERMIOS_H */
+7 -7
arch/sparc/kernel/adi_64.c
··· 121 121 mdesc_release(hp); 122 122 } 123 123 124 - tag_storage_desc_t *find_tag_store(struct mm_struct *mm, 125 - struct vm_area_struct *vma, 126 - unsigned long addr) 124 + static tag_storage_desc_t *find_tag_store(struct mm_struct *mm, 125 + struct vm_area_struct *vma, 126 + unsigned long addr) 127 127 { 128 128 tag_storage_desc_t *tag_desc = NULL; 129 129 unsigned long i, max_desc, flags; ··· 153 153 return tag_desc; 154 154 } 155 155 156 - tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm, 157 - struct vm_area_struct *vma, 158 - unsigned long addr) 156 + static tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm, 157 + struct vm_area_struct *vma, 158 + unsigned long addr) 159 159 { 160 160 unsigned char *tags; 161 161 unsigned long i, size, max_desc, flags; ··· 296 296 return tag_desc; 297 297 } 298 298 299 - void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm) 299 + static void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm) 300 300 { 301 301 unsigned long flags; 302 302 unsigned char *tags = NULL;
+2 -3
arch/sparc/kernel/chmc.c
··· 788 788 kfree(p); 789 789 } 790 790 791 - static int us3mc_remove(struct platform_device *op) 791 + static void us3mc_remove(struct platform_device *op) 792 792 { 793 793 void *p = dev_get_drvdata(&op->dev); 794 794 ··· 798 798 else if (mc_type == MC_TYPE_JBUS) 799 799 jbusmc_destroy(op, p); 800 800 } 801 - return 0; 802 801 } 803 802 804 803 static const struct of_device_id us3mc_match[] = { ··· 814 815 .of_match_table = us3mc_match, 815 816 }, 816 817 .probe = us3mc_probe, 817 - .remove = us3mc_remove, 818 + .remove_new = us3mc_remove, 818 819 }; 819 820 820 821 static inline bool us3mc_platform(void)
+3 -7
arch/sparc/kernel/irq_64.c
··· 349 349 #ifdef CONFIG_SMP 350 350 static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) 351 351 { 352 - cpumask_t mask; 353 352 int cpuid; 354 353 355 - cpumask_copy(&mask, affinity); 356 - if (cpumask_equal(&mask, cpu_online_mask)) { 354 + if (cpumask_equal(affinity, cpu_online_mask)) { 357 355 cpuid = map_to_cpu(irq); 358 356 } else { 359 - cpumask_t tmp; 360 - 361 - cpumask_and(&tmp, cpu_online_mask, &mask); 362 - cpuid = cpumask_empty(&tmp) ? map_to_cpu(irq) : cpumask_first(&tmp); 357 + cpuid = cpumask_first_and(affinity, cpu_online_mask); 358 + cpuid = cpuid < nr_cpu_ids ? cpuid : map_to_cpu(irq); 363 359 } 364 360 365 361 return cpuid;
+4
arch/sparc/kernel/kernel.h
··· 40 40 void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); 41 41 void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); 42 42 43 + /* uprobes.c */ 44 + asmlinkage void uprobe_trap(struct pt_regs *regs, 45 + unsigned long trap_level); 46 + 43 47 /* smp_64.c */ 44 48 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); 45 49 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
+3 -4
arch/sparc/kernel/leon_kernel.c
··· 106 106 #ifdef CONFIG_SMP 107 107 static int irq_choose_cpu(const struct cpumask *affinity) 108 108 { 109 - cpumask_t mask; 109 + unsigned int cpu = cpumask_first_and(affinity, cpu_online_mask); 110 110 111 - cpumask_and(&mask, cpu_online_mask, affinity); 112 - if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask)) 111 + if (cpumask_subset(cpu_online_mask, affinity) || cpu >= nr_cpu_ids) 113 112 return boot_cpu_id; 114 113 else 115 - return cpumask_first(&mask); 114 + return cpu; 116 115 } 117 116 #else 118 117 #define irq_choose_cpu(affinity) boot_cpu_id
+1 -4
arch/sparc/kernel/of_device_64.c
··· 624 624 out: 625 625 nid = of_node_to_nid(dp); 626 626 if (nid != -1) { 627 - cpumask_t numa_mask; 628 - 629 - cpumask_copy(&numa_mask, cpumask_of_node(nid)); 630 - irq_set_affinity(irq, &numa_mask); 627 + irq_set_affinity(irq, cpumask_of_node(nid)); 631 628 } 632 629 633 630 return irq;
+1 -4
arch/sparc/kernel/pci_msi.c
··· 287 287 288 288 nid = pbm->numa_node; 289 289 if (nid != -1) { 290 - cpumask_t numa_mask; 291 - 292 - cpumask_copy(&numa_mask, cpumask_of_node(nid)); 293 - irq_set_affinity(irq, &numa_mask); 290 + irq_set_affinity(irq, cpumask_of_node(nid)); 294 291 } 295 292 err = request_irq(irq, sparc64_msiq_interrupt, 0, 296 293 "MSIQ",
+3 -3
arch/sparc/kernel/pci_sun4v.c
··· 256 256 return NULL; 257 257 } 258 258 259 - unsigned long dma_4v_iotsb_bind(unsigned long devhandle, 260 - unsigned long iotsb_num, 261 - struct pci_bus *bus_dev) 259 + static unsigned long dma_4v_iotsb_bind(unsigned long devhandle, 260 + unsigned long iotsb_num, 261 + struct pci_bus *bus_dev) 262 262 { 263 263 struct pci_dev *pdev; 264 264 unsigned long err;
+3 -1
arch/sparc/kernel/prom_64.c
··· 483 483 ncpus_probed++; 484 484 #ifdef CONFIG_SMP 485 485 set_cpu_present(cpuid, true); 486 - set_cpu_possible(cpuid, true); 486 + 487 + if (num_possible_cpus() < nr_cpu_ids) 488 + set_cpu_possible(cpuid, true); 487 489 #endif 488 490 return NULL; 489 491 }
+1 -2
arch/sparc/kernel/setup_64.c
··· 599 599 pause_patch(); 600 600 } 601 601 602 - void __init alloc_irqstack_bootmem(void) 602 + static void __init alloc_irqstack_bootmem(void) 603 603 { 604 604 unsigned int i, node; 605 605 ··· 671 671 672 672 paging_init(); 673 673 init_sparc64_elf_hwcap(); 674 - smp_fill_in_cpu_possible_map(); 675 674 /* 676 675 * Once the OF device tree and MDESC have been setup and nr_cpus has 677 676 * been parsed, we know the list of possible cpus. Therefore we can
-14
arch/sparc/kernel/smp_64.c
··· 1216 1216 xcall_deliver_impl = hypervisor_xcall_deliver; 1217 1217 } 1218 1218 1219 - void __init smp_fill_in_cpu_possible_map(void) 1220 - { 1221 - int possible_cpus = num_possible_cpus(); 1222 - int i; 1223 - 1224 - if (possible_cpus > nr_cpu_ids) 1225 - possible_cpus = nr_cpu_ids; 1226 - 1227 - for (i = 0; i < possible_cpus; i++) 1228 - set_cpu_possible(i, true); 1229 - for (; i < NR_CPUS; i++) 1230 - set_cpu_possible(i, false); 1231 - } 1232 - 1233 1219 void smp_fill_in_sib_core_maps(void) 1234 1220 { 1235 1221 unsigned int i;
+1
arch/sparc/kernel/time_64.c
··· 33 33 #include <linux/clockchips.h> 34 34 #include <linux/clocksource.h> 35 35 #include <linux/platform_device.h> 36 + #include <linux/sched/clock.h> 36 37 #include <linux/ftrace.h> 37 38 38 39 #include <asm/oplib.h>
+5 -5
arch/sparc/kernel/traps_64.c
··· 250 250 sun4v_insn_access_exception(regs, addr, type_ctx); 251 251 } 252 252 253 - bool is_no_fault_exception(struct pt_regs *regs) 253 + static bool is_no_fault_exception(struct pt_regs *regs) 254 254 { 255 255 unsigned char asi; 256 256 u32 insn; ··· 2032 2032 /* Handle memory corruption detected error which is vectored in 2033 2033 * through resumable error trap. 2034 2034 */ 2035 - void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent) 2035 + static void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent) 2036 2036 { 2037 2037 if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34, 2038 2038 SIGSEGV) == NOTIFY_STOP) ··· 2150 2150 /* Attempt to handle non-resumable errors generated from userspace. 2151 2151 * Returns true if the signal was handled, false otherwise. 2152 2152 */ 2153 - bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, 2154 - struct sun4v_error_entry *ent) { 2155 - 2153 + static bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, 2154 + struct sun4v_error_entry *ent) 2155 + { 2156 2156 unsigned int attrs = ent->err_attrs; 2157 2157 2158 2158 if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+2
arch/sparc/kernel/uprobes.c
··· 18 18 19 19 #include <asm/cacheflush.h> 20 20 21 + #include "kernel.h" 22 + 21 23 /* Compute the address of the breakpoint instruction and return it. 22 24 * 23 25 * Note that uprobe_get_swbp_addr is defined as a weak symbol in
-5
arch/sparc/mm/init_64.c
··· 2640 2640 2641 2641 return 0; 2642 2642 } 2643 - 2644 - void vmemmap_free(unsigned long start, unsigned long end, 2645 - struct vmem_altmap *altmap) 2646 - { 2647 - } 2648 2643 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 2649 2644 2650 2645 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+12 -28
arch/sparc/mm/srmmu.c
··· 1653 1653 local_ops->tlb_all(); 1654 1654 } 1655 1655 1656 + static bool any_other_mm_cpus(struct mm_struct *mm) 1657 + { 1658 + return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids; 1659 + } 1660 + 1656 1661 static void smp_flush_cache_mm(struct mm_struct *mm) 1657 1662 { 1658 1663 if (mm->context != NO_CONTEXT) { 1659 - cpumask_t cpu_mask; 1660 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1661 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1662 - if (!cpumask_empty(&cpu_mask)) 1664 + if (any_other_mm_cpus(mm)) 1663 1665 xc1(local_ops->cache_mm, (unsigned long)mm); 1664 1666 local_ops->cache_mm(mm); 1665 1667 } ··· 1670 1668 static void smp_flush_tlb_mm(struct mm_struct *mm) 1671 1669 { 1672 1670 if (mm->context != NO_CONTEXT) { 1673 - cpumask_t cpu_mask; 1674 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1675 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1676 - if (!cpumask_empty(&cpu_mask)) { 1671 + if (any_other_mm_cpus(mm)) { 1677 1672 xc1(local_ops->tlb_mm, (unsigned long)mm); 1678 1673 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) 1679 1674 cpumask_copy(mm_cpumask(mm), ··· 1687 1688 struct mm_struct *mm = vma->vm_mm; 1688 1689 1689 1690 if (mm->context != NO_CONTEXT) { 1690 - cpumask_t cpu_mask; 1691 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1692 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1693 - if (!cpumask_empty(&cpu_mask)) 1691 + if (any_other_mm_cpus(mm)) 1694 1692 xc3(local_ops->cache_range, (unsigned long)vma, start, 1695 1693 end); 1696 1694 local_ops->cache_range(vma, start, end); ··· 1701 1705 struct mm_struct *mm = vma->vm_mm; 1702 1706 1703 1707 if (mm->context != NO_CONTEXT) { 1704 - cpumask_t cpu_mask; 1705 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1706 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1707 - if (!cpumask_empty(&cpu_mask)) 1708 + if (any_other_mm_cpus(mm)) 1708 1709 xc3(local_ops->tlb_range, (unsigned long)vma, start, 1709 1710 end); 1710 1711 local_ops->tlb_range(vma, 
start, end); ··· 1713 1720 struct mm_struct *mm = vma->vm_mm; 1714 1721 1715 1722 if (mm->context != NO_CONTEXT) { 1716 - cpumask_t cpu_mask; 1717 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1718 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1719 - if (!cpumask_empty(&cpu_mask)) 1723 + if (any_other_mm_cpus(mm)) 1720 1724 xc2(local_ops->cache_page, (unsigned long)vma, page); 1721 1725 local_ops->cache_page(vma, page); 1722 1726 } ··· 1724 1734 struct mm_struct *mm = vma->vm_mm; 1725 1735 1726 1736 if (mm->context != NO_CONTEXT) { 1727 - cpumask_t cpu_mask; 1728 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1729 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1730 - if (!cpumask_empty(&cpu_mask)) 1737 + if (any_other_mm_cpus(mm)) 1731 1738 xc2(local_ops->tlb_page, (unsigned long)vma, page); 1732 1739 local_ops->tlb_page(vma, page); 1733 1740 } ··· 1746 1759 1747 1760 static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) 1748 1761 { 1749 - cpumask_t cpu_mask; 1750 - cpumask_copy(&cpu_mask, mm_cpumask(mm)); 1751 - cpumask_clear_cpu(smp_processor_id(), &cpu_mask); 1752 - if (!cpumask_empty(&cpu_mask)) 1762 + if (any_other_mm_cpus(mm)) 1753 1763 xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr); 1754 1764 local_ops->sig_insns(mm, insn_addr); 1755 1765 }
+1 -1
arch/sparc/prom/tree_64.c
··· 332 332 333 333 if (size == 0) 334 334 return 0; 335 - if ((pname == 0) || (value == 0)) 335 + if ((pname == NULL) || (value == NULL)) 336 336 return 0; 337 337 338 338 #ifdef CONFIG_SUN_LDOMS
+3 -2
arch/sparc/vdso/vma.c
··· 243 243 * Allocate pages for the vdso and vvar, and copy in the vdso text from the 244 244 * kernel image. 245 245 */ 246 - int __init init_vdso_image(const struct vdso_image *image, 247 - struct vm_special_mapping *vdso_mapping, bool elf64) 246 + static int __init init_vdso_image(const struct vdso_image *image, 247 + struct vm_special_mapping *vdso_mapping, 248 + bool elf64) 248 249 { 249 250 int cnpages = (image->size) / PAGE_SIZE; 250 251 struct page *dp, **dpp = NULL;