Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:

- Add jump-label implementation

- Add qspinlock support

- Enable ARCH_INLINE_READ*/WRITE*/SPIN*

- Some fixups and a coding-convention cleanup

* tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux:
csky: abiv1: Fixup compile error
csky: cmpxchg: Coding convention for BUILD_BUG()
csky: Enable ARCH_INLINE_READ*/WRITE*/SPIN*
csky: Add qspinlock support
csky: Add jump-label implementation
csky: Move HEAD_TEXT_SECTION out of __init_begin-end
csky: Correct position of _stext
csky: Use the bitmap API to allocate bitmaps
csky/kprobe: reclaim insn_slot on kprobe unregistration

+211 -20
+29
arch/csky/Kconfig
··· 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 9 select ARCH_USE_BUILTIN_BSWAP 10 10 select ARCH_USE_QUEUED_RWLOCKS 11 + select ARCH_USE_QUEUED_SPINLOCKS 12 + select ARCH_INLINE_READ_LOCK if !PREEMPTION 13 + select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION 14 + select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION 15 + select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION 16 + select ARCH_INLINE_READ_UNLOCK if !PREEMPTION 17 + select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION 18 + select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION 19 + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION 20 + select ARCH_INLINE_WRITE_LOCK if !PREEMPTION 21 + select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION 22 + select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION 23 + select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION 24 + select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION 25 + select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION 26 + select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION 27 + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION 28 + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION 29 + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION 30 + select ARCH_INLINE_SPIN_LOCK if !PREEMPTION 31 + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION 32 + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION 33 + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION 34 + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION 35 + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION 36 + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION 37 + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION 11 38 select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) 12 39 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 13 40 select COMMON_CLK ··· 67 40 select GX6605S_TIMER if CPU_CK610 68 41 select HAVE_ARCH_TRACEHOOK 69 42 select HAVE_ARCH_AUDITSYSCALL 43 + select HAVE_ARCH_JUMP_LABEL if !CPU_CK610 44 + select HAVE_ARCH_JUMP_LABEL_RELATIVE 70 45 select HAVE_ARCH_MMAP_RND_BITS 71 46 select HAVE_ARCH_SECCOMP_FILTER 
72 47 select HAVE_CONTEXT_TRACKING_USER
+6
arch/csky/abiv1/inc/abi/string.h
··· 6 6 #define __HAVE_ARCH_MEMCPY 7 7 extern void *memcpy(void *, const void *, __kernel_size_t); 8 8 9 + #define __HAVE_ARCH_MEMMOVE 10 + extern void *memmove(void *, const void *, __kernel_size_t); 11 + 12 + #define __HAVE_ARCH_MEMSET 13 + extern void *memset(void *, int, __kernel_size_t); 14 + 9 15 #endif /* __ABI_CSKY_STRING_H */
+2 -2
arch/csky/include/asm/Kbuild
··· 3 3 generic-y += extable.h 4 4 generic-y += gpio.h 5 5 generic-y += kvm_para.h 6 - generic-y += spinlock.h 7 - generic-y += spinlock_types.h 6 + generic-y += mcs_spinlock.h 8 7 generic-y += qrwlock.h 9 8 generic-y += qrwlock_types.h 9 + generic-y += qspinlock.h 10 10 generic-y += parport.h 11 11 generic-y += user.h 12 12 generic-y += vmlinux.lds.h
+25 -6
arch/csky/include/asm/cmpxchg.h
··· 4 4 #define __ASM_CSKY_CMPXCHG_H 5 5 6 6 #ifdef CONFIG_SMP 7 + #include <linux/bug.h> 7 8 #include <asm/barrier.h> 8 - 9 - extern void __bad_xchg(void); 10 9 11 10 #define __xchg_relaxed(new, ptr, size) \ 12 11 ({ \ ··· 14 15 __typeof__(*(ptr)) __ret; \ 15 16 unsigned long tmp; \ 16 17 switch (size) { \ 18 + case 2: { \ 19 + u32 ret; \ 20 + u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \ 21 + u32 mask = 0xffff << shif; \ 22 + __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \ 23 + __asm__ __volatile__ ( \ 24 + "1: ldex.w %0, (%4)\n" \ 25 + " and %1, %0, %2\n" \ 26 + " or %1, %1, %3\n" \ 27 + " stex.w %1, (%4)\n" \ 28 + " bez %1, 1b\n" \ 29 + : "=&r" (ret), "=&r" (tmp) \ 30 + : "r" (~mask), \ 31 + "r" ((u32)__new << shif), \ 32 + "r" (__ptr) \ 33 + : "memory"); \ 34 + __ret = (__typeof__(*(ptr))) \ 35 + ((ret & mask) >> shif); \ 36 + break; \ 37 + } \ 17 38 case 4: \ 18 39 asm volatile ( \ 19 40 "1: ldex.w %0, (%3) \n" \ ··· 45 26 :); \ 46 27 break; \ 47 28 default: \ 48 - __bad_xchg(); \ 29 + BUILD_BUG(); \ 49 30 } \ 50 31 __ret; \ 51 32 }) ··· 75 56 :); \ 76 57 break; \ 77 58 default: \ 78 - __bad_xchg(); \ 59 + BUILD_BUG(); \ 79 60 } \ 80 61 __ret; \ 81 62 }) ··· 106 87 :); \ 107 88 break; \ 108 89 default: \ 109 - __bad_xchg(); \ 90 + BUILD_BUG(); \ 110 91 } \ 111 92 __ret; \ 112 93 }) ··· 138 119 :); \ 139 120 break; \ 140 121 default: \ 141 - __bad_xchg(); \ 122 + BUILD_BUG(); \ 142 123 } \ 143 124 __ret; \ 144 125 })
+47
arch/csky/include/asm/jump_label.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_CSKY_JUMP_LABEL_H 4 + #define __ASM_CSKY_JUMP_LABEL_H 5 + 6 + #ifndef __ASSEMBLY__ 7 + 8 + #include <linux/types.h> 9 + 10 + #define JUMP_LABEL_NOP_SIZE 4 11 + 12 + static __always_inline bool arch_static_branch(struct static_key *key, 13 + bool branch) 14 + { 15 + asm_volatile_goto( 16 + "1: nop32 \n" 17 + " .pushsection __jump_table, \"aw\" \n" 18 + " .align 2 \n" 19 + " .long 1b - ., %l[label] - . \n" 20 + " .long %0 - . \n" 21 + " .popsection \n" 22 + : : "i"(&((char *)key)[branch]) : : label); 23 + 24 + return false; 25 + label: 26 + return true; 27 + } 28 + 29 + static __always_inline bool arch_static_branch_jump(struct static_key *key, 30 + bool branch) 31 + { 32 + asm_volatile_goto( 33 + "1: bsr32 %l[label] \n" 34 + " .pushsection __jump_table, \"aw\" \n" 35 + " .align 2 \n" 36 + " .long 1b - ., %l[label] - . \n" 37 + " .long %0 - . \n" 38 + " .popsection \n" 39 + : : "i"(&((char *)key)[branch]) : : label); 40 + 41 + return false; 42 + label: 43 + return true; 44 + } 45 + 46 + #endif /* __ASSEMBLY__ */ 47 + #endif /* __ASM_CSKY_JUMP_LABEL_H */
+10
arch/csky/include/asm/sections.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_SECTIONS_H 4 + #define __ASM_SECTIONS_H 5 + 6 + #include <asm-generic/sections.h> 7 + 8 + extern char _start[]; 9 + 10 + #endif /* __ASM_SECTIONS_H */
+12
arch/csky/include/asm/spinlock.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_SPINLOCK_H 4 + #define __ASM_CSKY_SPINLOCK_H 5 + 6 + #include <asm/qspinlock.h> 7 + #include <asm/qrwlock.h> 8 + 9 + /* See include/linux/spinlock.h */ 10 + #define smp_mb__after_spinlock() smp_mb() 11 + 12 + #endif /* __ASM_CSKY_SPINLOCK_H */
+9
arch/csky/include/asm/spinlock_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_SPINLOCK_TYPES_H 4 + #define __ASM_CSKY_SPINLOCK_TYPES_H 5 + 6 + #include <asm-generic/qspinlock_types.h> 7 + #include <asm-generic/qrwlock_types.h> 8 + 9 + #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
+1
arch/csky/kernel/Makefile
··· 13 13 obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o 14 14 obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 15 15 obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o 16 + obj-$(CONFIG_JUMP_LABEL) += jump_label.o 16 17 17 18 ifdef CONFIG_FUNCTION_TRACER 18 19 CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+54
arch/csky/kernel/jump_label.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/jump_label.h> 4 + #include <linux/kernel.h> 5 + #include <linux/memory.h> 6 + #include <linux/mutex.h> 7 + #include <linux/uaccess.h> 8 + #include <asm/cacheflush.h> 9 + 10 + #define NOP32_HI 0xc400 11 + #define NOP32_LO 0x4820 12 + #define BSR_LINK 0xe000 13 + 14 + void arch_jump_label_transform(struct jump_entry *entry, 15 + enum jump_label_type type) 16 + { 17 + unsigned long addr = jump_entry_code(entry); 18 + u16 insn[2]; 19 + int ret = 0; 20 + 21 + if (type == JUMP_LABEL_JMP) { 22 + long offset = jump_entry_target(entry) - jump_entry_code(entry); 23 + 24 + if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864)) 25 + return; 26 + 27 + offset = offset >> 1; 28 + 29 + insn[0] = BSR_LINK | 30 + ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); 31 + insn[1] = (uint16_t)((unsigned long) offset & 0xffff); 32 + } else { 33 + insn[0] = NOP32_HI; 34 + insn[1] = NOP32_LO; 35 + } 36 + 37 + ret = copy_to_kernel_nofault((void *)addr, insn, 4); 38 + WARN_ON(ret); 39 + 40 + flush_icache_range(addr, addr + 4); 41 + } 42 + 43 + void arch_jump_label_transform_static(struct jump_entry *entry, 44 + enum jump_label_type type) 45 + { 46 + /* 47 + * We use the same instructions in the arch_static_branch and 48 + * arch_static_branch_jump inline functions, so there's no 49 + * need to patch them up here. 50 + * The core will call arch_jump_label_transform when those 51 + * instructions need to be replaced. 52 + */ 53 + arch_jump_label_transform(entry, type); 54 + }
+4
arch/csky/kernel/probes/kprobes.c
··· 124 124 125 125 void __kprobes arch_remove_kprobe(struct kprobe *p) 126 126 { 127 + if (p->ainsn.api.insn) { 128 + free_insn_slot(p->ainsn.api.insn, 0); 129 + p->ainsn.api.insn = NULL; 130 + } 127 131 } 128 132 129 133 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+2 -2
arch/csky/kernel/setup.c
··· 31 31 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; 32 32 signed long size; 33 33 34 - memblock_reserve(__pa(_stext), _end - _stext); 34 + memblock_reserve(__pa(_start), _end - _start); 35 35 36 36 early_init_fdt_reserve_self(); 37 37 early_init_fdt_scan_reserved_mem(); ··· 78 78 pr_info("Phys. mem: %ldMB\n", 79 79 (unsigned long) memblock_phys_mem_size()/1024/1024); 80 80 81 - setup_initial_init_mm(_stext, _etext, _edata, _end); 81 + setup_initial_init_mm(_start, _etext, _edata, _end); 82 82 83 83 parse_early_param(); 84 84
+8 -7
arch/csky/kernel/vmlinux.lds.S
··· 22 22 { 23 23 . = PAGE_OFFSET + PHYS_OFFSET_OFFSET; 24 24 25 - _stext = .; 26 - __init_begin = .; 25 + _start = .; 27 26 HEAD_TEXT_SECTION 28 - INIT_TEXT_SECTION(PAGE_SIZE) 29 - INIT_DATA_SECTION(PAGE_SIZE) 30 - PERCPU_SECTION(L1_CACHE_BYTES) 31 27 . = ALIGN(PAGE_SIZE); 32 - __init_end = .; 33 28 34 29 .text : AT(ADDR(.text) - LOAD_OFFSET) { 35 30 _text = .; 31 + _stext = .; 36 32 VBR_BASE 37 33 IRQENTRY_TEXT 38 34 SOFTIRQENTRY_TEXT ··· 44 48 45 49 /* __init_begin __init_end must be page aligned for free_initmem */ 46 50 . = ALIGN(PAGE_SIZE); 47 - 51 + __init_begin = .; 52 + INIT_TEXT_SECTION(PAGE_SIZE) 53 + INIT_DATA_SECTION(PAGE_SIZE) 54 + PERCPU_SECTION(L1_CACHE_BYTES) 55 + . = ALIGN(PAGE_SIZE); 56 + __init_end = .; 48 57 49 58 _sdata = .; 50 59 RO_DATA(PAGE_SIZE)
+2 -3
arch/csky/mm/asid.c
··· 27 27 u64 asid; 28 28 29 29 /* Update the list of reserved ASIDs and the ASID bitmap. */ 30 - bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info)); 30 + bitmap_zero(info->map, NUM_CTXT_ASIDS(info)); 31 31 32 32 for_each_possible_cpu(i) { 33 33 asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); ··· 178 178 */ 179 179 WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus()); 180 180 atomic64_set(&info->generation, ASID_FIRST_VERSION(info)); 181 - info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)), 182 - sizeof(*info->map), GFP_KERNEL); 181 + info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL); 183 182 if (!info->map) 184 183 return -ENOMEM; 185 184