Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

riscv: Replace __ASSEMBLY__ with __ASSEMBLER__ in non-uapi headers

While the GCC and Clang compilers already define __ASSEMBLER__
automatically when compiling assembly code, __ASSEMBLY__ is a
macro that only gets defined by the Makefiles in the kernel.
This can be very confusing when switching between userspace
and kernelspace coding, or when dealing with uapi headers that
rather should use __ASSEMBLER__ instead. So let's standardize on
the __ASSEMBLER__ macro that is provided by the compilers now.

This originally was a completely mechanical patch (done with a
simple "sed -i" statement), with some manual fixups during
rebasing of the patch later.

Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: linux-riscv@lists.infradead.org
Signed-off-by: Thomas Huth <thuth@redhat.com>
Link: https://lore.kernel.org/r/20250606070952.498274-3-thuth@redhat.com
Signed-off-by: Paul Walmsley <pjw@kernel.org>

Authored by Thomas Huth and committed by Paul Walmsley
f811f585 35ebe003

+76 -76
+6 -6
arch/riscv/include/asm/alternative-macros.h
··· 4 4 5 5 #ifdef CONFIG_RISCV_ALTERNATIVE 6 6 7 - #ifdef __ASSEMBLY__ 7 + #ifdef __ASSEMBLER__ 8 8 9 9 .macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len 10 10 .4byte \oldptr - . ··· 53 53 #define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__ 54 54 #define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__ 55 55 56 - #else /* !__ASSEMBLY__ */ 56 + #else /* !__ASSEMBLER__ */ 57 57 58 58 #include <asm/asm.h> 59 59 #include <linux/stringify.h> ··· 98 98 __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \ 99 99 ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2) 100 100 101 - #endif /* __ASSEMBLY__ */ 101 + #endif /* __ASSEMBLER__ */ 102 102 103 103 #define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \ 104 104 __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k)) ··· 109 109 new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2)) 110 110 111 111 #else /* CONFIG_RISCV_ALTERNATIVE */ 112 - #ifdef __ASSEMBLY__ 112 + #ifdef __ASSEMBLER__ 113 113 114 114 .macro ALTERNATIVE_CFG old_c 115 115 \old_c ··· 118 118 #define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c 119 119 #define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c 120 120 121 - #else /* !__ASSEMBLY__ */ 121 + #else /* !__ASSEMBLER__ */ 122 122 123 123 #define __ALTERNATIVE_CFG(old_c, ...) old_c "\n" 124 124 #define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n" 125 125 126 - #endif /* __ASSEMBLY__ */ 126 + #endif /* __ASSEMBLER__ */ 127 127 128 128 #define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c) 129 129 #define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
+1 -1
arch/riscv/include/asm/alternative.h
··· 8 8 9 9 #include <asm/alternative-macros.h> 10 10 11 - #ifndef __ASSEMBLY__ 11 + #ifndef __ASSEMBLER__ 12 12 13 13 #ifdef CONFIG_RISCV_ALTERNATIVE 14 14
+3 -3
arch/riscv/include/asm/asm-extable.h
··· 10 10 11 11 #ifdef CONFIG_MMU 12 12 13 - #ifdef __ASSEMBLY__ 13 + #ifdef __ASSEMBLER__ 14 14 15 15 #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ 16 16 .pushsection __ex_table, "a"; \ ··· 25 25 __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) 26 26 .endm 27 27 28 - #else /* __ASSEMBLY__ */ 28 + #else /* __ASSEMBLER__ */ 29 29 30 30 #include <linux/bits.h> 31 31 #include <linux/stringify.h> ··· 77 77 EX_DATA_REG(ADDR, addr) \ 78 78 ")") 79 79 80 - #endif /* __ASSEMBLY__ */ 80 + #endif /* __ASSEMBLER__ */ 81 81 82 82 #else /* CONFIG_MMU */ 83 83 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)
+5 -5
arch/riscv/include/asm/asm.h
··· 6 6 #ifndef _ASM_RISCV_ASM_H 7 7 #define _ASM_RISCV_ASM_H 8 8 9 - #ifdef __ASSEMBLY__ 9 + #ifdef __ASSEMBLER__ 10 10 #define __ASM_STR(x) x 11 11 #else 12 12 #define __ASM_STR(x) #x ··· 30 30 #define SRLI __REG_SEL(srliw, srli) 31 31 32 32 #if __SIZEOF_POINTER__ == 8 33 - #ifdef __ASSEMBLY__ 33 + #ifdef __ASSEMBLER__ 34 34 #define RISCV_PTR .dword 35 35 #define RISCV_SZPTR 8 36 36 #define RISCV_LGPTR 3 ··· 40 40 #define RISCV_LGPTR "3" 41 41 #endif 42 42 #elif __SIZEOF_POINTER__ == 4 43 - #ifdef __ASSEMBLY__ 43 + #ifdef __ASSEMBLER__ 44 44 #define RISCV_PTR .word 45 45 #define RISCV_SZPTR 4 46 46 #define RISCV_LGPTR 2 ··· 69 69 #error "Unexpected __SIZEOF_SHORT__" 70 70 #endif 71 71 72 - #ifdef __ASSEMBLY__ 72 + #ifdef __ASSEMBLER__ 73 73 #include <asm/asm-offsets.h> 74 74 75 75 /* Common assembly source macros */ ··· 194 194 #define ASM_NOKPROBE(name) 195 195 #endif 196 196 197 - #endif /* __ASSEMBLY__ */ 197 + #endif /* __ASSEMBLER__ */ 198 198 199 199 #endif /* _ASM_RISCV_ASM_H */
+1 -1
arch/riscv/include/asm/assembler.h
··· 5 5 * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com> 6 6 */ 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 #error "Only include this from assembly code" 10 10 #endif 11 11
+2 -2
arch/riscv/include/asm/barrier.h
··· 10 10 #ifndef _ASM_RISCV_BARRIER_H 11 11 #define _ASM_RISCV_BARRIER_H 12 12 13 - #ifndef __ASSEMBLY__ 13 + #ifndef __ASSEMBLER__ 14 14 #include <asm/cmpxchg.h> 15 15 #include <asm/fence.h> 16 16 ··· 82 82 83 83 #include <asm-generic/barrier.h> 84 84 85 - #endif /* __ASSEMBLY__ */ 85 + #endif /* __ASSEMBLER__ */ 86 86 87 87 #endif /* _ASM_RISCV_BARRIER_H */
+2 -2
arch/riscv/include/asm/cache.h
··· 24 24 #define ARCH_SLAB_MINALIGN 16 25 25 #endif 26 26 27 - #ifndef __ASSEMBLY__ 27 + #ifndef __ASSEMBLER__ 28 28 29 29 extern int dma_cache_alignment; 30 30 #ifdef CONFIG_RISCV_DMA_NONCOHERENT ··· 35 35 } 36 36 #endif 37 37 38 - #endif /* __ASSEMBLY__ */ 38 + #endif /* __ASSEMBLER__ */ 39 39 40 40 #endif /* _ASM_RISCV_CACHE_H */
+1 -1
arch/riscv/include/asm/cpu_ops_sbi.h
··· 5 5 #ifndef __ASM_CPU_OPS_SBI_H 6 6 #define __ASM_CPU_OPS_SBI_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 #include <linux/init.h> 10 10 #include <linux/sched.h> 11 11 #include <linux/threads.h>
+2 -2
arch/riscv/include/asm/csr.h
··· 513 513 #define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) 514 514 #define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) 515 515 516 - #ifndef __ASSEMBLY__ 516 + #ifndef __ASSEMBLER__ 517 517 518 518 #define csr_swap(csr, val) \ 519 519 ({ \ ··· 575 575 : "memory"); \ 576 576 }) 577 577 578 - #endif /* __ASSEMBLY__ */ 578 + #endif /* __ASSEMBLER__ */ 579 579 580 580 #endif /* _ASM_RISCV_CSR_H */
+2 -2
arch/riscv/include/asm/current.h
··· 13 13 #include <linux/bug.h> 14 14 #include <linux/compiler.h> 15 15 16 - #ifndef __ASSEMBLY__ 16 + #ifndef __ASSEMBLER__ 17 17 18 18 struct task_struct; 19 19 ··· 35 35 36 36 register unsigned long current_stack_pointer __asm__("sp"); 37 37 38 - #endif /* __ASSEMBLY__ */ 38 + #endif /* __ASSEMBLER__ */ 39 39 40 40 #endif /* _ASM_RISCV_CURRENT_H */
+3 -3
arch/riscv/include/asm/errata_list.h
··· 29 29 #define ERRATA_THEAD_NUMBER 3 30 30 #endif 31 31 32 - #ifdef __ASSEMBLY__ 32 + #ifdef __ASSEMBLER__ 33 33 34 34 #define ALT_INSN_FAULT(x) \ 35 35 ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ ··· 42 42 __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ 43 43 SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ 44 44 CONFIG_ERRATA_SIFIVE_CIP_453) 45 - #else /* !__ASSEMBLY__ */ 45 + #else /* !__ASSEMBLER__ */ 46 46 47 47 #define ALT_SFENCE_VMA_ASID(asid) \ 48 48 asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID, \ ··· 123 123 #define THEAD_C9XX_RV_IRQ_PMU 17 124 124 #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 125 125 126 - #endif /* __ASSEMBLY__ */ 126 + #endif /* __ASSEMBLER__ */ 127 127 128 128 #endif
+3 -3
arch/riscv/include/asm/ftrace.h
··· 13 13 #endif 14 14 15 15 #define ARCH_SUPPORTS_FTRACE_OPS 1 16 - #ifndef __ASSEMBLY__ 16 + #ifndef __ASSEMBLER__ 17 17 18 18 extern void *return_address(unsigned int level); 19 19 ··· 112 112 #define MCOUNT_JALR_SIZE 4 113 113 #define MCOUNT_NOP4_SIZE 4 114 114 115 - #ifndef __ASSEMBLY__ 115 + #ifndef __ASSEMBLER__ 116 116 struct dyn_ftrace; 117 117 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); 118 118 #define ftrace_init_nop ftrace_init_nop ··· 235 235 236 236 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ 237 237 238 - #endif /* __ASSEMBLY__ */ 238 + #endif /* __ASSEMBLER__ */ 239 239 240 240 #endif /* CONFIG_DYNAMIC_FTRACE */ 241 241
+3 -3
arch/riscv/include/asm/gpr-num.h
··· 2 2 #ifndef __ASM_GPR_NUM_H 3 3 #define __ASM_GPR_NUM_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 7 7 .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 8 8 .equ .L__gpr_num_x\num, \num ··· 41 41 .equ .L__gpr_num_t5, 30 42 42 .equ .L__gpr_num_t6, 31 43 43 44 - #else /* __ASSEMBLY__ */ 44 + #else /* __ASSEMBLER__ */ 45 45 46 46 #define __DEFINE_ASM_GPR_NUMS \ 47 47 " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ ··· 80 80 " .equ .L__gpr_num_t5, 30\n" \ 81 81 " .equ .L__gpr_num_t6, 31\n" 82 82 83 - #endif /* __ASSEMBLY__ */ 83 + #endif /* __ASSEMBLER__ */ 84 84 85 85 #endif /* __ASM_GPR_NUM_H */
+2 -2
arch/riscv/include/asm/image.h
··· 29 29 #define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \ 30 30 RISCV_HEADER_VERSION_MINOR) 31 31 32 - #ifndef __ASSEMBLY__ 32 + #ifndef __ASSEMBLER__ 33 33 #define riscv_image_flag_field(flags, field)\ 34 34 (((flags) >> field##_SHIFT) & field##_MASK) 35 35 /** ··· 63 63 u32 magic2; 64 64 u32 res3; 65 65 }; 66 - #endif /* __ASSEMBLY__ */ 66 + #endif /* __ASSEMBLER__ */ 67 67 #endif /* _ASM_RISCV_IMAGE_H */
+4 -4
arch/riscv/include/asm/insn-def.h
··· 25 25 #define INSN_S_SIMM5_SHIFT 7 26 26 #define INSN_S_OPCODE_SHIFT 0 27 27 28 - #ifdef __ASSEMBLY__ 28 + #ifdef __ASSEMBLER__ 29 29 30 30 #ifdef CONFIG_AS_HAS_INSN 31 31 ··· 77 77 #define __INSN_I(...) insn_i __VA_ARGS__ 78 78 #define __INSN_S(...) insn_s __VA_ARGS__ 79 79 80 - #else /* ! __ASSEMBLY__ */ 80 + #else /* ! __ASSEMBLER__ */ 81 81 82 82 #ifdef CONFIG_AS_HAS_INSN 83 83 ··· 153 153 154 154 #endif 155 155 156 - #endif /* ! __ASSEMBLY__ */ 156 + #endif /* ! __ASSEMBLER__ */ 157 157 158 158 #define INSN_R(opcode, func3, func7, rd, rs1, rs2) \ 159 159 __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \ ··· 263 263 264 264 #define RISCV_INSN_NOP4 _AC(0x00000013, U) 265 265 266 - #ifndef __ASSEMBLY__ 266 + #ifndef __ASSEMBLER__ 267 267 #define nop() __asm__ __volatile__ ("nop") 268 268 #define __nops(n) ".rept " #n "\nnop\n.endr\n" 269 269 #define nops(n) __asm__ __volatile__ (__nops(n))
+2 -2
arch/riscv/include/asm/jump_label.h
··· 7 7 #ifndef __ASM_JUMP_LABEL_H 8 8 #define __ASM_JUMP_LABEL_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <linux/types.h> 13 13 #include <asm/asm.h> ··· 66 66 return true; 67 67 } 68 68 69 - #endif /* __ASSEMBLY__ */ 69 + #endif /* __ASSEMBLER__ */ 70 70 #endif /* __ASM_JUMP_LABEL_H */
+1 -1
arch/riscv/include/asm/kasan.h
··· 4 4 #ifndef __ASM_KASAN_H 5 5 #define __ASM_KASAN_H 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 9 9 /* 10 10 * The following comment was copied from arm64:
+2 -2
arch/riscv/include/asm/kgdb.h
··· 17 17 #define BREAK_INSTR_SIZE 4 18 18 #endif 19 19 20 - #ifndef __ASSEMBLY__ 20 + #ifndef __ASSEMBLER__ 21 21 22 22 void arch_kgdb_breakpoint(void); 23 23 extern unsigned long kgdb_compiled_break; 24 24 25 - #endif /* !__ASSEMBLY__ */ 25 + #endif /* !__ASSEMBLER__ */ 26 26 27 27 #define DBG_REG_ZERO "zero" 28 28 #define DBG_REG_RA "ra"
+2 -2
arch/riscv/include/asm/mmu.h
··· 7 7 #ifndef _ASM_RISCV_MMU_H 8 8 #define _ASM_RISCV_MMU_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 typedef struct { 13 13 #ifndef CONFIG_MMU ··· 40 40 41 41 void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz, 42 42 pgprot_t prot); 43 - #endif /* __ASSEMBLY__ */ 43 + #endif /* __ASSEMBLER__ */ 44 44 45 45 #endif /* _ASM_RISCV_MMU_H */
+2 -2
arch/riscv/include/asm/page.h
··· 41 41 #define PAGE_OFFSET ((unsigned long)phys_ram_base) 42 42 #endif /* CONFIG_MMU */ 43 43 44 - #ifndef __ASSEMBLY__ 44 + #ifndef __ASSEMBLER__ 45 45 46 46 #ifdef CONFIG_RISCV_ISA_ZICBOZ 47 47 void clear_page(void *page); ··· 199 199 return __va(pfn << PAGE_SHIFT); 200 200 } 201 201 202 - #endif /* __ASSEMBLY__ */ 202 + #endif /* __ASSEMBLER__ */ 203 203 204 204 #define virt_addr_valid(vaddr) ({ \ 205 205 unsigned long _addr = (unsigned long)vaddr; \
+2 -2
arch/riscv/include/asm/pgtable.h
··· 111 111 112 112 #endif 113 113 114 - #ifndef __ASSEMBLY__ 114 + #ifndef __ASSEMBLER__ 115 115 116 116 #include <asm/page.h> 117 117 #include <asm/tlbflush.h> ··· 1119 1119 WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ 1120 1120 set_pgd(pgdp, pgd); \ 1121 1121 }) 1122 - #endif /* !__ASSEMBLY__ */ 1122 + #endif /* !__ASSEMBLER__ */ 1123 1123 1124 1124 #endif /* _ASM_RISCV_PGTABLE_H */
+2 -2
arch/riscv/include/asm/processor.h
··· 54 54 #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) 55 55 #endif 56 56 57 - #ifndef __ASSEMBLY__ 57 + #ifndef __ASSEMBLER__ 58 58 59 59 struct task_struct; 60 60 struct pt_regs; ··· 215 215 #define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current) 216 216 #endif 217 217 218 - #endif /* __ASSEMBLY__ */ 218 + #endif /* __ASSEMBLER__ */ 219 219 220 220 #endif /* _ASM_RISCV_PROCESSOR_H */
+2 -2
arch/riscv/include/asm/ptrace.h
··· 10 10 #include <asm/csr.h> 11 11 #include <linux/compiler.h> 12 12 13 - #ifndef __ASSEMBLY__ 13 + #ifndef __ASSEMBLER__ 14 14 15 15 struct pt_regs { 16 16 unsigned long epc; ··· 180 180 return !(regs->status & SR_PIE); 181 181 } 182 182 183 - #endif /* __ASSEMBLY__ */ 183 + #endif /* __ASSEMBLER__ */ 184 184 185 185 #endif /* _ASM_RISCV_PTRACE_H */
+2 -2
arch/riscv/include/asm/scs.h
··· 2 2 #ifndef _ASM_SCS_H 3 3 #define _ASM_SCS_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 #include <asm/asm-offsets.h> 7 7 8 8 #ifdef CONFIG_SHADOW_CALL_STACK ··· 49 49 .endm 50 50 51 51 #endif /* CONFIG_SHADOW_CALL_STACK */ 52 - #endif /* __ASSEMBLY__ */ 52 + #endif /* __ASSEMBLER__ */ 53 53 54 54 #endif /* _ASM_SCS_H */
+2 -2
arch/riscv/include/asm/set_memory.h
··· 6 6 #ifndef _ASM_RISCV_SET_MEMORY_H 7 7 #define _ASM_RISCV_SET_MEMORY_H 8 8 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 /* 11 11 * Functions to change memory attributes. 12 12 */ ··· 45 45 int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); 46 46 bool kernel_page_present(struct page *page); 47 47 48 - #endif /* __ASSEMBLY__ */ 48 + #endif /* __ASSEMBLER__ */ 49 49 50 50 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_XIP_KERNEL) 51 51 #ifdef CONFIG_64BIT
+2 -2
arch/riscv/include/asm/thread_info.h
··· 37 37 38 38 #define IRQ_STACK_SIZE THREAD_SIZE 39 39 40 - #ifndef __ASSEMBLY__ 40 + #ifndef __ASSEMBLER__ 41 41 42 42 #include <asm/processor.h> 43 43 #include <asm/csr.h> ··· 98 98 void arch_release_task_struct(struct task_struct *tsk); 99 99 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 100 100 101 - #endif /* !__ASSEMBLY__ */ 101 + #endif /* !__ASSEMBLER__ */ 102 102 103 103 /* 104 104 * thread information flags
+2 -2
arch/riscv/include/asm/vdso.h
··· 16 16 17 17 #define __VDSO_PAGES 4 18 18 19 - #ifndef __ASSEMBLY__ 19 + #ifndef __ASSEMBLER__ 20 20 #include <generated/vdso-offsets.h> 21 21 22 22 #define VDSO_SYMBOL(base, name) \ ··· 34 34 35 35 extern char vdso_start[], vdso_end[]; 36 36 37 - #endif /* !__ASSEMBLY__ */ 37 + #endif /* !__ASSEMBLER__ */ 38 38 39 39 #endif /* CONFIG_MMU */ 40 40
+2 -2
arch/riscv/include/asm/vdso/getrandom.h
··· 5 5 #ifndef __ASM_VDSO_GETRANDOM_H 6 6 #define __ASM_VDSO_GETRANDOM_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #include <asm/unistd.h> 11 11 ··· 25 25 return ret; 26 26 } 27 27 28 - #endif /* !__ASSEMBLY__ */ 28 + #endif /* !__ASSEMBLER__ */ 29 29 30 30 #endif /* __ASM_VDSO_GETRANDOM_H */
+2 -2
arch/riscv/include/asm/vdso/gettimeofday.h
··· 2 2 #ifndef __ASM_VDSO_GETTIMEOFDAY_H 3 3 #define __ASM_VDSO_GETTIMEOFDAY_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <asm/barrier.h> 8 8 #include <asm/unistd.h> ··· 79 79 return csr_read(CSR_TIME); 80 80 } 81 81 82 - #endif /* !__ASSEMBLY__ */ 82 + #endif /* !__ASSEMBLER__ */ 83 83 84 84 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
+2 -2
arch/riscv/include/asm/vdso/processor.h
··· 2 2 #ifndef __ASM_VDSO_PROCESSOR_H 3 3 #define __ASM_VDSO_PROCESSOR_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <asm/barrier.h> 8 8 #include <asm/insn-def.h> ··· 23 23 barrier(); 24 24 } 25 25 26 - #endif /* __ASSEMBLY__ */ 26 + #endif /* __ASSEMBLER__ */ 27 27 28 28 #endif /* __ASM_VDSO_PROCESSOR_H */
+2 -2
arch/riscv/include/asm/vdso/vsyscall.h
··· 2 2 #ifndef __ASM_VDSO_VSYSCALL_H 3 3 #define __ASM_VDSO_VSYSCALL_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <vdso/datapage.h> 8 8 9 9 /* The asm-generic header needs to be included after the definitions above */ 10 10 #include <asm-generic/vdso/vsyscall.h> 11 11 12 - #endif /* !__ASSEMBLY__ */ 12 + #endif /* !__ASSEMBLER__ */ 13 13 14 14 #endif /* __ASM_VDSO_VSYSCALL_H */
+3 -3
tools/arch/riscv/include/asm/csr.h
··· 468 468 #define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) 469 469 #define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) 470 470 471 - #ifdef __ASSEMBLY__ 471 + #ifdef __ASSEMBLER__ 472 472 #define __ASM_STR(x) x 473 473 #else 474 474 #define __ASM_STR(x) #x 475 475 #endif 476 476 477 - #ifndef __ASSEMBLY__ 477 + #ifndef __ASSEMBLER__ 478 478 479 479 #define csr_swap(csr, val) \ 480 480 ({ \ ··· 536 536 : "memory"); \ 537 537 }) 538 538 539 - #endif /* __ASSEMBLY__ */ 539 + #endif /* __ASSEMBLER__ */ 540 540 541 541 #endif /* _ASM_RISCV_CSR_H */
+2 -2
tools/arch/riscv/include/asm/vdso/processor.h
··· 2 2 #ifndef __ASM_VDSO_PROCESSOR_H 3 3 #define __ASM_VDSO_PROCESSOR_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <asm-generic/barrier.h> 8 8 ··· 27 27 barrier(); 28 28 } 29 29 30 - #endif /* __ASSEMBLY__ */ 30 + #endif /* __ASSEMBLER__ */ 31 31 32 32 #endif /* __ASM_VDSO_PROCESSOR_H */