Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Replace __ASSEMBLY__ with __ASSEMBLER__ in non-uapi headers

While the GCC and Clang compilers already define __ASSEMBLER__
automatically when compiling assembler code, __ASSEMBLY__ is a
macro that only gets defined by the Makefiles in the kernel.
This is bad since macros starting with two underscores are names
that are reserved by the C language. It can also be very confusing
for the developers when switching between userspace and kernelspace
coding, or when dealing with uapi headers that rather should use
__ASSEMBLER__ instead. So let's standardize now on the __ASSEMBLER__
macro that is provided by the compilers.

This is almost a completely mechanical patch (done with a simple
"sed -i" statement), apart from tweaking two comments manually in
arch/powerpc/include/asm/bug.h and arch/powerpc/include/asm/kasan.h
(which did not have proper underscores at the end) and fixing a
checkpatch error about spaces in arch/powerpc/include/asm/spu_csa.h.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250801082007.32904-3-thuth@redhat.com

Authored by Thomas Huth and committed by Madhavan Srinivasan.
74db6cc3 8e0665eb

+230 -230
+1 -1
arch/powerpc/boot/page.h
··· 5 5 * Copyright (C) 2001 PPC64 Team, IBM Corp 6 6 */ 7 7 8 - #ifdef __ASSEMBLY__ 8 + #ifdef __ASSEMBLER__ 9 9 #define ASM_CONST(x) x 10 10 #else 11 11 #define __ASM_CONST(x) x##UL
+1 -1
arch/powerpc/include/asm/asm-const.h
··· 1 1 #ifndef _ASM_POWERPC_ASM_CONST_H 2 2 #define _ASM_POWERPC_ASM_CONST_H 3 3 4 - #ifdef __ASSEMBLY__ 4 + #ifdef __ASSEMBLER__ 5 5 # define stringify_in_c(...) __VA_ARGS__ 6 6 # define ASM_CONST(x) x 7 7 #else
+1 -1
arch/powerpc/include/asm/barrier.h
··· 7 7 8 8 #include <asm/asm-const.h> 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 #include <asm/ppc-opcode.h> 12 12 #endif 13 13
+2 -2
arch/powerpc/include/asm/book3s/32/kup.h
··· 7 7 #include <asm/mmu.h> 8 8 #include <asm/synch.h> 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #ifdef CONFIG_PPC_KUAP 13 13 ··· 170 170 171 171 #endif /* CONFIG_PPC_KUAP */ 172 172 173 - #endif /* __ASSEMBLY__ */ 173 + #endif /* __ASSEMBLER__ */ 174 174 175 175 #endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */
+4 -4
arch/powerpc/include/asm/book3s/32/mmu-hash.h
··· 29 29 #define BPP_RX 0x01 /* Read only */ 30 30 #define BPP_RW 0x02 /* Read/write */ 31 31 32 - #ifndef __ASSEMBLY__ 32 + #ifndef __ASSEMBLER__ 33 33 /* Contort a phys_addr_t into the right format/bits for a BAT */ 34 34 #ifdef CONFIG_PHYS_64BIT 35 35 #define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \ ··· 47 47 u32 batu; 48 48 u32 batl; 49 49 }; 50 - #endif /* !__ASSEMBLY__ */ 50 + #endif /* !__ASSEMBLER__ */ 51 51 52 52 /* 53 53 * Hash table ··· 64 64 #define SR_KP 0x20000000 /* User key */ 65 65 #define SR_KS 0x40000000 /* Supervisor key */ 66 66 67 - #ifdef __ASSEMBLY__ 67 + #ifdef __ASSEMBLER__ 68 68 69 69 #include <asm/asm-offsets.h> 70 70 ··· 225 225 226 226 int __init find_free_bat(void); 227 227 unsigned int bat_block_size(unsigned long base, unsigned long top); 228 - #endif /* !__ASSEMBLY__ */ 228 + #endif /* !__ASSEMBLER__ */ 229 229 230 230 /* We happily ignore the smaller BATs on 601, we don't actually use 231 231 * those definitions on hash32 at the moment anyway
+6 -6
arch/powerpc/include/asm/book3s/32/pgtable.h
··· 102 102 #define PMD_CACHE_INDEX PMD_INDEX_SIZE 103 103 #define PUD_CACHE_INDEX PUD_INDEX_SIZE 104 104 105 - #ifndef __ASSEMBLY__ 105 + #ifndef __ASSEMBLER__ 106 106 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 107 107 #define PMD_TABLE_SIZE 0 108 108 #define PUD_TABLE_SIZE 0 ··· 110 110 111 111 /* Bits to mask out from a PMD to get to the PTE page */ 112 112 #define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1) 113 - #endif /* __ASSEMBLY__ */ 113 + #endif /* __ASSEMBLER__ */ 114 114 115 115 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 116 116 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) ··· 132 132 133 133 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 134 134 135 - #ifndef __ASSEMBLY__ 135 + #ifndef __ASSEMBLER__ 136 136 137 137 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); 138 138 void unmap_kernel_page(unsigned long va); 139 139 140 - #endif /* !__ASSEMBLY__ */ 140 + #endif /* !__ASSEMBLER__ */ 141 141 142 142 /* 143 143 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary ··· 199 199 #define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) 200 200 #define MODULES_VADDR (MODULES_END - MODULES_SIZE) 201 201 202 - #ifndef __ASSEMBLY__ 202 + #ifndef __ASSEMBLER__ 203 203 #include <linux/sched.h> 204 204 #include <linux/threads.h> 205 205 ··· 602 602 return pgprot_noncached_wc(prot); 603 603 } 604 604 605 - #endif /* !__ASSEMBLY__ */ 605 + #endif /* !__ASSEMBLER__ */ 606 606 607 607 #endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
+2 -2
arch/powerpc/include/asm/book3s/64/hash-4k.h
··· 32 32 */ 33 33 #define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000) 34 34 35 - #ifndef __ASSEMBLY__ 35 + #ifndef __ASSEMBLER__ 36 36 #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) 37 37 #define H_PMD_TABLE_SIZE (sizeof(pmd_t) << H_PMD_INDEX_SIZE) 38 38 #define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE) ··· 168 168 extern int hash__has_transparent_hugepage(void); 169 169 #endif 170 170 171 - #endif /* !__ASSEMBLY__ */ 171 + #endif /* !__ASSEMBLER__ */ 172 172 173 173 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
+2 -2
arch/powerpc/include/asm/book3s/64/hash-64k.h
··· 79 79 #endif 80 80 #define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT) 81 81 82 - #ifndef __ASSEMBLY__ 82 + #ifndef __ASSEMBLER__ 83 83 #include <asm/errno.h> 84 84 85 85 /* ··· 281 281 extern int hash__has_transparent_hugepage(void); 282 282 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 283 283 284 - #endif /* __ASSEMBLY__ */ 284 + #endif /* __ASSEMBLER__ */ 285 285 286 286 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
+2 -2
arch/powerpc/include/asm/book3s/64/hash.h
··· 112 112 #define H_PMD_BAD_BITS (PTE_TABLE_SIZE-1) 113 113 #define H_PUD_BAD_BITS (PMD_TABLE_SIZE-1) 114 114 115 - #ifndef __ASSEMBLY__ 115 + #ifndef __ASSEMBLER__ 116 116 static inline int get_region_id(unsigned long ea) 117 117 { 118 118 int region_id; ··· 295 295 int nid, pgprot_t prot); 296 296 int hash__remove_section_mapping(unsigned long start, unsigned long end); 297 297 298 - #endif /* !__ASSEMBLY__ */ 298 + #endif /* !__ASSEMBLER__ */ 299 299 #endif /* __KERNEL__ */ 300 300 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
+3 -3
arch/powerpc/include/asm/book3s/64/kup.h
··· 10 10 #define AMR_KUEP_BLOCKED UL(0x5455555555555555) 11 11 #define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE) 12 12 13 - #ifdef __ASSEMBLY__ 13 + #ifdef __ASSEMBLER__ 14 14 15 15 .macro kuap_user_restore gpr1, gpr2 16 16 #if defined(CONFIG_PPC_PKEY) ··· 191 191 #endif 192 192 .endm 193 193 194 - #else /* !__ASSEMBLY__ */ 194 + #else /* !__ASSEMBLER__ */ 195 195 196 196 #include <linux/jump_label.h> 197 197 #include <linux/sched.h> ··· 413 413 if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) 414 414 do_uaccess_flush(); 415 415 } 416 - #endif /* __ASSEMBLY__ */ 416 + #endif /* __ASSEMBLER__ */ 417 417 418 418 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
+6 -6
arch/powerpc/include/asm/book3s/64/mmu-hash.h
··· 130 130 #define POWER9_TLB_SETS_HASH 256 /* # sets in POWER9 TLB Hash mode */ 131 131 #define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */ 132 132 133 - #ifndef __ASSEMBLY__ 133 + #ifndef __ASSEMBLER__ 134 134 135 135 struct mmu_hash_ops { 136 136 void (*hpte_invalidate)(unsigned long slot, ··· 220 220 return sllp; 221 221 } 222 222 223 - #endif /* __ASSEMBLY__ */ 223 + #endif /* __ASSEMBLER__ */ 224 224 225 225 /* 226 226 * Segment sizes. ··· 248 248 #define LP_BITS 8 249 249 #define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 250 250 251 - #ifndef __ASSEMBLY__ 251 + #ifndef __ASSEMBLER__ 252 252 253 253 static inline int slb_vsid_shift(int ssize) 254 254 { ··· 532 532 static inline void slb_set_size(u16 size) { } 533 533 #endif 534 534 535 - #endif /* __ASSEMBLY__ */ 535 + #endif /* __ASSEMBLER__ */ 536 536 537 537 /* 538 538 * VSID allocation (256MB segment) ··· 668 668 #define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41) 669 669 #define LOW_SLICE_ARRAY_SZ (BITS_PER_LONG / BITS_PER_BYTE) 670 670 #define TASK_SLICE_ARRAY_SZ(x) ((x)->hash_context->slb_addr_limit >> 41) 671 - #ifndef __ASSEMBLY__ 671 + #ifndef __ASSEMBLER__ 672 672 673 673 #ifdef CONFIG_PPC_SUBPAGE_PROT 674 674 /* ··· 881 881 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); 882 882 } 883 883 884 - #endif /* __ASSEMBLY__ */ 884 + #endif /* __ASSEMBLER__ */ 885 885 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
+4 -4
arch/powerpc/include/asm/book3s/64/mmu.h
··· 4 4 5 5 #include <asm/page.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 /* 9 9 * Page size definition 10 10 * ··· 26 26 }; 27 27 }; 28 28 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; 29 - #endif /* __ASSEMBLY__ */ 29 + #endif /* __ASSEMBLER__ */ 30 30 31 31 /* 64-bit classic hash table MMU */ 32 32 #include <asm/book3s/64/mmu-hash.h> 33 33 34 - #ifndef __ASSEMBLY__ 34 + #ifndef __ASSEMBLER__ 35 35 /* 36 36 * ISA 3.0 partition and process table entry format 37 37 */ ··· 288 288 } 289 289 #endif 290 290 291 - #endif /* __ASSEMBLY__ */ 291 + #endif /* __ASSEMBLER__ */ 292 292 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
+2 -2
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
··· 2 2 #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H 3 3 #define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 #ifdef CONFIG_HUGETLB_PAGE 7 7 8 8 #endif /* CONFIG_HUGETLB_PAGE */ ··· 14 14 BUG(); 15 15 return hash__remap_4k_pfn(vma, addr, pfn, prot); 16 16 } 17 - #endif /* __ASSEMBLY__ */ 17 + #endif /* __ASSEMBLER__ */ 18 18 #endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */
+5 -5
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 4 4 5 5 #include <asm-generic/pgtable-nop4d.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 #include <linux/mmdebug.h> 9 9 #include <linux/bug.h> 10 10 #include <linux/sizes.h> ··· 143 143 #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) 144 144 #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) 145 145 146 - #ifndef __ASSEMBLY__ 146 + #ifndef __ASSEMBLER__ 147 147 /* 148 148 * page table defines 149 149 */ ··· 291 291 else 292 292 return PUD_SIZE; 293 293 } 294 - #endif /* __ASSEMBLY__ */ 294 + #endif /* __ASSEMBLER__ */ 295 295 296 296 #include <asm/book3s/64/hash.h> 297 297 #include <asm/book3s/64/radix.h> ··· 327 327 #define FIXADDR_SIZE SZ_32M 328 328 #define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE) 329 329 330 - #ifndef __ASSEMBLY__ 330 + #ifndef __ASSEMBLER__ 331 331 332 332 static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, 333 333 pte_t *ptep, unsigned long clr, ··· 1381 1381 return false; 1382 1382 } 1383 1383 1384 - #endif /* __ASSEMBLY__ */ 1384 + #endif /* __ASSEMBLER__ */ 1385 1385 #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
+4 -4
arch/powerpc/include/asm/book3s/64/radix.h
··· 4 4 5 5 #include <asm/asm-const.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 #include <asm/cmpxchg.h> 9 9 #endif 10 10 ··· 14 14 #include <asm/book3s/64/radix-4k.h> 15 15 #endif 16 16 17 - #ifndef __ASSEMBLY__ 17 + #ifndef __ASSEMBLER__ 18 18 #include <asm/book3s/64/tlbflush-radix.h> 19 19 #include <asm/cpu_has_feature.h> 20 20 #endif ··· 132 132 #define RADIX_VMEMMAP_SIZE RADIX_KERN_MAP_SIZE 133 133 #define RADIX_VMEMMAP_END (RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE) 134 134 135 - #ifndef __ASSEMBLY__ 135 + #ifndef __ASSEMBLER__ 136 136 #define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE) 137 137 #define RADIX_PMD_TABLE_SIZE (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE) 138 138 #define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE) ··· 362 362 unsigned long start, 363 363 unsigned long end, int node, 364 364 struct dev_pagemap *pgmap); 365 - #endif /* __ASSEMBLY__ */ 365 + #endif /* __ASSEMBLER__ */ 366 366 #endif
+2 -2
arch/powerpc/include/asm/book3s/64/slice.h
··· 2 2 #ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H 3 3 #define _ASM_POWERPC_BOOK3S_64_SLICE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #ifdef CONFIG_PPC_64S_HASH_MMU 8 8 #ifdef CONFIG_HUGETLB_PAGE ··· 37 37 void slice_init_new_context_exec(struct mm_struct *mm); 38 38 void slice_setup_new_exec(void); 39 39 40 - #endif /* __ASSEMBLY__ */ 40 + #endif /* __ASSEMBLER__ */ 41 41 42 42 #endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
+7 -7
arch/powerpc/include/asm/bug.h
··· 7 7 8 8 #ifdef CONFIG_BUG 9 9 10 - #ifdef __ASSEMBLY__ 10 + #ifdef __ASSEMBLER__ 11 11 #include <asm/asm-offsets.h> 12 12 #ifdef CONFIG_DEBUG_BUGVERBOSE 13 13 .macro EMIT_BUG_ENTRY addr,file,line,flags ··· 31 31 .endm 32 32 #endif /* verbose */ 33 33 34 - #else /* !__ASSEMBLY__ */ 34 + #else /* !__ASSEMBLER__ */ 35 35 /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and 36 36 sizeof(struct bug_entry), respectively */ 37 37 #ifdef CONFIG_DEBUG_BUGVERBOSE ··· 101 101 #define HAVE_ARCH_WARN_ON 102 102 #endif 103 103 104 - #endif /* __ASSEMBLY __ */ 104 + #endif /* __ASSEMBLER__ */ 105 105 #else 106 - #ifdef __ASSEMBLY__ 106 + #ifdef __ASSEMBLER__ 107 107 .macro EMIT_BUG_ENTRY addr,file,line,flags 108 108 .endm 109 - #else /* !__ASSEMBLY__ */ 109 + #else /* !__ASSEMBLER__ */ 110 110 #define _EMIT_BUG_ENTRY 111 111 #endif 112 112 #endif /* CONFIG_BUG */ ··· 115 115 116 116 #include <asm-generic/bug.h> 117 117 118 - #ifndef __ASSEMBLY__ 118 + #ifndef __ASSEMBLER__ 119 119 120 120 struct pt_regs; 121 121 void hash__do_page_fault(struct pt_regs *); ··· 128 128 extern bool die_will_crash(void); 129 129 extern void panic_flush_kmsg_start(void); 130 130 extern void panic_flush_kmsg_end(void); 131 - #endif /* !__ASSEMBLY__ */ 131 + #endif /* !__ASSEMBLER__ */ 132 132 133 133 #endif /* __KERNEL__ */ 134 134 #endif /* _ASM_POWERPC_BUG_H */
+2 -2
arch/powerpc/include/asm/cache.h
··· 37 37 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 38 38 #endif 39 39 40 - #if !defined(__ASSEMBLY__) 40 + #if !defined(__ASSEMBLER__) 41 41 #ifdef CONFIG_PPC64 42 42 43 43 struct ppc_cache_info { ··· 145 145 asm volatile ("iccci 0, %0" : : "r"(addr) : "memory"); 146 146 } 147 147 148 - #endif /* !__ASSEMBLY__ */ 148 + #endif /* !__ASSEMBLER__ */ 149 149 #endif /* __KERNEL__ */ 150 150 #endif /* _ASM_POWERPC_CACHE_H */
+2 -2
arch/powerpc/include/asm/cpu_has_feature.h
··· 2 2 #ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H 3 3 #define __ASM_POWERPC_CPU_HAS_FEATURE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <linux/bug.h> 8 8 #include <asm/cputable.h> ··· 51 51 } 52 52 #endif 53 53 54 - #endif /* __ASSEMBLY__ */ 54 + #endif /* __ASSEMBLER__ */ 55 55 #endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
+1 -1
arch/powerpc/include/asm/cpuidle.h
··· 68 68 #define ERR_EC_ESL_MISMATCH -1 69 69 #define ERR_DEEP_STATE_ESL_MISMATCH -2 70 70 71 - #ifndef __ASSEMBLY__ 71 + #ifndef __ASSEMBLER__ 72 72 73 73 #define PNV_IDLE_NAME_LEN 16 74 74 struct pnv_idle_states_t {
+4 -4
arch/powerpc/include/asm/cputable.h
··· 7 7 #include <uapi/asm/cputable.h> 8 8 #include <asm/asm-const.h> 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 /* This structure can grow, it's real size is used by head.S code 13 13 * via the mkdefs mechanism. ··· 103 103 static inline void cpu_feature_keys_init(void) { } 104 104 #endif 105 105 106 - #endif /* __ASSEMBLY__ */ 106 + #endif /* __ASSEMBLER__ */ 107 107 108 108 /* CPU kernel features */ 109 109 ··· 195 195 #define CPU_FTR_DEXCR_NPHIE LONG_ASM_CONST(0x0010000000000000) 196 196 #define CPU_FTR_P11_PVR LONG_ASM_CONST(0x0020000000000000) 197 197 198 - #ifndef __ASSEMBLY__ 198 + #ifndef __ASSEMBLER__ 199 199 200 200 #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE) 201 201 ··· 602 602 */ 603 603 #define HBP_NUM_MAX 2 604 604 605 - #endif /* !__ASSEMBLY__ */ 605 + #endif /* !__ASSEMBLER__ */ 606 606 607 607 #endif /* __ASM_POWERPC_CPUTABLE_H */
+2 -2
arch/powerpc/include/asm/cputhreads.h
··· 2 2 #ifndef _ASM_POWERPC_CPUTHREADS_H 3 3 #define _ASM_POWERPC_CPUTHREADS_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 #include <linux/cpumask.h> 7 7 #include <asm/cpu_has_feature.h> 8 8 ··· 107 107 void book3e_start_thread(int thread, unsigned long addr); 108 108 void book3e_stop_thread(int thread); 109 109 110 - #endif /* __ASSEMBLY__ */ 110 + #endif /* __ASSEMBLER__ */ 111 111 112 112 #define INVALID_THREAD_HWID 0x0fff 113 113
+2 -2
arch/powerpc/include/asm/dcr-native.h
··· 7 7 #ifndef _ASM_POWERPC_DCR_NATIVE_H 8 8 #define _ASM_POWERPC_DCR_NATIVE_H 9 9 #ifdef __KERNEL__ 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <linux/spinlock.h> 13 13 #include <asm/cputable.h> ··· 139 139 DCRN_ ## base ## _CONFIG_DATA, \ 140 140 reg, clr, set) 141 141 142 - #endif /* __ASSEMBLY__ */ 142 + #endif /* __ASSEMBLER__ */ 143 143 #endif /* __KERNEL__ */ 144 144 #endif /* _ASM_POWERPC_DCR_NATIVE_H */
+2 -2
arch/powerpc/include/asm/dcr.h
··· 7 7 #ifndef _ASM_POWERPC_DCR_H 8 8 #define _ASM_POWERPC_DCR_H 9 9 #ifdef __KERNEL__ 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 #ifdef CONFIG_PPC_DCR 12 12 13 13 #include <asm/dcr-native.h> ··· 28 28 extern unsigned int dcr_resource_len(const struct device_node *np, 29 29 unsigned int index); 30 30 #endif /* CONFIG_PPC_DCR */ 31 - #endif /* __ASSEMBLY__ */ 31 + #endif /* __ASSEMBLER__ */ 32 32 #endif /* __KERNEL__ */ 33 33 #endif /* _ASM_POWERPC_DCR_H */
+2 -2
arch/powerpc/include/asm/epapr_hcalls.h
··· 52 52 53 53 #include <uapi/asm/epapr_hcalls.h> 54 54 55 - #ifndef __ASSEMBLY__ 55 + #ifndef __ASSEMBLER__ 56 56 #include <linux/types.h> 57 57 #include <linux/errno.h> 58 58 #include <asm/byteorder.h> ··· 571 571 in[3] = p4; 572 572 return epapr_hypercall(in, out, nr); 573 573 } 574 - #endif /* !__ASSEMBLY__ */ 574 + #endif /* !__ASSEMBLER__ */ 575 575 #endif /* _EPAPR_HCALLS_H */
+1 -1
arch/powerpc/include/asm/exception-64e.h
··· 149 149 addi r11,r13,PACA_EXTLB; \ 150 150 TLB_MISS_RESTORE(r11) 151 151 152 - #ifndef __ASSEMBLY__ 152 + #ifndef __ASSEMBLER__ 153 153 extern unsigned int interrupt_base_book3e; 154 154 #endif 155 155
+3 -3
arch/powerpc/include/asm/exception-64s.h
··· 53 53 */ 54 54 #define MAX_MCE_DEPTH 4 55 55 56 - #ifdef __ASSEMBLY__ 56 + #ifdef __ASSEMBLER__ 57 57 58 58 #define STF_ENTRY_BARRIER_SLOT \ 59 59 STF_ENTRY_BARRIER_FIXUP_SECTION; \ ··· 170 170 RFSCV; \ 171 171 b rfscv_flush_fallback 172 172 173 - #else /* __ASSEMBLY__ */ 173 + #else /* __ASSEMBLER__ */ 174 174 /* Prototype for function defined in exceptions-64s.S */ 175 175 void do_uaccess_flush(void); 176 - #endif /* __ASSEMBLY__ */ 176 + #endif /* __ASSEMBLER__ */ 177 177 178 178 #endif /* _ASM_POWERPC_EXCEPTION_H */
+1 -1
arch/powerpc/include/asm/extable.h
··· 17 17 18 18 #define ARCH_HAS_RELATIVE_EXTABLE 19 19 20 - #ifndef __ASSEMBLY__ 20 + #ifndef __ASSEMBLER__ 21 21 22 22 struct exception_table_entry { 23 23 int insn;
+3 -3
arch/powerpc/include/asm/feature-fixups.h
··· 168 168 #define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ 169 169 ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) 170 170 171 - #ifndef __ASSEMBLY__ 171 + #ifndef __ASSEMBLER__ 172 172 173 173 #define ASM_FTR_IF(section_if, section_else, msk, val) \ 174 174 stringify_in_c(BEGIN_FTR_SECTION) \ ··· 196 196 #define ASM_MMU_FTR_IFCLR(section_if, section_else, msk) \ 197 197 ASM_MMU_FTR_IF(section_if, section_else, (msk), 0) 198 198 199 - #endif /* __ASSEMBLY__ */ 199 + #endif /* __ASSEMBLER__ */ 200 200 201 201 /* LWSYNC feature sections */ 202 202 #define START_LWSYNC_SECTION(label) label##1: ··· 276 276 FTR_ENTRY_OFFSET 956b-957b; \ 277 277 .popsection; 278 278 279 - #ifndef __ASSEMBLY__ 279 + #ifndef __ASSEMBLER__ 280 280 #include <linux/types.h> 281 281 282 282 extern long stf_barrier_fallback;
+2 -2
arch/powerpc/include/asm/firmware.h
··· 58 58 #define FW_FEATURE_WATCHDOG ASM_CONST(0x0000080000000000) 59 59 #define FW_FEATURE_PLPKS ASM_CONST(0x0000100000000000) 60 60 61 - #ifndef __ASSEMBLY__ 61 + #ifndef __ASSEMBLER__ 62 62 63 63 enum { 64 64 #ifdef CONFIG_PPC64 ··· 146 146 static inline void pseries_probe_fw_features(void) { } 147 147 #endif 148 148 149 - #endif /* __ASSEMBLY__ */ 149 + #endif /* __ASSEMBLER__ */ 150 150 #endif /* __KERNEL__ */ 151 151 #endif /* __ASM_POWERPC_FIRMWARE_H */
+2 -2
arch/powerpc/include/asm/fixmap.h
··· 14 14 #ifndef _ASM_FIXMAP_H 15 15 #define _ASM_FIXMAP_H 16 16 17 - #ifndef __ASSEMBLY__ 17 + #ifndef __ASSEMBLER__ 18 18 #include <linux/sizes.h> 19 19 #include <linux/pgtable.h> 20 20 #include <asm/page.h> ··· 111 111 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) 112 112 #endif 113 113 114 - #endif /* !__ASSEMBLY__ */ 114 + #endif /* !__ASSEMBLER__ */ 115 115 #endif
+4 -4
arch/powerpc/include/asm/ftrace.h
··· 15 15 #define FTRACE_MCOUNT_MAX_OFFSET 8 16 16 #endif 17 17 18 - #ifndef __ASSEMBLY__ 18 + #ifndef __ASSEMBLER__ 19 19 extern void _mcount(void); 20 20 21 21 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, ··· 69 69 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 70 70 struct ftrace_ops *op, struct ftrace_regs *fregs); 71 71 #endif 72 - #endif /* __ASSEMBLY__ */ 72 + #endif /* __ASSEMBLER__ */ 73 73 74 74 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 75 75 #define ARCH_SUPPORTS_FTRACE_OPS 1 76 76 #endif 77 77 #endif /* CONFIG_FUNCTION_TRACER */ 78 78 79 - #ifndef __ASSEMBLY__ 79 + #ifndef __ASSEMBLER__ 80 80 #ifdef CONFIG_FTRACE_SYSCALLS 81 81 /* 82 82 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone, ··· 160 160 static inline void ftrace_free_init_tramp(void) { } 161 161 static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; } 162 162 #endif 163 - #endif /* !__ASSEMBLY__ */ 163 + #endif /* !__ASSEMBLER__ */ 164 164 165 165 #endif /* _ASM_POWERPC_FTRACE */
+2 -2
arch/powerpc/include/asm/head-64.h
··· 4 4 5 5 #include <asm/cache.h> 6 6 7 - #ifdef __ASSEMBLY__ 7 + #ifdef __ASSEMBLER__ 8 8 /* 9 9 * We can't do CPP stringification and concatination directly into the section 10 10 * name for some reason, so these macros can do it for us. ··· 167 167 // find label from _within_ sname 168 168 #define ABS_ADDR(label, sname) (label - start_ ## sname + sname ## _start) 169 169 170 - #endif /* __ASSEMBLY__ */ 170 + #endif /* __ASSEMBLER__ */ 171 171 172 172 #endif /* _ASM_POWERPC_HEAD_64_H */
+2 -2
arch/powerpc/include/asm/hvcall.h
··· 534 534 #define H_HTM_TARGET_NODAL_CHIP_INDEX(x) ((unsigned long)(x)<<(63-31)) 535 535 #define H_HTM_TARGET_CORE_INDEX_ON_CHIP(x) ((unsigned long)(x)<<(63-47)) 536 536 537 - #ifndef __ASSEMBLY__ 537 + #ifndef __ASSEMBLER__ 538 538 #include <linux/types.h> 539 539 540 540 /** ··· 735 735 uint8_t bytes[HGPCI_MAX_DATA_BYTES]; 736 736 } __packed; 737 737 738 - #endif /* __ASSEMBLY__ */ 738 + #endif /* __ASSEMBLER__ */ 739 739 #endif /* __KERNEL__ */ 740 740 #endif /* _ASM_POWERPC_HVCALL_H */
+2 -2
arch/powerpc/include/asm/hw_irq.h
··· 59 59 #define IRQS_PMI_DISABLED 2 60 60 #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED) 61 61 62 - #ifndef __ASSEMBLY__ 62 + #ifndef __ASSEMBLER__ 63 63 64 64 static inline void __hard_irq_enable(void) 65 65 { ··· 516 516 517 517 #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST 518 518 519 - #endif /* __ASSEMBLY__ */ 519 + #endif /* __ASSEMBLER__ */ 520 520 #endif /* __KERNEL__ */ 521 521 #endif /* _ASM_POWERPC_HW_IRQ_H */
+2 -2
arch/powerpc/include/asm/interrupt.h
··· 64 64 #define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100 65 65 #define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200 66 66 67 - #ifndef __ASSEMBLY__ 67 + #ifndef __ASSEMBLER__ 68 68 69 69 #include <linux/context_tracking.h> 70 70 #include <linux/hardirq.h> ··· 675 675 unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs); 676 676 #endif 677 677 678 - #endif /* __ASSEMBLY__ */ 678 + #endif /* __ASSEMBLER__ */ 679 679 680 680 #endif /* _ASM_POWERPC_INTERRUPT_H */
+1 -1
arch/powerpc/include/asm/irqflags.h
··· 5 5 #ifndef _ASM_IRQFLAGS_H 6 6 #define _ASM_IRQFLAGS_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 /* 10 10 * Get definitions for arch_local_save_flags(x), etc. 11 11 */
+1 -1
arch/powerpc/include/asm/jump_label.h
··· 6 6 * Copyright 2010 Michael Ellerman, IBM Corp. 7 7 */ 8 8 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 #include <linux/types.h> 11 11 12 12 #include <asm/feature-fixups.h>
+2 -2
arch/powerpc/include/asm/kasan.h
··· 12 12 #define EXPORT_SYMBOL_KASAN(fn) 13 13 #endif 14 14 15 - #ifndef __ASSEMBLY__ 15 + #ifndef __ASSEMBLER__ 16 16 17 17 #include <asm/page.h> 18 18 #include <linux/sizes.h> ··· 80 80 int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end); 81 81 int kasan_init_region(void *start, size_t size); 82 82 83 - #endif /* __ASSEMBLY */ 83 + #endif /* __ASSEMBLER__ */ 84 84 #endif
+2 -2
arch/powerpc/include/asm/kdump.h
··· 31 31 32 32 #endif /* CONFIG_CRASH_DUMP */ 33 33 34 - #ifndef __ASSEMBLY__ 34 + #ifndef __ASSEMBLER__ 35 35 36 36 #if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL) 37 37 extern void reserve_kdump_trampoline(void); ··· 42 42 static inline void setup_kdump_trampoline(void) { ; } 43 43 #endif 44 44 45 - #endif /* __ASSEMBLY__ */ 45 + #endif /* __ASSEMBLER__ */ 46 46 47 47 #endif /* __PPC64_KDUMP_H */
+2 -2
arch/powerpc/include/asm/kexec.h
··· 49 49 #define KEXEC_STATE_IRQS_OFF 1 50 50 #define KEXEC_STATE_REAL_MODE 2 51 51 52 - #ifndef __ASSEMBLY__ 52 + #ifndef __ASSEMBLER__ 53 53 #include <asm/reg.h> 54 54 55 55 typedef void (*crash_shutdown_t)(void); ··· 210 210 } 211 211 #endif 212 212 213 - #endif /* ! __ASSEMBLY__ */ 213 + #endif /* ! __ASSEMBLER__ */ 214 214 #endif /* __KERNEL__ */ 215 215 #endif /* _ASM_POWERPC_KEXEC_H */
+2 -2
arch/powerpc/include/asm/kgdb.h
··· 21 21 #ifndef __POWERPC_KGDB_H__ 22 22 #define __POWERPC_KGDB_H__ 23 23 24 - #ifndef __ASSEMBLY__ 24 + #ifndef __ASSEMBLER__ 25 25 26 26 #define BREAK_INSTR_SIZE 4 27 27 #define BUFMAX ((NUMREGBYTES * 2) + 512) ··· 62 62 /* CR/LR, R1, R2, R13-R31 inclusive. */ 63 63 #define NUMCRITREGBYTES (23 * sizeof(int)) 64 64 #endif /* 32/64 */ 65 - #endif /* !(__ASSEMBLY__) */ 65 + #endif /* !(__ASSEMBLER__) */ 66 66 #endif /* !__POWERPC_KGDB_H__ */ 67 67 #endif /* __KERNEL__ */
+4 -4
arch/powerpc/include/asm/kup.h
··· 6 6 #define KUAP_WRITE 2 7 7 #define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE) 8 8 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 #include <linux/types.h> 11 11 12 12 static __always_inline bool kuap_is_disabled(void); ··· 28 28 #include <asm/book3s/32/kup.h> 29 29 #endif 30 30 31 - #ifdef __ASSEMBLY__ 31 + #ifdef __ASSEMBLER__ 32 32 #ifndef CONFIG_PPC_KUAP 33 33 .macro kuap_check_amr gpr1, gpr2 34 34 .endm 35 35 36 36 #endif 37 37 38 - #else /* !__ASSEMBLY__ */ 38 + #else /* !__ASSEMBLER__ */ 39 39 40 40 extern bool disable_kuep; 41 41 extern bool disable_kuap; ··· 181 181 prevent_user_access(KUAP_WRITE); 182 182 } 183 183 184 - #endif /* !__ASSEMBLY__ */ 184 + #endif /* !__ASSEMBLER__ */ 185 185 186 186 #endif /* _ASM_POWERPC_KUAP_H_ */
+1 -1
arch/powerpc/include/asm/kvm_asm.h
··· 9 9 #ifndef __POWERPC_KVM_ASM_H__ 10 10 #define __POWERPC_KVM_ASM_H__ 11 11 12 - #ifdef __ASSEMBLY__ 12 + #ifdef __ASSEMBLER__ 13 13 #ifdef CONFIG_64BIT 14 14 #define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg) 15 15 #define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+3 -3
arch/powerpc/include/asm/kvm_book3s_asm.h
··· 20 20 /* Maximum number of subcores per physical core */ 21 21 #define MAX_SUBCORES 4 22 22 23 - #ifdef __ASSEMBLY__ 23 + #ifdef __ASSEMBLER__ 24 24 25 25 #ifdef CONFIG_KVM_BOOK3S_HANDLER 26 26 ··· 58 58 59 59 #endif /* CONFIG_KVM_BOOK3S_HANDLER */ 60 60 61 - #else /*__ASSEMBLY__ */ 61 + #else /*__ASSEMBLER__ */ 62 62 63 63 struct kvmppc_vcore; 64 64 ··· 150 150 #endif 151 151 }; 152 152 153 - #endif /*__ASSEMBLY__ */ 153 + #endif /*__ASSEMBLER__ */ 154 154 155 155 /* Values for kvm_state */ 156 156 #define KVM_HWTHREAD_IN_KERNEL 0
+2 -2
arch/powerpc/include/asm/kvm_booke_hv_asm.h
··· 8 8 9 9 #include <asm/feature-fixups.h> 10 10 11 - #ifdef __ASSEMBLY__ 11 + #ifdef __ASSEMBLER__ 12 12 13 13 /* 14 14 * All exceptions from guest state must go through KVM ··· 64 64 #endif 65 65 .endm 66 66 67 - #endif /*__ASSEMBLY__ */ 67 + #endif /*__ASSEMBLER__ */ 68 68 #endif /* ASM_KVM_BOOKE_HV_ASM_H */
+2 -2
arch/powerpc/include/asm/lv1call.h
··· 10 10 #if !defined(_ASM_POWERPC_LV1CALL_H) 11 11 #define _ASM_POWERPC_LV1CALL_H 12 12 13 - #if !defined(__ASSEMBLY__) 13 + #if !defined(__ASSEMBLER__) 14 14 15 15 #include <linux/types.h> 16 16 #include <linux/export.h> ··· 211 211 {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);} 212 212 #endif 213 213 214 - #endif /* !defined(__ASSEMBLY__) */ 214 + #endif /* !defined(__ASSEMBLER__) */ 215 215 216 216 /* lv1 call table */ 217 217
+4 -4
arch/powerpc/include/asm/mmu.h
··· 137 137 MMU_FTR_CI_LARGE_PAGE 138 138 #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ 139 139 MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B 140 - #ifndef __ASSEMBLY__ 140 + #ifndef __ASSEMBLER__ 141 141 #include <linux/bug.h> 142 142 #include <asm/cputable.h> 143 143 #include <asm/page.h> ··· 332 332 { 333 333 return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled(); 334 334 } 335 - #endif /* !__ASSEMBLY__ */ 335 + #endif /* !__ASSEMBLER__ */ 336 336 337 337 /* The kernel use the constants below to index in the page sizes array. 338 338 * The use of fixed constants for this purpose is better for performances ··· 377 377 #include <asm/book3s/64/mmu.h> 378 378 #else /* CONFIG_PPC_BOOK3S_64 */ 379 379 380 - #ifndef __ASSEMBLY__ 380 + #ifndef __ASSEMBLER__ 381 381 /* MMU initialization */ 382 382 extern void early_init_mmu(void); 383 383 extern void early_init_mmu_secondary(void); ··· 388 388 static inline void pkey_early_init_devtree(void) {} 389 389 390 390 extern void *abatron_pteptrs[2]; 391 - #endif /* __ASSEMBLY__ */ 391 + #endif /* __ASSEMBLER__ */ 392 392 #endif 393 393 394 394 #if defined(CONFIG_PPC_BOOK3S_32)
+6 -6
arch/powerpc/include/asm/mpc52xx.h
··· 13 13 #ifndef __ASM_POWERPC_MPC52xx_H__ 14 14 #define __ASM_POWERPC_MPC52xx_H__ 15 15 16 - #ifndef __ASSEMBLY__ 16 + #ifndef __ASSEMBLER__ 17 17 #include <asm/types.h> 18 18 #include <asm/mpc5xxx.h> 19 - #endif /* __ASSEMBLY__ */ 19 + #endif /* __ASSEMBLER__ */ 20 20 21 21 #include <linux/suspend.h> 22 22 ··· 30 30 /* Structures mapping of some unit register set */ 31 31 /* ======================================================================== */ 32 32 33 - #ifndef __ASSEMBLY__ 33 + #ifndef __ASSEMBLER__ 34 34 35 35 /* Memory Mapping Control */ 36 36 struct mpc52xx_mmap_ctl { ··· 258 258 u32 per_error; /* INTR + 0x38 */ 259 259 }; 260 260 261 - #endif /* __ASSEMBLY__ */ 261 + #endif /* __ASSEMBLER__ */ 262 262 263 263 264 264 /* ========================================================================= */ 265 265 /* Prototypes for MPC52xx sysdev */ 266 266 /* ========================================================================= */ 267 267 268 - #ifndef __ASSEMBLY__ 268 + #ifndef __ASSEMBLER__ 269 269 270 270 struct device_node; 271 271 ··· 297 297 static inline void mpc52xx_setup_pci(void) { } 298 298 #endif 299 299 300 - #endif /* __ASSEMBLY__ */ 300 + #endif /* __ASSEMBLER__ */ 301 301 302 302 #ifdef CONFIG_PM 303 303 struct mpc52xx_suspend {
+2 -2
arch/powerpc/include/asm/nohash/32/kup-8xx.h
··· 7 7 8 8 #ifdef CONFIG_PPC_KUAP 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <asm/reg.h> 13 13 ··· 82 82 return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000); 83 83 } 84 84 85 - #endif /* !__ASSEMBLY__ */ 85 + #endif /* !__ASSEMBLER__ */ 86 86 87 87 #endif /* CONFIG_PPC_KUAP */ 88 88
+2 -2
arch/powerpc/include/asm/nohash/32/mmu-44x.h
··· 100 100 #define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR) 101 101 #define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G) 102 102 103 - #ifndef __ASSEMBLY__ 103 + #ifndef __ASSEMBLER__ 104 104 105 105 extern unsigned int tlb_44x_hwater; 106 106 extern unsigned int tlb_44x_index; ··· 114 114 /* patch sites */ 115 115 extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I; 116 116 117 - #endif /* !__ASSEMBLY__ */ 117 + #endif /* !__ASSEMBLER__ */ 118 118 119 119 #ifndef CONFIG_PPC_EARLY_DEBUG_44x 120 120 #define PPC44x_EARLY_TLBS 1
+2 -2
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
··· 174 174 #define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) 175 175 #define MODULES_VADDR (MODULES_END - MODULES_SIZE) 176 176 177 - #ifndef __ASSEMBLY__ 177 + #ifndef __ASSEMBLER__ 178 178 179 179 #include <linux/mmdebug.h> 180 180 #include <linux/sizes.h> ··· 265 265 extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1; 266 266 extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; 267 267 268 - #endif /* !__ASSEMBLY__ */ 268 + #endif /* !__ASSEMBLER__ */ 269 269 270 270 #endif /* _ASM_POWERPC_MMU_8XX_H_ */
+6 -6
arch/powerpc/include/asm/nohash/32/pgtable.h
··· 4 4 5 5 #include <asm-generic/pgtable-nopmd.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 #include <linux/sched.h> 9 9 #include <linux/threads.h> 10 10 #include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */ 11 11 12 - #endif /* __ASSEMBLY__ */ 12 + #endif /* __ASSEMBLER__ */ 13 13 14 14 #define PTE_INDEX_SIZE PTE_SHIFT 15 15 #define PMD_INDEX_SIZE 0 ··· 19 19 #define PMD_CACHE_INDEX PMD_INDEX_SIZE 20 20 #define PUD_CACHE_INDEX PUD_INDEX_SIZE 21 21 22 - #ifndef __ASSEMBLY__ 22 + #ifndef __ASSEMBLER__ 23 23 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 24 24 #define PMD_TABLE_SIZE 0 25 25 #define PUD_TABLE_SIZE 0 26 26 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 27 27 28 28 #define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1) 29 - #endif /* __ASSEMBLY__ */ 29 + #endif /* __ASSEMBLER__ */ 30 30 31 31 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 32 32 #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) ··· 149 149 #define MAX_POSSIBLE_PHYSMEM_BITS 32 150 150 #endif 151 151 152 - #ifndef __ASSEMBLY__ 152 + #ifndef __ASSEMBLER__ 153 153 154 154 #define pmd_none(pmd) (!pmd_val(pmd)) 155 155 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) ··· 199 199 /* We borrow LSB 2 to store the exclusive marker in swap PTEs. */ 200 200 #define _PAGE_SWP_EXCLUSIVE 0x000004 201 201 202 - #endif /* !__ASSEMBLY__ */ 202 + #endif /* !__ASSEMBLER__ */ 203 203 204 204 #endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
+1 -1
arch/powerpc/include/asm/nohash/32/pte-8xx.h
··· 83 83 84 84 #include <asm/pgtable-masks.h> 85 85 86 - #ifndef __ASSEMBLY__ 86 + #ifndef __ASSEMBLER__ 87 87 static inline pte_t pte_wrprotect(pte_t pte) 88 88 { 89 89 return __pte(pte_val(pte) | _PAGE_RO);
+4 -4
arch/powerpc/include/asm/nohash/64/pgtable-4k.h
··· 14 14 #define PUD_INDEX_SIZE 9 15 15 #define PGD_INDEX_SIZE 9 16 16 17 - #ifndef __ASSEMBLY__ 17 + #ifndef __ASSEMBLER__ 18 18 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 19 19 #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) 20 20 #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) 21 21 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 22 - #endif /* __ASSEMBLY__ */ 22 + #endif /* __ASSEMBLER__ */ 23 23 24 24 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 25 25 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) ··· 57 57 #define p4d_bad(p4d) (p4d_val(p4d) == 0) 58 58 #define p4d_present(p4d) (p4d_val(p4d) != 0) 59 59 60 - #ifndef __ASSEMBLY__ 60 + #ifndef __ASSEMBLER__ 61 61 62 62 static inline pud_t *p4d_pgtable(p4d_t p4d) 63 63 { ··· 80 80 } 81 81 extern struct page *p4d_page(p4d_t p4d); 82 82 83 - #endif /* !__ASSEMBLY__ */ 83 + #endif /* !__ASSEMBLER__ */ 84 84 85 85 #define pud_ERROR(e) \ 86 86 pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+2 -2
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 77 77 78 78 #define H_PAGE_4K_PFN 0 79 79 80 - #ifndef __ASSEMBLY__ 80 + #ifndef __ASSEMBLER__ 81 81 /* pte_clear moved to later in this file */ 82 82 83 83 #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) ··· 209 209 __patch_exception((exc), (unsigned long)&name); \ 210 210 } while (0) 211 211 212 - #endif /* __ASSEMBLY__ */ 212 + #endif /* __ASSEMBLER__ */ 213 213 214 214 #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
+2 -2
arch/powerpc/include/asm/nohash/kup-booke.h
··· 7 7 8 8 #ifdef CONFIG_PPC_KUAP 9 9 10 - #ifdef __ASSEMBLY__ 10 + #ifdef __ASSEMBLER__ 11 11 12 12 .macro kuap_check_amr gpr1, gpr2 13 13 .endm ··· 105 105 return !regs->kuap; 106 106 } 107 107 108 - #endif /* !__ASSEMBLY__ */ 108 + #endif /* !__ASSEMBLER__ */ 109 109 110 110 #endif /* CONFIG_PPC_KUAP */ 111 111
+2 -2
arch/powerpc/include/asm/nohash/mmu-e500.h
··· 230 230 #define MAS2_M_IF_NEEDED 0 231 231 #endif 232 232 233 - #ifndef __ASSEMBLY__ 233 + #ifndef __ASSEMBLER__ 234 234 #include <asm/bug.h> 235 235 236 236 extern unsigned int tlbcam_index; ··· 318 318 #include <asm/percpu.h> 319 319 DECLARE_PER_CPU(int, next_tlbcam_idx); 320 320 321 - #endif /* !__ASSEMBLY__ */ 321 + #endif /* !__ASSEMBLER__ */ 322 322 323 323 #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
+3 -3
arch/powerpc/include/asm/nohash/pgtable.h
··· 2 2 #ifndef _ASM_POWERPC_NOHASH_PGTABLE_H 3 3 #define _ASM_POWERPC_NOHASH_PGTABLE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, 7 7 unsigned long clr, unsigned long set, int huge); 8 8 #endif ··· 27 27 #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) 28 28 #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) 29 29 30 - #ifndef __ASSEMBLY__ 30 + #ifndef __ASSEMBLER__ 31 31 32 32 extern int icache_44x_need_flush; 33 33 ··· 373 373 int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); 374 374 void unmap_kernel_page(unsigned long va); 375 375 376 - #endif /* __ASSEMBLY__ */ 376 + #endif /* __ASSEMBLER__ */ 377 377 #endif
+2 -2
arch/powerpc/include/asm/nohash/pte-e500.h
··· 86 86 87 87 #include <asm/pgtable-masks.h> 88 88 89 - #ifndef __ASSEMBLY__ 89 + #ifndef __ASSEMBLER__ 90 90 static inline pte_t pte_mkexec(pte_t pte) 91 91 { 92 92 return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); ··· 134 134 135 135 #endif 136 136 137 - #endif /* __ASSEMBLY__ */ 137 + #endif /* __ASSEMBLER__ */ 138 138 139 139 #endif /* __KERNEL__ */ 140 140 #endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */
+2 -2
arch/powerpc/include/asm/opal-api.h
··· 246 246 #define OPAL_CONFIG_IDLE_UNDO 0 247 247 #define OPAL_CONFIG_IDLE_APPLY 1 248 248 249 - #ifndef __ASSEMBLY__ 249 + #ifndef __ASSEMBLER__ 250 250 251 251 /* Other enums */ 252 252 enum OpalFreezeState { ··· 1183 1183 struct opal_mpipl_region region[]; 1184 1184 } __packed; 1185 1185 1186 - #endif /* __ASSEMBLY__ */ 1186 + #endif /* __ASSEMBLER__ */ 1187 1187 1188 1188 #endif /* __OPAL_API_H */
+2 -2
arch/powerpc/include/asm/opal.h
··· 10 10 11 11 #include <asm/opal-api.h> 12 12 13 - #ifndef __ASSEMBLY__ 13 + #ifndef __ASSEMBLER__ 14 14 15 15 #include <linux/notifier.h> 16 16 ··· 390 390 void opal_psr_init(void); 391 391 void opal_sensor_groups_init(void); 392 392 393 - #endif /* __ASSEMBLY__ */ 393 + #endif /* __ASSEMBLER__ */ 394 394 395 395 #endif /* _ASM_POWERPC_OPAL_H */
+7 -7
arch/powerpc/include/asm/page.h
··· 6 6 * Copyright (C) 2001,2005 IBM Corporation. 7 7 */ 8 8 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 #include <linux/types.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/bug.h> ··· 23 23 */ 24 24 #include <vdso/page.h> 25 25 26 - #ifndef __ASSEMBLY__ 26 + #ifndef __ASSEMBLER__ 27 27 #ifndef CONFIG_HUGETLB_PAGE 28 28 #define HPAGE_SHIFT PAGE_SHIFT 29 29 #elif defined(CONFIG_PPC_BOOK3S_64) ··· 75 75 #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START)) 76 76 77 77 #if defined(CONFIG_NONSTATIC_KERNEL) 78 - #ifndef __ASSEMBLY__ 78 + #ifndef __ASSEMBLER__ 79 79 80 80 extern phys_addr_t memstart_addr; 81 81 extern phys_addr_t kernstart_addr; ··· 84 84 extern long long virt_phys_offset; 85 85 #endif 86 86 87 - #endif /* __ASSEMBLY__ */ 87 + #endif /* __ASSEMBLER__ */ 88 88 #define PHYSICAL_START kernstart_addr 89 89 90 90 #else /* !CONFIG_NONSTATIC_KERNEL */ ··· 216 216 #endif 217 217 #endif 218 218 219 - #ifndef __ASSEMBLY__ 219 + #ifndef __ASSEMBLER__ 220 220 static inline unsigned long virt_to_pfn(const void *kaddr) 221 221 { 222 222 return __pa(kaddr) >> PAGE_SHIFT; ··· 261 261 #define is_kernel_addr(x) ((x) >= TASK_SIZE) 262 262 #endif 263 263 264 - #ifndef __ASSEMBLY__ 264 + #ifndef __ASSEMBLER__ 265 265 266 266 #ifdef CONFIG_PPC_BOOK3S_64 267 267 #include <asm/pgtable-be-types.h> ··· 290 290 } 291 291 292 292 #include <asm-generic/memory_model.h> 293 - #endif /* __ASSEMBLY__ */ 293 + #endif /* __ASSEMBLER__ */ 294 294 295 295 #endif /* _ASM_POWERPC_PAGE_H */
+2 -2
arch/powerpc/include/asm/page_32.h
··· 19 19 #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */ 20 20 #endif 21 21 22 - #ifndef __ASSEMBLY__ 22 + #ifndef __ASSEMBLER__ 23 23 /* 24 24 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit 25 25 * physical addressing. ··· 53 53 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) 54 54 #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) 55 55 56 - #endif /* __ASSEMBLY__ */ 56 + #endif /* __ASSEMBLER__ */ 57 57 58 58 #endif /* _ASM_POWERPC_PAGE_32_H */
+2 -2
arch/powerpc/include/asm/page_64.h
··· 35 35 #define ESID_MASK_1T 0xffffff0000000000UL 36 36 #define GET_ESID_1T(x) (((x) >> SID_SHIFT_1T) & SID_MASK_1T) 37 37 38 - #ifndef __ASSEMBLY__ 38 + #ifndef __ASSEMBLER__ 39 39 #include <asm/cache.h> 40 40 41 41 typedef unsigned long pte_basic_t; ··· 82 82 /* Log 2 of page table size */ 83 83 extern u64 ppc64_pft_size; 84 84 85 - #endif /* __ASSEMBLY__ */ 85 + #endif /* __ASSEMBLER__ */ 86 86 87 87 #define VM_DATA_DEFAULT_FLAGS \ 88 88 (is_32bit_task() ? \
+4 -4
arch/powerpc/include/asm/pgtable.h
··· 2 2 #ifndef _ASM_POWERPC_PGTABLE_H 3 3 #define _ASM_POWERPC_PGTABLE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 #include <linux/mmdebug.h> 7 7 #include <linux/mmzone.h> 8 8 #include <asm/processor.h> /* For TASK_SIZE */ ··· 12 12 13 13 struct mm_struct; 14 14 15 - #endif /* !__ASSEMBLY__ */ 15 + #endif /* !__ASSEMBLER__ */ 16 16 17 17 #ifdef CONFIG_PPC_BOOK3S 18 18 #include <asm/book3s/pgtable.h> ··· 39 39 #define PAGE_AGP (PAGE_KERNEL_NC) 40 40 #define HAVE_PAGE_AGP 41 41 42 - #ifndef __ASSEMBLY__ 42 + #ifndef __ASSEMBLER__ 43 43 44 44 #define PFN_PTE_SHIFT PTE_RPN_SHIFT 45 45 ··· 214 214 215 215 #endif /* CONFIG_PPC64 */ 216 216 217 - #endif /* __ASSEMBLY__ */ 217 + #endif /* __ASSEMBLER__ */ 218 218 219 219 #endif /* _ASM_POWERPC_PGTABLE_H */
+2 -2
arch/powerpc/include/asm/ppc_asm.h
··· 12 12 #include <asm/feature-fixups.h> 13 13 #include <asm/extable.h> 14 14 15 - #ifdef __ASSEMBLY__ 15 + #ifdef __ASSEMBLER__ 16 16 17 17 #define SZL (BITS_PER_LONG/8) 18 18 ··· 868 868 869 869 #endif /* !CONFIG_PPC_BOOK3E_64 */ 870 870 871 - #endif /* __ASSEMBLY__ */ 871 + #endif /* __ASSEMBLER__ */ 872 872 873 873 #define SOFT_MASK_TABLE(_start, _end) \ 874 874 stringify_in_c(.section __soft_mask_table,"a";)\
+4 -4
arch/powerpc/include/asm/processor.h
··· 29 29 #ifdef CONFIG_PPC64 30 30 /* Default SMT priority is set to 3. Use 11- 13bits to save priority. */ 31 31 #define PPR_PRIORITY 3 32 - #ifdef __ASSEMBLY__ 32 + #ifdef __ASSEMBLER__ 33 33 #define DEFAULT_PPR (PPR_PRIORITY << 50) 34 34 #else 35 35 #define DEFAULT_PPR ((u64)PPR_PRIORITY << 50) 36 - #endif /* __ASSEMBLY__ */ 36 + #endif /* __ASSEMBLER__ */ 37 37 #endif /* CONFIG_PPC64 */ 38 38 39 - #ifndef __ASSEMBLY__ 39 + #ifndef __ASSEMBLER__ 40 40 #include <linux/types.h> 41 41 #include <linux/thread_info.h> 42 42 #include <asm/ptrace.h> ··· 460 460 void *exit_vmx_ops(void *dest); 461 461 462 462 #endif /* __KERNEL__ */ 463 - #endif /* __ASSEMBLY__ */ 463 + #endif /* __ASSEMBLER__ */ 464 464 #endif /* _ASM_POWERPC_PROCESSOR_H */
+3 -3
arch/powerpc/include/asm/ptrace.h
··· 24 24 #include <asm/asm-const.h> 25 25 #include <asm/reg.h> 26 26 27 - #ifndef __ASSEMBLY__ 27 + #ifndef __ASSEMBLER__ 28 28 struct pt_regs 29 29 { 30 30 union { ··· 165 165 #define STACK_INT_FRAME_SIZE (KERNEL_REDZONE_SIZE + STACK_USER_INT_FRAME_SIZE) 166 166 #define STACK_INT_FRAME_MARKER_LONGS (STACK_INT_FRAME_MARKER/sizeof(long)) 167 167 168 - #ifndef __ASSEMBLY__ 168 + #ifndef __ASSEMBLER__ 169 169 #include <asm/paca.h> 170 170 171 171 #ifdef CONFIG_SMP ··· 414 414 return 0; 415 415 } 416 416 417 - #endif /* __ASSEMBLY__ */ 417 + #endif /* __ASSEMBLER__ */ 418 418 419 419 #ifndef __powerpc64__ 420 420 /* We need PT_SOFTE defined at all time to avoid #ifdefs */
+3 -3
arch/powerpc/include/asm/reg.h
··· 60 60 #define MSR_RI_LG 1 /* Recoverable Exception */ 61 61 #define MSR_LE_LG 0 /* Little Endian */ 62 62 63 - #ifdef __ASSEMBLY__ 63 + #ifdef __ASSEMBLER__ 64 64 #define __MASK(X) (1<<(X)) 65 65 #else 66 66 #define __MASK(X) (1UL<<(X)) ··· 1358 1358 #define PVR_ARCH_31_P11 0x0f000007 1359 1359 1360 1360 /* Macros for setting and retrieving special purpose registers */ 1361 - #ifndef __ASSEMBLY__ 1361 + #ifndef __ASSEMBLER__ 1362 1362 1363 1363 #if defined(CONFIG_PPC64) || defined(__CHECKER__) 1364 1364 typedef struct { ··· 1450 1450 struct pt_regs; 1451 1451 1452 1452 extern void ppc_save_regs(struct pt_regs *regs); 1453 - #endif /* __ASSEMBLY__ */ 1453 + #endif /* __ASSEMBLER__ */ 1454 1454 #endif /* __KERNEL__ */ 1455 1455 #endif /* _ASM_POWERPC_REG_H */
+2 -2
arch/powerpc/include/asm/reg_booke.h
··· 576 576 577 577 #define TEN_THREAD(x) (1 << (x)) 578 578 579 - #ifndef __ASSEMBLY__ 579 + #ifndef __ASSEMBLER__ 580 580 #define mftmr(rn) ({unsigned long rval; \ 581 581 asm volatile(MFTMR(rn, %0) : "=r" (rval)); rval;}) 582 582 #define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \ ··· 585 585 586 586 extern unsigned long global_dbcr0[]; 587 587 588 - #endif /* !__ASSEMBLY__ */ 588 + #endif /* !__ASSEMBLER__ */ 589 589 590 590 #endif /* __ASM_POWERPC_REG_BOOKE_H__ */ 591 591 #endif /* __KERNEL__ */
+2 -2
arch/powerpc/include/asm/reg_fsl_emb.h
··· 9 9 10 10 #include <linux/stringify.h> 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 /* Performance Monitor Registers */ 14 14 static __always_inline unsigned int mfpmr(unsigned int rn) 15 15 { ··· 32 32 ".machine pop;" 33 33 : [val] "=r" (val) : [rn] "i" (rn)); 34 34 } 35 - #endif /* __ASSEMBLY__ */ 35 + #endif /* __ASSEMBLER__ */ 36 36 37 37 /* Freescale Book E Performance Monitor APU Registers */ 38 38 #define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
+2 -2
arch/powerpc/include/asm/setup.h
··· 4 4 5 5 #include <uapi/asm/setup.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 extern void ppc_printk_progress(char *s, unsigned short hex); 9 9 10 10 extern unsigned long long memory_limit; ··· 89 89 90 90 extern struct seq_buf ppc_hw_desc; 91 91 92 - #endif /* !__ASSEMBLY__ */ 92 + #endif /* !__ASSEMBLER__ */ 93 93 94 94 #endif /* _ASM_POWERPC_SETUP_H */ 95 95
+2 -2
arch/powerpc/include/asm/smp.h
··· 18 18 #include <linux/kernel.h> 19 19 #include <linux/irqreturn.h> 20 20 21 - #ifndef __ASSEMBLY__ 21 + #ifndef __ASSEMBLER__ 22 22 23 23 #ifdef CONFIG_PPC64 24 24 #include <asm/paca.h> ··· 266 266 extern unsigned int booting_thread_hwid; 267 267 268 268 extern void __early_start(void); 269 - #endif /* __ASSEMBLY__ */ 269 + #endif /* __ASSEMBLER__ */ 270 270 271 271 #endif /* __KERNEL__ */ 272 272 #endif /* _ASM_POWERPC_SMP_H) */
+2 -2
arch/powerpc/include/asm/spu_csa.h
··· 43 43 #define SPU_DECR_STATUS_RUNNING 0x1 44 44 #define SPU_DECR_STATUS_WRAPPED 0x2 45 45 46 - #ifndef __ASSEMBLY__ 46 + #ifndef __ASSEMBLER__ 47 47 /** 48 48 * spu_reg128 - generic 128-bit register definition. 49 49 */ ··· 243 243 244 244 #endif /* !__SPU__ */ 245 245 #endif /* __KERNEL__ */ 246 - #endif /* !__ASSEMBLY__ */ 246 + #endif /* !__ASSEMBLER__ */ 247 247 #endif /* _SPU_CSA_H_ */
+2 -2
arch/powerpc/include/asm/synch.h
··· 7 7 #include <asm/feature-fixups.h> 8 8 #include <asm/ppc-opcode.h> 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 12 12 extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 13 13 void *fixup_end); ··· 40 40 */ 41 41 asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory"); 42 42 } 43 - #endif /* __ASSEMBLY__ */ 43 + #endif /* __ASSEMBLER__ */ 44 44 45 45 #if defined(__powerpc64__) 46 46 # define LWSYNC lwsync
+4 -4
arch/powerpc/include/asm/thread_info.h
··· 41 41 42 42 #define THREAD_ALIGN (1 << THREAD_ALIGN_SHIFT) 43 43 44 - #ifndef __ASSEMBLY__ 44 + #ifndef __ASSEMBLER__ 45 45 #include <linux/cache.h> 46 46 #include <asm/processor.h> 47 47 #include <asm/accounting.h> ··· 89 89 void arch_setup_new_exec(void); 90 90 #define arch_setup_new_exec arch_setup_new_exec 91 91 92 - #endif /* __ASSEMBLY__ */ 92 + #endif /* __ASSEMBLER__ */ 93 93 94 94 /* 95 95 * thread information flag bit numbers ··· 162 162 #define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU) 163 163 #define _TLF_RUNLATCH (1 << TLF_RUNLATCH) 164 164 165 - #ifndef __ASSEMBLY__ 165 + #ifndef __ASSEMBLER__ 166 166 167 167 static inline void clear_thread_local_flags(unsigned int flags) 168 168 { ··· 233 233 extern void *emergency_ctx[]; 234 234 #endif 235 235 236 - #endif /* !__ASSEMBLY__ */ 236 + #endif /* !__ASSEMBLER__ */ 237 237 238 238 #endif /* __KERNEL__ */ 239 239
+2 -2
arch/powerpc/include/asm/tm.h
··· 8 8 9 9 #include <uapi/asm/tm.h> 10 10 11 - #ifndef __ASSEMBLY__ 11 + #ifndef __ASSEMBLER__ 12 12 13 13 extern void tm_reclaim(struct thread_struct *thread, 14 14 uint8_t cause); ··· 19 19 20 20 extern bool tm_suspend_disabled; 21 21 22 - #endif /* __ASSEMBLY__ */ 22 + #endif /* __ASSEMBLER__ */
+2 -2
arch/powerpc/include/asm/types.h
··· 11 11 12 12 #include <uapi/asm/types.h> 13 13 14 - #ifndef __ASSEMBLY__ 14 + #ifndef __ASSEMBLER__ 15 15 16 16 typedef __vector128 vector128; 17 17 18 - #endif /* __ASSEMBLY__ */ 18 + #endif /* __ASSEMBLER__ */ 19 19 20 20 #endif /* _ASM_POWERPC_TYPES_H */
+2 -2
arch/powerpc/include/asm/unistd.h
··· 9 9 10 10 #define NR_syscalls __NR_syscalls 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 14 14 #include <linux/types.h> 15 15 #include <linux/compiler.h> ··· 52 52 #define __ARCH_WANT_SYS_VFORK 53 53 #define __ARCH_WANT_SYS_CLONE 54 54 55 - #endif /* __ASSEMBLY__ */ 55 + #endif /* __ASSEMBLER__ */ 56 56 #endif /* _ASM_POWERPC_UNISTD_H_ */
+3 -3
arch/powerpc/include/asm/vdso.h
··· 5 5 #define VDSO_VERSION_STRING LINUX_2.6.15 6 6 #define __VDSO_PAGES 4 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #ifdef CONFIG_PPC64 11 11 #include <generated/vdso64-offsets.h> ··· 21 21 22 22 int vdso_getcpu_init(void); 23 23 24 - #else /* __ASSEMBLY__ */ 24 + #else /* __ASSEMBLER__ */ 25 25 26 26 #ifdef __VDSO64__ 27 27 #define V_FUNCTION_BEGIN(name) \ ··· 49 49 50 50 #endif /* __VDSO32__ */ 51 51 52 - #endif /* __ASSEMBLY__ */ 52 + #endif /* __ASSEMBLER__ */ 53 53 54 54 #endif /* _ASM_POWERPC_VDSO_H */
+2 -2
arch/powerpc/include/asm/vdso/getrandom.h
··· 5 5 #ifndef _ASM_POWERPC_VDSO_GETRANDOM_H 6 6 #define _ASM_POWERPC_VDSO_GETRANDOM_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #include <asm/vdso_datapage.h> 11 11 ··· 62 62 ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, 63 63 size_t opaque_len); 64 64 65 - #endif /* !__ASSEMBLY__ */ 65 + #endif /* !__ASSEMBLER__ */ 66 66 67 67 #endif /* _ASM_POWERPC_VDSO_GETRANDOM_H */
+2 -2
arch/powerpc/include/asm/vdso/gettimeofday.h
··· 2 2 #ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H 3 3 #define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <asm/vdso/timebase.h> 8 8 #include <asm/barrier.h> ··· 141 141 __kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, 142 142 const struct vdso_time_data *vd); 143 143 144 - #endif /* __ASSEMBLY__ */ 144 + #endif /* __ASSEMBLER__ */ 145 145 146 146 #endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */
+2 -2
arch/powerpc/include/asm/vdso/processor.h
··· 2 2 #ifndef _ASM_POWERPC_VDSO_PROCESSOR_H 3 3 #define _ASM_POWERPC_VDSO_PROCESSOR_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 /* Macros for adjusting thread priority (hardware multi-threading) */ 8 8 #ifdef CONFIG_PPC64 ··· 33 33 #define cpu_relax() barrier() 34 34 #endif 35 35 36 - #endif /* __ASSEMBLY__ */ 36 + #endif /* __ASSEMBLER__ */ 37 37 38 38 #endif /* _ASM_POWERPC_VDSO_PROCESSOR_H */
+2 -2
arch/powerpc/include/asm/vdso/vsyscall.h
··· 2 2 #ifndef _ASM_POWERPC_VDSO_VSYSCALL_H 3 3 #define _ASM_POWERPC_VDSO_VSYSCALL_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <asm/vdso_datapage.h> 8 8 9 9 /* The asm-generic header needs to be included after the definitions above */ 10 10 #include <asm-generic/vdso/vsyscall.h> 11 11 12 - #endif /* !__ASSEMBLY__ */ 12 + #endif /* !__ASSEMBLER__ */ 13 13 14 14 #endif /* _ASM_POWERPC_VDSO_VSYSCALL_H */
+3 -3
arch/powerpc/include/asm/vdso_datapage.h
··· 9 9 * IBM Corp. 10 10 */ 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 14 14 #include <vdso/datapage.h> 15 15 16 - #else /* __ASSEMBLY__ */ 16 + #else /* __ASSEMBLER__ */ 17 17 18 18 .macro get_datapage ptr symbol 19 19 bcl 20, 31, .+4 ··· 23 23 addi \ptr, \ptr, (\symbol - 999b)@l 24 24 .endm 25 25 26 - #endif /* __ASSEMBLY__ */ 26 + #endif /* __ASSEMBLER__ */ 27 27 28 28 #endif /* __KERNEL__ */ 29 29 #endif /* _SYSTEMCFG_H */
+2 -2
arch/powerpc/kernel/head_booke.h
··· 7 7 #include <asm/kvm_booke_hv_asm.h> 8 8 #include <asm/thread_info.h> /* for THREAD_SHIFT */ 9 9 10 - #ifdef __ASSEMBLY__ 10 + #ifdef __ASSEMBLER__ 11 11 12 12 /* 13 13 * Macros used for common Book-e exception handling ··· 522 522 bl kernel_fp_unavailable_exception; \ 523 523 b interrupt_return 524 524 525 - #endif /* __ASSEMBLY__ */ 525 + #endif /* __ASSEMBLER__ */ 526 526 #endif /* __HEAD_BOOKE_H__ */
+1 -1
arch/powerpc/net/bpf_jit.h
··· 8 8 #ifndef _BPF_JIT_H 9 9 #define _BPF_JIT_H 10 10 11 - #ifndef __ASSEMBLY__ 11 + #ifndef __ASSEMBLER__ 12 12 13 13 #include <asm/types.h> 14 14 #include <asm/ppc-opcode.h>
+2 -2
arch/powerpc/platforms/powernv/subcore.h
··· 9 9 #define SYNC_STEP_REAL_MODE 2 /* Set by secondary when in real mode */ 10 10 #define SYNC_STEP_FINISHED 3 /* Set by secondary when split/unsplit is done */ 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 14 14 #ifdef CONFIG_SMP 15 15 void split_core_secondary_loop(u8 *state); ··· 18 18 static inline void update_subcore_sibling_mask(void) { } 19 19 #endif /* CONFIG_SMP */ 20 20 21 - #endif /* __ASSEMBLY__ */ 21 + #endif /* __ASSEMBLER__ */
+2 -2
arch/powerpc/xmon/xmon_bpts.h
··· 3 3 #define XMON_BPTS_H 4 4 5 5 #define NBPTS 256 6 - #ifndef __ASSEMBLY__ 6 + #ifndef __ASSEMBLER__ 7 7 #include <asm/inst.h> 8 8 #define BPT_SIZE (sizeof(ppc_inst_t) * 2) 9 9 #define BPT_WORDS (BPT_SIZE / sizeof(ppc_inst_t)) 10 10 11 11 extern unsigned int bpt_table[NBPTS * BPT_WORDS]; 12 - #endif /* __ASSEMBLY__ */ 12 + #endif /* __ASSEMBLER__ */ 13 13 14 14 #endif /* XMON_BPTS_H */
+1 -1
tools/testing/selftests/powerpc/include/instructions.h
··· 67 67 #define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1) 68 68 69 69 /* This defines the prefixed load/store instructions */ 70 - #ifdef __ASSEMBLY__ 70 + #ifdef __ASSEMBLER__ 71 71 # define stringify_in_c(...) __VA_ARGS__ 72 72 #else 73 73 # define __stringify_in_c(...) #__VA_ARGS__