Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Annotate endianness of various variables and functions

Sparse reports several endianness warnings on variables and functions
that are consistently treated as big endian. There are no
multi-endianness shenanigans going on here so fix these low hanging
fruit up in one patch.

All changes are just type annotations; no endianness switching
operations are introduced by this patch.

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231011053711.93427-7-bgray@linux.ibm.com

Authored by Benjamin Gray; committed by Michael Ellerman
2b4a6cc9 419d5d11

+26 -23
+1 -1
arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -626,7 +626,7 @@
  */
 static inline int pte_devmap(pte_t pte)
 {
-	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+	__be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
 
 	return (pte_raw(pte) & mask) == mask;
 }
+8 -8
arch/powerpc/include/asm/imc-pmu.h
@@ -74,14 +74,14 @@
  * The following is the data structure to hold trace imc data.
  */
 struct trace_imc_data {
-	u64 tb1;
-	u64 ip;
-	u64 val;
-	u64 cpmc1;
-	u64 cpmc2;
-	u64 cpmc3;
-	u64 cpmc4;
-	u64 tb2;
+	__be64 tb1;
+	__be64 ip;
+	__be64 val;
+	__be64 cpmc1;
+	__be64 cpmc2;
+	__be64 cpmc3;
+	__be64 cpmc4;
+	__be64 tb2;
 };
 
 /* Event attribute array index */
+1 -1
arch/powerpc/kernel/prom_init.c
@@ -947,7 +947,7 @@
 } __packed;
 
 struct ibm_arch_vec {
-	struct { u32 mask, val; } pvrs[14];
+	struct { __be32 mask, val; } pvrs[14];
 
 	u8 num_vectors;
 
+2 -2
arch/powerpc/kexec/core_64.c
@@ -379,8 +379,8 @@
 
 #ifdef CONFIG_PPC_64S_HASH_MMU
 /* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base;
-static unsigned long htab_size;
+static __be64 htab_base;
+static __be64 htab_size;
 
 static struct property htab_base_prop = {
 	.name = "linux,htab-base",
+3 -3
arch/powerpc/kexec/file_load_64.c
@@ -32,7 +32,7 @@
 #include <asm/plpks.h>
 
 struct umem_info {
-	u64 *buf;		/* data buffer for usable-memory property */
+	__be64 *buf;		/* data buffer for usable-memory property */
 	u32 size;		/* size allocated for the data buffer */
 	u32 max_entries;	/* maximum no. of entries */
 	u32 idx;		/* index of current entry */
@@ -443,10 +443,10 @@
  *
  * Returns buffer on success, NULL on error.
  */
-static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
+static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
 {
 	u32 new_size;
-	u64 *tbuf;
+	__be64 *tbuf;
 
 	if ((um_info->idx + cnt) <= um_info->max_entries)
 		return um_info->buf;
+1 -1
arch/powerpc/mm/drmem.c
@@ -67,7 +67,7 @@
 	struct property *new_prop;
 	struct of_drconf_cell_v1 *dr_cell;
 	struct drmem_lmb *lmb;
-	u32 *p;
+	__be32 *p;
 
 	new_prop = clone_property(prop, prop->length);
 	if (!new_prop)
+1 -1
arch/powerpc/perf/hv-24x7.c
@@ -1338,7 +1338,7 @@
 	for (i = count = 0, element_data = res->elements + data_offset;
 	     i < num_elements;
 	     i++, element_data += data_size + data_offset)
-		count += be64_to_cpu(*((u64 *) element_data));
+		count += be64_to_cpu(*((__be64 *)element_data));
 
 	*countp = count;
 
+5 -4
arch/powerpc/perf/imc-pmu.c
@@ -1025,16 +1025,16 @@
 	return false;
 }
 
-static u64 * get_event_base_addr(struct perf_event *event)
+static __be64 *get_event_base_addr(struct perf_event *event)
 {
 	u64 addr;
 
 	if (is_thread_imc_pmu(event)) {
 		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
-		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
+		return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
 	}
 
-	return (u64 *)event->hw.event_base;
+	return (__be64 *)event->hw.event_base;
 }
 
 static void thread_imc_pmu_start_txn(struct pmu *pmu,
@@ -1058,7 +1058,8 @@
 
 static u64 imc_read_counter(struct perf_event *event)
 {
-	u64 *addr, data;
+	__be64 *addr;
+	u64 data;
 
 	/*
 	 * In-Memory Collection (IMC) counters are free flowing counters.
+2 -1
arch/powerpc/platforms/powermac/feature.c
@@ -2614,7 +2614,8 @@
 	struct device_node* node;
 	int i;
 	volatile u32 __iomem *base;
-	const u32 *addrp, *revp;
+	const __be32 *addrp;
+	const u32 *revp;
 	phys_addr_t addr;
 	u64 size;
 
+2 -1
arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -55,7 +55,8 @@
 				     struct property *ala_prop,
 				     const u32 *lmb_assoc, u32 *aa_index)
 {
-	u32 *assoc_arrays, new_prop_size;
+	__be32 *assoc_arrays;
+	u32 new_prop_size;
 	struct property *new_prop;
 	int aa_arrays, aa_array_entries, aa_array_sz;
 	int i, index;