Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Add a proper syscall for switching endianness

We currently have a "special" syscall for switching endianness. This is
syscall number 0x1ebe, which is handled explicitly in the 64-bit syscall
exception entry.

That has a few problems. Firstly, the syscall number is outside of the
usual range, which confuses various tools. For example strace doesn't
recognise the syscall at all.

Secondly, it's handled explicitly as a special case in the syscall
exception entry, which is complicated enough without it.

As a first step toward removing the special syscall, we need to add a
regular syscall that implements the same functionality.

The logic is simple: it toggles the MSR_LE bit in the userspace
MSR. This is the same as the special syscall, with the caveat that the
special syscall clobbers fewer registers.

This version clobbers r9-r12, XER, CTR, and CR0-1,5-7.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

+30 -1
+1
arch/powerpc/include/asm/systbl.h
··· 367 367 SYSCALL_SPU(memfd_create) 368 368 SYSCALL_SPU(bpf) 369 369 COMPAT_SYS(execveat) 370 + PPC64ONLY(switch_endian)
+1 -1
arch/powerpc/include/asm/unistd.h
··· 12 12 #include <uapi/asm/unistd.h> 13 13 14 14 15 - #define __NR_syscalls 363 15 + #define __NR_syscalls 364 16 16 17 17 #define __NR__exit __NR_exit 18 18 #define NR_syscalls __NR_syscalls
+1
arch/powerpc/include/uapi/asm/unistd.h
··· 385 385 #define __NR_memfd_create 360 386 386 #define __NR_bpf 361 387 387 #define __NR_execveat 362 388 + #define __NR_switch_endian 363 388 389 389 390 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+5
arch/powerpc/kernel/entry_64.S
··· 356 356 bl sys_swapcontext 357 357 b .Lsyscall_exit 358 358 359 + _GLOBAL(ppc_switch_endian) 360 + bl save_nvgprs 361 + bl sys_switch_endian 362 + b .Lsyscall_exit 363 + 359 364 _GLOBAL(ret_from_fork) 360 365 bl schedule_tail 361 366 REST_NVGPRS(r1)
+17
arch/powerpc/kernel/syscalls.c
··· 121 121 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, 122 122 (u64)len_high << 32 | len_low, advice); 123 123 } 124 + 125 + long sys_switch_endian(void) 126 + { 127 + struct thread_info *ti; 128 + 129 + current->thread.regs->msr ^= MSR_LE; 130 + 131 + /* 132 + * Set TIF_RESTOREALL so that r3 isn't clobbered on return to 133 + * userspace. That also has the effect of restoring the non-volatile 134 + * GPRs, so we saved them on the way in here. 135 + */ 136 + ti = current_thread_info(); 137 + ti->flags |= _TIF_RESTOREALL; 138 + 139 + return 0; 140 + }
+2
arch/powerpc/kernel/systbl.S
··· 22 22 #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) 23 23 #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) 24 24 #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) 25 + #define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall) 25 26 #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) 26 27 #else 27 28 #define SYSCALL(func) .long sys_##func ··· 30 29 #define PPC_SYS(func) .long ppc_##func 31 30 #define OLDSYS(func) .long sys_##func 32 31 #define SYS32ONLY(func) .long sys_##func 32 + #define PPC64ONLY(func) .long sys_ni_syscall 33 33 #define SYSX(f, f3264, f32) .long f32 34 34 #endif 35 35 #define SYSCALL_SPU(func) SYSCALL(func)
+2
arch/powerpc/kernel/systbl_chk.c
··· 21 21 #ifdef CONFIG_PPC64 22 22 #define OLDSYS(func) -1 23 23 #define SYS32ONLY(func) -1 24 + #define PPC64ONLY(func) __NR_##func 24 25 #else 25 26 #define OLDSYS(func) __NR_old##func 26 27 #define SYS32ONLY(func) __NR_##func 28 + #define PPC64ONLY(func) -1 27 29 #endif 28 30 #define SYSX(f, f3264, f32) -1 29 31
+1
arch/powerpc/platforms/cell/spu_callbacks.c
··· 39 39 #define PPC_SYS(func) sys_ni_syscall, 40 40 #define OLDSYS(func) sys_ni_syscall, 41 41 #define SYS32ONLY(func) sys_ni_syscall, 42 + #define PPC64ONLY(func) sys_ni_syscall, 42 43 #define SYSX(f, f3264, f32) sys_ni_syscall, 43 44 44 45 #define SYSCALL_SPU(func) sys_##func,