Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: always save and restore all registers on context switch

The switch_to() macro has an optimization to avoid saving and
restoring register contents that aren't needed for kernel threads.

There is however the possibility that a kernel thread execve's a user
space program. In such a case the execve'd process can partially see
the contents of the previous process, which shouldn't be allowed.

To avoid this, simply always save and restore register contents on
context switch.

Cc: <stable@vger.kernel.org> # v2.6.37+
Fixes: fdb6d070effba ("switch_to: dont restore/save access & fpu regs for kernel threads")
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by Heiko Carstens, committed by Martin Schwidefsky
commit fbbd7f1a (parent bd7a9b37)

+13 -14
+13 -14
arch/s390/include/asm/switch_to.h
@@ -30,21 +30,20 @@
 	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
-#define switch_to(prev,next,last) do {					\
-	if (prev->mm) {							\
-		save_fpu_regs();					\
-		save_access_regs(&prev->thread.acrs[0]);		\
-		save_ri_cb(prev->thread.ri_cb);				\
-		save_gs_cb(prev->thread.gs_cb);				\
-	}								\
+#define switch_to(prev, next, last) do {				\
+	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
+	 * a restore of the floating point / vector registers as	\
+	 * soon as the next task returns to user space			\
+	 */								\
+	save_fpu_regs();						\
+	save_access_regs(&prev->thread.acrs[0]);			\
+	save_ri_cb(prev->thread.ri_cb);					\
+	save_gs_cb(prev->thread.gs_cb);					\
 	update_cr_regs(next);						\
-	if (next->mm) {							\
-		set_cpu_flag(CIF_FPU);					\
-		restore_access_regs(&next->thread.acrs[0]);		\
-		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
-		restore_gs_cb(next->thread.gs_cb);			\
-	}								\
-	prev = __switch_to(prev,next);					\
+	restore_access_regs(&next->thread.acrs[0]);			\
+	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
+	restore_gs_cb(next->thread.gs_cb);				\
+	prev = __switch_to(prev, next);					\
 } while (0)
 
 #endif /* __ASM_SWITCH_TO_H */