Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/32: Remove lazy GS macros

GS is always a user segment now.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20220325153953.162643-4-brgerst@gmail.com

Authored by Brian Gerst and committed by Borislav Petkov.
3a24a608 9554e908

+20 -28
+1 -1
arch/x86/include/asm/mmu_context.h
··· 141 141 #ifdef CONFIG_X86_32 142 142 #define deactivate_mm(tsk, mm) \ 143 143 do { \ 144 - lazy_load_gs(0); \ 144 + loadsegment(gs, 0); \ 145 145 } while (0) 146 146 #else 147 147 #define deactivate_mm(tsk, mm) \
-5
arch/x86/include/asm/segment.h
··· 354 354 * x86-32 user GS accessors. This is ugly and could do with some cleaning up. 355 355 */ 356 356 #ifdef CONFIG_X86_32 357 - # define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; }) 358 - # define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) 359 - # define task_user_gs(tsk) ((tsk)->thread.gs) 360 - # define lazy_save_gs(v) savesegment(gs, (v)) 361 - # define lazy_load_gs(v) loadsegment(gs, (v)) 362 357 # define load_gs_index(v) loadsegment(gs, (v)) 363 358 #endif /* X86_32 */ 364 359
+1 -4
arch/x86/kernel/process.c
··· 160 160 savesegment(ds, p->thread.ds); 161 161 #else 162 162 p->thread.sp0 = (unsigned long) (childregs + 1); 163 + savesegment(gs, p->thread.gs); 163 164 /* 164 165 * Clear all status flags including IF and set fixed bit. 64bit 165 166 * does not have this initialization as the frame does not contain ··· 191 190 childregs->ax = 0; 192 191 if (sp) 193 192 childregs->sp = sp; 194 - 195 - #ifdef CONFIG_X86_32 196 - task_user_gs(p) = get_user_gs(current_pt_regs()); 197 - #endif 198 193 199 194 if (unlikely(p->flags & PF_IO_WORKER)) { 200 195 /*
+4 -7
arch/x86/kernel/process_32.c
··· 63 63 unsigned long d0, d1, d2, d3, d6, d7; 64 64 unsigned short gs; 65 65 66 - if (user_mode(regs)) 67 - gs = get_user_gs(regs); 68 - else 69 - savesegment(gs, gs); 66 + savesegment(gs, gs); 70 67 71 68 show_ip(regs, log_lvl); 72 69 ··· 111 114 void 112 115 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) 113 116 { 114 - set_user_gs(regs, 0); 117 + loadsegment(gs, 0); 115 118 regs->fs = 0; 116 119 regs->ds = __USER_DS; 117 120 regs->es = __USER_DS; ··· 174 177 * used %fs or %gs (it does not today), or if the kernel is 175 178 * running inside of a hypervisor layer. 176 179 */ 177 - lazy_save_gs(prev->gs); 180 + savesegment(gs, prev->gs); 178 181 179 182 /* 180 183 * Load the per-thread Thread-Local Storage descriptor. ··· 205 208 * Restore %gs if needed (which is common) 206 209 */ 207 210 if (prev->gs | next->gs) 208 - lazy_load_gs(next->gs); 211 + loadsegment(gs, next->gs); 209 212 210 213 this_cpu_write(current_task, next_p); 211 214
+3 -3
arch/x86/kernel/ptrace.c
··· 170 170 retval = *pt_regs_access(task_pt_regs(task), offset); 171 171 else { 172 172 if (task == current) 173 - retval = get_user_gs(task_pt_regs(task)); 173 + savesegment(gs, retval); 174 174 else 175 - retval = task_user_gs(task); 175 + retval = task->thread.gs; 176 176 } 177 177 return retval; 178 178 } ··· 210 210 break; 211 211 212 212 case offsetof(struct user_regs_struct, gs): 213 - task_user_gs(task) = value; 213 + task->thread.gs = value; 214 214 } 215 215 216 216 return 0;
+5 -3
arch/x86/kernel/signal.c
··· 93 93 return false; 94 94 95 95 #ifdef CONFIG_X86_32 96 - set_user_gs(regs, sc.gs); 96 + loadsegment(gs, sc.gs); 97 97 regs->fs = sc.fs; 98 98 regs->es = sc.es; 99 99 regs->ds = sc.ds; ··· 146 146 struct pt_regs *regs, unsigned long mask) 147 147 { 148 148 #ifdef CONFIG_X86_32 149 - unsafe_put_user(get_user_gs(regs), 150 - (unsigned int __user *)&sc->gs, Efault); 149 + unsigned int gs; 150 + savesegment(gs, gs); 151 + 152 + unsafe_put_user(gs, (unsigned int __user *)&sc->gs, Efault); 151 153 unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault); 152 154 unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault); 153 155 unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
+2 -2
arch/x86/kernel/vm86_32.c
··· 151 151 152 152 memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); 153 153 154 - lazy_load_gs(vm86->regs32.gs); 154 + loadsegment(gs, vm86->regs32.gs); 155 155 156 156 regs->pt.ax = retval; 157 157 return; ··· 325 325 * Save old state 326 326 */ 327 327 vm86->saved_sp0 = tsk->thread.sp0; 328 - lazy_save_gs(vm86->regs32.gs); 328 + savesegment(gs, vm86->regs32.gs); 329 329 330 330 /* make room for real-mode segments */ 331 331 preempt_disable();
+3 -2
arch/x86/lib/insn-eval.c
··· 342 342 */ 343 343 static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) 344 344 { 345 - #ifdef CONFIG_X86_64 346 345 unsigned short sel; 347 346 347 + #ifdef CONFIG_X86_64 348 348 switch (seg_reg_idx) { 349 349 case INAT_SEG_REG_IGNORE: 350 350 return 0; ··· 402 402 case INAT_SEG_REG_FS: 403 403 return (unsigned short)(regs->fs & 0xffff); 404 404 case INAT_SEG_REG_GS: 405 - return get_user_gs(regs); 405 + savesegment(gs, sel); 406 + return sel; 406 407 case INAT_SEG_REG_IGNORE: 407 408 default: 408 409 return -EINVAL;
+1 -1
arch/x86/math-emu/get_address.c
··· 153 153 switch (segment) { 154 154 case PREFIX_GS_ - 1: 155 155 /* user gs handling can be lazy, use special accessors */ 156 - addr->selector = get_user_gs(FPU_info->regs); 156 + savesegment(gs, addr->selector); 157 157 break; 158 158 default: 159 159 addr->selector = PM_REG_(segment);