x86/asm: Tidy up TSS limit code

In an earlier version of the patch ("x86/kvm/vmx: Defer TR reload
after VM exit") that introduced TSS limit validity tracking, I
confused which helper was which. On reflection, the names I chose
sucked. Rename the helpers to make it more obvious what's going on
and add some comments.

While I'm at it, clear __tss_limit_invalid when force-reloading as
well as when conditionally reloading, since any TR reload fixes the
limit.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>

Authored by Andy Lutomirski, committed by Radim Krčmář (commit b7ceaec1, parent e3736c3e)

3 files changed, 21 insertions(+), 11 deletions(-)

arch/x86/include/asm/desc.h (+11 -7)
···
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
 }
 
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
 static inline void force_reload_TR(void)
 {
 	struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
···
 	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	load_TR_desc();
+	this_cpu_write(__tss_limit_invalid, false);
 }
 
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
 {
 	DEBUG_LOCKS_WARN_ON(preemptible());
 
-	if (unlikely(this_cpu_read(need_tr_refresh))) {
+	if (unlikely(this_cpu_read(__tss_limit_invalid)))
 		force_reload_TR();
-		this_cpu_write(need_tr_refresh, false);
-	}
 }
 
 /*
···
 	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
 		force_reload_TR();
 	else
-		this_cpu_write(need_tr_refresh, true);
+		this_cpu_write(__tss_limit_invalid, true);
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
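
To make the commit-message point concrete ("any TR reload fixes the limit"), here is a minimal user-space model of the new per-CPU flag. It is only a sketch: the plain bool, the _model suffixes, and reload_tr() are illustrative stand-ins for the real per-CPU variable and helpers, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU __tss_limit_invalid flag. */
static bool tss_limit_invalid;

/* Stand-in for force_reload_TR(): any TR reload restores the full limit. */
static void reload_tr(void)
{
	tss_limit_invalid = false;
}

/* Model of refresh_tss_limit(): reload only if the limit might be wrong. */
static void refresh_tss_limit_model(void)
{
	if (tss_limit_invalid)
		reload_tr();
}

/*
 * Model of invalidate_tss_limit(): reload right away if the current task
 * needs the limit (TIF_IO_BITMAP set), otherwise just mark it stale.
 */
static void invalidate_tss_limit_model(bool task_uses_io_bitmap)
{
	if (task_uses_io_bitmap)
		reload_tr();
	else
		tss_limit_invalid = true;
}

int main(void)
{
	invalidate_tss_limit_model(false);	/* e.g. after a VM exit */
	printf("stale after invalidate: %d\n", tss_limit_invalid);
	refresh_tss_limit_model();		/* e.g. before using an I/O bitmap */
	printf("stale after refresh:    %d\n", tss_limit_invalid);
	return 0;
}

Built with a plain cc invocation, it prints 1 then 0, mirroring how invalidate_tss_limit() defers the reload and refresh_tss_limit() performs it.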

arch/x86/kernel/ioport.c (+7 -1)
···
 		t->io_bitmap_ptr = bitmap;
 		set_thread_flag(TIF_IO_BITMAP);
 
+		/*
+		 * Now that we have an IO bitmap, we need our TSS limit to be
+		 * correct. It's fine if we are preempted after doing this:
+		 * with TIF_IO_BITMAP set, context switches will keep our TSS
+		 * limit correct.
+		 */
 		preempt_disable();
-		refresh_TR();
+		refresh_tss_limit();
 		preempt_enable();
 	}
 
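
For context on the ioport.c hunk: the code above runs on the ioperm(2) path. A minimal user-space trigger is sketched below; the port range starting at 0x378 is an arbitrary example, and the program needs CAP_SYS_RAWIO (typically root) on an x86 Linux machine.

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/*
	 * Request access to 8 I/O ports starting at 0x378. On the first
	 * call this allocates the task's I/O bitmap, sets TIF_IO_BITMAP,
	 * and (after this patch) calls refresh_tss_limit().
	 */
	if (ioperm(0x378, 8, 1) != 0) {
		perror("ioperm");
		return 1;
	}
	printf("I/O port access granted\n");
	return 0;
}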

arch/x86/kernel/process.c (+3 -3)
···
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
 
 /*
  * this gets called so that we can store lazy state into memory and copy the
···
 		 * Make sure that the TSS limit is correct for the CPU
 		 * to notice the IO bitmap.
 		 */
-		refresh_TR();
+		refresh_tss_limit();
 	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
 		/*
 		 * Clear any possible leftover bits: