Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: entry_64.S - trivial: space, comments fixup

Impact: cleanup

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Cyrill Gorcunov; committed by Ingo Molnar.
9f1e87ea 5ae3a139

Diffstat: +48 -46
arch/x86/kernel/entry_64.S
··· 1020 1020 1021 1021 .macro paranoidzeroentry_ist sym do_sym ist 1022 1022 ENTRY(\sym) 1023 - INTR_FRAME 1023 + INTR_FRAME 1024 1024 PARAVIRT_ADJUST_EXCEPTION_FRAME 1025 1025 pushq $-1 /* ORIG_RAX: no syscall to restart */ 1026 1026 CFI_ADJUST_CFA_OFFSET 8 ··· 1088 1088 errorentry alignment_check do_alignment_check 1089 1089 zeroentry simd_coprocessor_error do_simd_coprocessor_error 1090 1090 1091 - /* Reload gs selector with exception handling */ 1092 - /* edi: new selector */ 1091 + /* Reload gs selector with exception handling */ 1092 + /* edi: new selector */ 1093 1093 ENTRY(native_load_gs_index) 1094 1094 CFI_STARTPROC 1095 1095 pushf 1096 1096 CFI_ADJUST_CFA_OFFSET 8 1097 1097 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) 1098 - SWAPGS 1098 + SWAPGS 1099 1099 gs_change: 1100 - movl %edi,%gs 1100 + movl %edi,%gs 1101 1101 2: mfence /* workaround */ 1102 1102 SWAPGS 1103 - popf 1103 + popf 1104 1104 CFI_ADJUST_CFA_OFFSET -8 1105 - ret 1105 + ret 1106 1106 CFI_ENDPROC 1107 1107 END(native_load_gs_index) 1108 1108 1109 - .section __ex_table,"a" 1110 - .align 8 1111 - .quad gs_change,bad_gs 1112 - .previous 1113 - .section .fixup,"ax" 1109 + .section __ex_table,"a" 1110 + .align 8 1111 + .quad gs_change,bad_gs 1112 + .previous 1113 + .section .fixup,"ax" 1114 1114 /* running with kernelgs */ 1115 1115 bad_gs: 1116 1116 SWAPGS /* switch back to user gs */ 1117 1117 xorl %eax,%eax 1118 - movl %eax,%gs 1119 - jmp 2b 1120 - .previous 1118 + movl %eax,%gs 1119 + jmp 2b 1120 + .previous 1121 1121 1122 1122 /* 1123 1123 * Create a kernel thread. ··· 1152 1152 * so internally to the x86_64 port you can rely on kernel_thread() 1153 1153 * not to reschedule the child before returning, this avoids the need 1154 1154 * of hacks for example to fork off the per-CPU idle tasks. 
1155 - * [Hopefully no generic code relies on the reschedule -AK] 1155 + * [Hopefully no generic code relies on the reschedule -AK] 1156 1156 */ 1157 1157 RESTORE_ALL 1158 1158 UNFAKE_STACK_FRAME ··· 1231 1231 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback 1232 1232 1233 1233 /* 1234 - # A note on the "critical region" in our callback handler. 1235 - # We want to avoid stacking callback handlers due to events occurring 1236 - # during handling of the last event. To do this, we keep events disabled 1237 - # until we've done all processing. HOWEVER, we must enable events before 1238 - # popping the stack frame (can't be done atomically) and so it would still 1239 - # be possible to get enough handler activations to overflow the stack. 1240 - # Although unlikely, bugs of that kind are hard to track down, so we'd 1241 - # like to avoid the possibility. 1242 - # So, on entry to the handler we detect whether we interrupted an 1243 - # existing activation in its critical region -- if so, we pop the current 1244 - # activation and restart the handler using the previous one. 1245 - */ 1234 + * A note on the "critical region" in our callback handler. 1235 + * We want to avoid stacking callback handlers due to events occurring 1236 + * during handling of the last event. To do this, we keep events disabled 1237 + * until we've done all processing. HOWEVER, we must enable events before 1238 + * popping the stack frame (can't be done atomically) and so it would still 1239 + * be possible to get enough handler activations to overflow the stack. 1240 + * Although unlikely, bugs of that kind are hard to track down, so we'd 1241 + * like to avoid the possibility. 1242 + * So, on entry to the handler we detect whether we interrupted an 1243 + * existing activation in its critical region -- if so, we pop the current 1244 + * activation and restart the handler using the previous one. 
1245 + */ 1246 1246 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) 1247 1247 CFI_STARTPROC 1248 - /* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 1249 - see the correct pointer to the pt_regs */ 1248 + /* 1249 + * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 1250 + * see the correct pointer to the pt_regs 1251 + */ 1250 1252 movq %rdi, %rsp # we don't return, adjust the stack frame 1251 1253 CFI_ENDPROC 1252 1254 DEFAULT_FRAME ··· 1266 1264 END(do_hypervisor_callback) 1267 1265 1268 1266 /* 1269 - # Hypervisor uses this for application faults while it executes. 1270 - # We get here for two reasons: 1271 - # 1. Fault while reloading DS, ES, FS or GS 1272 - # 2. Fault while executing IRET 1273 - # Category 1 we do not need to fix up as Xen has already reloaded all segment 1274 - # registers that could be reloaded and zeroed the others. 1275 - # Category 2 we fix up by killing the current process. We cannot use the 1276 - # normal Linux return path in this case because if we use the IRET hypercall 1277 - # to pop the stack frame we end up in an infinite loop of failsafe callbacks. 1278 - # We distinguish between categories by comparing each saved segment register 1279 - # with its current contents: any discrepancy means we in category 1. 1280 - */ 1267 + * Hypervisor uses this for application faults while it executes. 1268 + * We get here for two reasons: 1269 + * 1. Fault while reloading DS, ES, FS or GS 1270 + * 2. Fault while executing IRET 1271 + * Category 1 we do not need to fix up as Xen has already reloaded all segment 1272 + * registers that could be reloaded and zeroed the others. 1273 + * Category 2 we fix up by killing the current process. We cannot use the 1274 + * normal Linux return path in this case because if we use the IRET hypercall 1275 + * to pop the stack frame we end up in an infinite loop of failsafe callbacks. 
1276 + * We distinguish between categories by comparing each saved segment register 1277 + * with its current contents: any discrepancy means we in category 1. 1278 + */ 1281 1279 ENTRY(xen_failsafe_callback) 1282 1280 INTR_FRAME 1 (6*8) 1283 1281 /*CFI_REL_OFFSET gs,GS*/ ··· 1341 1339 #endif 1342 1340 1343 1341 /* 1344 - * "Paranoid" exit path from exception stack. 1345 - * Paranoid because this is used by NMIs and cannot take 1342 + * "Paranoid" exit path from exception stack. 1343 + * Paranoid because this is used by NMIs and cannot take 1346 1344 * any kernel state for granted. 1347 1345 * We don't do kernel preemption checks here, because only 1348 1346 * NMI should be common and it does not enable IRQs and ··· 1447 1445 cmpq %rcx,RIP+8(%rsp) 1448 1446 je error_swapgs 1449 1447 cmpq $gs_change,RIP+8(%rsp) 1450 - je error_swapgs 1448 + je error_swapgs 1451 1449 jmp error_sti 1452 1450 END(error_entry) 1453 1451 ··· 1523 1521 CFI_ENDPROC 1524 1522 #else 1525 1523 jmp paranoid_exit 1526 - CFI_ENDPROC 1524 + CFI_ENDPROC 1527 1525 #endif 1528 1526 END(nmi) 1529 1527