arch/x86/kernel/unwind_orc.c (+9, -7)
@@ -617,11 +617,11 @@
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
 		    struct pt_regs *regs, unsigned long *first_frame)
 {
-	if (!orc_init)
-		goto done;
-
 	memset(state, 0, sizeof(*state));
 	state->task = task;
+
+	if (!orc_init)
+		goto err;
 
 	/*
 	 * Refuse to unwind the stack of a task while it's executing on another
@@ -629,11 +629,11 @@
 	 * checks to prevent it from going off the rails.
 	 */
 	if (task_on_another_cpu(task))
-		goto done;
+		goto err;
 
 	if (regs) {
 		if (user_mode(regs))
-			goto done;
+			goto the_end;
 
 		state->ip = regs->ip;
 		state->sp = regs->sp;
@@ -666,6 +666,7 @@
 	 * generate some kind of backtrace if this happens.
 	 */
 	void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+	state->error = true;
 	if (get_stack_info(next_page, state->task, &state->stack_info,
 			   &state->stack_mask))
 		return;
@@ -692,8 +691,9 @@
 
 	return;
 
-done:
+err:
+	state->error = true;
+the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
-	return;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
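
For context (not part of the patch): a minimal sketch of how a caller might consume the error flag that the new err: path sets, assuming the existing x86 unwind API from <asm/unwind.h> (unwind_start(), unwind_next_frame(), unwind_done(), unwind_get_return_address(), unwind_error()). The helper name and printk format below are illustrative only.

#include <asm/unwind.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Illustrative only -- not part of this patch. */
static void example_show_trace(struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		printk("  %pS\n", (void *)addr);
	}

	/*
	 * With this patch, a failed __unwind_start() also sets state->error,
	 * so a caller can distinguish an aborted unwind from a clean one.
	 */
	if (unwind_error(&state))
		printk("  (unreliable unwind)\n");
}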