 kernel/softirq.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -508,7 +508,7 @@
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage __visible void __softirq_entry __do_softirq(void)
+static void handle_softirqs(bool ksirqd)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
@@ -563,8 +563,7 @@
 		pending >>= softirq_bit;
 	}
 
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
-	    __this_cpu_read(ksoftirqd) == current)
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
 		rcu_softirq_qs();
 
 	local_irq_disable();
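
The check removed by the hunk above re-read the per-CPU ksoftirqd pointer and compared it with current on every invocation; the replacement threads the same fact in as the ksirqd argument, which each caller already knows statically. A minimal standalone sketch of that pattern, using hypothetical names (handle_work, do_work, run_worker) rather than kernel code:

/*
 * Sketch only: a per-call runtime lookup is replaced by a flag that
 * each caller supplies as a compile-time constant.
 */
#include <stdbool.h>
#include <stdio.h>

static void handle_work(bool from_worker_thread)
{
	/* ... process pending work ... */

	/* The caller tells us whether we are the worker thread, so no
	 * per-CPU lookup is needed here. Stands in for rcu_softirq_qs(). */
	if (from_worker_thread)
		printf("worker thread: report quiescent state\n");
}

/* Interrupt-exit style caller: by construction never the worker thread. */
static void do_work(void)
{
	handle_work(false);
}

/* Worker-thread caller: by construction always the worker thread. */
static void run_worker(void)
{
	handle_work(true);
}

int main(void)
{
	do_work();    /* prints nothing */
	run_worker(); /* prints the quiescent-state line */
	return 0;
}
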
@@ -581,6 +582,11 @@
 	lockdep_softirq_end(in_hardirq);
 	softirq_handle_end();
 	current_restore_flags(old_flags, PF_MEMALLOC);
+}
+
+asmlinkage __visible void __softirq_entry __do_softirq(void)
+{
+	handle_softirqs(false);
 }
 
 /**
@@ -925,7 +921,7 @@
 	 * We can safely run softirq on inline stack, as we are not deep
 	 * in the task stack here.
 	 */
-	__do_softirq();
+	handle_softirqs(true);
 	ksoftirqd_run_end();
 	cond_resched();
 	return;
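
Taken together, the hunks rename the body of __do_softirq() to handle_softirqs(bool ksirqd), keep __do_softirq() as a thin wrapper that passes false for the interrupt-exit path, and have the ksoftirqd thread path call handle_softirqs(true) directly, so the rcu_softirq_qs() call now keys off an argument supplied by the caller instead of a __this_cpu_read(ksoftirqd) == current comparison.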