kernel/sched/core.c (+26, -14)
···
 }
 }
 
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
···
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 
 	/*
 	 * Clear the balance_push callback and prepare to schedule
···
 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
 	 *    domains.
 	 */
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_online(rq, cpu);
 
 	return 0;
 }
···
 int sched_cpu_deactivate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 	int ret;
 
 	/*
···
 	 */
 	synchronize_rcu();
 
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_offline(rq, cpu);
 
 	/*
 	 * When going down, decrement the number of cores with SMT present.
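
For context, a minimal sketch (not a verbatim copy) of how sched_cpu_activate() reads once the diff above applies; the "elided" comments stand in for the unrelated logic hidden in the collapsed ··· regions, and sched_cpu_deactivate() is reshaped the same way, its open-coded offline block becoming a single sched_set_rq_offline(rq, cpu) call right after synchronize_rcu():

int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/* ... balance_push teardown and cpuset handling elided ... */

	/*
	 * The open-coded rq_lock_irqsave()/set_rq_online()/
	 * rq_unlock_irqrestore() sequence is now one helper call.
	 */
	sched_set_rq_online(rq, cpu);

	return 0;
}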