Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

freezer,sched: Rewrite core freezer logic

Rewrite the core freezer to behave better wrt thawing and be simpler
in general.

By replacing PF_FROZEN with TASK_FROZEN, a special block state, it is
ensured frozen tasks stay frozen until thawed and don't randomly wake
up early, as is currently possible.

As such, it does away with PF_FROZEN and PF_FREEZER_SKIP, freeing up
two PF_flags (yay!).

Specifically; the current scheme works a little like:

freezer_do_not_count();
schedule();
freezer_count();

And either the task is blocked, or it lands in try_to_freeze()
through freezer_count(). Now, when it is blocked, the freezer
considers it frozen and continues.

However, on thawing, once pm_freezing is cleared, freezer_count()
stops working, and any random/spurious wakeup will let a task run
before its time.

That is, thawing tries to thaw things in explicit order; kernel
threads and workqueues before bringing SMP back, before userspace,
etc. However, due to the above-mentioned races it is entirely possible
for userspace tasks to thaw (by accident) before SMP is back.

This can be a fatal problem in asymmetric ISA architectures (eg ARMv9)
where the userspace task requires a special CPU to run.

As said; replace this with a special task state TASK_FROZEN and add
the following state transitions:

TASK_FREEZABLE -> TASK_FROZEN
__TASK_STOPPED -> TASK_FROZEN
__TASK_TRACED -> TASK_FROZEN

The new TASK_FREEZABLE can be set on any state part of TASK_NORMAL
(IOW. TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE) -- any such state
is already required to deal with spurious wakeups and the freezer
causes one such when thawing the task (since the original state is
lost).

The special __TASK_{STOPPED,TRACED} states *can* be restored since
their canonical state is in ->jobctl.

With this, frozen tasks need an explicit TASK_FROZEN wakeup and are
free of undue (early / spurious) wakeups.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20220822114649.055452969@infradead.org

+210 -395
+1 -3
drivers/android/binder.c
··· 4247 4247 struct binder_proc *proc = thread->proc; 4248 4248 int ret = 0; 4249 4249 4250 - freezer_do_not_count(); 4251 4250 binder_inner_proc_lock(proc); 4252 4251 for (;;) { 4253 - prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); 4252 + prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE); 4254 4253 if (binder_has_work_ilocked(thread, do_proc_work)) 4255 4254 break; 4256 4255 if (do_proc_work) ··· 4266 4267 } 4267 4268 finish_wait(&thread->wait, &wait); 4268 4269 binder_inner_proc_unlock(proc); 4269 - freezer_count(); 4270 4270 4271 4271 return ret; 4272 4272 }
+2 -2
drivers/media/pci/pt3/pt3.c
··· 445 445 pt3_proc_dma(adap); 446 446 447 447 delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC); 448 - set_current_state(TASK_UNINTERRUPTIBLE); 449 - freezable_schedule_hrtimeout_range(&delay, 448 + set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE); 449 + schedule_hrtimeout_range(&delay, 450 450 PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC, 451 451 HRTIMER_MODE_REL); 452 452 }
+2 -2
fs/cifs/inode.c
··· 2326 2326 static int 2327 2327 cifs_wait_bit_killable(struct wait_bit_key *key, int mode) 2328 2328 { 2329 - freezable_schedule_unsafe(); 2329 + schedule(); 2330 2330 if (signal_pending_state(mode, current)) 2331 2331 return -ERESTARTSYS; 2332 2332 return 0; ··· 2344 2344 return 0; 2345 2345 2346 2346 rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable, 2347 - TASK_KILLABLE); 2347 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 2348 2348 if (rc) 2349 2349 return rc; 2350 2350
+3 -2
fs/cifs/transport.c
··· 757 757 { 758 758 int error; 759 759 760 - error = wait_event_freezekillable_unsafe(server->response_q, 761 - midQ->mid_state != MID_REQUEST_SUBMITTED); 760 + error = wait_event_state(server->response_q, 761 + midQ->mid_state != MID_REQUEST_SUBMITTED, 762 + (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE)); 762 763 if (error < 0) 763 764 return -ERESTARTSYS; 764 765
+2 -3
fs/coredump.c
··· 402 402 if (core_waiters > 0) { 403 403 struct core_thread *ptr; 404 404 405 - freezer_do_not_count(); 406 - wait_for_completion(&core_state->startup); 407 - freezer_count(); 405 + wait_for_completion_state(&core_state->startup, 406 + TASK_UNINTERRUPTIBLE|TASK_FREEZABLE); 408 407 /* 409 408 * Wait for all the threads to become inactive, so that 410 409 * all the thread context (extended register state, like
+2 -1
fs/nfs/file.c
··· 570 570 } 571 571 572 572 wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING, 573 - nfs_wait_bit_killable, TASK_KILLABLE); 573 + nfs_wait_bit_killable, 574 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 574 575 575 576 lock_page(page); 576 577 mapping = page_file_mapping(page);
+4 -8
fs/nfs/inode.c
··· 72 72 return nfs_fileid_to_ino_t(fattr->fileid); 73 73 } 74 74 75 - static int nfs_wait_killable(int mode) 75 + int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 76 76 { 77 - freezable_schedule_unsafe(); 77 + schedule(); 78 78 if (signal_pending_state(mode, current)) 79 79 return -ERESTARTSYS; 80 80 return 0; 81 - } 82 - 83 - int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 84 - { 85 - return nfs_wait_killable(mode); 86 81 } 87 82 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); 88 83 ··· 1326 1331 */ 1327 1332 for (;;) { 1328 1333 ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING, 1329 - nfs_wait_bit_killable, TASK_KILLABLE); 1334 + nfs_wait_bit_killable, 1335 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 1330 1336 if (ret) 1331 1337 goto out; 1332 1338 spin_lock(&inode->i_lock);
+2 -1
fs/nfs/nfs3proc.c
··· 36 36 res = rpc_call_sync(clnt, msg, flags); 37 37 if (res != -EJUKEBOX) 38 38 break; 39 - freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME); 39 + __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 40 + schedule_timeout(NFS_JUKEBOX_RETRY_TIME); 40 41 res = -ERESTARTSYS; 41 42 } while (!fatal_signal_pending(current)); 42 43 return res;
+7 -7
fs/nfs/nfs4proc.c
··· 416 416 { 417 417 might_sleep(); 418 418 419 - freezable_schedule_timeout_killable_unsafe( 420 - nfs4_update_delay(timeout)); 419 + __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 420 + schedule_timeout(nfs4_update_delay(timeout)); 421 421 if (!__fatal_signal_pending(current)) 422 422 return 0; 423 423 return -EINTR; ··· 427 427 { 428 428 might_sleep(); 429 429 430 - freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); 430 + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 431 + schedule_timeout(nfs4_update_delay(timeout)); 431 432 if (!signal_pending(current)) 432 433 return 0; 433 434 return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; ··· 7407 7406 status = nfs4_proc_setlk(state, cmd, request); 7408 7407 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7409 7408 break; 7410 - freezable_schedule_timeout_interruptible(timeout); 7409 + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7410 + schedule_timeout(timeout); 7411 7411 timeout *= 2; 7412 7412 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7413 7413 status = -ERESTARTSYS; ··· 7476 7474 break; 7477 7475 7478 7476 status = -ERESTARTSYS; 7479 - freezer_do_not_count(); 7480 - wait_woken(&waiter.wait, TASK_INTERRUPTIBLE, 7477 + wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7481 7478 NFS4_LOCK_MAXTIMEOUT); 7482 - freezer_count(); 7483 7479 } while (!signalled()); 7484 7480 7485 7481 remove_wait_queue(q, &waiter.wait);
+2 -1
fs/nfs/nfs4state.c
··· 1314 1314 1315 1315 refcount_inc(&clp->cl_count); 1316 1316 res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, 1317 - nfs_wait_bit_killable, TASK_KILLABLE); 1317 + nfs_wait_bit_killable, 1318 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 1318 1319 if (res) 1319 1320 goto out; 1320 1321 if (clp->cl_cons_state < 0)
+2 -2
fs/nfs/pnfs.c
··· 1908 1908 pnfs_layoutcommit_inode(lo->plh_inode, false); 1909 1909 return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, 1910 1910 nfs_wait_bit_killable, 1911 - TASK_KILLABLE); 1911 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 1912 1912 } 1913 1913 1914 1914 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) ··· 3193 3193 status = wait_on_bit_lock_action(&nfsi->flags, 3194 3194 NFS_INO_LAYOUTCOMMITTING, 3195 3195 nfs_wait_bit_killable, 3196 - TASK_KILLABLE); 3196 + TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 3197 3197 if (status) 3198 3198 goto out; 3199 3199 }
+4 -4
fs/xfs/xfs_trans_ail.c
··· 602 602 603 603 while (1) { 604 604 if (tout && tout <= 20) 605 - set_current_state(TASK_KILLABLE); 605 + set_current_state(TASK_KILLABLE|TASK_FREEZABLE); 606 606 else 607 - set_current_state(TASK_INTERRUPTIBLE); 607 + set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 608 608 609 609 /* 610 610 * Check kthread_should_stop() after we set the task state to ··· 653 653 ailp->ail_target == ailp->ail_target_prev && 654 654 list_empty(&ailp->ail_buf_list)) { 655 655 spin_unlock(&ailp->ail_lock); 656 - freezable_schedule(); 656 + schedule(); 657 657 tout = 0; 658 658 continue; 659 659 } 660 660 spin_unlock(&ailp->ail_lock); 661 661 662 662 if (tout) 663 - freezable_schedule_timeout(msecs_to_jiffies(tout)); 663 + schedule_timeout(msecs_to_jiffies(tout)); 664 664 665 665 __set_current_state(TASK_RUNNING); 666 666
+10 -235
include/linux/freezer.h
··· 8 8 #include <linux/sched.h> 9 9 #include <linux/wait.h> 10 10 #include <linux/atomic.h> 11 + #include <linux/jump_label.h> 11 12 12 13 #ifdef CONFIG_FREEZER 13 - extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ 14 + DECLARE_STATIC_KEY_FALSE(freezer_active); 15 + 14 16 extern bool pm_freezing; /* PM freezing in effect */ 15 17 extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ 16 18 ··· 24 22 /* 25 23 * Check if a process has been frozen 26 24 */ 27 - static inline bool frozen(struct task_struct *p) 28 - { 29 - return p->flags & PF_FROZEN; 30 - } 25 + extern bool frozen(struct task_struct *p); 31 26 32 27 extern bool freezing_slow_path(struct task_struct *p); 33 28 ··· 33 34 */ 34 35 static inline bool freezing(struct task_struct *p) 35 36 { 36 - if (likely(!atomic_read(&system_freezing_cnt))) 37 - return false; 38 - return freezing_slow_path(p); 37 + if (static_branch_unlikely(&freezer_active)) 38 + return freezing_slow_path(p); 39 + 40 + return false; 39 41 } 40 42 41 43 /* Takes and releases task alloc lock using task_lock() */ ··· 48 48 extern void thaw_processes(void); 49 49 extern void thaw_kernel_threads(void); 50 50 51 - /* 52 - * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION 53 - * If try_to_freeze causes a lockdep warning it means the caller may deadlock 54 - */ 55 - static inline bool try_to_freeze_unsafe(void) 51 + static inline bool try_to_freeze(void) 56 52 { 57 53 might_sleep(); 58 54 if (likely(!freezing(current))) 59 55 return false; 60 - return __refrigerator(false); 61 - } 62 - 63 - static inline bool try_to_freeze(void) 64 - { 65 56 if (!(current->flags & PF_NOFREEZE)) 66 57 debug_check_no_locks_held(); 67 - return try_to_freeze_unsafe(); 58 + return __refrigerator(false); 68 59 } 69 60 70 61 extern bool freeze_task(struct task_struct *p); ··· 70 79 } 71 80 #endif /* !CONFIG_CGROUP_FREEZER */ 72 81 73 - /* 74 - * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it 75 - * calls 
wait_for_completion(&vfork) and reset right after it returns from this 76 - * function. Next, the parent should call try_to_freeze() to freeze itself 77 - * appropriately in case the child has exited before the freezing of tasks is 78 - * complete. However, we don't want kernel threads to be frozen in unexpected 79 - * places, so we allow them to block freeze_processes() instead or to set 80 - * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the 81 - * parent won't really block freeze_processes(), since ____call_usermodehelper() 82 - * (the child) does a little before exec/exit and it can't be frozen before 83 - * waking up the parent. 84 - */ 85 - 86 - 87 - /** 88 - * freezer_do_not_count - tell freezer to ignore %current 89 - * 90 - * Tell freezers to ignore the current task when determining whether the 91 - * target frozen state is reached. IOW, the current task will be 92 - * considered frozen enough by freezers. 93 - * 94 - * The caller shouldn't do anything which isn't allowed for a frozen task 95 - * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair 96 - * wrap a scheduling operation and nothing much else. 97 - */ 98 - static inline void freezer_do_not_count(void) 99 - { 100 - current->flags |= PF_FREEZER_SKIP; 101 - } 102 - 103 - /** 104 - * freezer_count - tell freezer to stop ignoring %current 105 - * 106 - * Undo freezer_do_not_count(). It tells freezers that %current should be 107 - * considered again and tries to freeze if freezing condition is already in 108 - * effect. 109 - */ 110 - static inline void freezer_count(void) 111 - { 112 - current->flags &= ~PF_FREEZER_SKIP; 113 - /* 114 - * If freezing is in progress, the following paired with smp_mb() 115 - * in freezer_should_skip() ensures that either we see %true 116 - * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP. 
117 - */ 118 - smp_mb(); 119 - try_to_freeze(); 120 - } 121 - 122 - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 123 - static inline void freezer_count_unsafe(void) 124 - { 125 - current->flags &= ~PF_FREEZER_SKIP; 126 - smp_mb(); 127 - try_to_freeze_unsafe(); 128 - } 129 - 130 - /** 131 - * freezer_should_skip - whether to skip a task when determining frozen 132 - * state is reached 133 - * @p: task in quesion 134 - * 135 - * This function is used by freezers after establishing %true freezing() to 136 - * test whether a task should be skipped when determining the target frozen 137 - * state is reached. IOW, if this function returns %true, @p is considered 138 - * frozen enough. 139 - */ 140 - static inline bool freezer_should_skip(struct task_struct *p) 141 - { 142 - /* 143 - * The following smp_mb() paired with the one in freezer_count() 144 - * ensures that either freezer_count() sees %true freezing() or we 145 - * see cleared %PF_FREEZER_SKIP and return %false. This makes it 146 - * impossible for a task to slip frozen state testing after 147 - * clearing %PF_FREEZER_SKIP. 148 - */ 149 - smp_mb(); 150 - return p->flags & PF_FREEZER_SKIP; 151 - } 152 - 153 - /* 154 - * These functions are intended to be used whenever you want allow a sleeping 155 - * task to be frozen. Note that neither return any clear indication of 156 - * whether a freeze event happened while in this function. 157 - */ 158 - 159 - /* Like schedule(), but should not block the freezer. */ 160 - static inline void freezable_schedule(void) 161 - { 162 - freezer_do_not_count(); 163 - schedule(); 164 - freezer_count(); 165 - } 166 - 167 - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 168 - static inline void freezable_schedule_unsafe(void) 169 - { 170 - freezer_do_not_count(); 171 - schedule(); 172 - freezer_count_unsafe(); 173 - } 174 - 175 - /* 176 - * Like schedule_timeout(), but should not block the freezer. Do not 177 - * call this with locks held. 
178 - */ 179 - static inline long freezable_schedule_timeout(long timeout) 180 - { 181 - long __retval; 182 - freezer_do_not_count(); 183 - __retval = schedule_timeout(timeout); 184 - freezer_count(); 185 - return __retval; 186 - } 187 - 188 - /* 189 - * Like schedule_timeout_interruptible(), but should not block the freezer. Do not 190 - * call this with locks held. 191 - */ 192 - static inline long freezable_schedule_timeout_interruptible(long timeout) 193 - { 194 - long __retval; 195 - freezer_do_not_count(); 196 - __retval = schedule_timeout_interruptible(timeout); 197 - freezer_count(); 198 - return __retval; 199 - } 200 - 201 - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 202 - static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) 203 - { 204 - long __retval; 205 - 206 - freezer_do_not_count(); 207 - __retval = schedule_timeout_interruptible(timeout); 208 - freezer_count_unsafe(); 209 - return __retval; 210 - } 211 - 212 - /* Like schedule_timeout_killable(), but should not block the freezer. */ 213 - static inline long freezable_schedule_timeout_killable(long timeout) 214 - { 215 - long __retval; 216 - freezer_do_not_count(); 217 - __retval = schedule_timeout_killable(timeout); 218 - freezer_count(); 219 - return __retval; 220 - } 221 - 222 - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 223 - static inline long freezable_schedule_timeout_killable_unsafe(long timeout) 224 - { 225 - long __retval; 226 - freezer_do_not_count(); 227 - __retval = schedule_timeout_killable(timeout); 228 - freezer_count_unsafe(); 229 - return __retval; 230 - } 231 - 232 - /* 233 - * Like schedule_hrtimeout_range(), but should not block the freezer. Do not 234 - * call this with locks held. 
235 - */ 236 - static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, 237 - u64 delta, const enum hrtimer_mode mode) 238 - { 239 - int __retval; 240 - freezer_do_not_count(); 241 - __retval = schedule_hrtimeout_range(expires, delta, mode); 242 - freezer_count(); 243 - return __retval; 244 - } 245 - 246 - /* 247 - * Freezer-friendly wrappers around wait_event_interruptible(), 248 - * wait_event_killable() and wait_event_interruptible_timeout(), originally 249 - * defined in <linux/wait.h> 250 - */ 251 - 252 - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 253 - #define wait_event_freezekillable_unsafe(wq, condition) \ 254 - ({ \ 255 - int __retval; \ 256 - freezer_do_not_count(); \ 257 - __retval = wait_event_killable(wq, (condition)); \ 258 - freezer_count_unsafe(); \ 259 - __retval; \ 260 - }) 261 - 262 82 #else /* !CONFIG_FREEZER */ 263 83 static inline bool frozen(struct task_struct *p) { return false; } 264 84 static inline bool freezing(struct task_struct *p) { return false; } ··· 83 281 84 282 static inline bool try_to_freeze(void) { return false; } 85 283 86 - static inline void freezer_do_not_count(void) {} 87 - static inline void freezer_count(void) {} 88 - static inline int freezer_should_skip(struct task_struct *p) { return 0; } 89 284 static inline void set_freezable(void) {} 90 - 91 - #define freezable_schedule() schedule() 92 - 93 - #define freezable_schedule_unsafe() schedule() 94 - 95 - #define freezable_schedule_timeout(timeout) schedule_timeout(timeout) 96 - 97 - #define freezable_schedule_timeout_interruptible(timeout) \ 98 - schedule_timeout_interruptible(timeout) 99 - 100 - #define freezable_schedule_timeout_interruptible_unsafe(timeout) \ 101 - schedule_timeout_interruptible(timeout) 102 - 103 - #define freezable_schedule_timeout_killable(timeout) \ 104 - schedule_timeout_killable(timeout) 105 - 106 - #define freezable_schedule_timeout_killable_unsafe(timeout) \ 107 - schedule_timeout_killable(timeout) 108 - 109 - #define 
freezable_schedule_hrtimeout_range(expires, delta, mode) \ 110 - schedule_hrtimeout_range(expires, delta, mode) 111 - 112 - #define wait_event_freezekillable_unsafe(wq, condition) \ 113 - wait_event_killable(wq, condition) 114 285 115 286 #endif /* !CONFIG_FREEZER */ 116 287
+9 -4
include/linux/sched.h
··· 97 97 #define TASK_WAKING 0x00000200 98 98 #define TASK_NOLOAD 0x00000400 99 99 #define TASK_NEW 0x00000800 100 - /* RT specific auxilliary flag to mark RT lock waiters */ 101 100 #define TASK_RTLOCK_WAIT 0x00001000 102 - #define TASK_STATE_MAX 0x00002000 101 + #define TASK_FREEZABLE 0x00002000 102 + #define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP)) 103 + #define TASK_FROZEN 0x00008000 104 + #define TASK_STATE_MAX 0x00010000 103 105 104 106 #define TASK_ANY (TASK_STATE_MAX-1) 107 + 108 + /* 109 + * DO NOT ADD ANY NEW USERS ! 110 + */ 111 + #define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE) 105 112 106 113 /* Convenience macros for the sake of set_current_state: */ 107 114 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) ··· 1723 1716 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 1724 1717 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1725 1718 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1726 - #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ 1727 1719 #define PF_KSWAPD 0x00020000 /* I am kswapd */ 1728 1720 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ 1729 1721 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ ··· 1733 1727 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 1734 1728 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1735 1729 #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ 1736 - #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 1737 1730 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 1738 1731 1739 1732 /*
+1 -6
include/linux/sunrpc/sched.h
··· 252 252 void rpc_free(struct rpc_task *); 253 253 int rpciod_up(void); 254 254 void rpciod_down(void); 255 - int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); 255 + int rpc_wait_for_completion_task(struct rpc_task *task); 256 256 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 257 257 struct net; 258 258 void rpc_show_tasks(struct net *); ··· 263 263 extern struct workqueue_struct *xprtiod_workqueue; 264 264 void rpc_prepare_task(struct rpc_task *task); 265 265 gfp_t rpc_task_gfp_mask(void); 266 - 267 - static inline int rpc_wait_for_completion_task(struct rpc_task *task) 268 - { 269 - return __rpc_wait_for_completion_task(task, NULL); 270 - } 271 266 272 267 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) 273 268 static inline const char * rpc_qname(const struct rpc_wait_queue *q)
+6 -6
include/linux/wait.h
··· 361 361 } while (0) 362 362 363 363 #define __wait_event_freezable(wq_head, condition) \ 364 - ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ 365 - freezable_schedule()) 364 + ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \ 365 + 0, 0, schedule()) 366 366 367 367 /** 368 368 * wait_event_freezable - sleep (or freeze) until a condition gets true ··· 420 420 421 421 #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ 422 422 ___wait_event(wq_head, ___wait_cond_timeout(condition), \ 423 - TASK_INTERRUPTIBLE, 0, timeout, \ 424 - __ret = freezable_schedule_timeout(__ret)) 423 + (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \ 424 + __ret = schedule_timeout(__ret)) 425 425 426 426 /* 427 427 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid ··· 642 642 643 643 644 644 #define __wait_event_freezable_exclusive(wq, condition) \ 645 - ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ 646 - freezable_schedule()) 645 + ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\ 646 + schedule()) 647 647 648 648 #define wait_event_freezable_exclusive(wq, condition) \ 649 649 ({ \
+8 -15
kernel/cgroup/legacy_freezer.c
··· 113 113 114 114 if (parent && (parent->state & CGROUP_FREEZING)) { 115 115 freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN; 116 - atomic_inc(&system_freezing_cnt); 116 + static_branch_inc(&freezer_active); 117 117 } 118 118 119 119 mutex_unlock(&freezer_mutex); ··· 134 134 mutex_lock(&freezer_mutex); 135 135 136 136 if (freezer->state & CGROUP_FREEZING) 137 - atomic_dec(&system_freezing_cnt); 137 + static_branch_dec(&freezer_active); 138 138 139 139 freezer->state = 0; 140 140 ··· 179 179 __thaw_task(task); 180 180 } else { 181 181 freeze_task(task); 182 + 182 183 /* clear FROZEN and propagate upwards */ 183 184 while (freezer && (freezer->state & CGROUP_FROZEN)) { 184 185 freezer->state &= ~CGROUP_FROZEN; ··· 272 271 css_task_iter_start(css, 0, &it); 273 272 274 273 while ((task = css_task_iter_next(&it))) { 275 - if (freezing(task)) { 276 - /* 277 - * freezer_should_skip() indicates that the task 278 - * should be skipped when determining freezing 279 - * completion. Consider it frozen in addition to 280 - * the usual frozen condition. 281 - */ 282 - if (!frozen(task) && !freezer_should_skip(task)) 283 - goto out_iter_end; 284 - } 274 + if (freezing(task) && !frozen(task)) 275 + goto out_iter_end; 285 276 } 286 277 287 278 freezer->state |= CGROUP_FROZEN; ··· 350 357 351 358 if (freeze) { 352 359 if (!(freezer->state & CGROUP_FREEZING)) 353 - atomic_inc(&system_freezing_cnt); 360 + static_branch_inc(&freezer_active); 354 361 freezer->state |= state; 355 362 freeze_cgroup(freezer); 356 363 } else { ··· 359 366 freezer->state &= ~state; 360 367 361 368 if (!(freezer->state & CGROUP_FREEZING)) { 362 - if (was_freezing) 363 - atomic_dec(&system_freezing_cnt); 364 369 freezer->state &= ~CGROUP_FROZEN; 370 + if (was_freezing) 371 + static_branch_dec(&freezer_active); 365 372 unfreeze_cgroup(freezer); 366 373 } 367 374 }
+2 -2
kernel/exit.c
··· 374 374 complete(&core_state->startup); 375 375 376 376 for (;;) { 377 - set_current_state(TASK_UNINTERRUPTIBLE); 377 + set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE); 378 378 if (!self.task) /* see coredump_finish() */ 379 379 break; 380 - freezable_schedule(); 380 + schedule(); 381 381 } 382 382 __set_current_state(TASK_RUNNING); 383 383 }
+2 -3
kernel/fork.c
··· 1420 1420 static int wait_for_vfork_done(struct task_struct *child, 1421 1421 struct completion *vfork) 1422 1422 { 1423 + unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE; 1423 1424 int killed; 1424 1425 1425 - freezer_do_not_count(); 1426 1426 cgroup_enter_frozen(); 1427 - killed = wait_for_completion_killable(vfork); 1427 + killed = wait_for_completion_state(vfork, state); 1428 1428 cgroup_leave_frozen(false); 1429 - freezer_count(); 1430 1429 1431 1430 if (killed) { 1432 1431 task_lock(child);
+96 -37
kernel/freezer.c
··· 13 13 #include <linux/kthread.h> 14 14 15 15 /* total number of freezing conditions in effect */ 16 - atomic_t system_freezing_cnt = ATOMIC_INIT(0); 17 - EXPORT_SYMBOL(system_freezing_cnt); 16 + DEFINE_STATIC_KEY_FALSE(freezer_active); 17 + EXPORT_SYMBOL(freezer_active); 18 18 19 - /* indicate whether PM freezing is in effect, protected by 19 + /* 20 + * indicate whether PM freezing is in effect, protected by 20 21 * system_transition_mutex 21 22 */ 22 23 bool pm_freezing; ··· 30 29 * freezing_slow_path - slow path for testing whether a task needs to be frozen 31 30 * @p: task to be tested 32 31 * 33 - * This function is called by freezing() if system_freezing_cnt isn't zero 32 + * This function is called by freezing() if freezer_active isn't zero 34 33 * and tests whether @p needs to enter and stay in frozen state. Can be 35 34 * called under any context. The freezers are responsible for ensuring the 36 35 * target tasks see the updated state. ··· 53 52 } 54 53 EXPORT_SYMBOL(freezing_slow_path); 55 54 55 + bool frozen(struct task_struct *p) 56 + { 57 + return READ_ONCE(p->__state) & TASK_FROZEN; 58 + } 59 + 56 60 /* Refrigerator is place where frozen processes are stored :-). */ 57 61 bool __refrigerator(bool check_kthr_stop) 58 62 { 59 - /* Hmm, should we be allowed to suspend when there are realtime 60 - processes around? 
*/ 63 + unsigned int state = get_current_state(); 61 64 bool was_frozen = false; 62 - unsigned int save = get_current_state(); 63 65 64 66 pr_debug("%s entered refrigerator\n", current->comm); 65 67 68 + WARN_ON_ONCE(state && !(state & TASK_NORMAL)); 69 + 66 70 for (;;) { 67 - set_current_state(TASK_UNINTERRUPTIBLE); 71 + bool freeze; 72 + 73 + set_current_state(TASK_FROZEN); 68 74 69 75 spin_lock_irq(&freezer_lock); 70 - current->flags |= PF_FROZEN; 71 - if (!freezing(current) || 72 - (check_kthr_stop && kthread_should_stop())) 73 - current->flags &= ~PF_FROZEN; 76 + freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop()); 74 77 spin_unlock_irq(&freezer_lock); 75 78 76 - if (!(current->flags & PF_FROZEN)) 79 + if (!freeze) 77 80 break; 81 + 78 82 was_frozen = true; 79 83 schedule(); 80 84 } 85 + __set_current_state(TASK_RUNNING); 81 86 82 87 pr_debug("%s left refrigerator\n", current->comm); 83 - 84 - /* 85 - * Restore saved task state before returning. The mb'd version 86 - * needs to be used; otherwise, it might silently break 87 - * synchronization which depends on ordered task state change. 88 - */ 89 - set_current_state(save); 90 88 91 89 return was_frozen; 92 90 } ··· 99 99 signal_wake_up(p, 0); 100 100 unlock_task_sighand(p, &flags); 101 101 } 102 + } 103 + 104 + static int __set_task_frozen(struct task_struct *p, void *arg) 105 + { 106 + unsigned int state = READ_ONCE(p->__state); 107 + 108 + if (p->on_rq) 109 + return 0; 110 + 111 + if (p != current && task_curr(p)) 112 + return 0; 113 + 114 + if (!(state & (TASK_FREEZABLE | __TASK_STOPPED | __TASK_TRACED))) 115 + return 0; 116 + 117 + /* 118 + * Only TASK_NORMAL can be augmented with TASK_FREEZABLE, since they 119 + * can suffer spurious wakeups. 120 + */ 121 + if (state & TASK_FREEZABLE) 122 + WARN_ON_ONCE(!(state & TASK_NORMAL)); 123 + 124 + #ifdef CONFIG_LOCKDEP 125 + /* 126 + * It's dangerous to freeze with locks held; there be dragons there. 
127 + */ 128 + if (!(state & __TASK_FREEZABLE_UNSAFE)) 129 + WARN_ON_ONCE(debug_locks && p->lockdep_depth); 130 + #endif 131 + 132 + WRITE_ONCE(p->__state, TASK_FROZEN); 133 + return TASK_FROZEN; 134 + } 135 + 136 + static bool __freeze_task(struct task_struct *p) 137 + { 138 + /* TASK_FREEZABLE|TASK_STOPPED|TASK_TRACED -> TASK_FROZEN */ 139 + return task_call_func(p, __set_task_frozen, NULL); 102 140 } 103 141 104 142 /** ··· 154 116 { 155 117 unsigned long flags; 156 118 157 - /* 158 - * This check can race with freezer_do_not_count, but worst case that 159 - * will result in an extra wakeup being sent to the task. It does not 160 - * race with freezer_count(), the barriers in freezer_count() and 161 - * freezer_should_skip() ensure that either freezer_count() sees 162 - * freezing == true in try_to_freeze() and freezes, or 163 - * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task 164 - * normally. 165 - */ 166 - if (freezer_should_skip(p)) 167 - return false; 168 - 169 119 spin_lock_irqsave(&freezer_lock, flags); 170 - if (!freezing(p) || frozen(p)) { 120 + if (!freezing(p) || frozen(p) || __freeze_task(p)) { 171 121 spin_unlock_irqrestore(&freezer_lock, flags); 172 122 return false; 173 123 } ··· 163 137 if (!(p->flags & PF_KTHREAD)) 164 138 fake_signal_wake_up(p); 165 139 else 166 - wake_up_state(p, TASK_INTERRUPTIBLE); 140 + wake_up_state(p, TASK_NORMAL); 167 141 168 142 spin_unlock_irqrestore(&freezer_lock, flags); 169 143 return true; 170 144 } 171 145 146 + /* 147 + * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical 148 + * state in p->jobctl. If either of them got a wakeup that was missed because 149 + * TASK_FROZEN, then their canonical state reflects that and the below will 150 + * refuse to restore the special state and instead issue the wakeup. 
151 + */ 152 + static int __set_task_special(struct task_struct *p, void *arg) 153 + { 154 + unsigned int state = 0; 155 + 156 + if (p->jobctl & JOBCTL_TRACED) 157 + state = TASK_TRACED; 158 + 159 + else if (p->jobctl & JOBCTL_STOPPED) 160 + state = TASK_STOPPED; 161 + 162 + if (state) 163 + WRITE_ONCE(p->__state, state); 164 + 165 + return state; 166 + } 167 + 172 168 void __thaw_task(struct task_struct *p) 173 169 { 174 - unsigned long flags; 170 + unsigned long flags, flags2; 175 171 176 172 spin_lock_irqsave(&freezer_lock, flags); 177 - if (frozen(p)) 178 - wake_up_process(p); 173 + if (WARN_ON_ONCE(freezing(p))) 174 + goto unlock; 175 + 176 + if (lock_task_sighand(p, &flags2)) { 177 + /* TASK_FROZEN -> TASK_{STOPPED,TRACED} */ 178 + bool ret = task_call_func(p, __set_task_special, NULL); 179 + unlock_task_sighand(p, &flags2); 180 + if (ret) 181 + goto unlock; 182 + } 183 + 184 + wake_up_state(p, TASK_FROZEN); 185 + unlock: 179 186 spin_unlock_irqrestore(&freezer_lock, flags); 180 187 } 181 188
+4 -4
kernel/futex/waitwake.c
··· 334 334 * futex_queue() calls spin_unlock() upon completion, both serializing 335 335 * access to the hash list and forcing another memory barrier. 336 336 */ 337 - set_current_state(TASK_INTERRUPTIBLE); 337 + set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 338 338 futex_queue(q, hb); 339 339 340 340 /* Arm the timer */ ··· 352 352 * is no timeout, or if it has yet to expire. 353 353 */ 354 354 if (!timeout || timeout->task) 355 - freezable_schedule(); 355 + schedule(); 356 356 } 357 357 __set_current_state(TASK_RUNNING); 358 358 } ··· 430 430 return ret; 431 431 } 432 432 433 - set_current_state(TASK_INTERRUPTIBLE); 433 + set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 434 434 435 435 for (i = 0; i < count; i++) { 436 436 u32 __user *uaddr = (u32 __user *)(unsigned long)vs[i].w.uaddr; ··· 504 504 return; 505 505 } 506 506 507 - freezable_schedule(); 507 + schedule(); 508 508 } 509 509 510 510 /**
+2 -2
kernel/hung_task.c
··· 95 95 * Ensure the task is not frozen. 96 96 * Also, skip vfork and any other user process that freezer should skip. 97 97 */ 98 - if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP))) 99 - return; 98 + if (unlikely(READ_ONCE(t->__state) & (TASK_FREEZABLE | TASK_FROZEN))) 99 + return; 100 100 101 101 /* 102 102 * When a freshly created task is scheduled once, changes its state to
+3 -3
kernel/power/main.c
··· 24 24 unsigned int lock_system_sleep(void) 25 25 { 26 26 unsigned int flags = current->flags; 27 - current->flags |= PF_FREEZER_SKIP; 27 + current->flags |= PF_NOFREEZE; 28 28 mutex_lock(&system_transition_mutex); 29 29 return flags; 30 30 } ··· 48 48 * Which means, if we use try_to_freeze() here, it would make them 49 49 * enter the refrigerator, thus causing hibernation to lockup. 50 50 */ 51 - if (!(flags & PF_FREEZER_SKIP)) 52 - current->flags &= ~PF_FREEZER_SKIP; 51 + if (!(flags & PF_NOFREEZE)) 52 + current->flags &= ~PF_NOFREEZE; 53 53 mutex_unlock(&system_transition_mutex); 54 54 } 55 55 EXPORT_SYMBOL_GPL(unlock_system_sleep);
+4 -6
kernel/power/process.c
··· 50 50 if (p == current || !freeze_task(p)) 51 51 continue; 52 52 53 - if (!freezer_should_skip(p)) 54 - todo++; 53 + todo++; 55 54 } 56 55 read_unlock(&tasklist_lock); 57 56 ··· 95 96 if (!wakeup || pm_debug_messages_on) { 96 97 read_lock(&tasklist_lock); 97 98 for_each_process_thread(g, p) { 98 - if (p != current && !freezer_should_skip(p) 99 - && freezing(p) && !frozen(p)) 99 + if (p != current && freezing(p) && !frozen(p)) 100 100 sched_show_task(p); 101 101 } 102 102 read_unlock(&tasklist_lock); ··· 127 129 current->flags |= PF_SUSPEND_TASK; 128 130 129 131 if (!pm_freezing) 130 - atomic_inc(&system_freezing_cnt); 132 + static_branch_inc(&freezer_active); 131 133 132 134 pm_wakeup_clear(0); 133 135 pr_info("Freezing user space processes ... "); ··· 188 190 189 191 trace_suspend_resume(TPS("thaw_processes"), 0, true); 190 192 if (pm_freezing) 191 - atomic_dec(&system_freezing_cnt); 193 + static_branch_dec(&freezer_active); 192 194 pm_freezing = false; 193 195 pm_nosig_freezing = false; 194 196
+1 -1
kernel/ptrace.c
··· 269 269 read_unlock(&tasklist_lock); 270 270 271 271 if (!ret && !ignore_state && 272 - WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED))) 272 + WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN))) 273 273 ret = -ESRCH; 274 274 275 275 return ret;
+1 -1
kernel/sched/core.c
··· 6428 6428 prev->sched_contributes_to_load = 6429 6429 (prev_state & TASK_UNINTERRUPTIBLE) && 6430 6430 !(prev_state & TASK_NOLOAD) && 6431 - !(prev->flags & PF_FROZEN); 6431 + !(prev_state & TASK_FROZEN); 6432 6432 6433 6433 if (prev->sched_contributes_to_load) 6434 6434 rq->nr_uninterruptible++;
+7 -7
kernel/signal.c
··· 2304 2304 read_unlock(&tasklist_lock); 2305 2305 cgroup_enter_frozen(); 2306 2306 preempt_enable_no_resched(); 2307 - freezable_schedule(); 2307 + schedule(); 2308 2308 cgroup_leave_frozen(true); 2309 2309 2310 2310 /* ··· 2473 2473 2474 2474 /* Now we don't run again until woken by SIGCONT or SIGKILL */ 2475 2475 cgroup_enter_frozen(); 2476 - freezable_schedule(); 2476 + schedule(); 2477 2477 return true; 2478 2478 } else { 2479 2479 /* ··· 2548 2548 * immediately (if there is a non-fatal signal pending), and 2549 2549 * put the task into sleep. 2550 2550 */ 2551 - __set_current_state(TASK_INTERRUPTIBLE); 2551 + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 2552 2552 clear_thread_flag(TIF_SIGPENDING); 2553 2553 spin_unlock_irq(&current->sighand->siglock); 2554 2554 cgroup_enter_frozen(); 2555 - freezable_schedule(); 2555 + schedule(); 2556 2556 } 2557 2557 2558 2558 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) ··· 3600 3600 recalc_sigpending(); 3601 3601 spin_unlock_irq(&tsk->sighand->siglock); 3602 3602 3603 - __set_current_state(TASK_INTERRUPTIBLE); 3604 - ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3605 - HRTIMER_MODE_REL); 3603 + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 3604 + ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns, 3605 + HRTIMER_MODE_REL); 3606 3606 spin_lock_irq(&tsk->sighand->siglock); 3607 3607 __set_task_blocked(tsk, &tsk->real_blocked); 3608 3608 sigemptyset(&tsk->real_blocked);
+2 -2
kernel/time/hrtimer.c
··· 2037 2037 struct restart_block *restart; 2038 2038 2039 2039 do { 2040 - set_current_state(TASK_INTERRUPTIBLE); 2040 + set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 2041 2041 hrtimer_sleeper_start_expires(t, mode); 2042 2042 2043 2043 if (likely(t->task)) 2044 - freezable_schedule(); 2044 + schedule(); 2045 2045 2046 2046 hrtimer_cancel(&t->timer); 2047 2047 mode = HRTIMER_MODE_ABS;
+9 -11
kernel/umh.c
··· 404 404 */ 405 405 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) 406 406 { 407 + unsigned int state = TASK_UNINTERRUPTIBLE; 407 408 DECLARE_COMPLETION_ONSTACK(done); 408 409 int retval = 0; 409 410 ··· 438 437 if (wait == UMH_NO_WAIT) /* task has freed sub_info */ 439 438 goto unlock; 440 439 440 + if (wait & UMH_KILLABLE) 441 + state |= TASK_KILLABLE; 442 + 441 443 if (wait & UMH_FREEZABLE) 442 - freezer_do_not_count(); 444 + state |= TASK_FREEZABLE; 445 + 446 + retval = wait_for_completion_state(&done, state); 447 + if (!retval) 448 + goto wait_done; 443 449 444 450 if (wait & UMH_KILLABLE) { 445 - retval = wait_for_completion_killable(&done); 446 - if (!retval) 447 - goto wait_done; 448 - 449 451 /* umh_complete() will see NULL and free sub_info */ 450 452 if (xchg(&sub_info->complete, NULL)) 451 453 goto unlock; 452 - /* fallthrough, umh_complete() was already called */ 453 454 } 454 - 455 - wait_for_completion(&done); 456 - 457 - if (wait & UMH_FREEZABLE) 458 - freezer_count(); 459 455 460 456 wait_done: 461 457 retval = sub_info->retval;
+2 -2
mm/khugepaged.c
··· 730 730 DEFINE_WAIT(wait); 731 731 732 732 add_wait_queue(&khugepaged_wait, &wait); 733 - freezable_schedule_timeout_interruptible( 734 - msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 733 + __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 734 + schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); 735 735 remove_wait_queue(&khugepaged_wait, &wait); 736 736 } 737 737
+5 -7
net/sunrpc/sched.c
··· 269 269 270 270 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 271 271 { 272 - freezable_schedule_unsafe(); 272 + schedule(); 273 273 if (signal_pending_state(mode, current)) 274 274 return -ERESTARTSYS; 275 275 return 0; ··· 333 333 * to enforce taking of the wq->lock and hence avoid races with 334 334 * rpc_complete_task(). 335 335 */ 336 - int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action) 336 + int rpc_wait_for_completion_task(struct rpc_task *task) 337 337 { 338 - if (action == NULL) 339 - action = rpc_wait_bit_killable; 340 338 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, 341 - action, TASK_KILLABLE); 339 + rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 342 340 } 343 - EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); 341 + EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task); 344 342 345 343 /* 346 344 * Make an RPC task runnable. ··· 962 964 trace_rpc_task_sync_sleep(task, task->tk_action); 963 965 status = out_of_line_wait_on_bit(&task->tk_runstate, 964 966 RPC_TASK_QUEUED, rpc_wait_bit_killable, 965 - TASK_KILLABLE); 967 + TASK_KILLABLE|TASK_FREEZABLE); 966 968 if (status < 0) { 967 969 /* 968 970 * When a sync task receives a signal, it exits with
+3 -5
net/unix/af_unix.c
··· 2543 2543 struct sk_buff *last, unsigned int last_len, 2544 2544 bool freezable) 2545 2545 { 2546 + unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE; 2546 2547 struct sk_buff *tail; 2547 2548 DEFINE_WAIT(wait); 2548 2549 2549 2550 unix_state_lock(sk); 2550 2551 2551 2552 for (;;) { 2552 - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2553 + prepare_to_wait(sk_sleep(sk), &wait, state); 2553 2554 2554 2555 tail = skb_peek_tail(&sk->sk_receive_queue); 2555 2556 if (tail != last || ··· 2563 2562 2564 2563 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2565 2564 unix_state_unlock(sk); 2566 - if (freezable) 2567 - timeo = freezable_schedule_timeout(timeo); 2568 - else 2569 - timeo = schedule_timeout(timeo); 2565 + timeo = schedule_timeout(timeo); 2570 2566 unix_state_lock(sk); 2571 2567 2572 2568 if (sock_flag(sk, SOCK_DEAD))