Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

notifier: Fix broken error handling pattern

The current notifiers have the following error handling pattern all
over the place:

int err, nr;

err = __foo_notifier_call_chain(&chain, val_up, v, -1, &nr);
if (err & NOTIFY_STOP_MASK)
__foo_notifier_call_chain(&chain, val_down, v, nr-1, NULL)

And aside from the endless repetition thereof, it is broken. Consider
blocking notifiers; both calls take and drop the rwsem, this means
that the notifier list can change in between the two calls, making @nr
meaningless.

Fix this by replacing all the __foo_notifier_call_chain() functions
with foo_notifier_call_chain_robust() that embeds the above pattern,
but ensures it is inside a single lock region.

Note: I switched atomic_notifier_call_chain_robust() to use
the spinlock, since RCU cannot provide the guarantee
required for the recovery.

Note: software_resume() error handling was broken afaict.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20200818135804.325626653@infradead.org

authored by

Peter Zijlstra and committed by
Ingo Molnar
70d93298 f75aef39

+147 -140
+7 -8
include/linux/notifier.h
··· 161 161 162 162 extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, 163 163 unsigned long val, void *v); 164 - extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, 165 - unsigned long val, void *v, int nr_to_call, int *nr_calls); 166 164 extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, 167 165 unsigned long val, void *v); 168 - extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, 169 - unsigned long val, void *v, int nr_to_call, int *nr_calls); 170 166 extern int raw_notifier_call_chain(struct raw_notifier_head *nh, 171 167 unsigned long val, void *v); 172 - extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, 173 - unsigned long val, void *v, int nr_to_call, int *nr_calls); 174 168 extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, 175 169 unsigned long val, void *v); 176 - extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, 177 - unsigned long val, void *v, int nr_to_call, int *nr_calls); 170 + 171 + extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, 172 + unsigned long val_up, unsigned long val_down, void *v); 173 + extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, 174 + unsigned long val_up, unsigned long val_down, void *v); 175 + extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, 176 + unsigned long val_up, unsigned long val_down, void *v); 178 177 179 178 #define NOTIFY_DONE 0x0000 /* Don't care */ 180 179 #define NOTIFY_OK 0x0001 /* Suits me */
+18 -30
kernel/cpu_pm.c
··· 15 15 16 16 static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain); 17 17 18 - static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls) 18 + static int cpu_pm_notify(enum cpu_pm_event event) 19 19 { 20 20 int ret; 21 21 22 22 /* 23 - * __atomic_notifier_call_chain has a RCU read critical section, which 23 + * atomic_notifier_call_chain has a RCU read critical section, which 24 24 * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let 25 25 * RCU know this. 26 26 */ 27 27 rcu_irq_enter_irqson(); 28 - ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL, 29 - nr_to_call, nr_calls); 28 + ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL); 29 + rcu_irq_exit_irqson(); 30 + 31 + return notifier_to_errno(ret); 32 + } 33 + 34 + static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down) 35 + { 36 + int ret; 37 + 38 + rcu_irq_enter_irqson(); 39 + ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL); 30 40 rcu_irq_exit_irqson(); 31 41 32 42 return notifier_to_errno(ret); ··· 90 80 */ 91 81 int cpu_pm_enter(void) 92 82 { 93 - int nr_calls = 0; 94 - int ret = 0; 95 - 96 - ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls); 97 - if (ret) 98 - /* 99 - * Inform listeners (nr_calls - 1) about failure of CPU PM 100 - * PM entry who are notified earlier to prepare for it. 
101 - */ 102 - cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL); 103 - 104 - return ret; 83 + return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED); 105 84 } 106 85 EXPORT_SYMBOL_GPL(cpu_pm_enter); 107 86 ··· 108 109 */ 109 110 int cpu_pm_exit(void) 110 111 { 111 - return cpu_pm_notify(CPU_PM_EXIT, -1, NULL); 112 + return cpu_pm_notify(CPU_PM_EXIT); 112 113 } 113 114 EXPORT_SYMBOL_GPL(cpu_pm_exit); 114 115 ··· 130 131 */ 131 132 int cpu_cluster_pm_enter(void) 132 133 { 133 - int nr_calls = 0; 134 - int ret = 0; 135 - 136 - ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls); 137 - if (ret) 138 - /* 139 - * Inform listeners (nr_calls - 1) about failure of CPU cluster 140 - * PM entry who are notified earlier to prepare for it. 141 - */ 142 - cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL); 143 - 144 - return ret; 134 + return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED); 145 135 } 146 136 EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter); 147 137 ··· 151 163 */ 152 164 int cpu_cluster_pm_exit(void) 153 165 { 154 - return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL); 166 + return cpu_pm_notify(CPU_CLUSTER_PM_EXIT); 155 167 } 156 168 EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit); 157 169
+88 -56
kernel/notifier.c
··· 94 94 } 95 95 NOKPROBE_SYMBOL(notifier_call_chain); 96 96 97 + /** 98 + * notifier_call_chain_robust - Inform the registered notifiers about an event 99 + * and rollback on error. 100 + * @nl: Pointer to head of the blocking notifier chain 101 + * @val_up: Value passed unmodified to the notifier function 102 + * @val_down: Value passed unmodified to the notifier function when recovering 103 + * from an error on @val_up 104 + * @v Pointer passed unmodified to the notifier function 105 + * 106 + * NOTE: It is important the @nl chain doesn't change between the two 107 + * invocations of notifier_call_chain() such that we visit the 108 + * exact same notifier callbacks; this rules out any RCU usage. 109 + * 110 + * Returns: the return value of the @val_up call. 111 + */ 112 + static int notifier_call_chain_robust(struct notifier_block **nl, 113 + unsigned long val_up, unsigned long val_down, 114 + void *v) 115 + { 116 + int ret, nr = 0; 117 + 118 + ret = notifier_call_chain(nl, val_up, v, -1, &nr); 119 + if (ret & NOTIFY_STOP_MASK) 120 + notifier_call_chain(nl, val_down, v, nr-1, NULL); 121 + 122 + return ret; 123 + } 124 + 97 125 /* 98 126 * Atomic notifier chain routines. Registration and unregistration 99 127 * use a spinlock, and call_chain is synchronized by RCU (no locks). ··· 172 144 } 173 145 EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); 174 146 147 + int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, 148 + unsigned long val_up, unsigned long val_down, void *v) 149 + { 150 + unsigned long flags; 151 + int ret; 152 + 153 + /* 154 + * Musn't use RCU; because then the notifier list can 155 + * change between the up and down traversal. 
156 + */ 157 + spin_lock_irqsave(&nh->lock, flags); 158 + ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v); 159 + spin_unlock_irqrestore(&nh->lock, flags); 160 + 161 + return ret; 162 + } 163 + EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust); 164 + NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust); 165 + 175 166 /** 176 - * __atomic_notifier_call_chain - Call functions in an atomic notifier chain 167 + * atomic_notifier_call_chain - Call functions in an atomic notifier chain 177 168 * @nh: Pointer to head of the atomic notifier chain 178 169 * @val: Value passed unmodified to notifier function 179 170 * @v: Pointer passed unmodified to notifier function 180 - * @nr_to_call: See the comment for notifier_call_chain. 181 - * @nr_calls: See the comment for notifier_call_chain. 182 171 * 183 172 * Calls each function in a notifier chain in turn. The functions 184 173 * run in an atomic context, so they must not block. ··· 208 163 * Otherwise the return value is the return value 209 164 * of the last notifier function called. 
210 165 */ 211 - int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, 212 - unsigned long val, void *v, 213 - int nr_to_call, int *nr_calls) 166 + int atomic_notifier_call_chain(struct atomic_notifier_head *nh, 167 + unsigned long val, void *v) 214 168 { 215 169 int ret; 216 170 217 171 rcu_read_lock(); 218 - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 172 + ret = notifier_call_chain(&nh->head, val, v, -1, NULL); 219 173 rcu_read_unlock(); 220 - return ret; 221 - } 222 - EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); 223 - NOKPROBE_SYMBOL(__atomic_notifier_call_chain); 224 174 225 - int atomic_notifier_call_chain(struct atomic_notifier_head *nh, 226 - unsigned long val, void *v) 227 - { 228 - return __atomic_notifier_call_chain(nh, val, v, -1, NULL); 175 + return ret; 229 176 } 230 177 EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); 231 178 NOKPROBE_SYMBOL(atomic_notifier_call_chain); ··· 287 250 } 288 251 EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); 289 252 253 + int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, 254 + unsigned long val_up, unsigned long val_down, void *v) 255 + { 256 + int ret = NOTIFY_DONE; 257 + 258 + /* 259 + * We check the head outside the lock, but if this access is 260 + * racy then it does not matter what the result of the test 261 + * is, we re-check the list after having taken the lock anyway: 262 + */ 263 + if (rcu_access_pointer(nh->head)) { 264 + down_read(&nh->rwsem); 265 + ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v); 266 + up_read(&nh->rwsem); 267 + } 268 + return ret; 269 + } 270 + EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust); 271 + 290 272 /** 291 - * __blocking_notifier_call_chain - Call functions in a blocking notifier chain 273 + * blocking_notifier_call_chain - Call functions in a blocking notifier chain 292 274 * @nh: Pointer to head of the blocking notifier chain 293 275 * @val: Value passed unmodified to notifier function 
294 276 * @v: Pointer passed unmodified to notifier function 295 - * @nr_to_call: See comment for notifier_call_chain. 296 - * @nr_calls: See comment for notifier_call_chain. 297 277 * 298 278 * Calls each function in a notifier chain in turn. The functions 299 279 * run in a process context, so they are allowed to block. ··· 322 268 * Otherwise the return value is the return value 323 269 * of the last notifier function called. 324 270 */ 325 - int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, 326 - unsigned long val, void *v, 327 - int nr_to_call, int *nr_calls) 271 + int blocking_notifier_call_chain(struct blocking_notifier_head *nh, 272 + unsigned long val, void *v) 328 273 { 329 274 int ret = NOTIFY_DONE; 330 275 ··· 334 281 */ 335 282 if (rcu_access_pointer(nh->head)) { 336 283 down_read(&nh->rwsem); 337 - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, 338 - nr_calls); 284 + ret = notifier_call_chain(&nh->head, val, v, -1, NULL); 339 285 up_read(&nh->rwsem); 340 286 } 341 287 return ret; 342 - } 343 - EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); 344 - 345 - int blocking_notifier_call_chain(struct blocking_notifier_head *nh, 346 - unsigned long val, void *v) 347 - { 348 - return __blocking_notifier_call_chain(nh, val, v, -1, NULL); 349 288 } 350 289 EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); 351 290 ··· 380 335 } 381 336 EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); 382 337 338 + int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, 339 + unsigned long val_up, unsigned long val_down, void *v) 340 + { 341 + return notifier_call_chain_robust(&nh->head, val_up, val_down, v); 342 + } 343 + EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust); 344 + 383 345 /** 384 - * __raw_notifier_call_chain - Call functions in a raw notifier chain 346 + * raw_notifier_call_chain - Call functions in a raw notifier chain 385 347 * @nh: Pointer to head of the raw notifier chain 386 348 * @val: Value passed unmodified to 
notifier function 387 349 * @v: Pointer passed unmodified to notifier function 388 - * @nr_to_call: See comment for notifier_call_chain. 389 - * @nr_calls: See comment for notifier_call_chain 390 350 * 391 351 * Calls each function in a notifier chain in turn. The functions 392 352 * run in an undefined context. ··· 404 354 * Otherwise the return value is the return value 405 355 * of the last notifier function called. 406 356 */ 407 - int __raw_notifier_call_chain(struct raw_notifier_head *nh, 408 - unsigned long val, void *v, 409 - int nr_to_call, int *nr_calls) 410 - { 411 - return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 412 - } 413 - EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); 414 - 415 357 int raw_notifier_call_chain(struct raw_notifier_head *nh, 416 358 unsigned long val, void *v) 417 359 { 418 - return __raw_notifier_call_chain(nh, val, v, -1, NULL); 360 + return notifier_call_chain(&nh->head, val, v, -1, NULL); 419 361 } 420 362 EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 421 363 ··· 479 437 EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); 480 438 481 439 /** 482 - * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain 440 + * srcu_notifier_call_chain - Call functions in an SRCU notifier chain 483 441 * @nh: Pointer to head of the SRCU notifier chain 484 442 * @val: Value passed unmodified to notifier function 485 443 * @v: Pointer passed unmodified to notifier function 486 - * @nr_to_call: See comment for notifier_call_chain. 487 - * @nr_calls: See comment for notifier_call_chain 488 444 * 489 445 * Calls each function in a notifier chain in turn. The functions 490 446 * run in a process context, so they are allowed to block. ··· 494 454 * Otherwise the return value is the return value 495 455 * of the last notifier function called. 
496 456 */ 497 - int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, 498 - unsigned long val, void *v, 499 - int nr_to_call, int *nr_calls) 457 + int srcu_notifier_call_chain(struct srcu_notifier_head *nh, 458 + unsigned long val, void *v) 500 459 { 501 460 int ret; 502 461 int idx; 503 462 504 463 idx = srcu_read_lock(&nh->srcu); 505 - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); 464 + ret = notifier_call_chain(&nh->head, val, v, -1, NULL); 506 465 srcu_read_unlock(&nh->srcu, idx); 507 466 return ret; 508 - } 509 - EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); 510 - 511 - int srcu_notifier_call_chain(struct srcu_notifier_head *nh, 512 - unsigned long val, void *v) 513 - { 514 - return __srcu_notifier_call_chain(nh, val, v, -1, NULL); 515 467 } 516 468 EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); 517 469
+18 -21
kernel/power/hibernate.c
··· 706 706 */ 707 707 int hibernate(void) 708 708 { 709 - int error, nr_calls = 0; 710 709 bool snapshot_test = false; 710 + int error; 711 711 712 712 if (!hibernation_available()) { 713 713 pm_pr_dbg("Hibernation not available.\n"); ··· 723 723 724 724 pr_info("hibernation entry\n"); 725 725 pm_prepare_console(); 726 - error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); 727 - if (error) { 728 - nr_calls--; 729 - goto Exit; 730 - } 726 + error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); 727 + if (error) 728 + goto Restore; 731 729 732 730 ksys_sync_helper(); 733 731 ··· 783 785 /* Don't bother checking whether freezer_test_done is true */ 784 786 freezer_test_done = false; 785 787 Exit: 786 - __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); 788 + pm_notifier_call_chain(PM_POST_HIBERNATION); 789 + Restore: 787 790 pm_restore_console(); 788 791 hibernate_release(); 789 792 Unlock: ··· 803 804 */ 804 805 int hibernate_quiet_exec(int (*func)(void *data), void *data) 805 806 { 806 - int error, nr_calls = 0; 807 + int error; 807 808 808 809 lock_system_sleep(); 809 810 ··· 814 815 815 816 pm_prepare_console(); 816 817 817 - error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); 818 - if (error) { 819 - nr_calls--; 820 - goto exit; 821 - } 818 + error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); 819 + if (error) 820 + goto restore; 822 821 823 822 error = freeze_processes(); 824 823 if (error) ··· 877 880 thaw_processes(); 878 881 879 882 exit: 880 - __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); 883 + pm_notifier_call_chain(PM_POST_HIBERNATION); 881 884 885 + restore: 882 886 pm_restore_console(); 883 887 884 888 hibernate_release(); ··· 908 910 */ 909 911 static int software_resume(void) 910 912 { 911 - int error, nr_calls = 0; 913 + int error; 912 914 913 915 /* 914 916 * If the user said "noresume".. bail out early. 
··· 995 997 996 998 pr_info("resume from hibernation\n"); 997 999 pm_prepare_console(); 998 - error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); 999 - if (error) { 1000 - nr_calls--; 1001 - goto Close_Finish; 1002 - } 1000 + error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); 1001 + if (error) 1002 + goto Restore; 1003 1003 1004 1004 pm_pr_dbg("Preparing processes for hibernation restore.\n"); 1005 1005 error = freeze_processes(); ··· 1013 1017 error = load_image_and_restore(); 1014 1018 thaw_processes(); 1015 1019 Finish: 1016 - __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); 1020 + pm_notifier_call_chain(PM_POST_RESTORE); 1021 + Restore: 1017 1022 pm_restore_console(); 1018 1023 pr_info("resume failed (%d)\n", error); 1019 1024 hibernate_release();
+4 -4
kernel/power/main.c
··· 80 80 } 81 81 EXPORT_SYMBOL_GPL(unregister_pm_notifier); 82 82 83 - int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls) 83 + int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down) 84 84 { 85 85 int ret; 86 86 87 - ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL, 88 - nr_to_call, nr_calls); 87 + ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL); 89 88 90 89 return notifier_to_errno(ret); 91 90 } 91 + 92 92 int pm_notifier_call_chain(unsigned long val) 93 93 { 94 - return __pm_notifier_call_chain(val, -1, NULL); 94 + return blocking_notifier_call_chain(&pm_chain_head, val, NULL); 95 95 } 96 96 97 97 /* If set, devices may be suspended and resumed asynchronously. */
+1 -2
kernel/power/power.h
··· 210 210 211 211 #ifdef CONFIG_PM_SLEEP 212 212 /* kernel/power/main.c */ 213 - extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call, 214 - int *nr_calls); 213 + extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down); 215 214 extern int pm_notifier_call_chain(unsigned long val); 216 215 #endif 217 216
+6 -8
kernel/power/suspend.c
··· 342 342 */ 343 343 static int suspend_prepare(suspend_state_t state) 344 344 { 345 - int error, nr_calls = 0; 345 + int error; 346 346 347 347 if (!sleep_state_supported(state)) 348 348 return -EPERM; 349 349 350 350 pm_prepare_console(); 351 351 352 - error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls); 353 - if (error) { 354 - nr_calls--; 355 - goto Finish; 356 - } 352 + error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND); 353 + if (error) 354 + goto Restore; 357 355 358 356 trace_suspend_resume(TPS("freeze_processes"), 0, true); 359 357 error = suspend_freeze_processes(); ··· 361 363 362 364 suspend_stats.failed_freeze++; 363 365 dpm_save_failed_step(SUSPEND_FREEZE); 364 - Finish: 365 - __pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL); 366 + pm_notifier_call_chain(PM_POST_SUSPEND); 367 + Restore: 366 368 pm_restore_console(); 367 369 return error; 368 370 }
+4 -10
kernel/power/user.c
··· 46 46 static int snapshot_open(struct inode *inode, struct file *filp) 47 47 { 48 48 struct snapshot_data *data; 49 - int error, nr_calls = 0; 49 + int error; 50 50 51 51 if (!hibernation_available()) 52 52 return -EPERM; ··· 73 73 swap_type_of(swsusp_resume_device, 0, NULL) : -1; 74 74 data->mode = O_RDONLY; 75 75 data->free_bitmaps = false; 76 - error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); 77 - if (error) 78 - __pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL); 76 + error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); 79 77 } else { 80 78 /* 81 79 * Resuming. We may need to wait for the image device to ··· 83 85 84 86 data->swap = -1; 85 87 data->mode = O_WRONLY; 86 - error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); 88 + error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); 87 89 if (!error) { 88 90 error = create_basic_memory_bitmaps(); 89 91 data->free_bitmaps = !error; 90 - } else 91 - nr_calls--; 92 - 93 - if (error) 94 - __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); 92 + } 95 93 } 96 94 if (error) 97 95 hibernate_release();
+1 -1
tools/power/pm-graph/sleepgraph.py
··· 171 171 tracefuncs = { 172 172 'sys_sync': {}, 173 173 'ksys_sync': {}, 174 - '__pm_notifier_call_chain': {}, 174 + 'pm_notifier_call_chain_robust': {}, 175 175 'pm_prepare_console': {}, 176 176 'pm_notifier_call_chain': {}, 177 177 'freeze_processes': {},