Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipc/sem.c: move wake_up_process out of the spinlock section

The wake-up part of semtimedop() consists of two steps:

- the right tasks must be identified.
- they must be woken up.

Right now, both steps run while the array spinlock is held. This patch
reorders the code and moves the actual wake_up_process() behind the point
where the spinlock is dropped.

The code also moves the setting of sem->sem_otime to one place: it does not
make sense to set the last-modify time multiple times.

[akpm@linux-foundation.org: repair kerneldoc]
[akpm@linux-foundation.org: fix uninitialised retval]
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Manfred Spraul; committed by Linus Torvalds.
0a2b9d4c fd5db422

+91 -32
+91 -32
ipc/sem.c
··· 381 381 sop--; 382 382 } 383 383 384 - sma->sem_otime = get_seconds(); 385 384 return 0; 386 385 387 386 out_of_range: ··· 403 404 return result; 404 405 } 405 406 406 - /* 407 - * Wake up a process waiting on the sem queue with a given error. 408 - * The queue is invalid (may not be accessed) after the function returns. 407 + /** wake_up_sem_queue_prepare(q, error): Prepare wake-up 408 + * @q: queue entry that must be signaled 409 + * @error: Error value for the signal 410 + * 411 + * Prepare the wake-up of the queue entry q. 409 412 */ 410 - static void wake_up_sem_queue(struct sem_queue *q, int error) 413 + static void wake_up_sem_queue_prepare(struct list_head *pt, 414 + struct sem_queue *q, int error) 411 415 { 412 - /* 413 - * Hold preempt off so that we don't get preempted and have the 414 - * wakee busy-wait until we're scheduled back on. We're holding 415 - * locks here so it may not strictly be needed, however if the 416 - * locks become preemptible then this prevents such a problem. 417 - */ 418 - preempt_disable(); 416 + if (list_empty(pt)) { 417 + /* 418 + * Hold preempt off so that we don't get preempted and have the 419 + * wakee busy-wait until we're scheduled back on. 420 + */ 421 + preempt_disable(); 422 + } 419 423 q->status = IN_WAKEUP; 420 - wake_up_process(q->sleeper); 421 - /* hands-off: q can disappear immediately after writing q->status. */ 422 - smp_wmb(); 423 - q->status = error; 424 - preempt_enable(); 424 + q->pid = error; 425 + 426 + list_add_tail(&q->simple_list, pt); 427 + } 428 + 429 + /** 430 + * wake_up_sem_queue_do(pt) - do the actual wake-up 431 + * @pt: list of tasks to be woken up 432 + * 433 + * Do the actual wake-up. 434 + * The function is called without any locks held, thus the semaphore array 435 + * could be destroyed already and the tasks can disappear as soon as the 436 + * status is set to the actual return code. 
437 + */ 438 + static void wake_up_sem_queue_do(struct list_head *pt) 439 + { 440 + struct sem_queue *q, *t; 441 + int did_something; 442 + 443 + did_something = !list_empty(pt); 444 + list_for_each_entry_safe(q, t, pt, simple_list) { 445 + wake_up_process(q->sleeper); 446 + /* q can disappear immediately after writing q->status. */ 447 + smp_wmb(); 448 + q->status = q->pid; 449 + } 450 + if (did_something) 451 + preempt_enable(); 425 452 } 426 453 427 454 static void unlink_queue(struct sem_array *sma, struct sem_queue *q) ··· 527 502 * update_queue(sma, semnum): Look for tasks that can be completed. 528 503 * @sma: semaphore array. 529 504 * @semnum: semaphore that was modified. 505 + * @pt: list head for the tasks that must be woken up. 530 506 * 531 507 * update_queue must be called after a semaphore in a semaphore array 532 508 * was modified. If multiple semaphore were modified, then @semnum 533 509 * must be set to -1. 510 + * The tasks that must be woken up are added to @pt. The return code 511 + * is stored in q->pid. 512 + * The function return 1 if at least one semop was completed successfully. 534 513 */ 535 - static void update_queue(struct sem_array *sma, int semnum) 514 + static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) 536 515 { 537 516 struct sem_queue *q; 538 517 struct list_head *walk; 539 518 struct list_head *pending_list; 540 519 int offset; 520 + int semop_completed = 0; 541 521 542 522 /* if there are complex operations around, then knowing the semaphore 543 523 * that was modified doesn't help us. 
Assume that multiple semaphores ··· 587 557 588 558 unlink_queue(sma, q); 589 559 590 - if (error) 560 + if (error) { 591 561 restart = 0; 592 - else 562 + } else { 563 + semop_completed = 1; 593 564 restart = check_restart(sma, q); 565 + } 594 566 595 - wake_up_sem_queue(q, error); 567 + wake_up_sem_queue_prepare(pt, q, error); 596 568 if (restart) 597 569 goto again; 598 570 } 571 + return semop_completed; 599 572 } 600 573 601 - /** do_smart_update(sma, sops, nsops): Optimized update_queue 574 + /** 575 + * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue 602 576 * @sma: semaphore array 603 577 * @sops: operations that were performed 604 578 * @nsops: number of operations 579 + * @otime: force setting otime 580 + * @pt: list head of the tasks that must be woken up. 605 581 * 606 582 * do_smart_update() does the required called to update_queue, based on the 607 583 * actual changes that were performed on the semaphore array. 584 + * Note that the function does not do the actual wake-up: the caller is 585 + * responsible for calling wake_up_sem_queue_do(@pt). 586 + * It is safe to perform this call after dropping all locks. 
608 587 */ 609 - static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops) 588 + static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, 589 + int otime, struct list_head *pt) 610 590 { 611 591 int i; 612 592 613 593 if (sma->complex_count || sops == NULL) { 614 - update_queue(sma, -1); 615 - return; 594 + if (update_queue(sma, -1, pt)) 595 + otime = 1; 596 + goto done; 616 597 } 617 598 618 599 for (i = 0; i < nsops; i++) { 619 600 if (sops[i].sem_op > 0 || 620 601 (sops[i].sem_op < 0 && 621 602 sma->sem_base[sops[i].sem_num].semval == 0)) 622 - update_queue(sma, sops[i].sem_num); 603 + if (update_queue(sma, sops[i].sem_num, pt)) 604 + otime = 1; 623 605 } 606 + done: 607 + if (otime) 608 + sma->sem_otime = get_seconds(); 624 609 } 625 610 626 611 ··· 701 656 struct sem_undo *un, *tu; 702 657 struct sem_queue *q, *tq; 703 658 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 659 + struct list_head tasks; 704 660 705 661 /* Free the existing undo structures for this semaphore set. */ 706 662 assert_spin_locked(&sma->sem_perm.lock); ··· 715 669 } 716 670 717 671 /* Wake up all pending processes and let them fail with EIDRM. 
*/ 672 + INIT_LIST_HEAD(&tasks); 718 673 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { 719 674 unlink_queue(sma, q); 720 - wake_up_sem_queue(q, -EIDRM); 675 + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); 721 676 } 722 677 723 678 /* Remove the semaphore set from the IDR */ 724 679 sem_rmid(ns, sma); 725 680 sem_unlock(sma); 726 681 682 + wake_up_sem_queue_do(&tasks); 727 683 ns->used_sems -= sma->sem_nsems; 728 684 security_sem_free(sma); 729 685 ipc_rcu_putref(sma); ··· 847 799 ushort fast_sem_io[SEMMSL_FAST]; 848 800 ushort* sem_io = fast_sem_io; 849 801 int nsems; 802 + struct list_head tasks; 850 803 851 804 sma = sem_lock_check(ns, semid); 852 805 if (IS_ERR(sma)) 853 806 return PTR_ERR(sma); 854 807 808 + INIT_LIST_HEAD(&tasks); 855 809 nsems = sma->sem_nsems; 856 810 857 811 err = -EACCES; ··· 941 891 } 942 892 sma->sem_ctime = get_seconds(); 943 893 /* maybe some queued-up processes were waiting for this */ 944 - update_queue(sma, -1); 894 + do_smart_update(sma, NULL, 0, 0, &tasks); 945 895 err = 0; 946 896 goto out_unlock; 947 897 } ··· 983 933 curr->sempid = task_tgid_vnr(current); 984 934 sma->sem_ctime = get_seconds(); 985 935 /* maybe some queued-up processes were waiting for this */ 986 - update_queue(sma, semnum); 936 + do_smart_update(sma, NULL, 0, 0, &tasks); 987 937 err = 0; 988 938 goto out_unlock; 989 939 } 990 940 } 991 941 out_unlock: 992 942 sem_unlock(sma); 943 + wake_up_sem_queue_do(&tasks); 944 + 993 945 out_free: 994 946 if(sem_io != fast_sem_io) 995 947 ipc_free(sem_io, sizeof(ushort)*nsems); ··· 1265 1213 struct sem_queue queue; 1266 1214 unsigned long jiffies_left = 0; 1267 1215 struct ipc_namespace *ns; 1216 + struct list_head tasks; 1268 1217 1269 1218 ns = current->nsproxy->ipc_ns; 1270 1219 ··· 1313 1260 } 1314 1261 } else 1315 1262 un = NULL; 1263 + 1264 + INIT_LIST_HEAD(&tasks); 1316 1265 1317 1266 sma = sem_lock_check(ns, semid); 1318 1267 if (IS_ERR(sma)) { ··· 1364 1309 error = try_atomic_semop (sma, sops, nsops, 
un, task_tgid_vnr(current)); 1365 1310 if (error <= 0) { 1366 1311 if (alter && error == 0) 1367 - do_smart_update(sma, sops, nsops); 1312 + do_smart_update(sma, sops, nsops, 1, &tasks); 1368 1313 1369 1314 goto out_unlock_free; 1370 1315 } ··· 1441 1386 1442 1387 out_unlock_free: 1443 1388 sem_unlock(sma); 1389 + 1390 + wake_up_sem_queue_do(&tasks); 1444 1391 out_free: 1445 1392 if(sops != fast_sops) 1446 1393 kfree(sops); ··· 1503 1446 for (;;) { 1504 1447 struct sem_array *sma; 1505 1448 struct sem_undo *un; 1449 + struct list_head tasks; 1506 1450 int semid; 1507 1451 int i; 1508 1452 ··· 1567 1509 semaphore->sempid = task_tgid_vnr(current); 1568 1510 } 1569 1511 } 1570 - sma->sem_otime = get_seconds(); 1571 1512 /* maybe some queued-up processes were waiting for this */ 1572 - update_queue(sma, -1); 1513 + INIT_LIST_HEAD(&tasks); 1514 + do_smart_update(sma, NULL, 0, 1, &tasks); 1573 1515 sem_unlock(sma); 1516 + wake_up_sem_queue_do(&tasks); 1574 1517 1575 1518 call_rcu(&un->rcu, free_un); 1576 1519 }