Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/cputime: Convert vtime_seqlock to seqcount

The cputime can only be updated by the current task itself, even in
vtime case. So we can safely use seqcount instead of seqlock as there
is no writer concurrency involved.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1447948054-28668-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Frederic Weisbecker and committed by Ingo Molnar
b7ce2277 e5925394

+27 -25
+1 -1
include/linux/init_task.h
··· 150 150 151 151 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 152 152 # define INIT_VTIME(tsk) \ 153 - .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ 153 + .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ 154 154 .vtime_snap = 0, \ 155 155 .vtime_snap_whence = VTIME_SYS, 156 156 #else
+1 -1
include/linux/sched.h
··· 1519 1519 cputime_t gtime; 1520 1520 struct prev_cputime prev_cputime; 1521 1521 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1522 - seqlock_t vtime_seqlock; 1522 + seqcount_t vtime_seqcount; 1523 1523 unsigned long long vtime_snap; 1524 1524 enum { 1525 1525 /* Task is sleeping or running in a CPU with VTIME inactive */
+1 -1
kernel/fork.c
··· 1348 1348 prev_cputime_init(&p->prev_cputime); 1349 1349 1350 1350 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1351 - seqlock_init(&p->vtime_seqlock); 1351 + seqcount_init(&p->vtime_seqcount); 1352 1352 p->vtime_snap = 0; 1353 1353 p->vtime_snap_whence = VTIME_INACTIVE; 1354 1354 #endif
+24 -22
kernel/sched/cputime.c
··· 696 696 697 697 void vtime_account_system(struct task_struct *tsk) 698 698 { 699 - write_seqlock(&tsk->vtime_seqlock); 699 + write_seqcount_begin(&tsk->vtime_seqcount); 700 700 __vtime_account_system(tsk); 701 - write_sequnlock(&tsk->vtime_seqlock); 701 + write_seqcount_end(&tsk->vtime_seqcount); 702 702 } 703 703 704 704 void vtime_gen_account_irq_exit(struct task_struct *tsk) 705 705 { 706 - write_seqlock(&tsk->vtime_seqlock); 706 + write_seqcount_begin(&tsk->vtime_seqcount); 707 707 __vtime_account_system(tsk); 708 708 if (context_tracking_in_user()) 709 709 tsk->vtime_snap_whence = VTIME_USER; 710 - write_sequnlock(&tsk->vtime_seqlock); 710 + write_seqcount_end(&tsk->vtime_seqcount); 711 711 } 712 712 713 713 void vtime_account_user(struct task_struct *tsk) 714 714 { 715 715 cputime_t delta_cpu; 716 716 717 - write_seqlock(&tsk->vtime_seqlock); 717 + write_seqcount_begin(&tsk->vtime_seqcount); 718 718 delta_cpu = get_vtime_delta(tsk); 719 719 tsk->vtime_snap_whence = VTIME_SYS; 720 720 account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); 721 - write_sequnlock(&tsk->vtime_seqlock); 721 + write_seqcount_end(&tsk->vtime_seqcount); 722 722 } 723 723 724 724 void vtime_user_enter(struct task_struct *tsk) 725 725 { 726 - write_seqlock(&tsk->vtime_seqlock); 726 + write_seqcount_begin(&tsk->vtime_seqcount); 727 727 __vtime_account_system(tsk); 728 728 tsk->vtime_snap_whence = VTIME_USER; 729 - write_sequnlock(&tsk->vtime_seqlock); 729 + write_seqcount_end(&tsk->vtime_seqcount); 730 730 } 731 731 732 732 void vtime_guest_enter(struct task_struct *tsk) ··· 738 738 * synchronization against the reader (task_gtime()) 739 739 * that can thus safely catch up with a tickless delta. 
740 740 */ 741 - write_seqlock(&tsk->vtime_seqlock); 741 + write_seqcount_begin(&tsk->vtime_seqcount); 742 742 __vtime_account_system(tsk); 743 743 current->flags |= PF_VCPU; 744 - write_sequnlock(&tsk->vtime_seqlock); 744 + write_seqcount_end(&tsk->vtime_seqcount); 745 745 } 746 746 EXPORT_SYMBOL_GPL(vtime_guest_enter); 747 747 748 748 void vtime_guest_exit(struct task_struct *tsk) 749 749 { 750 - write_seqlock(&tsk->vtime_seqlock); 750 + write_seqcount_begin(&tsk->vtime_seqcount); 751 751 __vtime_account_system(tsk); 752 752 current->flags &= ~PF_VCPU; 753 - write_sequnlock(&tsk->vtime_seqlock); 753 + write_seqcount_end(&tsk->vtime_seqcount); 754 754 } 755 755 EXPORT_SYMBOL_GPL(vtime_guest_exit); 756 756 ··· 763 763 764 764 void arch_vtime_task_switch(struct task_struct *prev) 765 765 { 766 - write_seqlock(&prev->vtime_seqlock); 766 + write_seqcount_begin(&prev->vtime_seqcount); 767 767 prev->vtime_snap_whence = VTIME_INACTIVE; 768 - write_sequnlock(&prev->vtime_seqlock); 768 + write_seqcount_end(&prev->vtime_seqcount); 769 769 770 - write_seqlock(&current->vtime_seqlock); 770 + write_seqcount_begin(&current->vtime_seqcount); 771 771 current->vtime_snap_whence = VTIME_SYS; 772 772 current->vtime_snap = sched_clock_cpu(smp_processor_id()); 773 - write_sequnlock(&current->vtime_seqlock); 773 + write_seqcount_end(&current->vtime_seqcount); 774 774 } 775 775 776 776 void vtime_init_idle(struct task_struct *t, int cpu) 777 777 { 778 778 unsigned long flags; 779 779 780 - write_seqlock_irqsave(&t->vtime_seqlock, flags); 780 + local_irq_save(flags); 781 + write_seqcount_begin(&t->vtime_seqcount); 781 782 t->vtime_snap_whence = VTIME_SYS; 782 783 t->vtime_snap = sched_clock_cpu(cpu); 783 - write_sequnlock_irqrestore(&t->vtime_seqlock, flags); 784 + write_seqcount_end(&t->vtime_seqcount); 785 + local_irq_restore(flags); 784 786 } 785 787 786 788 cputime_t task_gtime(struct task_struct *t) ··· 794 792 return t->gtime; 795 793 796 794 do { 797 - seq = 
read_seqbegin(&t->vtime_seqlock); 795 + seq = read_seqcount_begin(&t->vtime_seqcount); 798 796 799 797 gtime = t->gtime; 800 798 if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU) 801 799 gtime += vtime_delta(t); 802 800 803 - } while (read_seqretry(&t->vtime_seqlock, seq)); 801 + } while (read_seqcount_retry(&t->vtime_seqcount, seq)); 804 802 805 803 return gtime; 806 804 } ··· 823 821 *udelta = 0; 824 822 *sdelta = 0; 825 823 826 - seq = read_seqbegin(&t->vtime_seqlock); 824 + seq = read_seqcount_begin(&t->vtime_seqcount); 827 825 828 826 if (u_dst) 829 827 *u_dst = *u_src; ··· 847 845 if (t->vtime_snap_whence == VTIME_SYS) 848 846 *sdelta = delta; 849 847 } 850 - } while (read_seqretry(&t->vtime_seqlock, seq)); 848 + } while (read_seqcount_retry(&t->vtime_seqcount, seq)); 851 849 } 852 850 853 851