Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'idle.2013.09.25a' into HEAD

idle.2013.09.25a: Topic branch for idle entry-/exit-related changes.

+72 -36
+12 -12
include/linux/rcupdate.h
··· 261 261 rcu_irq_exit(); \ 262 262 } while (0) 263 263 264 + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 265 + extern bool __rcu_is_watching(void); 266 + #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 267 + 264 268 /* 265 269 * Infrastructure to implement the synchronize_() primitives in 266 270 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. ··· 300 296 { 301 297 } 302 298 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 303 - 304 - #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) 305 - extern int rcu_is_cpu_idle(void); 306 - #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */ 307 299 308 300 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) 309 301 bool rcu_lockdep_current_cpu_online(void); ··· 351 351 { 352 352 if (!debug_lockdep_rcu_enabled()) 353 353 return 1; 354 - if (rcu_is_cpu_idle()) 354 + if (!rcu_is_watching()) 355 355 return 0; 356 356 if (!rcu_lockdep_current_cpu_online()) 357 357 return 0; ··· 402 402 403 403 if (!debug_lockdep_rcu_enabled()) 404 404 return 1; 405 - if (rcu_is_cpu_idle()) 405 + if (!rcu_is_watching()) 406 406 return 0; 407 407 if (!rcu_lockdep_current_cpu_online()) 408 408 return 0; ··· 771 771 __rcu_read_lock(); 772 772 __acquire(RCU); 773 773 rcu_lock_acquire(&rcu_lock_map); 774 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 774 + rcu_lockdep_assert(rcu_is_watching(), 775 775 "rcu_read_lock() used illegally while idle"); 776 776 } 777 777 ··· 792 792 */ 793 793 static inline void rcu_read_unlock(void) 794 794 { 795 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 795 + rcu_lockdep_assert(rcu_is_watching(), 796 796 "rcu_read_unlock() used illegally while idle"); 797 797 rcu_lock_release(&rcu_lock_map); 798 798 __release(RCU); ··· 821 821 local_bh_disable(); 822 822 __acquire(RCU_BH); 823 823 rcu_lock_acquire(&rcu_bh_lock_map); 824 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 824 + rcu_lockdep_assert(rcu_is_watching(), 825 825 "rcu_read_lock_bh() used illegally while idle"); 826 826 } 827 827 ··· 832 832 */ 833 833 static inline void rcu_read_unlock_bh(void) 834 834 { 835 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 835 + rcu_lockdep_assert(rcu_is_watching(), 836 836 "rcu_read_unlock_bh() used illegally while idle"); 837 837 rcu_lock_release(&rcu_bh_lock_map); 838 838 __release(RCU_BH); ··· 857 857 preempt_disable(); 858 858 __acquire(RCU_SCHED); 859 859 rcu_lock_acquire(&rcu_sched_lock_map); 860 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 860 + rcu_lockdep_assert(rcu_is_watching(), 861 861 "rcu_read_lock_sched() used illegally while idle"); 862 862 } 863 863 ··· 875 875 */ 876 876 static inline void rcu_read_unlock_sched(void) 877 877 { 878 - rcu_lockdep_assert(!rcu_is_cpu_idle(), 878 + rcu_lockdep_assert(rcu_is_watching(), 879 879 "rcu_read_unlock_sched() used illegally while idle"); 880 880 rcu_lock_release(&rcu_sched_lock_map); 881 881 __release(RCU_SCHED);
+17
include/linux/rcutiny.h
··· 132 132 } 133 133 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 134 134 135 + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) 136 + 137 + static inline bool rcu_is_watching(void) 138 + { 139 + return __rcu_is_watching(); 140 + } 141 + 142 + #else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 143 + 144 + static inline bool rcu_is_watching(void) 145 + { 146 + return true; 147 + } 148 + 149 + 150 + #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 151 + 135 152 #endif /* __LINUX_RCUTINY_H */
+2
include/linux/rcutree.h
··· 90 90 extern void rcu_scheduler_starting(void); 91 91 extern int rcu_scheduler_active __read_mostly; 92 92 93 + extern bool rcu_is_watching(void); 94 + 93 95 #endif /* __LINUX_RCUTREE_H */
+2 -2
kernel/lockdep.c
··· 4224 4224 printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 4225 4225 !rcu_lockdep_current_cpu_online() 4226 4226 ? "RCU used illegally from offline CPU!\n" 4227 - : rcu_is_cpu_idle() 4227 + : !rcu_is_watching() 4228 4228 ? "RCU used illegally from idle CPU!\n" 4229 4229 : "", 4230 4230 rcu_scheduler_active, debug_locks); ··· 4247 4247 * So complain bitterly if someone does call rcu_read_lock(), 4248 4248 * rcu_read_lock_bh() and so on from extended quiescent states. 4249 4249 */ 4250 - if (rcu_is_cpu_idle()) 4250 + if (!rcu_is_watching()) 4251 4251 printk("RCU used illegally from extended quiescent state!\n"); 4252 4252 4253 4253 lockdep_print_held_locks(curr);
+1 -1
kernel/rcupdate.c
··· 148 148 { 149 149 if (!debug_lockdep_rcu_enabled()) 150 150 return 1; 151 - if (rcu_is_cpu_idle()) 151 + if (!rcu_is_watching()) 152 152 return 0; 153 153 if (!rcu_lockdep_current_cpu_online()) 154 154 return 0;
+5 -5
kernel/rcutiny.c
··· 176 176 } 177 177 EXPORT_SYMBOL_GPL(rcu_irq_enter); 178 178 179 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 179 + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) 180 180 181 181 /* 182 182 * Test whether RCU thinks that the current CPU is idle. 183 183 */ 184 - int rcu_is_cpu_idle(void) 184 + bool __rcu_is_watching(void) 185 185 { 186 - return !rcu_dynticks_nesting; 186 + return rcu_dynticks_nesting; 187 187 } 188 - EXPORT_SYMBOL(rcu_is_cpu_idle); 188 + EXPORT_SYMBOL(__rcu_is_watching); 189 189 190 - #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 190 + #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ 191 191 192 192 /* 193 193 * Test whether the current CPU was interrupted from idle. Nested
+18 -5
kernel/rcutree.c
··· 650 650 } 651 651 652 652 /** 653 - * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle 653 + * __rcu_is_watching - are RCU read-side critical sections safe? 654 + * 655 + * Return true if RCU is watching the running CPU, which means that 656 + * this CPU can safely enter RCU read-side critical sections. Unlike 657 + * rcu_is_watching(), the caller of __rcu_is_watching() must have at 658 + * least disabled preemption. 659 + */ 660 + bool __rcu_is_watching(void) 661 + { 662 + return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; 663 + } 664 + 665 + /** 666 + * rcu_is_watching - see if RCU thinks that the current CPU is idle 654 667 * 655 668 * If the current CPU is in its idle loop and is neither in an interrupt 656 669 * or NMI handler, return true. 657 670 */ 658 - int rcu_is_cpu_idle(void) 671 + bool rcu_is_watching(void) 659 672 { 660 673 int ret; 661 674 662 675 preempt_disable(); 663 - ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0; 676 + ret = __rcu_is_watching(); 664 677 preempt_enable(); 665 678 return ret; 666 679 } 667 - EXPORT_SYMBOL(rcu_is_cpu_idle); 680 + EXPORT_SYMBOL_GPL(rcu_is_watching); 668 681 669 682 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 670 683 ··· 2334 2321 * If called from an extended quiescent state, invoke the RCU 2335 2322 * core in order to force a re-evaluation of RCU's idleness. 2336 2323 */ 2337 - if (rcu_is_cpu_idle() && cpu_online(smp_processor_id())) 2324 + if (!rcu_is_watching() && cpu_online(smp_processor_id())) 2338 2325 invoke_rcu_core(); 2339 2326 2340 2327 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+2
kernel/rcutree.h
··· 104 104 /* idle-period nonlazy_posted snapshot. */ 105 105 unsigned long last_accelerate; 106 106 /* Last jiffy CBs were accelerated. */ 107 + unsigned long last_advance_all; 108 + /* Last jiffy CBs were all advanced. */ 107 109 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ 108 110 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 109 111 };
+13 -11
kernel/rcutree_plugin.h
··· 1635 1635 extern int tick_nohz_enabled; 1636 1636 1637 1637 /* 1638 - * Try to advance callbacks for all flavors of RCU on the current CPU. 1639 - * Afterwards, if there are any callbacks ready for immediate invocation, 1640 - * return true. 1638 + * Try to advance callbacks for all flavors of RCU on the current CPU, but 1639 + * only if it has been awhile since the last time we did so. Afterwards, 1640 + * if there are any callbacks ready for immediate invocation, return true. 1641 1641 */ 1642 1642 static bool rcu_try_advance_all_cbs(void) 1643 1643 { 1644 1644 bool cbs_ready = false; 1645 1645 struct rcu_data *rdp; 1646 + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 1646 1647 struct rcu_node *rnp; 1647 1648 struct rcu_state *rsp; 1649 + 1650 + /* Exit early if we advanced recently. */ 1651 + if (jiffies == rdtp->last_advance_all) 1652 + return 0; 1653 + rdtp->last_advance_all = jiffies; 1648 1654 1649 1655 for_each_rcu_flavor(rsp) { 1650 1656 rdp = this_cpu_ptr(rsp->rda); ··· 1750 1744 */ 1751 1745 if (rdtp->all_lazy && 1752 1746 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) { 1747 + rdtp->all_lazy = false; 1748 + rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; 1753 1749 invoke_rcu_core(); 1754 1750 return; 1755 1751 } ··· 1781 1773 */ 1782 1774 static void rcu_cleanup_after_idle(int cpu) 1783 1775 { 1784 - struct rcu_data *rdp; 1785 - struct rcu_state *rsp; 1786 1776 1787 1777 if (rcu_is_nocb_cpu(cpu)) 1788 1778 return; 1789 - rcu_try_advance_all_cbs(); 1790 - for_each_rcu_flavor(rsp) { 1791 - rdp = per_cpu_ptr(rsp->rda, cpu); 1792 - if (cpu_has_callbacks_ready_to_invoke(rdp)) 1793 - invoke_rcu_core(); 1794 - } 1779 + if (rcu_try_advance_all_cbs()) 1780 + invoke_rcu_core(); 1795 1781 } 1796 1782 1797 1783 /*