Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
rcu: Add missing __cpuinit annotation in rcutorture code
sched: Add "const" to is_idle_task() parameter
rcu: Make rcutorture bool parameters really bool (core code)
memblock: Fix alloc failure due to dumb underflow protection in memblock_find_in_range_node()

+10 -7
+1 -1
include/linux/sched.h
@@ -2090,7 +2090,7 @@
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
  */
-static inline bool is_idle_task(struct task_struct *p)
+static inline bool is_idle_task(const struct task_struct *p)
 {
 	return p->pid == 0;
 }
+4 -4
kernel/rcutorture.c
@@ -56,8 +56,8 @@
 static int nfakewriters = 4;	/* # fake writer threads */
 static int stat_interval;	/* Interval between stats, in seconds. */
 				/*  Defaults to "only at end of test". */
-static int verbose;		/* Print more debug info. */
-static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
+static bool verbose;		/* Print more debug info. */
+static bool test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;		/* Start/stop testing interval (in sec) */
 static int irqreader = 1;	/* RCU readers from irq (timers). */
@@ -1399,7 +1399,7 @@
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int
+static int __cpuinit
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1447,7 +1447,7 @@
 	return 0;
 }
 
-static int
+static int __cpuinit
 rcu_torture_onoff_init(void)
 {
 	if (onoff_interval <= 0)
+5 -2
mm/memblock.c
··· 106 106 if (end == MEMBLOCK_ALLOC_ACCESSIBLE) 107 107 end = memblock.current_limit; 108 108 109 - /* adjust @start to avoid underflow and allocating the first page */ 110 - start = max3(start, size, (phys_addr_t)PAGE_SIZE); 109 + /* avoid allocating the first page */ 110 + start = max_t(phys_addr_t, start, PAGE_SIZE); 111 111 end = max(start, end); 112 112 113 113 for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) { 114 114 this_start = clamp(this_start, start, end); 115 115 this_end = clamp(this_end, start, end); 116 + 117 + if (this_end < size) 118 + continue; 116 119 117 120 cand = round_down(this_end - size, align); 118 121 if (cand >= this_start)