Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.11-rc8, 491 lines, 13 kB
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
        COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
        COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
        int migrate_type = -1;
        int cpu;

        if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
                pr_warn("Missing PSCI operations, aborting tests\n");
                return -EOPNOTSUPP;
        }

        if (psci_ops.migrate_info_type)
                migrate_type = psci_ops.migrate_info_type();

        if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
            migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
                /* There is a UP Trusted OS, find on which core it resides. */
                for_each_online_cpu(cpu)
                        if (psci_tos_resident_on(cpu)) {
                                tos_resident_cpu = cpu;
                                break;
                        }
                if (tos_resident_cpu == -1)
                        pr_warn("UP Trusted OS resides on no online CPU\n");
        }

        return 0;
}

static int find_clusters(const struct cpumask *cpus,
                         const struct cpumask **clusters)
{
        unsigned int nb = 0;
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(tmp, cpus);

        while (!cpumask_empty(tmp)) {
                const struct cpumask *cluster =
                        topology_core_cpumask(cpumask_any(tmp));

                clusters[nb++] = cluster;
                cpumask_andnot(tmp, tmp, cluster);
        }

        free_cpumask_var(tmp);
        return nb;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
                                     struct cpumask *offlined_cpus)
{
        int cpu;
        int err = 0;

        cpumask_clear(offlined_cpus);

        /* Try to power down all CPUs in the mask. */
        for_each_cpu(cpu, cpus) {
                int ret = cpu_down(cpu);

                /*
                 * cpu_down() checks the number of online CPUs before the TOS
                 * resident CPU.
                 */
                if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
                        if (ret != -EBUSY) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down last online CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (cpu == tos_resident_cpu) {
                        if (ret != -EPERM) {
                                pr_err("Unexpected return code %d while trying "
                                       "to power down TOS resident CPU %d\n",
                                       ret, cpu);
                                ++err;
                        }
                } else if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power down CPU %d\n", ret, cpu);
                        ++err;
                }

                if (ret == 0)
                        cpumask_set_cpu(cpu, offlined_cpus);
        }

        /* Try to power up all the CPUs that have been offlined. */
        for_each_cpu(cpu, offlined_cpus) {
                int ret = cpu_up(cpu);

                if (ret != 0) {
                        pr_err("Error occurred (%d) while trying "
                               "to power up CPU %d\n", ret, cpu);
                        ++err;
                } else {
                        cpumask_clear_cpu(cpu, offlined_cpus);
                }
        }

        /*
         * Something went bad at some point and some CPUs could not be turned
         * back on.
         */
        WARN_ON(!cpumask_empty(offlined_cpus) ||
                num_online_cpus() != nb_available_cpus);

        return err;
}

static int hotplug_tests(void)
{
        int err;
        cpumask_var_t offlined_cpus;
        int i, nb_cluster;
        const struct cpumask **clusters;
        char *page_buf;

        err = -ENOMEM;
        if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
                return err;
        /* We may have up to nb_available_cpus clusters. */
        clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
                                 GFP_KERNEL);
        if (!clusters)
                goto out_free_cpus;
        page_buf = (char *)__get_free_page(GFP_KERNEL);
        if (!page_buf)
                goto out_free_clusters;

        err = 0;
        nb_cluster = find_clusters(cpu_online_mask, clusters);

        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
         * refuse doing that.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
        err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

        /*
         * Take down CPUs by cluster this time. When the last CPU is turned
         * off, the cluster itself should shut down.
         */
        for (i = 0; i < nb_cluster; ++i) {
                int cluster_id =
                        topology_physical_package_id(cpumask_any(clusters[i]));
                ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
                                                      clusters[i]);
                /* Remove trailing newline. */
                page_buf[len - 1] = '\0';
                pr_info("Trying to turn off and on again cluster %d "
                        "(CPUs %s)\n", cluster_id, page_buf);
                err += down_and_up_cpus(clusters[i], offlined_cpus);
        }

        free_page((unsigned long)page_buf);
out_free_clusters:
        kfree(clusters);
out_free_cpus:
        free_cpumask_var(offlined_cpus);
        return err;
}

static void dummy_callback(unsigned long ignored) {}

static int suspend_cpu(int index, bool broadcast)
{
        int ret;

        arch_cpu_idle_enter();

        if (broadcast) {
                /*
                 * The local timer will be shut down, we need to enter tick
                 * broadcast.
                 */
                ret = tick_broadcast_enter();
                if (ret) {
                        /*
                         * In the absence of hardware broadcast mechanism,
                         * this CPU might be used to broadcast wakeups, which
                         * may be why entering tick broadcast has failed.
                         * There is little the kernel can do to work around
                         * that, so enter WFI instead (idle state 0).
                         */
                        cpu_do_idle();
                        ret = 0;
                        goto out_arch_exit;
                }
        }

        /*
         * Replicate the common ARM cpuidle enter function
         * (arm_enter_idle_state).
         */
        ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);

        if (broadcast)
                tick_broadcast_exit();

out_arch_exit:
        arch_cpu_idle_exit();

        return ret;
}

static int suspend_test_thread(void *arg)
{
        int cpu = (long)arg;
        int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
        struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
        struct cpuidle_device *dev;
        struct cpuidle_driver *drv;
        /* No need for an actual callback, we just want to wake up the CPU. */
        struct timer_list wakeup_timer;

        /* Wait for the main thread to give the start signal. */
        wait_for_completion(&suspend_threads_started);

        /* Set maximum priority to preempt all other threads on this CPU. */
        if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);

        dev = this_cpu_read(cpuidle_devices);
        drv = cpuidle_get_cpu_driver(dev);

        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);

        setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
                 * Test all possible states, except 0 (which is usually WFI and
                 * doesn't use PSCI).
                 */
                for (index = 1; index < drv->state_count; ++index) {
                        struct cpuidle_state *state = &drv->states[index];
                        bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
                        int ret;

                        /*
                         * Set the timer to wake this CPU up in some time (which
                         * should be largely sufficient for entering suspend).
                         * If the local tick is disabled when entering suspend,
                         * suspend_cpu() takes care of switching to a broadcast
                         * tick, so the timer will still wake us up.
                         */
                        mod_timer(&wakeup_timer, jiffies +
                                  usecs_to_jiffies(state->target_residency));

                        /* IRQs must be disabled during suspend operations. */
                        local_irq_disable();

                        ret = suspend_cpu(index, broadcast);

                        /*
                         * We have woken up. Re-enable IRQs to handle any
                         * pending interrupt, do not wait until the end of the
                         * loop.
                         */
                        local_irq_enable();

                        if (ret == index) {
                                ++nb_suspend;
                        } else if (ret >= 0) {
                                /* We did not enter the expected state. */
                                ++nb_shallow_sleep;
                        } else {
                                pr_err("Failed to suspend CPU %d: error %d "
                                       "(requested state %d, cycle %d)\n",
                                       cpu, ret, index, i);
                                ++nb_err;
                        }
                }
        }

        /*
         * Disable the timer to make sure that the timer will not trigger
         * later.
         */
        del_timer(&wakeup_timer);

        if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
                complete(&suspend_threads_done);

        /* Give up on RT scheduling and wait for termination. */
        sched_priority.sched_priority = 0;
        if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
                pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
                        cpu);
        for (;;) {
                /* Needs to be set first to avoid missing a wakeup. */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule();
        }

        pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
                cpu, nb_suspend, nb_shallow_sleep, nb_err);

        return nb_err;
}

static int suspend_tests(void)
{
        int i, cpu, err = 0;
        struct task_struct **threads;
        int nb_threads = 0;

        threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
                                GFP_KERNEL);
        if (!threads)
                return -ENOMEM;

        /*
         * Stop cpuidle to prevent the idle tasks from entering a deep sleep
         * mode, as it might interfere with the suspend threads on other CPUs.
         * This does not prevent the suspend threads from using cpuidle (only
         * the idle tasks check this status). Take the idle lock so that
         * the cpuidle driver and device look-up can be carried out safely.
         */
        cpuidle_pause_and_lock();

        for_each_online_cpu(cpu) {
                struct task_struct *thread;
                /* Check that cpuidle is available on that CPU. */
                struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
                struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

                if (!dev || !drv) {
                        pr_warn("cpuidle not available on CPU %d, ignoring\n",
                                cpu);
                        continue;
                }

                thread = kthread_create_on_cpu(suspend_test_thread,
                                               (void *)(long)cpu, cpu,
                                               "psci_suspend_test");
                if (IS_ERR(thread))
                        pr_err("Failed to create kthread on CPU %d\n", cpu);
                else
                        threads[nb_threads++] = thread;
        }

        if (nb_threads < 1) {
                err = -ENODEV;
                goto out;
        }

        atomic_set(&nb_active_threads, nb_threads);

        /*
         * Wake up the suspend threads. To avoid the main thread being
         * preempted before all the threads have been unparked, the suspend
         * threads will wait for the completion of suspend_threads_started.
         */
        for (i = 0; i < nb_threads; ++i)
                wake_up_process(threads[i]);
        complete_all(&suspend_threads_started);

        wait_for_completion(&suspend_threads_done);

        /* Stop and destroy all threads, get return status. */
        for (i = 0; i < nb_threads; ++i)
                err += kthread_stop(threads[i]);
out:
        cpuidle_resume_and_unlock();
        kfree(threads);
        return err;
}

static int __init psci_checker(void)
{
        int ret;

        /*
         * Since we're in an initcall, we assume that all the CPUs that can
         * be onlined have been onlined.
         *
         * The tests assume that hotplug is enabled but nobody else is using
         * it, otherwise the results will be unpredictable. However, since
         * there is no userspace yet in initcalls, that should be fine, as
         * long as no torture test is running at the same time (see Kconfig).
         */
        nb_available_cpus = num_online_cpus();

        /* Check PSCI operations are set up and working. */
        ret = psci_ops_check();
        if (ret)
                return ret;

        pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

        pr_info("Starting hotplug tests\n");
        ret = hotplug_tests();
        if (ret == 0)
                pr_info("Hotplug tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in hotplug tests\n", ret);
        else {
                pr_err("Out of memory\n");
                return ret;
        }

        pr_info("Starting suspend tests (%d cycles per state)\n",
                NUM_SUSPEND_CYCLE);
        ret = suspend_tests();
        if (ret == 0)
                pr_info("Suspend tests passed OK\n");
        else if (ret > 0)
                pr_err("%d error(s) encountered in suspend tests\n", ret);
        else {
                switch (ret) {
                case -ENOMEM:
                        pr_err("Out of memory\n");
                        break;
                case -ENODEV:
                        pr_warn("Could not start suspend tests on any CPU\n");
                        break;
                }
        }

        pr_info("PSCI checker completed\n");
        return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);
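
Note on the wakeup timer: the setup_timer_on_stack()/dummy_callback(unsigned long) pair above is the on-stack timer API as it stood at v4.11. Later kernels (around v4.15) switched on-stack timers to timer_setup_on_stack(), whose callback receives the timer itself. Purely as a hedged sketch of that same wake-the-CPU pattern on a newer kernel (the helper names sketch_wakeup_callback and sketch_arm_wakeup are illustrative and not part of this file):

#include <linux/jiffies.h>
#include <linux/timer.h>

/* The callback still does nothing: arming the timer is enough to wake the
 * CPU out of the idle state, exactly as in suspend_test_thread() above. */
static void sketch_wakeup_callback(struct timer_list *unused)
{
}

/* Illustrative helper: arm an on-stack wakeup timer, enter the idle state,
 * then tear the timer down. */
static void sketch_arm_wakeup(unsigned int target_residency_us)
{
        struct timer_list wakeup_timer;

        timer_setup_on_stack(&wakeup_timer, sketch_wakeup_callback, 0);
        mod_timer(&wakeup_timer,
                  jiffies + usecs_to_jiffies(target_residency_us));

        /* ... call suspend_cpu() here, as the test loop above does ... */

        del_timer(&wakeup_timer);
        destroy_timer_on_stack(&wakeup_timer);
}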