Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'sched-fifo-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull sched/fifo updates from Ingo Molnar:
"This adds the sched_set_fifo*() encapsulation APIs to remove static
priority level knowledge from non-scheduler code.

The three APIs for non-scheduler code to set SCHED_FIFO are:

- sched_set_fifo()
- sched_set_fifo_low()
- sched_set_normal()

There are two FIFO priority levels: default (high), and a 'low'
priority level, plus sched_set_normal() to set the policy back to
non-SCHED_FIFO.

Since the changes affect a lot of non-scheduler code, we kept this in
a separate tree"

* tag 'sched-fifo-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
sched,tracing: Convert to sched_set_fifo()
sched: Remove sched_set_*() return value
sched: Remove sched_setscheduler*() EXPORTs
sched,psi: Convert to sched_set_fifo_low()
sched,rcutorture: Convert to sched_set_fifo_low()
sched,rcuperf: Convert to sched_set_fifo_low()
sched,locktorture: Convert to sched_set_fifo()
sched,irq: Convert to sched_set_fifo()
sched,watchdog: Convert to sched_set_fifo()
sched,serial: Convert to sched_set_fifo()
sched,powerclamp: Convert to sched_set_fifo()
sched,ion: Convert to sched_set_normal()
sched,powercap: Convert to sched_set_fifo*()
sched,spi: Convert to sched_set_fifo*()
sched,mmc: Convert to sched_set_fifo*()
sched,ivtv: Convert to sched_set_fifo*()
sched,drm/scheduler: Convert to sched_set_fifo*()
sched,msm: Convert to sched_set_fifo*()
sched,psci: Convert to sched_set_fifo*()
sched,drbd: Convert to sched_set_fifo*()
...

+100 -124
+1 -2
arch/arm/common/bL_switcher.c
··· 270 270 static int bL_switcher_thread(void *arg) 271 271 { 272 272 struct bL_thread *t = arg; 273 - struct sched_param param = { .sched_priority = 1 }; 274 273 int cluster; 275 274 bL_switch_completion_handler completer; 276 275 void *completer_cookie; 277 276 278 - sched_setscheduler_nocheck(current, SCHED_FIFO, &param); 277 + sched_set_fifo_low(current); 279 278 complete(&t->started); 280 279 281 280 do {
+1 -2
crypto/crypto_engine.c
··· 482 482 int (*cbk_do_batch)(struct crypto_engine *engine), 483 483 bool rt, int qlen) 484 484 { 485 - struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; 486 485 struct crypto_engine *engine; 487 486 488 487 if (!dev) ··· 519 520 520 521 if (engine->rt) { 521 522 dev_info(dev, "will run requests pump with realtime priority\n"); 522 - sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param); 523 + sched_set_fifo(engine->kworker->task); 523 524 } 524 525 525 526 return engine;
+1 -2
drivers/acpi/acpi_pad.c
··· 136 136 static unsigned int round_robin_time = 1; /* second */ 137 137 static int power_saving_thread(void *data) 138 138 { 139 - struct sched_param param = {.sched_priority = 1}; 140 139 int do_sleep; 141 140 unsigned int tsk_index = (unsigned long)data; 142 141 u64 last_jiffies = 0; 143 142 144 - sched_setscheduler(current, SCHED_RR, &param); 143 + sched_set_fifo_low(current); 145 144 146 145 while (!kthread_should_stop()) { 147 146 unsigned long expire_time;
+1 -4
drivers/block/drbd/drbd_receiver.c
··· 6019 6019 unsigned int header_size = drbd_header_size(connection); 6020 6020 int expect = header_size; 6021 6021 bool ping_timeout_active = false; 6022 - struct sched_param param = { .sched_priority = 2 }; 6023 6022 6024 - rv = sched_setscheduler(current, SCHED_RR, &param); 6025 - if (rv < 0) 6026 - drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv); 6023 + sched_set_fifo_low(current); 6027 6024 6028 6025 while (get_t_state(thi) == RUNNING) { 6029 6026 drbd_thread_current_set_cpu(thi);
+1 -9
drivers/firmware/psci/psci_checker.c
··· 274 274 { 275 275 int cpu = (long)arg; 276 276 int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0; 277 - struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 }; 278 277 struct cpuidle_device *dev; 279 278 struct cpuidle_driver *drv; 280 279 /* No need for an actual callback, we just want to wake up the CPU. */ ··· 283 284 wait_for_completion(&suspend_threads_started); 284 285 285 286 /* Set maximum priority to preempt all other threads on this CPU. */ 286 - if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority)) 287 - pr_warn("Failed to set suspend thread scheduler on CPU %d\n", 288 - cpu); 287 + sched_set_fifo(current); 289 288 290 289 dev = this_cpu_read(cpuidle_devices); 291 290 drv = cpuidle_get_cpu_driver(dev); ··· 348 351 if (atomic_dec_return_relaxed(&nb_active_threads) == 0) 349 352 complete(&suspend_threads_done); 350 353 351 - /* Give up on RT scheduling and wait for termination. */ 352 - sched_priority.sched_priority = 0; 353 - if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority)) 354 - pr_warn("Failed to set suspend thread scheduler on CPU %d\n", 355 - cpu); 356 354 for (;;) { 357 355 /* Needs to be set first to avoid missing a wakeup. */ 358 356 set_current_state(TASK_INTERRUPTIBLE);
+2 -4
drivers/gpu/drm/drm_vblank_work.c
··· 248 248 249 249 int drm_vblank_worker_init(struct drm_vblank_crtc *vblank) 250 250 { 251 - struct sched_param param = { 252 - .sched_priority = MAX_RT_PRIO - 1, 253 - }; 254 251 struct kthread_worker *worker; 255 252 256 253 INIT_LIST_HEAD(&vblank->pending_work); ··· 260 263 261 264 vblank->worker = worker; 262 265 263 - return sched_setscheduler(vblank->worker->task, SCHED_FIFO, &param); 266 + sched_set_fifo(worker->task); 267 + return 0; 264 268 }
+1 -12
drivers/gpu/drm/msm/msm_drv.c
··· 401 401 struct msm_kms *kms; 402 402 struct msm_mdss *mdss; 403 403 int ret, i; 404 - struct sched_param param; 405 404 406 405 ddev = drm_dev_alloc(drv, dev); 407 406 if (IS_ERR(ddev)) { ··· 506 507 ddev->mode_config.funcs = &mode_config_funcs; 507 508 ddev->mode_config.helper_private = &mode_config_helper_funcs; 508 509 509 - /** 510 - * this priority was found during empiric testing to have appropriate 511 - * realtime scheduling to process display updates and interact with 512 - * other real time and normal priority task 513 - */ 514 - param.sched_priority = 16; 515 510 for (i = 0; i < priv->num_crtcs; i++) { 516 511 /* initialize event thread */ 517 512 priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; ··· 517 524 goto err_msm_uninit; 518 525 } 519 526 520 - ret = sched_setscheduler(priv->event_thread[i].worker->task, 521 - SCHED_FIFO, &param); 522 - if (ret) 523 - dev_warn(dev, "event_thread set priority failed:%d\n", 524 - ret); 527 + sched_set_fifo(priv->event_thread[i].worker->task); 525 528 } 526 529 527 530 ret = drm_vblank_init(ddev, priv->num_crtcs);
+1 -2
drivers/gpu/drm/scheduler/sched_main.c
··· 762 762 */ 763 763 static int drm_sched_main(void *param) 764 764 { 765 - struct sched_param sparam = {.sched_priority = 1}; 766 765 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; 767 766 int r; 768 767 769 - sched_setscheduler(current, SCHED_FIFO, &sparam); 768 + sched_set_fifo_low(current); 770 769 771 770 while (!kthread_should_stop()) { 772 771 struct drm_sched_entity *entity = NULL;
+1 -3
drivers/media/pci/ivtv/ivtv-driver.c
··· 737 737 */ 738 738 static int ivtv_init_struct1(struct ivtv *itv) 739 739 { 740 - struct sched_param param = { .sched_priority = 99 }; 741 - 742 740 itv->base_addr = pci_resource_start(itv->pdev, 0); 743 741 itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ 744 742 itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ ··· 756 758 return -1; 757 759 } 758 760 /* must use the FIFO scheduler as it is realtime sensitive */ 759 - sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param); 761 + sched_set_fifo(itv->irq_worker_task); 760 762 761 763 kthread_init_work(&itv->irq_work, ivtv_irq_work_handler); 762 764
+1 -2
drivers/mmc/core/sdio_irq.c
··· 139 139 static int sdio_irq_thread(void *_host) 140 140 { 141 141 struct mmc_host *host = _host; 142 - struct sched_param param = { .sched_priority = 1 }; 143 142 unsigned long period, idle_period; 144 143 int ret; 145 144 146 - sched_setscheduler(current, SCHED_FIFO, &param); 145 + sched_set_fifo_low(current); 147 146 148 147 /* 149 148 * We want to allow for SDIO cards to work even on non SDIO
+3 -8
drivers/platform/chrome/cros_ec_spi.c
··· 709 709 static int cros_ec_spi_devm_high_pri_alloc(struct device *dev, 710 710 struct cros_ec_spi *ec_spi) 711 711 { 712 - struct sched_param sched_priority = { 713 - .sched_priority = MAX_RT_PRIO / 2, 714 - }; 715 712 int err; 716 713 717 714 ec_spi->high_pri_worker = ··· 725 728 if (err) 726 729 return err; 727 730 728 - err = sched_setscheduler_nocheck(ec_spi->high_pri_worker->task, 729 - SCHED_FIFO, &sched_priority); 730 - if (err) 731 - dev_err(dev, "Can't set cros_ec high pri priority: %d\n", err); 732 - return err; 731 + sched_set_fifo(ec_spi->high_pri_worker->task); 732 + 733 + return 0; 733 734 } 734 735 735 736 static int cros_ec_spi_probe(struct spi_device *spi)
+1 -3
drivers/powercap/idle_inject.c
··· 268 268 */ 269 269 static void idle_inject_setup(unsigned int cpu) 270 270 { 271 - struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 }; 272 - 273 - sched_setscheduler(current, SCHED_FIFO, &param); 271 + sched_set_fifo(current); 274 272 } 275 273 276 274 /**
+1 -3
drivers/spi/spi.c
··· 1626 1626 */ 1627 1627 static void spi_set_thread_rt(struct spi_controller *ctlr) 1628 1628 { 1629 - struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; 1630 - 1631 1629 dev_info(&ctlr->dev, 1632 1630 "will run message pump with realtime priority\n"); 1633 - sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, &param); 1631 + sched_set_fifo(ctlr->kworker->task); 1634 1632 } 1635 1633 1636 1634 static int spi_init_queue(struct spi_controller *ctlr)
+1 -3
drivers/staging/android/ion/ion_heap.c
··· 244 244 245 245 int ion_heap_init_deferred_free(struct ion_heap *heap) 246 246 { 247 - struct sched_param param = { .sched_priority = 0 }; 248 - 249 247 INIT_LIST_HEAD(&heap->free_list); 250 248 init_waitqueue_head(&heap->waitqueue); 251 249 heap->task = kthread_run(ion_heap_deferred_free, heap, ··· 253 255 __func__); 254 256 return PTR_ERR_OR_ZERO(heap->task); 255 257 } 256 - sched_setscheduler(heap->task, SCHED_IDLE, &param); 258 + sched_set_normal(heap->task, 19); 257 259 258 260 return 0; 259 261 }
+1 -4
drivers/thermal/intel/intel_powerclamp.c
··· 70 70 */ 71 71 static bool clamping; 72 72 73 - static const struct sched_param sparam = { 74 - .sched_priority = MAX_USER_RT_PRIO / 2, 75 - }; 76 73 struct powerclamp_worker_data { 77 74 struct kthread_worker *worker; 78 75 struct kthread_work balancing_work; ··· 485 488 w_data->cpu = cpu; 486 489 w_data->clamping = true; 487 490 set_bit(cpu, cpu_clamping_mask); 488 - sched_setscheduler(worker->task, SCHED_FIFO, &sparam); 491 + sched_set_fifo(worker->task); 489 492 kthread_init_work(&w_data->balancing_work, clamp_balancing_func); 490 493 kthread_init_delayed_work(&w_data->idle_injection_work, 491 494 clamp_idle_injection_func);
+1 -2
drivers/tty/serial/sc16is7xx.c
··· 1179 1179 const struct sc16is7xx_devtype *devtype, 1180 1180 struct regmap *regmap, int irq) 1181 1181 { 1182 - struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 }; 1183 1182 unsigned long freq = 0, *pfreq = dev_get_platdata(dev); 1184 1183 unsigned int val; 1185 1184 u32 uartclk = 0; ··· 1238 1239 ret = PTR_ERR(s->kworker_task); 1239 1240 goto out_clk; 1240 1241 } 1241 - sched_setscheduler(s->kworker_task, SCHED_FIFO, &sched_param); 1242 + sched_set_fifo(s->kworker_task); 1242 1243 1243 1244 #ifdef CONFIG_GPIOLIB 1244 1245 if (devtype->nr_gpio) {
+1 -2
drivers/watchdog/watchdog_dev.c
··· 1144 1144 int __init watchdog_dev_init(void) 1145 1145 { 1146 1146 int err; 1147 - struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,}; 1148 1147 1149 1148 watchdog_kworker = kthread_create_worker(0, "watchdogd"); 1150 1149 if (IS_ERR(watchdog_kworker)) { 1151 1150 pr_err("Failed to create watchdog kworker\n"); 1152 1151 return PTR_ERR(watchdog_kworker); 1153 1152 } 1154 - sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param); 1153 + sched_set_fifo(watchdog_kworker->task); 1155 1154 1156 1155 err = class_register(&watchdog_class); 1157 1156 if (err < 0) {
+3
include/linux/sched.h
··· 1648 1648 extern int available_idle_cpu(int cpu); 1649 1649 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 1650 1650 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 1651 + extern void sched_set_fifo(struct task_struct *p); 1652 + extern void sched_set_fifo_low(struct task_struct *p); 1653 + extern void sched_set_normal(struct task_struct *p, int nice); 1651 1654 extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1652 1655 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); 1653 1656 extern struct task_struct *idle_task(int cpu);
+1 -5
kernel/irq/manage.c
··· 1308 1308 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) 1309 1309 { 1310 1310 struct task_struct *t; 1311 - struct sched_param param = { 1312 - .sched_priority = MAX_USER_RT_PRIO/2, 1313 - }; 1314 1311 1315 1312 if (!secondary) { 1316 1313 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, ··· 1315 1318 } else { 1316 1319 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, 1317 1320 new->name); 1318 - param.sched_priority -= 1; 1319 1321 } 1320 1322 1321 1323 if (IS_ERR(t)) 1322 1324 return PTR_ERR(t); 1323 1325 1324 - sched_setscheduler_nocheck(t, SCHED_FIFO, &param); 1326 + sched_set_fifo(t); 1325 1327 1326 1328 /* 1327 1329 * We keep the reference to the task struct even if
+2 -8
kernel/locking/locktorture.c
··· 436 436 437 437 static void torture_rtmutex_boost(struct torture_random_state *trsp) 438 438 { 439 - int policy; 440 - struct sched_param param; 441 439 const unsigned int factor = 50000; /* yes, quite arbitrary */ 442 440 443 441 if (!rt_task(current)) { ··· 446 448 */ 447 449 if (trsp && !(torture_random(trsp) % 448 450 (cxt.nrealwriters_stress * factor))) { 449 - policy = SCHED_FIFO; 450 - param.sched_priority = MAX_RT_PRIO - 1; 451 + sched_set_fifo(current); 451 452 } else /* common case, do nothing */ 452 453 return; 453 454 } else { ··· 459 462 */ 460 463 if (!trsp || !(torture_random(trsp) % 461 464 (cxt.nrealwriters_stress * factor * 2))) { 462 - policy = SCHED_NORMAL; 463 - param.sched_priority = 0; 465 + sched_set_normal(current, 0); 464 466 } else /* common case, do nothing */ 465 467 return; 466 468 } 467 - 468 - sched_setscheduler_nocheck(current, policy, &param); 469 469 } 470 470 471 471 static void torture_rtmutex_delay(struct torture_random_state *trsp)
+2 -6
kernel/rcu/rcuperf.c
··· 361 361 int i_max; 362 362 long me = (long)arg; 363 363 struct rcu_head *rhp = NULL; 364 - struct sched_param sp; 365 364 bool started = false, done = false, alldone = false; 366 365 u64 t; 367 366 u64 *wdp; ··· 369 370 VERBOSE_PERFOUT_STRING("rcu_perf_writer task started"); 370 371 WARN_ON(!wdpp); 371 372 set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); 372 - sp.sched_priority = 1; 373 - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); 373 + sched_set_fifo_low(current); 374 374 375 375 if (holdoff) 376 376 schedule_timeout_uninterruptible(holdoff * HZ); ··· 425 427 started = true; 426 428 if (!done && i >= MIN_MEAS) { 427 429 done = true; 428 - sp.sched_priority = 0; 429 - sched_setscheduler_nocheck(current, 430 - SCHED_NORMAL, &sp); 430 + sched_set_normal(current, 0); 431 431 pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n", 432 432 perf_type, PERF_FLAG, me, MIN_MEAS); 433 433 if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
+1 -6
kernel/rcu/rcutorture.c
··· 895 895 unsigned long endtime; 896 896 unsigned long oldstarttime; 897 897 struct rcu_boost_inflight rbi = { .inflight = 0 }; 898 - struct sched_param sp; 899 898 900 899 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 901 900 902 901 /* Set real-time priority. */ 903 - sp.sched_priority = 1; 904 - if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { 905 - VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); 906 - n_rcu_torture_boost_rterror++; 907 - } 902 + sched_set_fifo_low(current); 908 903 909 904 init_rcu_head_on_stack(&rbi.rcu); 910 905 /* Each pass through the following loop does one boost-test cycle. */
+47 -3
kernel/sched/core.c
··· 5496 5496 * @policy: new policy. 5497 5497 * @param: structure containing the new RT priority. 5498 5498 * 5499 + * Use sched_set_fifo(), read its comment. 5500 + * 5499 5501 * Return: 0 on success. An error code otherwise. 5500 5502 * 5501 5503 * NOTE that the task may be already dead. ··· 5507 5505 { 5508 5506 return _sched_setscheduler(p, policy, param, true); 5509 5507 } 5510 - EXPORT_SYMBOL_GPL(sched_setscheduler); 5511 5508 5512 5509 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 5513 5510 { 5514 5511 return __sched_setscheduler(p, attr, true, true); 5515 5512 } 5516 - EXPORT_SYMBOL_GPL(sched_setattr); 5517 5513 5518 5514 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 5519 5515 { ··· 5536 5536 { 5537 5537 return _sched_setscheduler(p, policy, param, false); 5538 5538 } 5539 - EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 5539 + 5540 + /* 5541 + * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 5542 + * incapable of resource management, which is the one thing an OS really should 5543 + * be doing. 5544 + * 5545 + * This is of course the reason it is limited to privileged users only. 5546 + * 5547 + * Worse still; it is fundamentally impossible to compose static priority 5548 + * workloads. You cannot take two correctly working static prio workloads 5549 + * and smash them together and still expect them to work. 5550 + * 5551 + * For this reason 'all' FIFO tasks the kernel creates are basically at: 5552 + * 5553 + * MAX_RT_PRIO / 2 5554 + * 5555 + * The administrator _MUST_ configure the system, the kernel simply doesn't 5556 + * know enough information to make a sensible choice. 
5557 + */ 5558 + void sched_set_fifo(struct task_struct *p) 5559 + { 5560 + struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 5561 + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 5562 + } 5563 + EXPORT_SYMBOL_GPL(sched_set_fifo); 5564 + 5565 + /* 5566 + * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 5567 + */ 5568 + void sched_set_fifo_low(struct task_struct *p) 5569 + { 5570 + struct sched_param sp = { .sched_priority = 1 }; 5571 + WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 5572 + } 5573 + EXPORT_SYMBOL_GPL(sched_set_fifo_low); 5574 + 5575 + void sched_set_normal(struct task_struct *p, int nice) 5576 + { 5577 + struct sched_attr attr = { 5578 + .sched_policy = SCHED_NORMAL, 5579 + .sched_nice = nice, 5580 + }; 5581 + WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 5582 + } 5583 + EXPORT_SYMBOL_GPL(sched_set_normal); 5540 5584 5541 5585 static int 5542 5586 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+1 -4
kernel/sched/psi.c
··· 616 616 static int psi_poll_worker(void *data) 617 617 { 618 618 struct psi_group *group = (struct psi_group *)data; 619 - struct sched_param param = { 620 - .sched_priority = 1, 621 - }; 622 619 623 - sched_setscheduler_nocheck(current, SCHED_FIFO, &param); 620 + sched_set_fifo_low(current); 624 621 625 622 while (true) { 626 623 wait_event_interruptible(group->poll_wait,
+23 -25
kernel/trace/ring_buffer_benchmark.c
··· 45 45 static int producer_nice = MAX_NICE; 46 46 static int consumer_nice = MAX_NICE; 47 47 48 - static int producer_fifo = -1; 49 - static int consumer_fifo = -1; 48 + static int producer_fifo; 49 + static int consumer_fifo; 50 50 51 51 module_param(producer_nice, int, 0644); 52 52 MODULE_PARM_DESC(producer_nice, "nice prio for producer"); ··· 55 55 MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); 56 56 57 57 module_param(producer_fifo, int, 0644); 58 - MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); 58 + MODULE_PARM_DESC(producer_fifo, "use fifo for producer: 0 - disabled, 1 - low prio, 2 - fifo"); 59 59 60 60 module_param(consumer_fifo, int, 0644); 61 - MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); 61 + MODULE_PARM_DESC(consumer_fifo, "use fifo for consumer: 0 - disabled, 1 - low prio, 2 - fifo"); 62 62 63 63 static int read_events; 64 64 ··· 303 303 trace_printk("ERROR!\n"); 304 304 305 305 if (!disable_reader) { 306 - if (consumer_fifo < 0) 306 + if (consumer_fifo) 307 + trace_printk("Running Consumer at SCHED_FIFO %s\n", 308 + consumer_fifo == 1 ? "low" : "high"); 309 + else 307 310 trace_printk("Running Consumer at nice: %d\n", 308 311 consumer_nice); 309 - else 310 - trace_printk("Running Consumer at SCHED_FIFO %d\n", 311 - consumer_fifo); 312 312 } 313 - if (producer_fifo < 0) 313 + if (producer_fifo) 314 + trace_printk("Running Producer at SCHED_FIFO %s\n", 315 + producer_fifo == 1 ? "low" : "high"); 316 + else 314 317 trace_printk("Running Producer at nice: %d\n", 315 318 producer_nice); 316 - else 317 - trace_printk("Running Producer at SCHED_FIFO %d\n", 318 - producer_fifo); 319 319 320 320 /* Let the user know that the test is running at low priority */ 321 - if (producer_fifo < 0 && consumer_fifo < 0 && 321 + if (!producer_fifo && !consumer_fifo && 322 322 producer_nice == MAX_NICE && consumer_nice == MAX_NICE) 323 323 trace_printk("WARNING!!! 
This test is running at lowest priority.\n"); 324 324 ··· 455 455 * Run them as low-prio background tasks by default: 456 456 */ 457 457 if (!disable_reader) { 458 - if (consumer_fifo >= 0) { 459 - struct sched_param param = { 460 - .sched_priority = consumer_fifo 461 - }; 462 - sched_setscheduler(consumer, SCHED_FIFO, &param); 463 - } else 458 + if (consumer_fifo >= 2) 459 + sched_set_fifo(consumer); 460 + else if (consumer_fifo == 1) 461 + sched_set_fifo_low(consumer); 462 + else 464 463 set_user_nice(consumer, consumer_nice); 465 464 } 466 465 467 - if (producer_fifo >= 0) { 468 - struct sched_param param = { 469 - .sched_priority = producer_fifo 470 - }; 471 - sched_setscheduler(producer, SCHED_FIFO, &param); 472 - } else 466 + if (producer_fifo >= 2) 467 + sched_set_fifo(producer); 468 + else if (producer_fifo == 1) 469 + sched_set_fifo_low(producer); 470 + else 473 471 set_user_nice(producer, producer_nice); 474 472 475 473 return 0;