Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] hibernate: directly trigger subchannel evaluation

Using the generic css_schedule_eval to evaluate subchannels
while resuming from hibernation is very slow when used with
many devices. Provide a new evaluation trigger which exploits
css_sched_sch_todo and use this in the resume callback for
ccw devices.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Sebastian Ott and committed by Martin Schwidefsky.
817e5000 cfc9066b

+66 -47
+5
drivers/s390/cio/cio.h
··· 68 68 __u8 mda[4]; /* model dependent area */ 69 69 } __attribute__ ((packed,aligned(4))); 70 70 71 + /* 72 + * When rescheduled, todo's with higher values will overwrite those 73 + * with lower values. 74 + */ 71 75 enum sch_todo { 72 76 SCH_TODO_NOTHING, 77 + SCH_TODO_EVAL, 73 78 SCH_TODO_UNREG, 74 79 }; 75 80
+59 -45
drivers/s390/cio/css.c
··· 195 195 } 196 196 EXPORT_SYMBOL_GPL(css_sch_device_unregister); 197 197 198 - static void css_sch_todo(struct work_struct *work) 199 - { 200 - struct subchannel *sch; 201 - enum sch_todo todo; 202 - 203 - sch = container_of(work, struct subchannel, todo_work); 204 - /* Find out todo. */ 205 - spin_lock_irq(sch->lock); 206 - todo = sch->todo; 207 - CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, 208 - sch->schid.sch_no, todo); 209 - sch->todo = SCH_TODO_NOTHING; 210 - spin_unlock_irq(sch->lock); 211 - /* Perform todo. */ 212 - if (todo == SCH_TODO_UNREG) 213 - css_sch_device_unregister(sch); 214 - /* Release workqueue ref. */ 215 - put_device(&sch->dev); 216 - } 217 - 218 - /** 219 - * css_sched_sch_todo - schedule a subchannel operation 220 - * @sch: subchannel 221 - * @todo: todo 222 - * 223 - * Schedule the operation identified by @todo to be performed on the slow path 224 - * workqueue. Do nothing if another operation with higher priority is already 225 - * scheduled. Needs to be called with subchannel lock held. 226 - */ 227 - void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) 228 - { 229 - CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", 230 - sch->schid.ssid, sch->schid.sch_no, todo); 231 - if (sch->todo >= todo) 232 - return; 233 - /* Get workqueue ref. */ 234 - if (!get_device(&sch->dev)) 235 - return; 236 - sch->todo = todo; 237 - if (!queue_work(cio_work_q, &sch->todo_work)) { 238 - /* Already queued, release workqueue ref. 
*/ 239 - put_device(&sch->dev); 240 - } 241 - } 242 - 243 198 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 244 199 { 245 200 int i; ··· 419 464 ret = css_evaluate_new_subchannel(schid, slow); 420 465 if (ret == -EAGAIN) 421 466 css_schedule_eval(schid); 467 + } 468 + 469 + /** 470 + * css_sched_sch_todo - schedule a subchannel operation 471 + * @sch: subchannel 472 + * @todo: todo 473 + * 474 + * Schedule the operation identified by @todo to be performed on the slow path 475 + * workqueue. Do nothing if another operation with higher priority is already 476 + * scheduled. Needs to be called with subchannel lock held. 477 + */ 478 + void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) 479 + { 480 + CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", 481 + sch->schid.ssid, sch->schid.sch_no, todo); 482 + if (sch->todo >= todo) 483 + return; 484 + /* Get workqueue ref. */ 485 + if (!get_device(&sch->dev)) 486 + return; 487 + sch->todo = todo; 488 + if (!queue_work(cio_work_q, &sch->todo_work)) { 489 + /* Already queued, release workqueue ref. */ 490 + put_device(&sch->dev); 491 + } 492 + } 493 + 494 + static void css_sch_todo(struct work_struct *work) 495 + { 496 + struct subchannel *sch; 497 + enum sch_todo todo; 498 + int ret; 499 + 500 + sch = container_of(work, struct subchannel, todo_work); 501 + /* Find out todo. */ 502 + spin_lock_irq(sch->lock); 503 + todo = sch->todo; 504 + CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, 505 + sch->schid.sch_no, todo); 506 + sch->todo = SCH_TODO_NOTHING; 507 + spin_unlock_irq(sch->lock); 508 + /* Perform todo. 
*/ 509 + switch (todo) { 510 + case SCH_TODO_NOTHING: 511 + break; 512 + case SCH_TODO_EVAL: 513 + ret = css_evaluate_known_subchannel(sch, 1); 514 + if (ret == -EAGAIN) { 515 + spin_lock_irq(sch->lock); 516 + css_sched_sch_todo(sch, todo); 517 + spin_unlock_irq(sch->lock); 518 + } 519 + break; 520 + case SCH_TODO_UNREG: 521 + css_sch_device_unregister(sch); 522 + break; 523 + } 524 + /* Release workqueue ref. */ 525 + put_device(&sch->dev); 422 526 } 423 527 424 528 static struct idset *slow_subchannel_set;
+2 -2
drivers/s390/cio/device.c
··· 1868 1868 */ 1869 1869 cdev->private->flags.resuming = 1; 1870 1870 cdev->private->path_new_mask = LPM_ANYPATH; 1871 - css_schedule_eval(sch->schid); 1871 + css_sched_sch_todo(sch, SCH_TODO_EVAL); 1872 1872 spin_unlock_irq(sch->lock); 1873 - css_complete_work(); 1873 + css_wait_for_slow_path(); 1874 1874 1875 1875 /* cdev may have been moved to a different subchannel. */ 1876 1876 sch = to_subchannel(cdev->dev.parent);