Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] implement runtime Power Management

This patch (as1398b) adds runtime PM support to the SCSI layer. Only
the mechanism is provided; use of it is up to the various high-level
drivers, and the patch doesn't change any of them. Except for sg --
the patch explicitly prevents a device from being runtime-suspended
while its sg device file is open.

The implementation is simplistic. In general, hosts and targets are
automatically suspended when all their children are asleep, but for
them the runtime-suspend code doesn't actually do anything. (A host's
runtime PM status is propagated up the device tree, though, so a
runtime-PM-aware lower-level driver could power down the host adapter
hardware at the appropriate times.) There are comments indicating
where a transport class might be notified or some other hooks added.

LUNs are runtime-suspended by calling the drivers' existing suspend
handlers (and likewise for runtime-resume). Somewhat arbitrarily, the
implementation delays for 100 ms before suspending an eligible LUN.
This is because there typically are occasions during bootup when the
same device file is opened and closed several times in quick
succession.

The way this all works is that the SCSI core increments a device's
PM-usage count when it is registered. If a high-level driver does
nothing then the device will not be eligible for runtime-suspend
because of the elevated usage count. If a high-level driver wants to
use runtime PM then it can call scsi_autopm_put_device() in its probe
routine to decrement the usage count and scsi_autopm_get_device() in
its remove routine to restore the original count.

Hosts, targets, and LUNs are not suspended while they are being probed
or removed, or while the error handler is running. In fact, a fairly
large part of the patch consists of code to make sure that things
aren't suspended at such times.

[jejb: fix up compile issues in PM config variations]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>

authored by

Alan Stern and committed by
James Bottomley
bc4f2401 db5bd1e0

+201 -11
+8 -2
drivers/scsi/hosts.c
··· 32 32 #include <linux/completion.h> 33 33 #include <linux/transport_class.h> 34 34 #include <linux/platform_device.h> 35 + #include <linux/pm_runtime.h> 35 36 36 37 #include <scsi/scsi_device.h> 37 38 #include <scsi/scsi_host.h> ··· 157 156 void scsi_remove_host(struct Scsi_Host *shost) 158 157 { 159 158 unsigned long flags; 159 + 160 160 mutex_lock(&shost->scan_mutex); 161 161 spin_lock_irqsave(shost->host_lock, flags); 162 162 if (scsi_host_set_state(shost, SHOST_CANCEL)) ··· 167 165 return; 168 166 } 169 167 spin_unlock_irqrestore(shost->host_lock, flags); 168 + 169 + scsi_autopm_get_host(shost); 170 170 scsi_forget_host(shost); 171 171 mutex_unlock(&shost->scan_mutex); 172 172 scsi_proc_host_rm(shost); ··· 220 216 shost->shost_gendev.parent = dev ? dev : &platform_bus; 221 217 shost->dma_dev = dma_dev; 222 218 223 - device_enable_async_suspend(&shost->shost_gendev); 224 - 225 219 error = device_add(&shost->shost_gendev); 226 220 if (error) 227 221 goto out; 222 + 223 + pm_runtime_set_active(&shost->shost_gendev); 224 + pm_runtime_enable(&shost->shost_gendev); 225 + device_enable_async_suspend(&shost->shost_gendev); 228 226 229 227 scsi_host_set_state(shost, SHOST_RUNNING); 230 228 get_device(shost->shost_gendev.parent);
+15 -1
drivers/scsi/scsi_error.c
··· 1775 1775 * what we need to do to get it up and online again (if we can). 1776 1776 * If we fail, we end up taking the thing offline. 1777 1777 */ 1778 + if (scsi_autopm_get_host(shost) != 0) { 1779 + SCSI_LOG_ERROR_RECOVERY(1, 1780 + printk(KERN_ERR "Error handler scsi_eh_%d " 1781 + "unable to autoresume\n", 1782 + shost->host_no)); 1783 + continue; 1784 + } 1785 + 1778 1786 if (shost->transportt->eh_strategy_handler) 1779 1787 shost->transportt->eh_strategy_handler(shost); 1780 1788 else ··· 1796 1788 * which are still online. 1797 1789 */ 1798 1790 scsi_restart_operations(shost); 1791 + scsi_autopm_put_host(shost); 1799 1792 set_current_state(TASK_INTERRUPTIBLE); 1800 1793 } 1801 1794 __set_current_state(TASK_RUNNING); ··· 1894 1885 int 1895 1886 scsi_reset_provider(struct scsi_device *dev, int flag) 1896 1887 { 1897 - struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); 1888 + struct scsi_cmnd *scmd; 1898 1889 struct Scsi_Host *shost = dev->host; 1899 1890 struct request req; 1900 1891 unsigned long flags; 1901 1892 int rtn; 1902 1893 1894 + if (scsi_autopm_get_host(shost) < 0) 1895 + return FAILED; 1896 + 1897 + scmd = scsi_get_command(dev, GFP_KERNEL); 1903 1898 blk_rq_init(NULL, &req); 1904 1899 scmd->request = &req; 1905 1900 ··· 1960 1947 scsi_run_host_queues(shost); 1961 1948 1962 1949 scsi_next_command(scmd); 1950 + scsi_autopm_put_host(shost); 1963 1951 return rtn; 1964 1952 } 1965 1953 EXPORT_SYMBOL(scsi_reset_provider);
+110
drivers/scsi/scsi_pm.c
··· 59 59 60 60 if (scsi_is_sdev_device(dev)) 61 61 err = scsi_dev_type_resume(dev); 62 + 63 + if (err == 0) { 64 + pm_runtime_disable(dev); 65 + pm_runtime_set_active(dev); 66 + pm_runtime_enable(dev); 67 + } 62 68 return err; 63 69 } 64 70 ··· 92 86 93 87 #endif /* CONFIG_PM_SLEEP */ 94 88 89 + #ifdef CONFIG_PM_RUNTIME 90 + 91 + static int scsi_runtime_suspend(struct device *dev) 92 + { 93 + int err = 0; 94 + 95 + dev_dbg(dev, "scsi_runtime_suspend\n"); 96 + if (scsi_is_sdev_device(dev)) { 97 + err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND); 98 + if (err == -EAGAIN) 99 + pm_schedule_suspend(dev, jiffies_to_msecs( 100 + round_jiffies_up_relative(HZ/10))); 101 + } 102 + 103 + /* Insert hooks here for targets, hosts, and transport classes */ 104 + 105 + return err; 106 + } 107 + 108 + static int scsi_runtime_resume(struct device *dev) 109 + { 110 + int err = 0; 111 + 112 + dev_dbg(dev, "scsi_runtime_resume\n"); 113 + if (scsi_is_sdev_device(dev)) 114 + err = scsi_dev_type_resume(dev); 115 + 116 + /* Insert hooks here for targets, hosts, and transport classes */ 117 + 118 + return err; 119 + } 120 + 121 + static int scsi_runtime_idle(struct device *dev) 122 + { 123 + int err; 124 + 125 + dev_dbg(dev, "scsi_runtime_idle\n"); 126 + 127 + /* Insert hooks here for targets, hosts, and transport classes */ 128 + 129 + if (scsi_is_sdev_device(dev)) 130 + err = pm_schedule_suspend(dev, 100); 131 + else 132 + err = pm_runtime_suspend(dev); 133 + return err; 134 + } 135 + 136 + int scsi_autopm_get_device(struct scsi_device *sdev) 137 + { 138 + int err; 139 + 140 + err = pm_runtime_get_sync(&sdev->sdev_gendev); 141 + if (err < 0) 142 + pm_runtime_put_sync(&sdev->sdev_gendev); 143 + else if (err > 0) 144 + err = 0; 145 + return err; 146 + } 147 + EXPORT_SYMBOL_GPL(scsi_autopm_get_device); 148 + 149 + void scsi_autopm_put_device(struct scsi_device *sdev) 150 + { 151 + pm_runtime_put_sync(&sdev->sdev_gendev); 152 + } 153 + EXPORT_SYMBOL_GPL(scsi_autopm_put_device); 154 + 155 
+ void scsi_autopm_get_target(struct scsi_target *starget) 156 + { 157 + pm_runtime_get_sync(&starget->dev); 158 + } 159 + 160 + void scsi_autopm_put_target(struct scsi_target *starget) 161 + { 162 + pm_runtime_put_sync(&starget->dev); 163 + } 164 + 165 + int scsi_autopm_get_host(struct Scsi_Host *shost) 166 + { 167 + int err; 168 + 169 + err = pm_runtime_get_sync(&shost->shost_gendev); 170 + if (err < 0) 171 + pm_runtime_put_sync(&shost->shost_gendev); 172 + else if (err > 0) 173 + err = 0; 174 + return err; 175 + } 176 + 177 + void scsi_autopm_put_host(struct Scsi_Host *shost) 178 + { 179 + pm_runtime_put_sync(&shost->shost_gendev); 180 + } 181 + 182 + #else 183 + 184 + #define scsi_runtime_suspend NULL 185 + #define scsi_runtime_resume NULL 186 + #define scsi_runtime_idle NULL 187 + 188 + #endif /* CONFIG_PM_RUNTIME */ 189 + 95 190 const struct dev_pm_ops scsi_bus_pm_ops = { 96 191 .suspend = scsi_bus_suspend, 97 192 .resume = scsi_bus_resume_common, ··· 200 93 .thaw = scsi_bus_resume_common, 201 94 .poweroff = scsi_bus_poweroff, 202 95 .restore = scsi_bus_resume_common, 96 + .runtime_suspend = scsi_runtime_suspend, 97 + .runtime_resume = scsi_runtime_resume, 98 + .runtime_idle = scsi_runtime_idle, 203 99 };
+13 -1
drivers/scsi/scsi_priv.h
··· 7 7 struct request; 8 8 struct scsi_cmnd; 9 9 struct scsi_device; 10 + struct scsi_target; 10 11 struct scsi_host_template; 11 12 struct Scsi_Host; 12 13 struct scsi_nl_hdr; ··· 148 147 /* scsi_pm.c */ 149 148 #ifdef CONFIG_PM_OPS 150 149 extern const struct dev_pm_ops scsi_bus_pm_ops; 151 - #else 150 + #else /* CONFIG_PM_OPS */ 152 151 #define scsi_bus_pm_ops (*NULL) 153 152 #endif 153 + #ifdef CONFIG_PM_RUNTIME 154 + extern void scsi_autopm_get_target(struct scsi_target *); 155 + extern void scsi_autopm_put_target(struct scsi_target *); 156 + extern int scsi_autopm_get_host(struct Scsi_Host *); 157 + extern void scsi_autopm_put_host(struct Scsi_Host *); 158 + #else 159 + static inline void scsi_autopm_get_target(struct scsi_target *t) {} 160 + static inline void scsi_autopm_put_target(struct scsi_target *t) {} 161 + static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } 162 + static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} 163 + #endif /* CONFIG_PM_RUNTIME */ 154 164 155 165 /* 156 166 * internal scsi timeout functions: for use by mid-layer and transport
+20 -4
drivers/scsi/scsi_scan.c
··· 1513 1513 starget = scsi_alloc_target(parent, channel, id); 1514 1514 if (!starget) 1515 1515 return ERR_PTR(-ENOMEM); 1516 + scsi_autopm_get_target(starget); 1516 1517 1517 1518 mutex_lock(&shost->scan_mutex); 1518 1519 if (!shost->async_scan) 1519 1520 scsi_complete_async_scans(); 1520 1521 1521 - if (scsi_host_scan_allowed(shost)) 1522 + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1522 1523 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1524 + scsi_autopm_put_host(shost); 1525 + } 1523 1526 mutex_unlock(&shost->scan_mutex); 1527 + scsi_autopm_put_target(starget); 1524 1528 scsi_target_reap(starget); 1525 1529 put_device(&starget->dev); 1526 1530 ··· 1578 1574 starget = scsi_alloc_target(parent, channel, id); 1579 1575 if (!starget) 1580 1576 return; 1577 + scsi_autopm_get_target(starget); 1581 1578 1582 1579 if (lun != SCAN_WILD_CARD) { 1583 1580 /* ··· 1604 1599 } 1605 1600 1606 1601 out_reap: 1602 + scsi_autopm_put_target(starget); 1607 1603 /* now determine if the target has any children at all 1608 1604 * and if not, nuke it */ 1609 1605 scsi_target_reap(starget); ··· 1639 1633 if (!shost->async_scan) 1640 1634 scsi_complete_async_scans(); 1641 1635 1642 - if (scsi_host_scan_allowed(shost)) 1636 + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1643 1637 __scsi_scan_target(parent, channel, id, lun, rescan); 1638 + scsi_autopm_put_host(shost); 1639 + } 1644 1640 mutex_unlock(&shost->scan_mutex); 1645 1641 } 1646 1642 EXPORT_SYMBOL(scsi_scan_target); ··· 1694 1686 if (!shost->async_scan) 1695 1687 scsi_complete_async_scans(); 1696 1688 1697 - if (scsi_host_scan_allowed(shost)) { 1689 + if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1698 1690 if (channel == SCAN_WILD_CARD) 1699 1691 for (channel = 0; channel <= shost->max_channel; 1700 1692 channel++) ··· 1702 1694 rescan); 1703 1695 else 1704 1696 scsi_scan_channel(shost, channel, id, lun, rescan); 1697 + 
scsi_autopm_put_host(shost); 1705 1698 } 1706 1699 mutex_unlock(&shost->scan_mutex); 1707 1700 ··· 1840 1831 static int do_scan_async(void *_data) 1841 1832 { 1842 1833 struct async_scan_data *data = _data; 1843 - do_scsi_scan_host(data->shost); 1834 + struct Scsi_Host *shost = data->shost; 1835 + 1836 + do_scsi_scan_host(shost); 1844 1837 scsi_finish_async_scan(data); 1838 + scsi_autopm_put_host(shost); 1845 1839 return 0; 1846 1840 } 1847 1841 ··· 1859 1847 1860 1848 if (strncmp(scsi_scan_type, "none", 4) == 0) 1861 1849 return; 1850 + if (scsi_autopm_get_host(shost) < 0) 1851 + return; 1862 1852 1863 1853 data = scsi_prep_async_scan(shost); 1864 1854 if (!data) { 1865 1855 do_scsi_scan_host(shost); 1856 + scsi_autopm_put_host(shost); 1866 1857 return; 1867 1858 } 1868 1859 1869 1860 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); 1870 1861 if (IS_ERR(p)) 1871 1862 do_scan_async(data); 1863 + /* scsi_autopm_put_host(shost) is called in do_scan_async() */ 1872 1864 } 1873 1865 EXPORT_SYMBOL(scsi_scan_host); 1874 1866
+18 -2
drivers/scsi/scsi_sysfs.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/blkdev.h> 13 13 #include <linux/device.h> 14 + #include <linux/pm_runtime.h> 14 15 15 16 #include <scsi/scsi.h> 16 17 #include <scsi/scsi_device.h> ··· 803 802 if (starget->state != STARGET_CREATED) 804 803 return 0; 805 804 806 - device_enable_async_suspend(&starget->dev); 807 - 808 805 error = device_add(&starget->dev); 809 806 if (error) { 810 807 dev_err(&starget->dev, "target device_add failed, error %d\n", error); ··· 810 811 } 811 812 transport_add_device(&starget->dev); 812 813 starget->state = STARGET_RUNNING; 814 + 815 + pm_runtime_set_active(&starget->dev); 816 + pm_runtime_enable(&starget->dev); 817 + device_enable_async_suspend(&starget->dev); 813 818 814 819 return 0; 815 820 } ··· 844 841 return error; 845 842 846 843 transport_configure_device(&starget->dev); 844 + 847 845 device_enable_async_suspend(&sdev->sdev_gendev); 846 + scsi_autopm_get_target(starget); 847 + pm_runtime_set_active(&sdev->sdev_gendev); 848 + pm_runtime_forbid(&sdev->sdev_gendev); 849 + pm_runtime_enable(&sdev->sdev_gendev); 850 + scsi_autopm_put_target(starget); 851 + 852 + /* The following call will keep sdev active indefinitely, until 853 + * its driver does a corresponding scsi_autopm_pm_device(). Only 854 + * drivers supporting autosuspend will do this. 855 + */ 856 + scsi_autopm_get_device(sdev); 857 + 848 858 error = device_add(&sdev->sdev_gendev); 849 859 if (error) { 850 860 printk(KERN_INFO "error 1\n");
+9 -1
drivers/scsi/sg.c
··· 245 245 if (retval) 246 246 goto sg_put; 247 247 248 + retval = scsi_autopm_get_device(sdp->device); 249 + if (retval) 250 + goto sdp_put; 251 + 248 252 if (!((flags & O_NONBLOCK) || 249 253 scsi_block_when_processing_errors(sdp->device))) { 250 254 retval = -ENXIO; ··· 306 302 } 307 303 retval = 0; 308 304 error_out: 309 - if (retval) 305 + if (retval) { 306 + scsi_autopm_put_device(sdp->device); 307 + sdp_put: 310 308 scsi_device_put(sdp->device); 309 + } 311 310 sg_put: 312 311 if (sdp) 313 312 sg_put_dev(sdp); ··· 334 327 sdp->exclude = 0; 335 328 wake_up_interruptible(&sdp->o_excl_wait); 336 329 330 + scsi_autopm_put_device(sdp->device); 337 331 kref_put(&sfp->f_ref, sg_remove_sfp); 338 332 return 0; 339 333 }
+8
include/scsi/scsi_device.h
··· 381 381 struct scsi_sense_hdr *, int timeout, int retries, 382 382 int *resid); 383 383 384 + #ifdef CONFIG_PM_RUNTIME 385 + extern int scsi_autopm_get_device(struct scsi_device *); 386 + extern void scsi_autopm_put_device(struct scsi_device *); 387 + #else 388 + static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } 389 + static inline void scsi_autopm_put_device(struct scsi_device *d) {} 390 + #endif /* CONFIG_PM_RUNTIME */ 391 + 384 392 static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) 385 393 { 386 394 return device_reprobe(&sdev->sdev_gendev);