Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.16-rc2, 282 lines, 7.5 kB
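The listing below appears to be drivers/ide/ide-pm.c as of v4.16-rc2: the generic power-management helpers for IDE drives, which implement suspend and resume as special requests driven through the block layer.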
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}

static void ide_end_sync_rq(struct request *rq, blk_status_t error)
{
	complete(rq->end_io_data);
}

static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}
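
/**
 * generic_ide_resume - resume an IDE device
 * @dev: target device
 *
 * Counterpart to generic_ide_suspend(): restores the port's ACPI state
 * and timings, issues an ATA_PRIV_PM_RESUME request through the block
 * layer, then invokes the attached ide_driver's resume hook, if any.
 */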
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request_flags(drive->queue, REQ_OP_DRV_IN,
				   BLK_MQ_REQ_PREEMPT);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
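
/*
 * PM requests step through a small state machine held in
 * struct ide_pm_state->pm_step (IDE_PM_START_SUSPEND and
 * IDE_PM_START_RESUME name the first step of each sequence):
 *
 *   suspend: IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *            (standby is skipped when pm_state is PM_EVENT_FREEZE)
 *   resume:  IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA
 *            -> IDE_PM_COMPLETED (IDE_PM_IDLE is skipped for ATAPI)
 *
 * ide_start_power_step() issues the taskfile for the current step;
 * ide_complete_power_step() advances pm_step once it completes.
 */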
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 * ide_complete_pm_rq - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, BLK_STS_OK, 0))
		BUG();
}

void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
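
For orientation, here is a minimal sketch of how the entry points above are typically wired up. It assumes the legacy suspend/resume callbacks of struct bus_type (still present in v4.16); the actual IDE bus registration lives elsewhere in drivers/ide/, so the structure below is illustrative rather than a copy of that code.

/*
 * Illustrative only: hook the PM handlers above into the IDE bus via
 * the legacy bus_type callbacks. Field values here are assumptions,
 * not a copy of the real registration.
 */
#include <linux/device.h>
#include <linux/ide.h>

static struct bus_type ide_pm_bus_sketch = {
	.name    = "ide",
	/* suspend: flush the write cache, then ATA_CMD_STANDBYNOW1 */
	.suspend = generic_ide_suspend,
	/* resume: restore PIO/DMA settings, ATA_CMD_IDLEIMMEDIATE */
	.resume  = generic_ide_resume,
};

Keeping the hooks at the bus level is what lets generic_ide_suspend() coordinate the master/slave pair on a port (the drive->dn & 1 checks), so ACPI _GTM and _PS3 run once per port rather than once per drive.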