Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drivers/ide/ide-pm.c at v4.18 (281 lines, 7.5 kB)
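
The file below is the legacy IDE power-management core. Suspend and resume are driven as driver-private block requests (REQ_OP_DRV_IN) whose payload, a struct ide_pm_state, steps through a small state machine: suspend flushes the drive's write cache and puts it into standby; resume restores the PIO mode, issues IDLE IMMEDIATE, and re-enables DMA.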
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}

static void ide_end_sync_rq(struct request *rq, blk_status_t error)
{
	complete(rq->end_io_data);
}

static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}

int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}

void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 * ide_complete_pm_rq - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, BLK_STS_OK, 0))
		BUG();
}

void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
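
For context, generic_ide_suspend() and generic_ide_resume() are not called from within this file: the driver core invokes them as the legacy suspend/resume hooks of the IDE bus type. Below is a minimal sketch of that wiring, modeled on drivers/ide/ide.c; the match/uevent/probe/remove/shutdown callbacks are assumed to be the ones defined elsewhere in drivers/ide/.

/* Sketch of the bus registration (cf. drivers/ide/ide.c). */
struct bus_type ide_bus_type = {
	.name		= "ide",
	.match		= ide_bus_match,
	.uevent		= ide_uevent,
	.probe		= generic_ide_probe,
	.remove		= generic_ide_remove,
	.shutdown	= generic_ide_shutdown,
	/* legacy bus PM hooks; pm_message_t distinguishes freeze from suspend */
	.suspend	= generic_ide_suspend,
	.resume		= generic_ide_resume,
};

Because the core calls these hooks once per device, a port with a master and a slave runs generic_ide_suspend() twice; the (drive->dn & 1) and pair checks in the code above exist so that the ACPI _GTM and _PS3 calls still happen exactly once per port.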