Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: remove interrupt disable for dev_lock

The spinlock is not being used in hard interrupt context, so there is no
need to disable irqs when acquiring the lock. The interrupt thread handler
is also not in bottom-half context, therefore the bottom-half disabling can
be removed as well. Convert all dev_lock acquisitions to plain spin_lock() calls.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/162984026772.1939166.11504067782824765879.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Dave Jiang and committed by Vinod Koul.
cf84a4b9 f9f4082d

+22 -32
+2 -3
drivers/dma/idxd/cdev.c
··· 218 218 struct idxd_user_context *ctx = filp->private_data; 219 219 struct idxd_wq *wq = ctx->wq; 220 220 struct idxd_device *idxd = wq->idxd; 221 - unsigned long flags; 222 221 __poll_t out = 0; 223 222 224 223 poll_wait(filp, &wq->err_queue, wait); 225 - spin_lock_irqsave(&idxd->dev_lock, flags); 224 + spin_lock(&idxd->dev_lock); 226 225 if (idxd->sw_err.valid) 227 226 out = EPOLLIN | EPOLLRDNORM; 228 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 227 + spin_unlock(&idxd->dev_lock); 229 228 230 229 return out; 231 230 }
+12 -19
drivers/dma/idxd/device.c
··· 341 341 int rc; 342 342 union wqcfg wqcfg; 343 343 unsigned int offset; 344 - unsigned long flags; 345 344 346 345 rc = idxd_wq_disable(wq, false); 347 346 if (rc < 0) 348 347 return rc; 349 348 350 349 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); 351 - spin_lock_irqsave(&idxd->dev_lock, flags); 350 + spin_lock(&idxd->dev_lock); 352 351 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); 353 352 wqcfg.pasid_en = 1; 354 353 wqcfg.pasid = pasid; 355 354 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); 356 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 355 + spin_unlock(&idxd->dev_lock); 357 356 358 357 rc = idxd_wq_enable(wq); 359 358 if (rc < 0) ··· 367 368 int rc; 368 369 union wqcfg wqcfg; 369 370 unsigned int offset; 370 - unsigned long flags; 371 371 372 372 rc = idxd_wq_disable(wq, false); 373 373 if (rc < 0) 374 374 return rc; 375 375 376 376 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); 377 - spin_lock_irqsave(&idxd->dev_lock, flags); 377 + spin_lock(&idxd->dev_lock); 378 378 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); 379 379 wqcfg.pasid_en = 0; 380 380 wqcfg.pasid = 0; 381 381 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); 382 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 382 + spin_unlock(&idxd->dev_lock); 383 383 384 384 rc = idxd_wq_enable(wq); 385 385 if (rc < 0) ··· 556 558 { 557 559 struct device *dev = &idxd->pdev->dev; 558 560 u32 status; 559 - unsigned long flags; 560 561 561 562 if (!idxd_is_enabled(idxd)) { 562 563 dev_dbg(dev, "Device is not enabled\n"); ··· 571 574 return -ENXIO; 572 575 } 573 576 574 - spin_lock_irqsave(&idxd->dev_lock, flags); 577 + spin_lock(&idxd->dev_lock); 575 578 idxd_device_clear_state(idxd); 576 579 idxd->state = IDXD_DEV_DISABLED; 577 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 580 + spin_unlock(&idxd->dev_lock); 578 581 return 0; 579 582 } 580 583 581 584 void idxd_device_reset(struct idxd_device *idxd) 582 585 { 583 
- unsigned long flags; 584 - 585 586 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); 586 - spin_lock_irqsave(&idxd->dev_lock, flags); 587 + spin_lock(&idxd->dev_lock); 587 588 idxd_device_clear_state(idxd); 588 589 idxd->state = IDXD_DEV_DISABLED; 589 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 590 + spin_unlock(&idxd->dev_lock); 590 591 } 591 592 592 593 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid) ··· 1159 1164 { 1160 1165 struct idxd_device *idxd = wq->idxd; 1161 1166 struct device *dev = &idxd->pdev->dev; 1162 - unsigned long flags; 1163 1167 int rc = -ENXIO; 1164 1168 1165 1169 lockdep_assert_held(&wq->wq_lock); ··· 1210 1216 } 1211 1217 1212 1218 rc = 0; 1213 - spin_lock_irqsave(&idxd->dev_lock, flags); 1219 + spin_lock(&idxd->dev_lock); 1214 1220 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1215 1221 rc = idxd_device_config(idxd); 1216 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 1222 + spin_unlock(&idxd->dev_lock); 1217 1223 if (rc < 0) { 1218 1224 dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc); 1219 1225 goto err; ··· 1282 1288 int idxd_device_drv_probe(struct idxd_dev *idxd_dev) 1283 1289 { 1284 1290 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); 1285 - unsigned long flags; 1286 1291 int rc = 0; 1287 1292 1288 1293 /* ··· 1295 1302 } 1296 1303 1297 1304 /* Device configuration */ 1298 - spin_lock_irqsave(&idxd->dev_lock, flags); 1305 + spin_lock(&idxd->dev_lock); 1299 1306 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1300 1307 rc = idxd_device_config(idxd); 1301 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 1308 + spin_unlock(&idxd->dev_lock); 1302 1309 if (rc < 0) 1303 1310 return -ENXIO; 1304 1311
+4 -4
drivers/dma/idxd/irq.c
··· 64 64 bool err = false; 65 65 66 66 if (cause & IDXD_INTC_ERR) { 67 - spin_lock_bh(&idxd->dev_lock); 67 + spin_lock(&idxd->dev_lock); 68 68 for (i = 0; i < 4; i++) 69 69 idxd->sw_err.bits[i] = ioread64(idxd->reg_base + 70 70 IDXD_SWERR_OFFSET + i * sizeof(u64)); ··· 89 89 } 90 90 } 91 91 92 - spin_unlock_bh(&idxd->dev_lock); 92 + spin_unlock(&idxd->dev_lock); 93 93 val |= IDXD_INTC_ERR; 94 94 95 95 for (i = 0; i < 4; i++) ··· 133 133 INIT_WORK(&idxd->work, idxd_device_reinit); 134 134 queue_work(idxd->wq, &idxd->work); 135 135 } else { 136 - spin_lock_bh(&idxd->dev_lock); 136 + spin_lock(&idxd->dev_lock); 137 137 idxd_wqs_quiesce(idxd); 138 138 idxd_wqs_unmap_portal(idxd); 139 139 idxd_device_clear_state(idxd); ··· 141 141 "idxd halted, need %s.\n", 142 142 gensts.reset_type == IDXD_DEVICE_RESET_FLR ? 143 143 "FLR" : "system reset"); 144 - spin_unlock_bh(&idxd->dev_lock); 144 + spin_unlock(&idxd->dev_lock); 145 145 return -ENXIO; 146 146 } 147 147 }
+4 -6
drivers/dma/idxd/sysfs.c
··· 1099 1099 struct device_attribute *attr, char *buf) 1100 1100 { 1101 1101 struct idxd_device *idxd = confdev_to_idxd(dev); 1102 - unsigned long flags; 1103 1102 int count = 0, i; 1104 1103 1105 - spin_lock_irqsave(&idxd->dev_lock, flags); 1104 + spin_lock(&idxd->dev_lock); 1106 1105 for (i = 0; i < idxd->max_wqs; i++) { 1107 1106 struct idxd_wq *wq = idxd->wqs[i]; 1108 1107 1109 1108 count += wq->client_count; 1110 1109 } 1111 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 1110 + spin_unlock(&idxd->dev_lock); 1112 1111 1113 1112 return sysfs_emit(buf, "%d\n", count); 1114 1113 } ··· 1145 1146 { 1146 1147 struct idxd_device *idxd = confdev_to_idxd(dev); 1147 1148 int i, out = 0; 1148 - unsigned long flags; 1149 1149 1150 - spin_lock_irqsave(&idxd->dev_lock, flags); 1150 + spin_lock(&idxd->dev_lock); 1151 1151 for (i = 0; i < 4; i++) 1152 1152 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]); 1153 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 1153 + spin_unlock(&idxd->dev_lock); 1154 1154 out--; 1155 1155 out += sysfs_emit_at(buf, out, "\n"); 1156 1156 return out;