Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: device cmd should use dedicated lock

Create a dedicated lock for device command operations. Put the device
command operation under finer grained locking instead of using the
idxd->dev_lock.

Suggested-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/161894525685.3210132.16160045731436382560.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Dave Jiang and committed by Vinod Koul.
53b2ee7f 5b0c68c4

+11 -9
+9 -9
drivers/dma/idxd/device.c
··· 465 465 memset(&cmd, 0, sizeof(cmd)); 466 466 cmd.cmd = IDXD_CMD_RESET_DEVICE; 467 467 dev_dbg(dev, "%s: sending reset for init.\n", __func__); 468 - spin_lock_irqsave(&idxd->dev_lock, flags); 468 + spin_lock_irqsave(&idxd->cmd_lock, flags); 469 469 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); 470 470 471 471 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & 472 472 IDXD_CMDSTS_ACTIVE) 473 473 cpu_relax(); 474 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 474 + spin_unlock_irqrestore(&idxd->cmd_lock, flags); 475 475 return 0; 476 476 } 477 477 ··· 494 494 cmd.operand = operand; 495 495 cmd.int_req = 1; 496 496 497 - spin_lock_irqsave(&idxd->dev_lock, flags); 497 + spin_lock_irqsave(&idxd->cmd_lock, flags); 498 498 wait_event_lock_irq(idxd->cmd_waitq, 499 499 !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags), 500 - idxd->dev_lock); 500 + idxd->cmd_lock); 501 501 502 502 dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", 503 503 __func__, cmd_code, operand); ··· 511 511 * After command submitted, release lock and go to sleep until 512 512 * the command completes via interrupt. 
513 513 */ 514 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 514 + spin_unlock_irqrestore(&idxd->cmd_lock, flags); 515 515 wait_for_completion(&done); 516 - spin_lock_irqsave(&idxd->dev_lock, flags); 516 + spin_lock_irqsave(&idxd->cmd_lock, flags); 517 517 if (status) { 518 518 *status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); 519 519 idxd->cmd_status = *status & GENMASK(7, 0); ··· 522 522 __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags); 523 523 /* Wake up other pending commands */ 524 524 wake_up(&idxd->cmd_waitq); 525 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 525 + spin_unlock_irqrestore(&idxd->cmd_lock, flags); 526 526 } 527 527 528 528 int idxd_device_enable(struct idxd_device *idxd) ··· 667 667 668 668 dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand); 669 669 670 - spin_lock_irqsave(&idxd->dev_lock, flags); 670 + spin_lock_irqsave(&idxd->cmd_lock, flags); 671 671 iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); 672 672 673 673 while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE) 674 674 cpu_relax(); 675 675 status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); 676 - spin_unlock_irqrestore(&idxd->dev_lock, flags); 676 + spin_unlock_irqrestore(&idxd->cmd_lock, flags); 677 677 678 678 if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) { 679 679 dev_dbg(dev, "release int handle failed: %#x\n", status);
+1
drivers/dma/idxd/idxd.h
··· 204 204 void __iomem *reg_base; 205 205 206 206 spinlock_t dev_lock; /* spinlock for device */ 207 + spinlock_t cmd_lock; /* spinlock for device commands */ 207 208 struct completion *cmd_done; 208 209 struct idxd_group **groups; 209 210 struct idxd_wq **wqs;
+1
drivers/dma/idxd/init.c
··· 449 449 } 450 450 451 451 spin_lock_init(&idxd->dev_lock); 452 + spin_lock_init(&idxd->cmd_lock); 452 453 453 454 return idxd; 454 455 }