Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mtd: intel-dg: wake card on operations

The Intel DG cards do not have separate power control for the
persistent memory: the memory is only available while the whole card
is awake.

Enable runtime PM in the mtd driver to notify the parent graphics
driver that the whole card should be kept awake while NVM operations
are performed through this driver.

Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
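
For readers new to the runtime PM pattern applied here, the sketch
below shows the shape of the change in each operation path: take a
runtime-PM reference before touching the hardware (the PM core
resumes the parent, i.e. the graphics card, before this device
becomes active) and drop the reference with autosuspend when done.
This is a minimal illustration under those assumptions, not the
driver code itself; my_nvm_op() is a hypothetical stand-in for the
mtd read/write/erase callbacks.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_nvm_op(struct device *dev)
{
	int ret;

	/* Take a runtime-PM reference; the core wakes the parent
	 * (the whole card) before this device becomes active. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the NVM while the card is guaranteed awake ... */

	/* Drop the reference; the card may go idle again once the
	 * autosuspend delay expires. */
	pm_runtime_put_autosuspend(dev);
	return 0;
}

This is also why the early returns in the erase loop become breaks in
the diff below: every exit path has to reach the matching
pm_runtime_put_autosuspend() call.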

Authored by Alexander Usyskin, committed by Miquel Raynal
3e9c49d4 9d4d01a2

+62 -12
drivers/mtd/devices/mtd_intel_dg.c
--- a/drivers/mtd/devices/mtd_intel_dg.c
+++ b/drivers/mtd/devices/mtd_intel_dg.c
@@ -15,14 +15,18 @@
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include <linux/types.h>
 
+#define INTEL_DG_NVM_RPM_TIMEOUT_MS 500
+
 struct intel_dg_nvm {
 	struct kref refcnt;
 	struct mtd_info mtd;
+	struct device *dev;
 	struct mutex lock; /* region access lock */
 	void __iomem *base;
 	void __iomem *base2;
@@ -425,6 +421,8 @@
 	unsigned int i, n;
 	int ret;
 
+	nvm->dev = device;
+
 	/* clean error register, previous errors are ignored */
 	idg_nvm_error(nvm);
 
@@ -504,6 +498,7 @@
 	size_t len;
 	u8 region;
 	u64 addr;
+	int ret;
 
 	if (WARN_ON(!nvm))
 		return -EINVAL;
@@ -519,20 +512,29 @@
 	total_len = info->len;
 	addr = info->addr;
 
+	ret = pm_runtime_resume_and_get(nvm->dev);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+		return ret;
+	}
+
+	ret = 0;
 	guard(mutex)(&nvm->lock);
 
 	while (total_len > 0) {
 		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
 			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
 			info->fail_addr = addr;
-			return -ERANGE;
+			ret = -ERANGE;
+			break;
 		}
 
 		idx = idg_nvm_get_region(nvm, addr);
 		if (idx >= nvm->nregions) {
 			dev_err(&mtd->dev, "out of range");
 			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-			return -ERANGE;
+			ret = -ERANGE;
+			break;
 		}
 
 		from = addr - nvm->regions[idx].offset;
@@ -557,14 +541,16 @@
 		if (bytes < 0) {
 			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
 			info->fail_addr += nvm->regions[idx].offset;
-			return bytes;
+			ret = bytes;
+			break;
 		}
 
 		addr += len;
 		total_len -= len;
 	}
 
-	return 0;
+	pm_runtime_put_autosuspend(nvm->dev);
+	return ret;
 }
 
 static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -595,17 +577,24 @@
 	if (len > nvm->regions[idx].size - from)
 		len = nvm->regions[idx].size - from;
 
+	ret = pm_runtime_resume_and_get(nvm->dev);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_read(nvm, region, from, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_put_autosuspend(nvm->dev);
+	return ret;
 }
 
 static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -641,17 +616,24 @@
 	if (len > nvm->regions[idx].size - to)
 		len = nvm->regions[idx].size - to;
 
+	ret = pm_runtime_resume_and_get(nvm->dev);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_write(nvm, region, to, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_put_autosuspend(nvm->dev);
+	return ret;
 }
 
 static void intel_dg_nvm_release(struct kref *kref)
@@ -785,6 +753,21 @@
 	}
 	nvm->nregions = n; /* in case where kasprintf fail */
 
+	ret = devm_pm_runtime_enable(device);
+	if (ret < 0) {
+		dev_err(device, "rpm: enable failed %d\n", ret);
+		goto err_norpm;
+	}
+
+	pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT_MS);
+	pm_runtime_use_autosuspend(device);
+
+	ret = pm_runtime_resume_and_get(device);
+	if (ret < 0) {
+		dev_err(device, "rpm: get failed %d\n", ret);
+		goto err_norpm;
+	}
+
 	nvm->base = devm_ioremap_resource(device, &invm->bar);
 	if (IS_ERR(nvm->base)) {
 		ret = PTR_ERR(nvm->base);
@@ -828,9 +781,12 @@
 
 	dev_set_drvdata(&aux_dev->dev, nvm);
 
+	pm_runtime_put(device);
 	return 0;
 
 err:
+	pm_runtime_put(device);
+err_norpm:
 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
 	return ret;
 }
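
The probe-time half of the change, condensed into a self-contained
sketch. my_probe() is a hypothetical name; the 500 ms value mirrors
INTEL_DG_NVM_RPM_TIMEOUT_MS from the patch.

#include <linux/device.h>
#include <linux/pm_runtime.h>

#define MY_RPM_TIMEOUT_MS 500

static int my_probe(struct device *dev)
{
	int ret;

	/* Managed enable: runtime PM is disabled automatically on unbind. */
	ret = devm_pm_runtime_enable(dev);
	if (ret < 0)
		return ret;

	/* Keep the card awake for a short window after the last put so
	 * back-to-back operations do not power-cycle it every time. */
	pm_runtime_set_autosuspend_delay(dev, MY_RPM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(dev);

	/* Hold a reference across the whole init sequence. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... map BARs, enumerate regions, register the mtd device ... */

	pm_runtime_put(dev);
	return 0;
}

The explicit get/put around initialization keeps the card awake while
the BARs are mapped and the regions enumerated; the separate
err_norpm label in the actual patch exists so that failures occurring
before the get do not drop a reference that was never taken.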