Merge tag 'i3c/for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux

Pull i3c updates from Alexandre Belloni:
"HDR support has finally been added. mipi-i3c-hci has been reworked and
Intel Nova Lake-S support has been added.

Subsystem:
- Add HDR transfer support

Drivers:
- dw: fix bus hang on Agilex5
- mipi-i3c-hci: Intel Nova Lake-S support, IOMMU support
- svc: HDR support"

* tag 'i3c/for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux: (28 commits)
regmap: i3c: switch to use i3c_xfer from i3c_priv_xfer
net: mctp i3c: switch to use i3c_xfer from i3c_priv_xfer
hwmon: (lm75): switch to use i3c_xfer from i3c_priv_xfer
i3c: document i3c_xfers
i3c: fix I3C_SDR bit number
i3c: master: svc: Add basic HDR mode support
i3c: master: svc: Replace bool rnw with union for HDR support
i3c: Switch to use new i3c_xfer from i3c_priv_xfer
i3c: Add HDR API support
i3c: master: add WQ_PERCPU to alloc_workqueue users
i3c: master: Remove i3c_device_free_ibi from i3c_device_remove
i3c: mipi-i3c-hci-pci: Set d3cold_delay to 0 for Intel controllers
i3c: mipi-i3c-hci-pci: Add LTR support for Intel controllers
i3c: mipi-i3c-hci-pci: Add exit callback
i3c: mipi-i3c-hci-pci: Change callback parameter
i3c: mipi-i3c-hci-pci: Allocate a structure for mipi_i3c_hci_pci device information
i3c: mipi-i3c-hci-pci: Factor out intel_reset()
i3c: mipi-i3c-hci-pci: Factor out private registers ioremapping
i3c: mipi-i3c-hci-pci: Constify driver data
i3c: mipi-i3c-hci-pci: Use readl_poll_timeout()
...

+427 -116
+5 -1
Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
··· 14 15 properties: 16 compatible: 17 - const: snps,dw-i3c-master-1.00a 18 19 reg: 20 maxItems: 1
··· 14 15 properties: 16 compatible: 17 + oneOf: 18 + - const: snps,dw-i3c-master-1.00a 19 + - items: 20 + - const: altr,agilex5-dw-i3c-master 21 + - const: snps,dw-i3c-master-1.00a 22 23 reg: 24 maxItems: 1
+4 -4
drivers/base/regmap/regmap-i3c.c
··· 11 { 12 struct device *dev = context; 13 struct i3c_device *i3c = dev_to_i3cdev(dev); 14 - struct i3c_priv_xfer xfers[] = { 15 { 16 .rnw = false, 17 .len = count, ··· 19 }, 20 }; 21 22 - return i3c_device_do_priv_xfers(i3c, xfers, ARRAY_SIZE(xfers)); 23 } 24 25 static int regmap_i3c_read(void *context, ··· 28 { 29 struct device *dev = context; 30 struct i3c_device *i3c = dev_to_i3cdev(dev); 31 - struct i3c_priv_xfer xfers[2]; 32 33 xfers[0].rnw = false; 34 xfers[0].len = reg_size; ··· 38 xfers[1].len = val_size; 39 xfers[1].data.in = val; 40 41 - return i3c_device_do_priv_xfers(i3c, xfers, ARRAY_SIZE(xfers)); 42 } 43 44 static const struct regmap_bus regmap_i3c = {
··· 11 { 12 struct device *dev = context; 13 struct i3c_device *i3c = dev_to_i3cdev(dev); 14 + struct i3c_xfer xfers[] = { 15 { 16 .rnw = false, 17 .len = count, ··· 19 }, 20 }; 21 22 + return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR); 23 } 24 25 static int regmap_i3c_read(void *context, ··· 28 { 29 struct device *dev = context; 30 struct i3c_device *i3c = dev_to_i3cdev(dev); 31 + struct i3c_xfer xfers[2]; 32 33 xfers[0].rnw = false; 34 xfers[0].len = reg_size; ··· 38 xfers[1].len = val_size; 39 xfers[1].data.in = val; 40 41 + return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR); 42 } 43 44 static const struct regmap_bus regmap_i3c = {
+4 -4
drivers/hwmon/lm75.c
··· 621 { 622 struct i3c_device *i3cdev = context; 623 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 624 - struct i3c_priv_xfer xfers[] = { 625 { 626 .rnw = false, 627 .len = 1, ··· 640 if (reg == LM75_REG_CONF && !data->params->config_reg_16bits) 641 xfers[1].len--; 642 643 - ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2); 644 if (ret < 0) 645 return ret; 646 ··· 658 { 659 struct i3c_device *i3cdev = context; 660 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 661 - struct i3c_priv_xfer xfers[] = { 662 { 663 .rnw = false, 664 .len = 3, ··· 680 data->val_buf[2] = val & 0xff; 681 } 682 683 - return i3c_device_do_priv_xfers(i3cdev, xfers, 1); 684 } 685 686 static const struct regmap_bus lm75_i3c_regmap_bus = {
··· 621 { 622 struct i3c_device *i3cdev = context; 623 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 624 + struct i3c_xfer xfers[] = { 625 { 626 .rnw = false, 627 .len = 1, ··· 640 if (reg == LM75_REG_CONF && !data->params->config_reg_16bits) 641 xfers[1].len--; 642 643 + ret = i3c_device_do_xfers(i3cdev, xfers, 2, I3C_SDR); 644 if (ret < 0) 645 return ret; 646 ··· 658 { 659 struct i3c_device *i3cdev = context; 660 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 661 + struct i3c_xfer xfers[] = { 662 { 663 .rnw = false, 664 .len = 3, ··· 680 data->val_buf[2] = val & 0xff; 681 } 682 683 + return i3c_device_do_xfers(i3cdev, xfers, 1, I3C_SDR); 684 } 685 686 static const struct regmap_bus lm75_i3c_regmap_bus = {
+20 -7
drivers/i3c/device.c
··· 15 #include "internals.h" 16 17 /** 18 - * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a 19 - * specific device 20 * 21 * @dev: device with which the transfers should be done 22 * @xfers: array of transfers 23 * @nxfers: number of transfers 24 * 25 * Initiate one or several private SDR transfers with @dev. 26 * ··· 33 * 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021. Section: 34 * 5.1.2.2.3. 35 */ 36 - int i3c_device_do_priv_xfers(struct i3c_device *dev, 37 - struct i3c_priv_xfer *xfers, 38 - int nxfers) 39 { 40 int ret, i; 41 ··· 47 } 48 49 i3c_bus_normaluse_lock(dev->bus); 50 - ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers); 51 i3c_bus_normaluse_unlock(dev->bus); 52 53 return ret; 54 } 55 - EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers); 56 57 /** 58 * i3c_device_do_setdasa() - do I3C dynamic address assignement with ··· 258 return NULL; 259 } 260 EXPORT_SYMBOL_GPL(i3c_device_match_id); 261 262 /** 263 * i3c_driver_register_with_owner() - register an I3C device driver
··· 15 #include "internals.h" 16 17 /** 18 + * i3c_device_do_xfers() - do I3C transfers directed to a specific device 19 * 20 * @dev: device with which the transfers should be done 21 * @xfers: array of transfers 22 * @nxfers: number of transfers 23 + * @mode: transfer mode (see enum i3c_xfer_mode) 24 * 25 * Initiate one or several private SDR transfers with @dev. 26 * ··· 33 * 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021. Section: 34 * 5.1.2.2.3. 35 */ 36 + int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers, 37 + int nxfers, enum i3c_xfer_mode mode) 38 { 39 int ret, i; 40 ··· 48 } 49 50 i3c_bus_normaluse_lock(dev->bus); 51 + ret = i3c_dev_do_xfers_locked(dev->desc, xfers, nxfers, mode); 52 i3c_bus_normaluse_unlock(dev->bus); 53 54 return ret; 55 } 56 + EXPORT_SYMBOL_GPL(i3c_device_do_xfers); 57 58 /** 59 * i3c_device_do_setdasa() - do I3C dynamic address assignement with ··· 259 return NULL; 260 } 261 EXPORT_SYMBOL_GPL(i3c_device_match_id); 262 + 263 + /** 264 + * i3c_device_get_supported_xfer_mode - Return the transfer modes supported by 265 + * the connected master controller. 266 + * @dev: I3C device 267 + * 268 + * Return: a bit mask of the supported transfer modes; bit positions are defined 269 + * by enum i3c_xfer_mode 270 + */ 271 + u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev) 272 + { 273 + return i3c_dev_get_master(dev->desc)->this->info.hdr_cap | BIT(I3C_SDR); 274 + } 275 + EXPORT_SYMBOL_GPL(i3c_device_get_supported_xfer_mode); 276 277 /** 278 * i3c_driver_register_with_owner() - register an I3C device driver
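
The two exports added above give client drivers a mode-aware transfer path. What follows is a minimal, hypothetical sketch (not part of this merge) of how a client might use them: probe the advertised modes with i3c_device_get_supported_xfer_mode() and prefer HDR-DDR when available, falling back to SDR otherwise. The function name, the 0x80 HDR read command code, and the assumption that the buffer is DMA-able are illustrative choices, not taken from any in-tree driver.

#include <linux/bits.h>
#include <linux/i3c/device.h>

/* Hypothetical example: read @len bytes from @dev into the DMA-able buffer @buf */
static int example_i3c_read(struct i3c_device *dev, u8 *buf, u16 len)
{
	u32 modes = i3c_device_get_supported_xfer_mode(dev);
	struct i3c_xfer xfer = {
		.len = len,
		.data.in = buf,
	};

	if (modes & BIT(I3C_HDR_DDR)) {
		/* HDR read command codes live in the 0x80-0xff range */
		xfer.cmd = 0x80;
		return i3c_device_do_xfers(dev, &xfer, 1, I3C_HDR_DDR);
	}

	/* Fall back to a plain SDR private read */
	xfer.rnw = true;
	return i3c_device_do_xfers(dev, &xfer, 1, I3C_SDR);
}
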
+3 -3
drivers/i3c/internals.h
··· 15 void i3c_bus_normaluse_unlock(struct i3c_bus *bus); 16 17 int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev); 18 - int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, 19 - struct i3c_priv_xfer *xfers, 20 - int nxfers); 21 int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev); 22 int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev); 23 int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
··· 15 void i3c_bus_normaluse_unlock(struct i3c_bus *bus); 16 17 int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev); 18 + int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, 19 + struct i3c_xfer *xfers, 20 + int nxfers, enum i3c_xfer_mode mode); 21 int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev); 22 int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev); 23 int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+19 -12
drivers/i3c/master.c
··· 334 335 if (driver->remove) 336 driver->remove(i3cdev); 337 - 338 - i3c_device_free_ibi(i3cdev); 339 } 340 341 const struct bus_type i3c_bus_type = { ··· 2819 2820 static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops) 2821 { 2822 - if (!ops || !ops->bus_init || !ops->priv_xfers || 2823 !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers) 2824 return -EINVAL; 2825 2826 if (ops->request_ibi && ··· 2885 INIT_LIST_HEAD(&master->boardinfo.i2c); 2886 INIT_LIST_HEAD(&master->boardinfo.i3c); 2887 2888 - ret = i3c_bus_init(i3cbus, master->dev.of_node); 2889 - if (ret) 2890 - return ret; 2891 - 2892 device_initialize(&master->dev); 2893 dev_set_name(&master->dev, "i3c-%d", i3cbus->id); 2894 2895 master->dev.dma_mask = parent->dma_mask; 2896 master->dev.coherent_dma_mask = parent->coherent_dma_mask; 2897 master->dev.dma_parms = parent->dma_parms; 2898 2899 ret = of_populate_i3c_bus(master); 2900 if (ret) ··· 2927 if (ret) 2928 goto err_put_dev; 2929 2930 - master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent)); 2931 if (!master->wq) { 2932 ret = -ENOMEM; 2933 goto err_put_dev; ··· 3016 dev->boardinfo->init_dyn_addr); 3017 } 3018 3019 - int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, 3020 - struct i3c_priv_xfer *xfers, 3021 - int nxfers) 3022 { 3023 struct i3c_master_controller *master; 3024 ··· 3028 if (!master || !xfers) 3029 return -EINVAL; 3030 3031 - if (!master->ops->priv_xfers) 3032 return -EOPNOTSUPP; 3033 3034 return master->ops->priv_xfers(dev, xfers, nxfers); 3035 }
··· 334 335 if (driver->remove) 336 driver->remove(i3cdev); 337 } 338 339 const struct bus_type i3c_bus_type = { ··· 2821 2822 static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops) 2823 { 2824 + if (!ops || !ops->bus_init || 2825 !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers) 2826 + return -EINVAL; 2827 + 2828 + /* Must provide one of priv_xfers (SDR only) or i3c_xfers (all modes) */ 2829 + if (!ops->priv_xfers && !ops->i3c_xfers) 2830 return -EINVAL; 2831 2832 if (ops->request_ibi && ··· 2883 INIT_LIST_HEAD(&master->boardinfo.i2c); 2884 INIT_LIST_HEAD(&master->boardinfo.i3c); 2885 2886 device_initialize(&master->dev); 2887 dev_set_name(&master->dev, "i3c-%d", i3cbus->id); 2888 2889 master->dev.dma_mask = parent->dma_mask; 2890 master->dev.coherent_dma_mask = parent->coherent_dma_mask; 2891 master->dev.dma_parms = parent->dma_parms; 2892 + 2893 + ret = i3c_bus_init(i3cbus, master->dev.of_node); 2894 + if (ret) 2895 + goto err_put_dev; 2896 2897 ret = of_populate_i3c_bus(master); 2898 if (ret) ··· 2925 if (ret) 2926 goto err_put_dev; 2927 2928 + master->wq = alloc_workqueue("%s", WQ_PERCPU, 0, dev_name(parent)); 2929 if (!master->wq) { 2930 ret = -ENOMEM; 2931 goto err_put_dev; ··· 3014 dev->boardinfo->init_dyn_addr); 3015 } 3016 3017 + int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, 3018 + int nxfers, enum i3c_xfer_mode mode) 3019 { 3020 struct i3c_master_controller *master; 3021 ··· 3027 if (!master || !xfers) 3028 return -EINVAL; 3029 3030 + if (mode != I3C_SDR && !(master->this->info.hdr_cap & BIT(mode))) 3031 return -EOPNOTSUPP; 3032 + 3033 + if (master->ops->i3c_xfers) 3034 + return master->ops->i3c_xfers(dev, xfers, nxfers, mode); 3035 + 3036 + if (mode != I3C_SDR) 3037 + return -EINVAL; 3038 3039 return master->ops->priv_xfers(dev, xfers, nxfers); 3040 }
+30 -1
drivers/i3c/master/dw-i3c-master.c
··· 228 229 /* List of quirks */ 230 #define AMD_I3C_OD_PP_TIMING BIT(1) 231 232 struct dw_i3c_cmd { 233 u32 cmd_lo; ··· 251 struct dw_i3c_i2c_dev_data { 252 u8 index; 253 struct i3c_generic_ibi_pool *ibi_pool; 254 }; 255 256 static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m, ··· 1540 struct platform_device *pdev) 1541 { 1542 int ret, irq; 1543 1544 if (!master->platform_ops) 1545 master->platform_ops = &dw_i3c_platform_ops_default; ··· 1597 master->maxdevs = ret >> 16; 1598 master->free_pos = GENMASK(master->maxdevs - 1, 0); 1599 1600 - master->quirks = (unsigned long)device_get_match_data(&pdev->dev); 1601 1602 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1603 ret = i3c_master_register(&master->base, &pdev->dev, ··· 1634 { 1635 cancel_work_sync(&master->hj_work); 1636 i3c_master_unregister(&master->base); 1637 1638 pm_runtime_disable(master->dev); 1639 pm_runtime_set_suspended(master->dev); ··· 1781 pm_runtime_put_autosuspend(master->dev); 1782 } 1783 1784 static const struct of_device_id dw_i3c_master_of_match[] = { 1785 { .compatible = "snps,dw-i3c-master-1.00a", }, 1786 {}, 1787 }; 1788 MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
··· 228 229 /* List of quirks */ 230 #define AMD_I3C_OD_PP_TIMING BIT(1) 231 + #define DW_I3C_DISABLE_RUNTIME_PM_QUIRK BIT(2) 232 233 struct dw_i3c_cmd { 234 u32 cmd_lo; ··· 250 struct dw_i3c_i2c_dev_data { 251 u8 index; 252 struct i3c_generic_ibi_pool *ibi_pool; 253 + }; 254 + 255 + struct dw_i3c_drvdata { 256 + u32 flags; 257 }; 258 259 static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m, ··· 1535 struct platform_device *pdev) 1536 { 1537 int ret, irq; 1538 + const struct dw_i3c_drvdata *drvdata; 1539 + unsigned long quirks = 0; 1540 1541 if (!master->platform_ops) 1542 master->platform_ops = &dw_i3c_platform_ops_default; ··· 1590 master->maxdevs = ret >> 16; 1591 master->free_pos = GENMASK(master->maxdevs - 1, 0); 1592 1593 + if (has_acpi_companion(&pdev->dev)) { 1594 + quirks = (unsigned long)device_get_match_data(&pdev->dev); 1595 + } else if (pdev->dev.of_node) { 1596 + drvdata = device_get_match_data(&pdev->dev); 1597 + if (drvdata) 1598 + quirks = drvdata->flags; 1599 + } 1600 + master->quirks = quirks; 1601 + 1602 + /* Keep controller enabled by preventing runtime suspend */ 1603 + if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1604 + pm_runtime_get_noresume(&pdev->dev); 1605 1606 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1607 ret = i3c_master_register(&master->base, &pdev->dev, ··· 1616 { 1617 cancel_work_sync(&master->hj_work); 1618 i3c_master_unregister(&master->base); 1619 + 1620 + /* Balance pm_runtime_get_noresume() from probe() */ 1621 + if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1622 + pm_runtime_put_noidle(master->dev); 1623 1624 pm_runtime_disable(master->dev); 1625 pm_runtime_set_suspended(master->dev); ··· 1759 pm_runtime_put_autosuspend(master->dev); 1760 } 1761 1762 + static const struct dw_i3c_drvdata altr_agilex5_drvdata = { 1763 + .flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK, 1764 + }; 1765 + 1766 static const struct of_device_id dw_i3c_master_of_match[] = { 1767 { .compatible = "snps,dw-i3c-master-1.00a", }, 1768 + { .compatible = "altr,agilex5-dw-i3c-master", 1769 + .data = &altr_agilex5_drvdata, 1770 + }, 1771 {}, 1772 }; 1773 MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
+187 -42
drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
··· 7 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> 8 */ 9 #include <linux/acpi.h> 10 #include <linux/idr.h> 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/pci.h> 14 #include <linux/platform_device.h> 15 16 - struct mipi_i3c_hci_pci_info { 17 - int (*init)(struct pci_dev *pci); 18 }; 19 20 - #define INTEL_PRIV_OFFSET 0x2b0 21 - #define INTEL_PRIV_SIZE 0x28 22 - #define INTEL_PRIV_RESETS 0x04 23 - #define INTEL_PRIV_RESETS_RESET BIT(0) 24 - #define INTEL_PRIV_RESETS_RESET_DONE BIT(1) 25 26 static DEFINE_IDA(mipi_i3c_hci_pci_ida); 27 28 - static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci) 29 - { 30 - unsigned long timeout; 31 - void __iomem *priv; 32 33 - priv = devm_ioremap(&pci->dev, 34 - pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET, 35 - INTEL_PRIV_SIZE); 36 - if (!priv) 37 - return -ENOMEM; 38 39 /* Assert reset, wait for completion and release reset */ 40 - writel(0, priv + INTEL_PRIV_RESETS); 41 - timeout = jiffies + msecs_to_jiffies(10); 42 - while (!(readl(priv + INTEL_PRIV_RESETS) & 43 - INTEL_PRIV_RESETS_RESET_DONE)) { 44 - if (time_after(jiffies, timeout)) 45 - break; 46 - cpu_relax(); 47 - } 48 - writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS); 49 50 return 0; 51 } 52 53 - static struct mipi_i3c_hci_pci_info intel_info = { 54 - .init = mipi_i3c_hci_pci_intel_init, 55 }; 56 57 static int mipi_i3c_hci_pci_probe(struct pci_dev *pci, 58 const struct pci_device_id *id) 59 { 60 - struct mipi_i3c_hci_pci_info *info; 61 - struct platform_device *pdev; 62 struct resource res[2]; 63 int dev_id, ret; 64 65 ret = pcim_enable_device(pci); 66 if (ret) ··· 217 if (dev_id < 0) 218 return dev_id; 219 220 - pdev = platform_device_alloc("mipi-i3c-hci", dev_id); 221 - if (!pdev) 222 return -ENOMEM; 223 224 - pdev->dev.parent = &pci->dev; 225 - device_set_node(&pdev->dev, dev_fwnode(&pci->dev)); 226 227 - ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 228 if (ret) 229 goto err; 230 231 - info = (struct mipi_i3c_hci_pci_info *)id->driver_data; 232 - if (info && info->init) { 233 - ret = info->init(pci); 234 if (ret) 235 goto err; 236 } 237 238 - ret = platform_device_add(pdev); 239 if (ret) 240 - goto err; 241 242 - pci_set_drvdata(pci, pdev); 243 244 return 0; 245 246 err: 247 - platform_device_put(pdev); 248 ida_free(&mipi_i3c_hci_pci_ida, dev_id); 249 return ret; 250 } 251 252 static void mipi_i3c_hci_pci_remove(struct pci_dev *pci) 253 { 254 - struct platform_device *pdev = pci_get_drvdata(pci); 255 int dev_id = pdev->id; 256 257 platform_device_unregister(pdev); 258 ida_free(&mipi_i3c_hci_pci_ida, dev_id); ··· 275 /* Panther Lake-P */ 276 { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info}, 277 { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info}, 278 { }, 279 }; 280 MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
··· 7 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> 8 */ 9 #include <linux/acpi.h> 10 + #include <linux/bitfield.h> 11 + #include <linux/debugfs.h> 12 #include <linux/idr.h> 13 + #include <linux/iopoll.h> 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/pci.h> 17 #include <linux/platform_device.h> 18 + #include <linux/pm_qos.h> 19 20 + struct mipi_i3c_hci_pci { 21 + struct pci_dev *pci; 22 + struct platform_device *pdev; 23 + const struct mipi_i3c_hci_pci_info *info; 24 + void *private; 25 }; 26 27 + struct mipi_i3c_hci_pci_info { 28 + int (*init)(struct mipi_i3c_hci_pci *hci); 29 + void (*exit)(struct mipi_i3c_hci_pci *hci); 30 + }; 31 32 static DEFINE_IDA(mipi_i3c_hci_pci_ida); 33 34 + #define INTEL_PRIV_OFFSET 0x2b0 35 + #define INTEL_PRIV_SIZE 0x28 36 + #define INTEL_RESETS 0x04 37 + #define INTEL_RESETS_RESET BIT(0) 38 + #define INTEL_RESETS_RESET_DONE BIT(1) 39 + #define INTEL_RESETS_TIMEOUT_US (10 * USEC_PER_MSEC) 40 41 + #define INTEL_ACTIVELTR 0x0c 42 + #define INTEL_IDLELTR 0x10 43 + 44 + #define INTEL_LTR_REQ BIT(15) 45 + #define INTEL_LTR_SCALE_MASK GENMASK(11, 10) 46 + #define INTEL_LTR_SCALE_1US FIELD_PREP(INTEL_LTR_SCALE_MASK, 2) 47 + #define INTEL_LTR_SCALE_32US FIELD_PREP(INTEL_LTR_SCALE_MASK, 3) 48 + #define INTEL_LTR_VALUE_MASK GENMASK(9, 0) 49 + 50 + struct intel_host { 51 + void __iomem *priv; 52 + u32 active_ltr; 53 + u32 idle_ltr; 54 + struct dentry *debugfs_root; 55 + }; 56 + 57 + static void intel_cache_ltr(struct intel_host *host) 58 + { 59 + host->active_ltr = readl(host->priv + INTEL_ACTIVELTR); 60 + host->idle_ltr = readl(host->priv + INTEL_IDLELTR); 61 + } 62 + 63 + static void intel_ltr_set(struct device *dev, s32 val) 64 + { 65 + struct mipi_i3c_hci_pci *hci = dev_get_drvdata(dev); 66 + struct intel_host *host = hci->private; 67 + u32 ltr; 68 + 69 + /* 70 + * Program latency tolerance (LTR) accordingly what has been asked 71 + * by the PM QoS layer or disable it in case we were passed 72 + * negative value or PM_QOS_LATENCY_ANY. 
73 + */ 74 + ltr = readl(host->priv + INTEL_ACTIVELTR); 75 + 76 + if (val == PM_QOS_LATENCY_ANY || val < 0) { 77 + ltr &= ~INTEL_LTR_REQ; 78 + } else { 79 + ltr |= INTEL_LTR_REQ; 80 + ltr &= ~INTEL_LTR_SCALE_MASK; 81 + ltr &= ~INTEL_LTR_VALUE_MASK; 82 + 83 + if (val > INTEL_LTR_VALUE_MASK) { 84 + val >>= 5; 85 + if (val > INTEL_LTR_VALUE_MASK) 86 + val = INTEL_LTR_VALUE_MASK; 87 + ltr |= INTEL_LTR_SCALE_32US | val; 88 + } else { 89 + ltr |= INTEL_LTR_SCALE_1US | val; 90 + } 91 + } 92 + 93 + if (ltr == host->active_ltr) 94 + return; 95 + 96 + writel(ltr, host->priv + INTEL_ACTIVELTR); 97 + writel(ltr, host->priv + INTEL_IDLELTR); 98 + 99 + /* Cache the values into intel_host structure */ 100 + intel_cache_ltr(host); 101 + } 102 + 103 + static void intel_ltr_expose(struct device *dev) 104 + { 105 + dev->power.set_latency_tolerance = intel_ltr_set; 106 + dev_pm_qos_expose_latency_tolerance(dev); 107 + } 108 + 109 + static void intel_ltr_hide(struct device *dev) 110 + { 111 + dev_pm_qos_hide_latency_tolerance(dev); 112 + dev->power.set_latency_tolerance = NULL; 113 + } 114 + 115 + static void intel_add_debugfs(struct mipi_i3c_hci_pci *hci) 116 + { 117 + struct dentry *dir = debugfs_create_dir(dev_name(&hci->pci->dev), NULL); 118 + struct intel_host *host = hci->private; 119 + 120 + intel_cache_ltr(host); 121 + 122 + host->debugfs_root = dir; 123 + debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr); 124 + debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr); 125 + } 126 + 127 + static void intel_remove_debugfs(struct mipi_i3c_hci_pci *hci) 128 + { 129 + struct intel_host *host = hci->private; 130 + 131 + debugfs_remove_recursive(host->debugfs_root); 132 + } 133 + 134 + static void intel_reset(void __iomem *priv) 135 + { 136 + u32 reg; 137 138 /* Assert reset, wait for completion and release reset */ 139 + writel(0, priv + INTEL_RESETS); 140 + readl_poll_timeout(priv + INTEL_RESETS, reg, 141 + reg & INTEL_RESETS_RESET_DONE, 0, 142 + INTEL_RESETS_TIMEOUT_US); 143 + writel(INTEL_RESETS_RESET, priv + INTEL_RESETS); 144 + } 145 + 146 + static void __iomem *intel_priv(struct pci_dev *pci) 147 + { 148 + resource_size_t base = pci_resource_start(pci, 0); 149 + 150 + return devm_ioremap(&pci->dev, base + INTEL_PRIV_OFFSET, INTEL_PRIV_SIZE); 151 + } 152 + 153 + static int intel_i3c_init(struct mipi_i3c_hci_pci *hci) 154 + { 155 + struct intel_host *host = devm_kzalloc(&hci->pci->dev, sizeof(*host), GFP_KERNEL); 156 + void __iomem *priv = intel_priv(hci->pci); 157 + 158 + if (!host || !priv) 159 + return -ENOMEM; 160 + 161 + dma_set_mask_and_coherent(&hci->pci->dev, DMA_BIT_MASK(64)); 162 + 163 + hci->pci->d3cold_delay = 0; 164 + 165 + hci->private = host; 166 + host->priv = priv; 167 + 168 + intel_reset(priv); 169 + 170 + intel_ltr_expose(&hci->pci->dev); 171 + intel_add_debugfs(hci); 172 173 return 0; 174 } 175 176 + static void intel_i3c_exit(struct mipi_i3c_hci_pci *hci) 177 + { 178 + intel_remove_debugfs(hci); 179 + intel_ltr_hide(&hci->pci->dev); 180 + } 181 + 182 + static const struct mipi_i3c_hci_pci_info intel_info = { 183 + .init = intel_i3c_init, 184 + .exit = intel_i3c_exit, 185 }; 186 187 static int mipi_i3c_hci_pci_probe(struct pci_dev *pci, 188 const struct pci_device_id *id) 189 { 190 + struct mipi_i3c_hci_pci *hci; 191 struct resource res[2]; 192 int dev_id, ret; 193 + 194 + hci = devm_kzalloc(&pci->dev, sizeof(*hci), GFP_KERNEL); 195 + if (!hci) 196 + return -ENOMEM; 197 + 198 + hci->pci = pci; 199 200 ret = pcim_enable_device(pci); 201 if (ret) ··· 82 if (dev_id < 0) 
83 return dev_id; 84 85 + hci->pdev = platform_device_alloc("mipi-i3c-hci", dev_id); 86 + if (!hci->pdev) 87 return -ENOMEM; 88 89 + hci->pdev->dev.parent = &pci->dev; 90 + device_set_node(&hci->pdev->dev, dev_fwnode(&pci->dev)); 91 92 + ret = platform_device_add_resources(hci->pdev, res, ARRAY_SIZE(res)); 93 if (ret) 94 goto err; 95 96 + hci->info = (const struct mipi_i3c_hci_pci_info *)id->driver_data; 97 + if (hci->info && hci->info->init) { 98 + ret = hci->info->init(hci); 99 if (ret) 100 goto err; 101 } 102 103 + ret = platform_device_add(hci->pdev); 104 if (ret) 105 + goto err_exit; 106 107 + pci_set_drvdata(pci, hci); 108 109 return 0; 110 111 + err_exit: 112 + if (hci->info && hci->info->exit) 113 + hci->info->exit(hci); 114 err: 115 + platform_device_put(hci->pdev); 116 ida_free(&mipi_i3c_hci_pci_ida, dev_id); 117 return ret; 118 } 119 120 static void mipi_i3c_hci_pci_remove(struct pci_dev *pci) 121 { 122 + struct mipi_i3c_hci_pci *hci = pci_get_drvdata(pci); 123 + struct platform_device *pdev = hci->pdev; 124 int dev_id = pdev->id; 125 + 126 + if (hci->info && hci->info->exit) 127 + hci->info->exit(hci); 128 129 platform_device_unregister(pdev); 130 ida_free(&mipi_i3c_hci_pci_ida, dev_id); ··· 133 /* Panther Lake-P */ 134 { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info}, 135 { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info}, 136 + /* Nova Lake-S */ 137 + { PCI_VDEVICE(INTEL, 0x6e2c), (kernel_ulong_t)&intel_info}, 138 + { PCI_VDEVICE(INTEL, 0x6e2d), (kernel_ulong_t)&intel_info}, 139 { }, 140 }; 141 MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
+112 -25
drivers/i3c/master/svc-i3c-master.c
··· 40 #define SVC_I3C_MCTRL_REQUEST_NONE 0 41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1 42 #define SVC_I3C_MCTRL_REQUEST_STOP 2 43 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3 44 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4 45 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7 46 #define SVC_I3C_MCTRL_TYPE_I3C 0 47 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4) 48 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0 49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0 50 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7) ··· 97 #define SVC_I3C_MINTMASKED 0x098 98 #define SVC_I3C_MERRWARN 0x09C 99 #define SVC_I3C_MERRWARN_NACK BIT(2) 100 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20) 101 #define SVC_I3C_MDMACTRL 0x0A0 102 #define SVC_I3C_MDATACTRL 0x0AC ··· 168 169 struct svc_i3c_cmd { 170 u8 addr; 171 - bool rnw; 172 u8 *in; 173 const void *out; 174 unsigned int len; 175 unsigned int actual_len; 176 - struct i3c_priv_xfer *xfer; 177 bool continued; 178 }; 179 ··· 390 return master->descs[i]; 391 } 392 393 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master) 394 { 395 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL); ··· 443 int ret, val; 444 u8 *buf; 445 446 - slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); 447 - if (!slot) 448 - return -ENOSPC; 449 - 450 - slot->len = 0; 451 - buf = slot->data; 452 - 453 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val, 454 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000); 455 if (ret) { 456 dev_err(master->dev, "Timeout when polling for COMPLETE\n"); 457 - i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); 458 return ret; 459 } 460 461 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) && 462 slot->len < SVC_I3C_FIFO_SIZE) { ··· 555 * cycle, leading to missed client IBI handlers. 556 * 557 * A typical scenario is when IBIWON occurs and bus arbitration is lost 558 - * at svc_i3c_master_priv_xfers(). 559 * 560 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI. 561 */ ··· 834 goto rpm_out; 835 836 info.dyn_addr = ret; 837 838 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr), 839 master->regs + SVC_I3C_MDYNADDR); ··· 1338 } 1339 1340 static int svc_i3c_master_xfer(struct svc_i3c_master *master, 1341 - bool rnw, unsigned int xfer_type, u8 addr, 1342 u8 *in, const u8 *out, unsigned int xfer_len, 1343 unsigned int *actual_len, bool continued, bool repeat_start) 1344 { 1345 int retry = repeat_start ? 1 : 2; 1346 u32 reg; 1347 int ret; ··· 1350 /* clean SVC_I3C_MINT_IBIWON w1c bits */ 1351 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); 1352 1353 1354 while (retry--) { 1355 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | ··· 1453 1454 if (rnw) 1455 ret = svc_i3c_master_read(master, in, xfer_len); 1456 - else 1457 ret = svc_i3c_master_write(master, out, xfer_len); 1458 if (ret < 0) 1459 goto emit_stop; ··· 1466 if (ret) 1467 goto emit_stop; 1468 1469 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS); 1470 1471 if (!continued) { 1472 - svc_i3c_master_emit_stop(master); 1473 1474 /* Wait idle if stop is sent. 
*/ 1475 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, ··· 1488 return 0; 1489 1490 emit_stop: 1491 - svc_i3c_master_emit_stop(master); 1492 svc_i3c_master_clear_merrwarn(master); 1493 svc_i3c_master_flush_fifo(master); 1494 ··· 1539 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 1540 } 1541 1542 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) 1543 { 1544 struct svc_i3c_xfer *xfer = master->xferqueue.cur; ··· 1558 for (i = 0; i < xfer->ncmds; i++) { 1559 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1560 1561 - ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, 1562 cmd->addr, cmd->in, cmd->out, 1563 cmd->len, &cmd->actual_len, 1564 cmd->continued, i > 0); ··· 1733 return ret; 1734 } 1735 1736 - static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev, 1737 - struct i3c_priv_xfer *xfers, 1738 - int nxfers) 1739 { 1740 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1741 struct svc_i3c_master *master = to_svc_i3c_master(m); ··· 1742 struct svc_i3c_xfer *xfer; 1743 int ret, i; 1744 1745 xfer = svc_i3c_master_alloc_xfer(master, nxfers); 1746 if (!xfer) 1747 return -ENOMEM; 1748 1749 - xfer->type = SVC_I3C_MCTRL_TYPE_I3C; 1750 1751 for (i = 0; i < nxfers; i++) { 1752 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1753 1754 cmd->xfer = &xfers[i]; 1755 cmd->addr = master->addrs[data->index]; 1756 - cmd->rnw = xfers[i].rnw; 1757 - cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL; 1758 - cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out; 1759 cmd->len = xfers[i].len; 1760 - cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0; 1761 cmd->continued = (i + 1) < nxfers; 1762 } 1763 ··· 1966 .do_daa = svc_i3c_master_do_daa, 1967 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd, 1968 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd, 1969 - .priv_xfers = svc_i3c_master_priv_xfers, 1970 .i2c_xfers = svc_i3c_master_i2c_xfers, 1971 .request_ibi = svc_i3c_master_request_ibi, 1972 .free_ibi = svc_i3c_master_free_ibi,
··· 40 #define SVC_I3C_MCTRL_REQUEST_NONE 0 41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1 42 #define SVC_I3C_MCTRL_REQUEST_STOP 2 43 + #define SVC_I3C_MCTRL_REQUEST_FORCE_EXIT 6 44 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3 45 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4 46 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7 47 #define SVC_I3C_MCTRL_TYPE_I3C 0 48 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4) 49 + #define SVC_I3C_MCTRL_TYPE_DDR BIT(5) 50 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0 51 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0 52 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7) ··· 95 #define SVC_I3C_MINTMASKED 0x098 96 #define SVC_I3C_MERRWARN 0x09C 97 #define SVC_I3C_MERRWARN_NACK BIT(2) 98 + #define SVC_I3C_MERRWARN_CRC BIT(10) 99 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20) 100 #define SVC_I3C_MDMACTRL 0x0A0 101 #define SVC_I3C_MDATACTRL 0x0AC ··· 165 166 struct svc_i3c_cmd { 167 u8 addr; 168 + union { 169 + bool rnw; 170 + u8 cmd; 171 + u32 rnw_cmd; 172 + }; 173 u8 *in; 174 const void *out; 175 unsigned int len; 176 unsigned int actual_len; 177 + struct i3c_xfer *xfer; 178 bool continued; 179 }; 180 ··· 383 return master->descs[i]; 384 } 385 386 + static bool svc_cmd_is_read(u32 rnw_cmd, u32 type) 387 + { 388 + return (type == SVC_I3C_MCTRL_TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd; 389 + } 390 + 391 + static void svc_i3c_master_emit_force_exit(struct svc_i3c_master *master) 392 + { 393 + u32 reg; 394 + 395 + writel(SVC_I3C_MCTRL_REQUEST_FORCE_EXIT, master->regs + SVC_I3C_MCTRL); 396 + 397 + /* 398 + * No error check is needed here: this never fails in hardware. The IP 399 + * only waits a few fclk cycles to complete the DDR exit pattern. Even 400 + * if fclk has stopped and this poll times out, the data transfer has 401 + * already completed; only the next command will time out because the 402 + * hardware is left in a wrong state. 403 + */ 404 + readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg, 405 + SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000); 406 + 407 + /* 408 + * This delay is necessary after the emission of a stop, otherwise eg. 409 + * repeating IBIs do not get detected. There is a note in the manual 410 + * about it, stating that the stop condition might not be settled 411 + * correctly if a start condition follows too rapidly. 412 + */ 413 + udelay(1); 414 + } 415 + 416 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master) 417 { 418 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL); ··· 406 int ret, val; 407 u8 *buf; 408 409 + /* 410 + * Wait for transfer to complete before returning. Otherwise, the EmitStop 411 + * request might be sent when the transfer is not complete. 412 + */ 413 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val, 414 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000); 415 if (ret) { 416 dev_err(master->dev, "Timeout when polling for COMPLETE\n"); 417 return ret; 418 } 419 + 420 + slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); 421 + if (!slot) { 422 + dev_dbg(master->dev, "No free ibi slot, drop the data\n"); 423 + writel(SVC_I3C_MDATACTRL_FLUSHRB, master->regs + SVC_I3C_MDATACTRL); 424 + return -ENOSPC; 425 + } 426 + 427 + slot->len = 0; 428 + buf = slot->data; 429 430 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) && 431 slot->len < SVC_I3C_FIFO_SIZE) { ··· 512 * cycle, leading to missed client IBI handlers. 513 * 514 * A typical scenario is when IBIWON occurs and bus arbitration is lost 515 + * at svc_i3c_master_i3c_xfers(). 516 * 517 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI. 
518 */ ··· 791 goto rpm_out; 792 793 info.dyn_addr = ret; 794 + 795 + info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR); 796 797 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr), 798 master->regs + SVC_I3C_MDYNADDR); ··· 1293 } 1294 1295 static int svc_i3c_master_xfer(struct svc_i3c_master *master, 1296 + u32 rnw_cmd, unsigned int xfer_type, u8 addr, 1297 u8 *in, const u8 *out, unsigned int xfer_len, 1298 unsigned int *actual_len, bool continued, bool repeat_start) 1299 { 1300 + bool rnw = svc_cmd_is_read(rnw_cmd, xfer_type); 1301 int retry = repeat_start ? 1 : 2; 1302 u32 reg; 1303 int ret; ··· 1304 /* clean SVC_I3C_MINT_IBIWON w1c bits */ 1305 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); 1306 1307 + if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR) { 1308 + /* The DDR command must be prefilled into the FIFO */ 1309 + writel(rnw_cmd, master->regs + SVC_I3C_MWDATAB); 1310 + if (!rnw) { 1311 + /* Write data must also be prefilled into the FIFO */ 1312 + ret = svc_i3c_master_write(master, out, xfer_len); 1313 + if (ret) 1314 + goto emit_stop; 1315 + } 1316 + } 1317 1318 while (retry--) { 1319 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | ··· 1397 1398 if (rnw) 1399 ret = svc_i3c_master_read(master, in, xfer_len); 1400 + else if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1401 ret = svc_i3c_master_write(master, out, xfer_len); 1402 if (ret < 0) 1403 goto emit_stop; ··· 1410 if (ret) 1411 goto emit_stop; 1412 1413 + if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR && 1414 + (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_CRC)) { 1415 + ret = -ENXIO; 1416 + goto emit_stop; 1417 + } 1418 + 1419 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS); 1420 1421 if (!continued) { 1422 + if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1423 + svc_i3c_master_emit_stop(master); 1424 + else 1425 + svc_i3c_master_emit_force_exit(master); 1426 1427 /* Wait idle if stop is sent. */ 1428 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, ··· 1423 return 0; 1424 1425 emit_stop: 1426 + if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1427 + svc_i3c_master_emit_stop(master); 1428 + else 1429 + svc_i3c_master_emit_force_exit(master); 1430 + 1431 svc_i3c_master_clear_merrwarn(master); 1432 svc_i3c_master_flush_fifo(master); 1433 ··· 1470 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 1471 } 1472 1473 + static int i3c_mode_to_svc_type(enum i3c_xfer_mode mode) 1474 + { 1475 + return (mode == I3C_SDR) ? SVC_I3C_MCTRL_TYPE_I3C : SVC_I3C_MCTRL_TYPE_DDR; 1476 + } 1477 + 1478 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) 1479 { 1480 struct svc_i3c_xfer *xfer = master->xferqueue.cur; ··· 1484 for (i = 0; i < xfer->ncmds; i++) { 1485 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1486 1487 + ret = svc_i3c_master_xfer(master, cmd->rnw_cmd, xfer->type, 1488 cmd->addr, cmd->in, cmd->out, 1489 cmd->len, &cmd->actual_len, 1490 cmd->continued, i > 0); ··· 1659 return ret; 1660 } 1661 1662 + static int svc_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, 1663 + int nxfers, enum i3c_xfer_mode mode) 1664 { 1665 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1666 struct svc_i3c_master *master = to_svc_i3c_master(m); ··· 1669 struct svc_i3c_xfer *xfer; 1670 int ret, i; 1671 1672 + if (mode != I3C_SDR) { 1673 + /* 1674 + * Only data sizes smaller than the FIFO size are supported in 1675 + * DDR mode. The first FIFO entry holds the command, so the 1676 + * space available for data is SVC_I3C_FIFO_SIZE - 2, as DDR 1677 + * only supports even lengths. 
1678 + */ 1679 + for (i = 0; i < nxfers; i++) 1680 + if (xfers[i].len > SVC_I3C_FIFO_SIZE - 2) 1681 + return -EINVAL; 1682 + } 1683 + 1684 xfer = svc_i3c_master_alloc_xfer(master, nxfers); 1685 if (!xfer) 1686 return -ENOMEM; 1687 1688 + xfer->type = i3c_mode_to_svc_type(mode); 1689 1690 for (i = 0; i < nxfers; i++) { 1691 + u32 rnw_cmd = (mode == I3C_SDR) ? xfers[i].rnw : xfers[i].cmd; 1692 + bool rnw = svc_cmd_is_read(rnw_cmd, xfer->type); 1693 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1694 1695 cmd->xfer = &xfers[i]; 1696 cmd->addr = master->addrs[data->index]; 1697 + cmd->rnw_cmd = rnw_cmd; 1698 + cmd->in = rnw ? xfers[i].data.in : NULL; 1699 + cmd->out = rnw ? NULL : xfers[i].data.out; 1700 cmd->len = xfers[i].len; 1701 + cmd->actual_len = rnw ? xfers[i].len : 0; 1702 cmd->continued = (i + 1) < nxfers; 1703 } 1704 ··· 1879 .do_daa = svc_i3c_master_do_daa, 1880 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd, 1881 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd, 1882 + .i3c_xfers = svc_i3c_master_i3c_xfers, 1883 .i2c_xfers = svc_i3c_master_i2c_xfers, 1884 .request_ibi = svc_i3c_master_request_ibi, 1885 .free_ibi = svc_i3c_master_free_ibi,
+4 -4
drivers/net/mctp/mctp-i3c.c
··· 99 100 static int mctp_i3c_read(struct mctp_i3c_device *mi) 101 { 102 - struct i3c_priv_xfer xfer = { .rnw = 1, .len = mi->mrl }; 103 struct net_device_stats *stats = &mi->mbus->ndev->stats; 104 struct mctp_i3c_internal_hdr *ihdr = NULL; 105 struct sk_buff *skb = NULL; ··· 127 128 /* Make sure netif_rx() is read in the same order as i3c. */ 129 mutex_lock(&mi->lock); 130 - rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); 131 if (rc < 0) 132 goto err; 133 ··· 360 static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb) 361 { 362 struct net_device_stats *stats = &mbus->ndev->stats; 363 - struct i3c_priv_xfer xfer = { .rnw = false }; 364 struct mctp_i3c_internal_hdr *ihdr = NULL; 365 struct mctp_i3c_device *mi = NULL; 366 unsigned int data_len; ··· 409 data[data_len] = pec; 410 411 xfer.data.out = data; 412 - rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); 413 if (rc == 0) { 414 stats->tx_bytes += data_len; 415 stats->tx_packets++;
··· 99 100 static int mctp_i3c_read(struct mctp_i3c_device *mi) 101 { 102 + struct i3c_xfer xfer = { .rnw = 1, .len = mi->mrl }; 103 struct net_device_stats *stats = &mi->mbus->ndev->stats; 104 struct mctp_i3c_internal_hdr *ihdr = NULL; 105 struct sk_buff *skb = NULL; ··· 127 128 /* Make sure netif_rx() is read in the same order as i3c. */ 129 mutex_lock(&mi->lock); 130 + rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR); 131 if (rc < 0) 132 goto err; 133 ··· 360 static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb) 361 { 362 struct net_device_stats *stats = &mbus->ndev->stats; 363 + struct i3c_xfer xfer = { .rnw = false }; 364 struct mctp_i3c_internal_hdr *ihdr = NULL; 365 struct mctp_i3c_device *mi = NULL; 366 unsigned int data_len; ··· 409 data[data_len] = pec; 410 411 xfer.data.out = data; 412 + rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR); 413 if (rc == 0) { 414 stats->tx_bytes += data_len; 415 stats->tx_packets++;
+30 -12
include/linux/i3c/device.h
··· 27 * These are the standard error codes as defined by the I3C specification. 28 * When -EIO is returned by the i3c_device_do_priv_xfers() or 29 * i3c_device_send_hdr_cmds() one can check the error code in 30 - * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of 31 * what went wrong. 32 * 33 */ ··· 39 }; 40 41 /** 42 - * enum i3c_hdr_mode - HDR mode ids 43 * @I3C_HDR_DDR: DDR mode 44 * @I3C_HDR_TSP: TSP mode 45 * @I3C_HDR_TSL: TSL mode 46 */ 47 - enum i3c_hdr_mode { 48 - I3C_HDR_DDR, 49 - I3C_HDR_TSP, 50 - I3C_HDR_TSL, 51 }; 52 53 /** 54 - * struct i3c_priv_xfer - I3C SDR private transfer 55 * @rnw: encodes the transfer direction. true for a read, false for a write 56 * @len: transfer length in bytes of the transfer 57 * @actual_len: actual length in bytes are transferred by the controller 58 * @data: input/output buffer ··· 65 * @data.out: output buffer. Must point to a DMA-able buffer 66 * @err: I3C error code 67 */ 68 - struct i3c_priv_xfer { 69 - u8 rnw; 70 u16 len; 71 u16 actual_len; 72 union { ··· 78 } data; 79 enum i3c_error_code err; 80 }; 81 82 /** 83 * enum i3c_dcr - I3C DCR values ··· 308 i3c_i2c_driver_unregister, \ 309 __i2cdrv) 310 311 - int i3c_device_do_priv_xfers(struct i3c_device *dev, 312 - struct i3c_priv_xfer *xfers, 313 - int nxfers); 314 315 int i3c_device_do_setdasa(struct i3c_device *dev); 316 ··· 358 void i3c_device_free_ibi(struct i3c_device *dev); 359 int i3c_device_enable_ibi(struct i3c_device *dev); 360 int i3c_device_disable_ibi(struct i3c_device *dev); 361 362 #endif /* I3C_DEV_H */
··· 27 * These are the standard error codes as defined by the I3C specification. 28 * When -EIO is returned by the i3c_device_do_priv_xfers() or 29 * i3c_device_send_hdr_cmds() one can check the error code in 30 + * &struct i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of 31 * what went wrong. 32 * 33 */ ··· 39 }; 40 41 /** 42 + * enum i3c_xfer_mode - I3C xfer mode ids 43 * @I3C_HDR_DDR: DDR mode 44 * @I3C_HDR_TSP: TSP mode 45 * @I3C_HDR_TSL: TSL mode 46 + * @I3C_SDR: SDR mode (not an HDR mode) 47 */ 48 + enum i3c_xfer_mode { 49 + /* The three I3C_HDR_* values below must match the GETCAP1 byte bit positions */ 50 + I3C_HDR_DDR = 0, 51 + I3C_HDR_TSP = 1, 52 + I3C_HDR_TSL = 2, 53 + /* Used for the default SDR transfer mode */ 54 + I3C_SDR = 31, 55 }; 56 57 /** 58 + * struct i3c_xfer - I3C data transfer 59 * @rnw: encodes the transfer direction. true for a read, false for a write 60 + * @cmd: read/write command code in HDR mode (read: 0x80 - 0xff, write: 0x00 - 0x7f) 61 * @len: transfer length in bytes of the transfer 62 * @actual_len: actual length in bytes are transferred by the controller 63 * @data: input/output buffer ··· 60 * @data.out: output buffer. Must point to a DMA-able buffer 61 * @err: I3C error code 62 */ 63 + struct i3c_xfer { 64 + union { 65 + u8 rnw; 66 + u8 cmd; 67 + }; 68 u16 len; 69 u16 actual_len; 70 union { ··· 70 } data; 71 enum i3c_error_code err; 72 }; 73 + 74 + /* Keep backward compatibility */ 75 + #define i3c_priv_xfer i3c_xfer 76 77 /** 78 * enum i3c_dcr - I3C DCR values ··· 297 i3c_i2c_driver_unregister, \ 298 __i2cdrv) 299 300 + int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers, 301 + int nxfers, enum i3c_xfer_mode mode); 302 + 303 + static inline int i3c_device_do_priv_xfers(struct i3c_device *dev, 304 + struct i3c_xfer *xfers, 305 + int nxfers) 306 + { 307 + return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR); 308 + } 309 310 int i3c_device_do_setdasa(struct i3c_device *dev); 311 ··· 341 void i3c_device_free_ibi(struct i3c_device *dev); 342 int i3c_device_enable_ibi(struct i3c_device *dev); 343 int i3c_device_disable_ibi(struct i3c_device *dev); 344 + u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev); 345 346 #endif /* I3C_DEV_H */
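
Because of the i3c_priv_xfer alias and the i3c_device_do_priv_xfers() inline wrapper above, existing SDR-only users keep building unchanged while new code can opt into HDR. Below is a small hypothetical sketch of such a legacy write-then-read sequence; the helper name and parameters are illustrative, and both buffers are assumed to be DMA-able as the structure documentation requires.

#include <linux/i3c/device.h>

/* Hypothetical legacy-style helper: write a register address, then read one byte back */
static int example_legacy_read(struct i3c_device *dev, u8 *reg_buf, u8 *val_buf)
{
	struct i3c_priv_xfer xfers[2] = {
		{ .rnw = false, .len = 1, .data.out = reg_buf },
		{ .rnw = true,  .len = 1, .data.in  = val_buf },
	};

	/* Expands to i3c_device_do_xfers(dev, xfers, 2, I3C_SDR) */
	return i3c_device_do_priv_xfers(dev, xfers, 2);
}
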
+9 -1
include/linux/i3c/master.h
··· 418 * @send_ccc_cmd: send a CCC command 419 * This method is mandatory. 420 * @priv_xfers: do one or several private I3C SDR transfers 421 - * This method is mandatory. 422 * @attach_i2c_dev: called every time an I2C device is attached to the bus. 423 * This is a good place to attach master controller specific 424 * data to I2C devices. ··· 478 const struct i3c_ccc_cmd *cmd); 479 int (*send_ccc_cmd)(struct i3c_master_controller *master, 480 struct i3c_ccc_cmd *cmd); 481 int (*priv_xfers)(struct i3c_dev_desc *dev, 482 struct i3c_priv_xfer *xfers, 483 int nxfers); 484 int (*attach_i2c_dev)(struct i2c_dev_desc *dev); 485 void (*detach_i2c_dev)(struct i2c_dev_desc *dev); 486 int (*i2c_xfers)(struct i2c_dev_desc *dev,
··· 418 * @send_ccc_cmd: send a CCC command 419 * This method is mandatory. 420 * @priv_xfers: do one or several private I3C SDR transfers 421 + * This method is mandatory when i3c_xfers is not implemented. It 422 + * is deprecated. 423 + * @i3c_xfers: do one or several I3C SDR or HDR transfers 424 + * This method is mandatory when priv_xfers is not implemented but 425 + * should be implemented instead of priv_xfers. 426 * @attach_i2c_dev: called every time an I2C device is attached to the bus. 427 * This is a good place to attach master controller specific 428 * data to I2C devices. ··· 474 const struct i3c_ccc_cmd *cmd); 475 int (*send_ccc_cmd)(struct i3c_master_controller *master, 476 struct i3c_ccc_cmd *cmd); 477 + /* Deprecated, please use i3c_xfers() */ 478 int (*priv_xfers)(struct i3c_dev_desc *dev, 479 struct i3c_priv_xfer *xfers, 480 int nxfers); 481 + int (*i3c_xfers)(struct i3c_dev_desc *dev, 482 + struct i3c_xfer *xfers, 483 + int nxfers, enum i3c_xfer_mode mode); 484 int (*attach_i2c_dev)(struct i2c_dev_desc *dev); 485 void (*detach_i2c_dev)(struct i2c_dev_desc *dev); 486 int (*i2c_xfers)(struct i2c_dev_desc *dev,
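
For controller drivers, the expectation set by the kernel-doc above is to implement the new i3c_xfers hook instead of the deprecated priv_xfers. Below is a minimal hypothetical sketch of the dispatch such a driver might do; foo_do_sdr_xfers() and foo_do_ddr_xfers() stand in for driver-internal helpers, and the core (see the i3c_dev_do_xfers_locked() change earlier) has already rejected HDR modes the controller did not advertise in its info.hdr_cap.

#include <linux/errno.h>
#include <linux/i3c/master.h>

/* Hypothetical driver-internal helpers, declared only to keep the sketch self-contained */
static int foo_do_sdr_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, int nxfers);
static int foo_do_ddr_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, int nxfers);

static int foo_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
			 int nxfers, enum i3c_xfer_mode mode)
{
	/* The core has already validated @mode against info.hdr_cap */
	switch (mode) {
	case I3C_SDR:
		return foo_do_sdr_xfers(dev, xfers, nxfers);
	case I3C_HDR_DDR:
		return foo_do_ddr_xfers(dev, xfers, nxfers);
	default:
		return -EOPNOTSUPP;
	}
}

The function would then be wired up through the .i3c_xfers member of the driver's i3c_master_controller_ops in place of .priv_xfers, as the svc driver conversion above does.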