Merge tag 'i3c/for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux

Pull i3c updates from Alexandre Belloni:
"HDR support has finally been added. mipi-i3c-hci has been reworked and
Intel Nova Lake-S support has been added.

Subsystem:
- Add HDR transfer support

Drivers:
- dw: fix bus hang on Agilex5
 - mipi-i3c-hci: Intel Nova Lake-S and IOMMU support
- svc: HDR support"

* tag 'i3c/for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux: (28 commits)
regmap: i3c: switch to use i3c_xfer from i3c_priv_xfer
net: mctp i3c: switch to use i3c_xfer from i3c_priv_xfer
hwmon: (lm75): switch to use i3c_xfer from i3c_priv_xfer
i3c: document i3c_xfers
i3c: fix I3C_SDR bit number
i3c: master: svc: Add basic HDR mode support
i3c: master: svc: Replace bool rnw with union for HDR support
i3c: Switch to use new i3c_xfer from i3c_priv_xfer
i3c: Add HDR API support
i3c: master: add WQ_PERCPU to alloc_workqueue users
i3c: master: Remove i3c_device_free_ibi from i3c_device_remove
i3c: mipi-i3c-hci-pci: Set d3cold_delay to 0 for Intel controllers
i3c: mipi-i3c-hci-pci: Add LTR support for Intel controllers
i3c: mipi-i3c-hci-pci: Add exit callback
i3c: mipi-i3c-hci-pci: Change callback parameter
i3c: mipi-i3c-hci-pci: Allocate a structure for mipi_i3c_hci_pci device information
i3c: mipi-i3c-hci-pci: Factor out intel_reset()
i3c: mipi-i3c-hci-pci: Factor out private registers ioremapping
i3c: mipi-i3c-hci-pci: Constify driver data
i3c: mipi-i3c-hci-pci: Use readl_poll_timeout()
...
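
The recurring change in the client drivers below is mechanical: struct
i3c_priv_xfer becomes struct i3c_xfer and i3c_device_do_priv_xfers() becomes
i3c_device_do_xfers() with an explicit transfer mode. A minimal sketch of the
converted SDR call pattern, modeled on the regmap-i3c/lm75/mctp hunks that
follow (the helper name and buffers are illustrative, not part of this pull):

#include <linux/kernel.h>
#include <linux/i3c/device.h>

/* Illustrative write-then-read pair in SDR mode, as regmap-i3c now issues it. */
static int example_sdr_write_read(struct i3c_device *i3cdev,
				  const void *reg, u16 reg_len,
				  void *val, u16 val_len)
{
	struct i3c_xfer xfers[] = {
		{ .rnw = false, .len = reg_len, .data.out = reg },
		{ .rnw = true,  .len = val_len, .data.in  = val },
	};

	/* I3C_SDR preserves the old private-transfer semantics. */
	return i3c_device_do_xfers(i3cdev, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}

Unconverted callers keep building through the i3c_priv_xfer alias and the
i3c_device_do_priv_xfers() inline wrapper added in include/linux/i3c/device.h.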

+427 -116
+5 -1
Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - const: snps,dw-i3c-master-1.00a 17 + oneOf: 18 + - const: snps,dw-i3c-master-1.00a 19 + - items: 20 + - const: altr,agilex5-dw-i3c-master 21 + - const: snps,dw-i3c-master-1.00a 18 22 19 23 reg: 20 24 maxItems: 1
+4 -4
drivers/base/regmap/regmap-i3c.c
··· 11 11 { 12 12 struct device *dev = context; 13 13 struct i3c_device *i3c = dev_to_i3cdev(dev); 14 - struct i3c_priv_xfer xfers[] = { 14 + struct i3c_xfer xfers[] = { 15 15 { 16 16 .rnw = false, 17 17 .len = count, ··· 19 19 }, 20 20 }; 21 21 22 - return i3c_device_do_priv_xfers(i3c, xfers, ARRAY_SIZE(xfers)); 22 + return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR); 23 23 } 24 24 25 25 static int regmap_i3c_read(void *context, ··· 28 28 { 29 29 struct device *dev = context; 30 30 struct i3c_device *i3c = dev_to_i3cdev(dev); 31 - struct i3c_priv_xfer xfers[2]; 31 + struct i3c_xfer xfers[2]; 32 32 33 33 xfers[0].rnw = false; 34 34 xfers[0].len = reg_size; ··· 38 38 xfers[1].len = val_size; 39 39 xfers[1].data.in = val; 40 40 41 - return i3c_device_do_priv_xfers(i3c, xfers, ARRAY_SIZE(xfers)); 41 + return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR); 42 42 } 43 43 44 44 static const struct regmap_bus regmap_i3c = {
+4 -4
drivers/hwmon/lm75.c
··· 621 621 { 622 622 struct i3c_device *i3cdev = context; 623 623 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 624 - struct i3c_priv_xfer xfers[] = { 624 + struct i3c_xfer xfers[] = { 625 625 { 626 626 .rnw = false, 627 627 .len = 1, ··· 640 640 if (reg == LM75_REG_CONF && !data->params->config_reg_16bits) 641 641 xfers[1].len--; 642 642 643 - ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2); 643 + ret = i3c_device_do_xfers(i3cdev, xfers, 2, I3C_SDR); 644 644 if (ret < 0) 645 645 return ret; 646 646 ··· 658 658 { 659 659 struct i3c_device *i3cdev = context; 660 660 struct lm75_data *data = i3cdev_get_drvdata(i3cdev); 661 - struct i3c_priv_xfer xfers[] = { 661 + struct i3c_xfer xfers[] = { 662 662 { 663 663 .rnw = false, 664 664 .len = 3, ··· 680 680 data->val_buf[2] = val & 0xff; 681 681 } 682 682 683 - return i3c_device_do_priv_xfers(i3cdev, xfers, 1); 683 + return i3c_device_do_xfers(i3cdev, xfers, 1, I3C_SDR); 684 684 } 685 685 686 686 static const struct regmap_bus lm75_i3c_regmap_bus = {
+20 -7
drivers/i3c/device.c
··· 15 15 #include "internals.h" 16 16 17 17 /** 18 - * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a 19 - * specific device 18 + * i3c_device_do_xfers() - do I3C transfers directed to a specific device 20 19 * 21 20 * @dev: device with which the transfers should be done 22 21 * @xfers: array of transfers 23 22 * @nxfers: number of transfers 23 + * @mode: transfer mode 24 24 * 25 25 * Initiate one or several private SDR transfers with @dev. 26 26 * ··· 33 33 * 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021. Section: 34 34 * 5.1.2.2.3. 35 35 */ 36 - int i3c_device_do_priv_xfers(struct i3c_device *dev, 37 - struct i3c_priv_xfer *xfers, 38 - int nxfers) 36 + int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers, 37 + int nxfers, enum i3c_xfer_mode mode) 39 38 { 40 39 int ret, i; 41 40 ··· 47 48 } 48 49 49 50 i3c_bus_normaluse_lock(dev->bus); 50 - ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers); 51 + ret = i3c_dev_do_xfers_locked(dev->desc, xfers, nxfers, mode); 51 52 i3c_bus_normaluse_unlock(dev->bus); 52 53 53 54 return ret; 54 55 } 55 - EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers); 56 + EXPORT_SYMBOL_GPL(i3c_device_do_xfers); 56 57 57 58 /** 58 59 * i3c_device_do_setdasa() - do I3C dynamic address assignement with ··· 258 259 return NULL; 259 260 } 260 261 EXPORT_SYMBOL_GPL(i3c_device_match_id); 262 + 263 + /** 264 + * i3c_device_get_supported_xfer_mode - Returns the supported transfer mode by 265 + * connected master controller. 266 + * @dev: I3C device 267 + * 268 + * Return: a bit mask, which supported transfer mode, bit position is defined at 269 + * enum i3c_hdr_mode 270 + */ 271 + u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev) 272 + { 273 + return i3c_dev_get_master(dev->desc)->this->info.hdr_cap | BIT(I3C_SDR); 274 + } 275 + EXPORT_SYMBOL_GPL(i3c_device_get_supported_xfer_mode); 261 276 262 277 /** 263 278 * i3c_driver_register_with_owner() - register an I3C device driver
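
The new i3c_device_get_supported_xfer_mode() helper exposes the controller's
HDR capabilities (with the I3C_SDR bit always set) so client drivers can pick
a transfer mode before calling i3c_device_do_xfers(). A hedged sketch of that
check (the function name is illustrative):

#include <linux/bits.h>
#include <linux/i3c/device.h>

/* Prefer HDR-DDR when the controller advertises it, otherwise stay on SDR. */
static enum i3c_xfer_mode example_pick_mode(struct i3c_device *i3cdev)
{
	u32 modes = i3c_device_get_supported_xfer_mode(i3cdev);

	return (modes & BIT(I3C_HDR_DDR)) ? I3C_HDR_DDR : I3C_SDR;
}

Whether the target device itself supports DDR is a separate question,
typically answered by its GETCAPS data, and is not covered by this helper.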
+3 -3
drivers/i3c/internals.h
··· 15 15 void i3c_bus_normaluse_unlock(struct i3c_bus *bus); 16 16 17 17 int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev); 18 - int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, 19 - struct i3c_priv_xfer *xfers, 20 - int nxfers); 18 + int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, 19 + struct i3c_xfer *xfers, 20 + int nxfers, enum i3c_xfer_mode mode); 21 21 int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev); 22 22 int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev); 23 23 int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+19 -12
drivers/i3c/master.c
··· 334 334 335 335 if (driver->remove) 336 336 driver->remove(i3cdev); 337 - 338 - i3c_device_free_ibi(i3cdev); 339 337 } 340 338 341 339 const struct bus_type i3c_bus_type = { ··· 2819 2821 2820 2822 static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops) 2821 2823 { 2822 - if (!ops || !ops->bus_init || !ops->priv_xfers || 2824 + if (!ops || !ops->bus_init || 2823 2825 !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers) 2826 + return -EINVAL; 2827 + 2828 + /* Must provide one of priv_xfers (SDR only) or i3c_xfers (all modes) */ 2829 + if (!ops->priv_xfers && !ops->i3c_xfers) 2824 2830 return -EINVAL; 2825 2831 2826 2832 if (ops->request_ibi && ··· 2885 2883 INIT_LIST_HEAD(&master->boardinfo.i2c); 2886 2884 INIT_LIST_HEAD(&master->boardinfo.i3c); 2887 2885 2888 - ret = i3c_bus_init(i3cbus, master->dev.of_node); 2889 - if (ret) 2890 - return ret; 2891 - 2892 2886 device_initialize(&master->dev); 2893 2887 dev_set_name(&master->dev, "i3c-%d", i3cbus->id); 2894 2888 2895 2889 master->dev.dma_mask = parent->dma_mask; 2896 2890 master->dev.coherent_dma_mask = parent->coherent_dma_mask; 2897 2891 master->dev.dma_parms = parent->dma_parms; 2892 + 2893 + ret = i3c_bus_init(i3cbus, master->dev.of_node); 2894 + if (ret) 2895 + goto err_put_dev; 2898 2896 2899 2897 ret = of_populate_i3c_bus(master); 2900 2898 if (ret) ··· 2927 2925 if (ret) 2928 2926 goto err_put_dev; 2929 2927 2930 - master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent)); 2928 + master->wq = alloc_workqueue("%s", WQ_PERCPU, 0, dev_name(parent)); 2931 2929 if (!master->wq) { 2932 2930 ret = -ENOMEM; 2933 2931 goto err_put_dev; ··· 3016 3014 dev->boardinfo->init_dyn_addr); 3017 3015 } 3018 3016 3019 - int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev, 3020 - struct i3c_priv_xfer *xfers, 3021 - int nxfers) 3017 + int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, 3018 + int nxfers, enum i3c_xfer_mode mode) 3022 3019 { 3023 3020 struct i3c_master_controller *master; 3024 3021 ··· 3028 3027 if (!master || !xfers) 3029 3028 return -EINVAL; 3030 3029 3031 - if (!master->ops->priv_xfers) 3030 + if (mode != I3C_SDR && !(master->this->info.hdr_cap & BIT(mode))) 3032 3031 return -EOPNOTSUPP; 3032 + 3033 + if (master->ops->i3c_xfers) 3034 + return master->ops->i3c_xfers(dev, xfers, nxfers, mode); 3035 + 3036 + if (mode != I3C_SDR) 3037 + return -EINVAL; 3033 3038 3034 3039 return master->ops->priv_xfers(dev, xfers, nxfers); 3035 3040 }
+30 -1
drivers/i3c/master/dw-i3c-master.c
··· 228 228 229 229 /* List of quirks */ 230 230 #define AMD_I3C_OD_PP_TIMING BIT(1) 231 + #define DW_I3C_DISABLE_RUNTIME_PM_QUIRK BIT(2) 231 232 232 233 struct dw_i3c_cmd { 233 234 u32 cmd_lo; ··· 251 250 struct dw_i3c_i2c_dev_data { 252 251 u8 index; 253 252 struct i3c_generic_ibi_pool *ibi_pool; 253 + }; 254 + 255 + struct dw_i3c_drvdata { 256 + u32 flags; 254 257 }; 255 258 256 259 static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m, ··· 1540 1535 struct platform_device *pdev) 1541 1536 { 1542 1537 int ret, irq; 1538 + const struct dw_i3c_drvdata *drvdata; 1539 + unsigned long quirks = 0; 1543 1540 1544 1541 if (!master->platform_ops) 1545 1542 master->platform_ops = &dw_i3c_platform_ops_default; ··· 1597 1590 master->maxdevs = ret >> 16; 1598 1591 master->free_pos = GENMASK(master->maxdevs - 1, 0); 1599 1592 1600 - master->quirks = (unsigned long)device_get_match_data(&pdev->dev); 1593 + if (has_acpi_companion(&pdev->dev)) { 1594 + quirks = (unsigned long)device_get_match_data(&pdev->dev); 1595 + } else if (pdev->dev.of_node) { 1596 + drvdata = device_get_match_data(&pdev->dev); 1597 + if (drvdata) 1598 + quirks = drvdata->flags; 1599 + } 1600 + master->quirks = quirks; 1601 + 1602 + /* Keep controller enabled by preventing runtime suspend */ 1603 + if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1604 + pm_runtime_get_noresume(&pdev->dev); 1601 1605 1602 1606 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1603 1607 ret = i3c_master_register(&master->base, &pdev->dev, ··· 1634 1616 { 1635 1617 cancel_work_sync(&master->hj_work); 1636 1618 i3c_master_unregister(&master->base); 1619 + 1620 + /* Balance pm_runtime_get_noresume() from probe() */ 1621 + if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK) 1622 + pm_runtime_put_noidle(master->dev); 1637 1623 1638 1624 pm_runtime_disable(master->dev); 1639 1625 pm_runtime_set_suspended(master->dev); ··· 1781 1759 pm_runtime_put_autosuspend(master->dev); 1782 1760 } 1783 1761 1762 + static const struct dw_i3c_drvdata altr_agilex5_drvdata = { 1763 + .flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK, 1764 + }; 1765 + 1784 1766 static const struct of_device_id dw_i3c_master_of_match[] = { 1785 1767 { .compatible = "snps,dw-i3c-master-1.00a", }, 1768 + { .compatible = "altr,agilex5-dw-i3c-master", 1769 + .data = &altr_agilex5_drvdata, 1770 + }, 1786 1771 {}, 1787 1772 }; 1788 1773 MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
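
With DT match data now carried in struct dw_i3c_drvdata, hooking up another
quirky platform inside dw-i3c-master.c is just a new drvdata instance plus a
match entry. A hypothetical example mirroring dw_i3c_master_of_match[] (the
"vendor,example" compatible below is made up for illustration):

/* Hypothetical follow-up platform reusing the same quirk plumbing. */
static const struct dw_i3c_drvdata example_soc_drvdata = {
	.flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "snps,dw-i3c-master-1.00a", },
	{ .compatible = "altr,agilex5-dw-i3c-master",
	  .data = &altr_agilex5_drvdata,
	},
	{ .compatible = "vendor,example-dw-i3c-master",
	  .data = &example_soc_drvdata,
	},
	{},
};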
+187 -42
drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
··· 7 7 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> 8 8 */ 9 9 #include <linux/acpi.h> 10 + #include <linux/bitfield.h> 11 + #include <linux/debugfs.h> 10 12 #include <linux/idr.h> 13 + #include <linux/iopoll.h> 11 14 #include <linux/kernel.h> 12 15 #include <linux/module.h> 13 16 #include <linux/pci.h> 14 17 #include <linux/platform_device.h> 18 + #include <linux/pm_qos.h> 15 19 16 - struct mipi_i3c_hci_pci_info { 17 - int (*init)(struct pci_dev *pci); 20 + struct mipi_i3c_hci_pci { 21 + struct pci_dev *pci; 22 + struct platform_device *pdev; 23 + const struct mipi_i3c_hci_pci_info *info; 24 + void *private; 18 25 }; 19 26 20 - #define INTEL_PRIV_OFFSET 0x2b0 21 - #define INTEL_PRIV_SIZE 0x28 22 - #define INTEL_PRIV_RESETS 0x04 23 - #define INTEL_PRIV_RESETS_RESET BIT(0) 24 - #define INTEL_PRIV_RESETS_RESET_DONE BIT(1) 27 + struct mipi_i3c_hci_pci_info { 28 + int (*init)(struct mipi_i3c_hci_pci *hci); 29 + void (*exit)(struct mipi_i3c_hci_pci *hci); 30 + }; 25 31 26 32 static DEFINE_IDA(mipi_i3c_hci_pci_ida); 27 33 28 - static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci) 29 - { 30 - unsigned long timeout; 31 - void __iomem *priv; 34 + #define INTEL_PRIV_OFFSET 0x2b0 35 + #define INTEL_PRIV_SIZE 0x28 36 + #define INTEL_RESETS 0x04 37 + #define INTEL_RESETS_RESET BIT(0) 38 + #define INTEL_RESETS_RESET_DONE BIT(1) 39 + #define INTEL_RESETS_TIMEOUT_US (10 * USEC_PER_MSEC) 32 40 33 - priv = devm_ioremap(&pci->dev, 34 - pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET, 35 - INTEL_PRIV_SIZE); 36 - if (!priv) 37 - return -ENOMEM; 41 + #define INTEL_ACTIVELTR 0x0c 42 + #define INTEL_IDLELTR 0x10 43 + 44 + #define INTEL_LTR_REQ BIT(15) 45 + #define INTEL_LTR_SCALE_MASK GENMASK(11, 10) 46 + #define INTEL_LTR_SCALE_1US FIELD_PREP(INTEL_LTR_SCALE_MASK, 2) 47 + #define INTEL_LTR_SCALE_32US FIELD_PREP(INTEL_LTR_SCALE_MASK, 3) 48 + #define INTEL_LTR_VALUE_MASK GENMASK(9, 0) 49 + 50 + struct intel_host { 51 + void __iomem *priv; 52 + u32 active_ltr; 53 + u32 idle_ltr; 54 + struct dentry *debugfs_root; 55 + }; 56 + 57 + static void intel_cache_ltr(struct intel_host *host) 58 + { 59 + host->active_ltr = readl(host->priv + INTEL_ACTIVELTR); 60 + host->idle_ltr = readl(host->priv + INTEL_IDLELTR); 61 + } 62 + 63 + static void intel_ltr_set(struct device *dev, s32 val) 64 + { 65 + struct mipi_i3c_hci_pci *hci = dev_get_drvdata(dev); 66 + struct intel_host *host = hci->private; 67 + u32 ltr; 68 + 69 + /* 70 + * Program latency tolerance (LTR) accordingly what has been asked 71 + * by the PM QoS layer or disable it in case we were passed 72 + * negative value or PM_QOS_LATENCY_ANY. 
73 + */ 74 + ltr = readl(host->priv + INTEL_ACTIVELTR); 75 + 76 + if (val == PM_QOS_LATENCY_ANY || val < 0) { 77 + ltr &= ~INTEL_LTR_REQ; 78 + } else { 79 + ltr |= INTEL_LTR_REQ; 80 + ltr &= ~INTEL_LTR_SCALE_MASK; 81 + ltr &= ~INTEL_LTR_VALUE_MASK; 82 + 83 + if (val > INTEL_LTR_VALUE_MASK) { 84 + val >>= 5; 85 + if (val > INTEL_LTR_VALUE_MASK) 86 + val = INTEL_LTR_VALUE_MASK; 87 + ltr |= INTEL_LTR_SCALE_32US | val; 88 + } else { 89 + ltr |= INTEL_LTR_SCALE_1US | val; 90 + } 91 + } 92 + 93 + if (ltr == host->active_ltr) 94 + return; 95 + 96 + writel(ltr, host->priv + INTEL_ACTIVELTR); 97 + writel(ltr, host->priv + INTEL_IDLELTR); 98 + 99 + /* Cache the values into intel_host structure */ 100 + intel_cache_ltr(host); 101 + } 102 + 103 + static void intel_ltr_expose(struct device *dev) 104 + { 105 + dev->power.set_latency_tolerance = intel_ltr_set; 106 + dev_pm_qos_expose_latency_tolerance(dev); 107 + } 108 + 109 + static void intel_ltr_hide(struct device *dev) 110 + { 111 + dev_pm_qos_hide_latency_tolerance(dev); 112 + dev->power.set_latency_tolerance = NULL; 113 + } 114 + 115 + static void intel_add_debugfs(struct mipi_i3c_hci_pci *hci) 116 + { 117 + struct dentry *dir = debugfs_create_dir(dev_name(&hci->pci->dev), NULL); 118 + struct intel_host *host = hci->private; 119 + 120 + intel_cache_ltr(host); 121 + 122 + host->debugfs_root = dir; 123 + debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr); 124 + debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr); 125 + } 126 + 127 + static void intel_remove_debugfs(struct mipi_i3c_hci_pci *hci) 128 + { 129 + struct intel_host *host = hci->private; 130 + 131 + debugfs_remove_recursive(host->debugfs_root); 132 + } 133 + 134 + static void intel_reset(void __iomem *priv) 135 + { 136 + u32 reg; 38 137 39 138 /* Assert reset, wait for completion and release reset */ 40 - writel(0, priv + INTEL_PRIV_RESETS); 41 - timeout = jiffies + msecs_to_jiffies(10); 42 - while (!(readl(priv + INTEL_PRIV_RESETS) & 43 - INTEL_PRIV_RESETS_RESET_DONE)) { 44 - if (time_after(jiffies, timeout)) 45 - break; 46 - cpu_relax(); 47 - } 48 - writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS); 139 + writel(0, priv + INTEL_RESETS); 140 + readl_poll_timeout(priv + INTEL_RESETS, reg, 141 + reg & INTEL_RESETS_RESET_DONE, 0, 142 + INTEL_RESETS_TIMEOUT_US); 143 + writel(INTEL_RESETS_RESET, priv + INTEL_RESETS); 144 + } 145 + 146 + static void __iomem *intel_priv(struct pci_dev *pci) 147 + { 148 + resource_size_t base = pci_resource_start(pci, 0); 149 + 150 + return devm_ioremap(&pci->dev, base + INTEL_PRIV_OFFSET, INTEL_PRIV_SIZE); 151 + } 152 + 153 + static int intel_i3c_init(struct mipi_i3c_hci_pci *hci) 154 + { 155 + struct intel_host *host = devm_kzalloc(&hci->pci->dev, sizeof(*host), GFP_KERNEL); 156 + void __iomem *priv = intel_priv(hci->pci); 157 + 158 + if (!host || !priv) 159 + return -ENOMEM; 160 + 161 + dma_set_mask_and_coherent(&hci->pci->dev, DMA_BIT_MASK(64)); 162 + 163 + hci->pci->d3cold_delay = 0; 164 + 165 + hci->private = host; 166 + host->priv = priv; 167 + 168 + intel_reset(priv); 169 + 170 + intel_ltr_expose(&hci->pci->dev); 171 + intel_add_debugfs(hci); 49 172 50 173 return 0; 51 174 } 52 175 53 - static struct mipi_i3c_hci_pci_info intel_info = { 54 - .init = mipi_i3c_hci_pci_intel_init, 176 + static void intel_i3c_exit(struct mipi_i3c_hci_pci *hci) 177 + { 178 + intel_remove_debugfs(hci); 179 + intel_ltr_hide(&hci->pci->dev); 180 + } 181 + 182 + static const struct mipi_i3c_hci_pci_info intel_info = { 183 + .init = intel_i3c_init, 184 + 
.exit = intel_i3c_exit, 55 185 }; 56 186 57 187 static int mipi_i3c_hci_pci_probe(struct pci_dev *pci, 58 188 const struct pci_device_id *id) 59 189 { 60 - struct mipi_i3c_hci_pci_info *info; 61 - struct platform_device *pdev; 190 + struct mipi_i3c_hci_pci *hci; 62 191 struct resource res[2]; 63 192 int dev_id, ret; 193 + 194 + hci = devm_kzalloc(&pci->dev, sizeof(*hci), GFP_KERNEL); 195 + if (!hci) 196 + return -ENOMEM; 197 + 198 + hci->pci = pci; 64 199 65 200 ret = pcim_enable_device(pci); 66 201 if (ret) ··· 217 82 if (dev_id < 0) 218 83 return dev_id; 219 84 220 - pdev = platform_device_alloc("mipi-i3c-hci", dev_id); 221 - if (!pdev) 85 + hci->pdev = platform_device_alloc("mipi-i3c-hci", dev_id); 86 + if (!hci->pdev) 222 87 return -ENOMEM; 223 88 224 - pdev->dev.parent = &pci->dev; 225 - device_set_node(&pdev->dev, dev_fwnode(&pci->dev)); 89 + hci->pdev->dev.parent = &pci->dev; 90 + device_set_node(&hci->pdev->dev, dev_fwnode(&pci->dev)); 226 91 227 - ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 92 + ret = platform_device_add_resources(hci->pdev, res, ARRAY_SIZE(res)); 228 93 if (ret) 229 94 goto err; 230 95 231 - info = (struct mipi_i3c_hci_pci_info *)id->driver_data; 232 - if (info && info->init) { 233 - ret = info->init(pci); 96 + hci->info = (const struct mipi_i3c_hci_pci_info *)id->driver_data; 97 + if (hci->info && hci->info->init) { 98 + ret = hci->info->init(hci); 234 99 if (ret) 235 100 goto err; 236 101 } 237 102 238 - ret = platform_device_add(pdev); 103 + ret = platform_device_add(hci->pdev); 239 104 if (ret) 240 - goto err; 105 + goto err_exit; 241 106 242 - pci_set_drvdata(pci, pdev); 107 + pci_set_drvdata(pci, hci); 243 108 244 109 return 0; 245 110 111 + err_exit: 112 + if (hci->info && hci->info->exit) 113 + hci->info->exit(hci); 246 114 err: 247 - platform_device_put(pdev); 115 + platform_device_put(hci->pdev); 248 116 ida_free(&mipi_i3c_hci_pci_ida, dev_id); 249 117 return ret; 250 118 } 251 119 252 120 static void mipi_i3c_hci_pci_remove(struct pci_dev *pci) 253 121 { 254 - struct platform_device *pdev = pci_get_drvdata(pci); 122 + struct mipi_i3c_hci_pci *hci = pci_get_drvdata(pci); 123 + struct platform_device *pdev = hci->pdev; 255 124 int dev_id = pdev->id; 125 + 126 + if (hci->info && hci->info->exit) 127 + hci->info->exit(hci); 256 128 257 129 platform_device_unregister(pdev); 258 130 ida_free(&mipi_i3c_hci_pci_ida, dev_id); ··· 275 133 /* Panther Lake-P */ 276 134 { PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info}, 277 135 { PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info}, 136 + /* Nova Lake-S */ 137 + { PCI_VDEVICE(INTEL, 0x6e2c), (kernel_ulong_t)&intel_info}, 138 + { PCI_VDEVICE(INTEL, 0x6e2d), (kernel_ulong_t)&intel_info}, 278 139 { }, 279 140 }; 280 141 MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
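
For reference, the LTR programming above packs the requested tolerance into a
10-bit value with either a 1 us or a 32 us scale: requests up to 1023 us use
the 1 us scale, larger ones are divided by 32 (val >>= 5) and saturated, so a
5000 us request encodes as 156 at the 32 us scale (~4992 us effective). A
standalone sketch of that arithmetic, mirroring the INTEL_LTR_* fields above
(the EX_* names are local to this example; negative and PM_QOS_LATENCY_ANY
requests are handled separately by the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_LTR_REQ		BIT(15)
#define EX_LTR_SCALE_MASK	GENMASK(11, 10)
#define EX_LTR_SCALE_1US	FIELD_PREP(EX_LTR_SCALE_MASK, 2)
#define EX_LTR_SCALE_32US	FIELD_PREP(EX_LTR_SCALE_MASK, 3)
#define EX_LTR_VALUE_MASK	GENMASK(9, 0)

/* Encode a non-negative latency tolerance request (in microseconds). */
static u32 example_encode_ltr(u32 val)
{
	if (val > EX_LTR_VALUE_MASK) {
		val >>= 5;				/* switch to the 32 us scale */
		if (val > EX_LTR_VALUE_MASK)
			val = EX_LTR_VALUE_MASK;	/* saturate at 1023 * 32 us */
		return EX_LTR_REQ | EX_LTR_SCALE_32US | val;
	}
	return EX_LTR_REQ | EX_LTR_SCALE_1US | val;
}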
+112 -25
drivers/i3c/master/svc-i3c-master.c
··· 40 40 #define SVC_I3C_MCTRL_REQUEST_NONE 0 41 41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1 42 42 #define SVC_I3C_MCTRL_REQUEST_STOP 2 43 + #define SVC_I3C_MCTRL_REQUEST_FORCE_EXIT 6 43 44 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3 44 45 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4 45 46 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7 46 47 #define SVC_I3C_MCTRL_TYPE_I3C 0 47 48 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4) 49 + #define SVC_I3C_MCTRL_TYPE_DDR BIT(5) 48 50 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0 49 51 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0 50 52 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7) ··· 97 95 #define SVC_I3C_MINTMASKED 0x098 98 96 #define SVC_I3C_MERRWARN 0x09C 99 97 #define SVC_I3C_MERRWARN_NACK BIT(2) 98 + #define SVC_I3C_MERRWARN_CRC BIT(10) 100 99 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20) 101 100 #define SVC_I3C_MDMACTRL 0x0A0 102 101 #define SVC_I3C_MDATACTRL 0x0AC ··· 168 165 169 166 struct svc_i3c_cmd { 170 167 u8 addr; 171 - bool rnw; 168 + union { 169 + bool rnw; 170 + u8 cmd; 171 + u32 rnw_cmd; 172 + }; 172 173 u8 *in; 173 174 const void *out; 174 175 unsigned int len; 175 176 unsigned int actual_len; 176 - struct i3c_priv_xfer *xfer; 177 + struct i3c_xfer *xfer; 177 178 bool continued; 178 179 }; 179 180 ··· 390 383 return master->descs[i]; 391 384 } 392 385 386 + static bool svc_cmd_is_read(u32 rnw_cmd, u32 type) 387 + { 388 + return (type == SVC_I3C_MCTRL_TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd; 389 + } 390 + 391 + static void svc_i3c_master_emit_force_exit(struct svc_i3c_master *master) 392 + { 393 + u32 reg; 394 + 395 + writel(SVC_I3C_MCTRL_REQUEST_FORCE_EXIT, master->regs + SVC_I3C_MCTRL); 396 + 397 + /* 398 + * Not need check error here because it is never happen at hardware. 399 + * IP just wait for few fclk cycle to complete DDR exit pattern. Even 400 + * though fclk stop, timeout happen here, the whole data actually 401 + * already finish transfer. The next command will be timeout because 402 + * wrong hardware state. 403 + */ 404 + readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg, 405 + SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000); 406 + 407 + /* 408 + * This delay is necessary after the emission of a stop, otherwise eg. 409 + * repeating IBIs do not get detected. There is a note in the manual 410 + * about it, stating that the stop condition might not be settled 411 + * correctly if a start condition follows too rapidly. 412 + */ 413 + udelay(1); 414 + } 415 + 393 416 static void svc_i3c_master_emit_stop(struct svc_i3c_master *master) 394 417 { 395 418 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL); ··· 443 406 int ret, val; 444 407 u8 *buf; 445 408 446 - slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); 447 - if (!slot) 448 - return -ENOSPC; 449 - 450 - slot->len = 0; 451 - buf = slot->data; 452 - 409 + /* 410 + * Wait for transfer to complete before returning. Otherwise, the EmitStop 411 + * request might be sent when the transfer is not complete. 
412 + */ 453 413 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val, 454 414 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000); 455 415 if (ret) { 456 416 dev_err(master->dev, "Timeout when polling for COMPLETE\n"); 457 - i3c_generic_ibi_recycle_slot(data->ibi_pool, slot); 458 417 return ret; 459 418 } 419 + 420 + slot = i3c_generic_ibi_get_free_slot(data->ibi_pool); 421 + if (!slot) { 422 + dev_dbg(master->dev, "No free ibi slot, drop the data\n"); 423 + writel(SVC_I3C_MDATACTRL_FLUSHRB, master->regs + SVC_I3C_MDATACTRL); 424 + return -ENOSPC; 425 + } 426 + 427 + slot->len = 0; 428 + buf = slot->data; 460 429 461 430 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) && 462 431 slot->len < SVC_I3C_FIFO_SIZE) { ··· 555 512 * cycle, leading to missed client IBI handlers. 556 513 * 557 514 * A typical scenario is when IBIWON occurs and bus arbitration is lost 558 - * at svc_i3c_master_priv_xfers(). 515 + * at svc_i3c_master_i3c_xfers(). 559 516 * 560 517 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI. 561 518 */ ··· 834 791 goto rpm_out; 835 792 836 793 info.dyn_addr = ret; 794 + 795 + info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR); 837 796 838 797 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr), 839 798 master->regs + SVC_I3C_MDYNADDR); ··· 1338 1293 } 1339 1294 1340 1295 static int svc_i3c_master_xfer(struct svc_i3c_master *master, 1341 - bool rnw, unsigned int xfer_type, u8 addr, 1296 + u32 rnw_cmd, unsigned int xfer_type, u8 addr, 1342 1297 u8 *in, const u8 *out, unsigned int xfer_len, 1343 1298 unsigned int *actual_len, bool continued, bool repeat_start) 1344 1299 { 1300 + bool rnw = svc_cmd_is_read(rnw_cmd, xfer_type); 1345 1301 int retry = repeat_start ? 1 : 2; 1346 1302 u32 reg; 1347 1303 int ret; ··· 1350 1304 /* clean SVC_I3C_MINT_IBIWON w1c bits */ 1351 1305 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS); 1352 1306 1307 + if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR) { 1308 + /* DDR command need prefill into FIFO */ 1309 + writel(rnw_cmd, master->regs + SVC_I3C_MWDATAB); 1310 + if (!rnw) { 1311 + /* write data also need prefill into FIFO */ 1312 + ret = svc_i3c_master_write(master, out, xfer_len); 1313 + if (ret) 1314 + goto emit_stop; 1315 + } 1316 + } 1353 1317 1354 1318 while (retry--) { 1355 1319 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR | ··· 1453 1397 1454 1398 if (rnw) 1455 1399 ret = svc_i3c_master_read(master, in, xfer_len); 1456 - else 1400 + else if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1457 1401 ret = svc_i3c_master_write(master, out, xfer_len); 1458 1402 if (ret < 0) 1459 1403 goto emit_stop; ··· 1466 1410 if (ret) 1467 1411 goto emit_stop; 1468 1412 1413 + if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR && 1414 + (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_CRC)) { 1415 + ret = -ENXIO; 1416 + goto emit_stop; 1417 + } 1418 + 1469 1419 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS); 1470 1420 1471 1421 if (!continued) { 1472 - svc_i3c_master_emit_stop(master); 1422 + if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1423 + svc_i3c_master_emit_stop(master); 1424 + else 1425 + svc_i3c_master_emit_force_exit(master); 1473 1426 1474 1427 /* Wait idle if stop is sent. 
*/ 1475 1428 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg, ··· 1488 1423 return 0; 1489 1424 1490 1425 emit_stop: 1491 - svc_i3c_master_emit_stop(master); 1426 + if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR) 1427 + svc_i3c_master_emit_stop(master); 1428 + else 1429 + svc_i3c_master_emit_force_exit(master); 1430 + 1492 1431 svc_i3c_master_clear_merrwarn(master); 1493 1432 svc_i3c_master_flush_fifo(master); 1494 1433 ··· 1539 1470 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 1540 1471 } 1541 1472 1473 + static int i3c_mode_to_svc_type(enum i3c_xfer_mode mode) 1474 + { 1475 + return (mode == I3C_SDR) ? SVC_I3C_MCTRL_TYPE_I3C : SVC_I3C_MCTRL_TYPE_DDR; 1476 + } 1477 + 1542 1478 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) 1543 1479 { 1544 1480 struct svc_i3c_xfer *xfer = master->xferqueue.cur; ··· 1558 1484 for (i = 0; i < xfer->ncmds; i++) { 1559 1485 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1560 1486 1561 - ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, 1487 + ret = svc_i3c_master_xfer(master, cmd->rnw_cmd, xfer->type, 1562 1488 cmd->addr, cmd->in, cmd->out, 1563 1489 cmd->len, &cmd->actual_len, 1564 1490 cmd->continued, i > 0); ··· 1733 1659 return ret; 1734 1660 } 1735 1661 1736 - static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev, 1737 - struct i3c_priv_xfer *xfers, 1738 - int nxfers) 1662 + static int svc_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, 1663 + int nxfers, enum i3c_xfer_mode mode) 1739 1664 { 1740 1665 struct i3c_master_controller *m = i3c_dev_get_master(dev); 1741 1666 struct svc_i3c_master *master = to_svc_i3c_master(m); ··· 1742 1669 struct svc_i3c_xfer *xfer; 1743 1670 int ret, i; 1744 1671 1672 + if (mode != I3C_SDR) { 1673 + /* 1674 + * Only support data size less than FIFO SIZE when using DDR 1675 + * mode. First entry is cmd in FIFO, so actual available FIFO 1676 + * for data is SVC_I3C_FIFO_SIZE - 2 since DDR only supports 1677 + * even length. 1678 + */ 1679 + for (i = 0; i < nxfers; i++) 1680 + if (xfers[i].len > SVC_I3C_FIFO_SIZE - 2) 1681 + return -EINVAL; 1682 + } 1683 + 1745 1684 xfer = svc_i3c_master_alloc_xfer(master, nxfers); 1746 1685 if (!xfer) 1747 1686 return -ENOMEM; 1748 1687 1749 - xfer->type = SVC_I3C_MCTRL_TYPE_I3C; 1688 + xfer->type = i3c_mode_to_svc_type(mode); 1750 1689 1751 1690 for (i = 0; i < nxfers; i++) { 1691 + u32 rnw_cmd = (mode == I3C_SDR) ? xfers[i].rnw : xfers[i].cmd; 1692 + bool rnw = svc_cmd_is_read(rnw_cmd, xfer->type); 1752 1693 struct svc_i3c_cmd *cmd = &xfer->cmds[i]; 1753 1694 1754 1695 cmd->xfer = &xfers[i]; 1755 1696 cmd->addr = master->addrs[data->index]; 1756 - cmd->rnw = xfers[i].rnw; 1757 - cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL; 1758 - cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out; 1697 + cmd->rnw_cmd = rnw_cmd; 1698 + cmd->in = rnw ? xfers[i].data.in : NULL; 1699 + cmd->out = rnw ? NULL : xfers[i].data.out; 1759 1700 cmd->len = xfers[i].len; 1760 - cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0; 1701 + cmd->actual_len = rnw ? xfers[i].len : 0; 1761 1702 cmd->continued = (i + 1) < nxfers; 1762 1703 } 1763 1704 ··· 1966 1879 .do_daa = svc_i3c_master_do_daa, 1967 1880 .supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd, 1968 1881 .send_ccc_cmd = svc_i3c_master_send_ccc_cmd, 1969 - .priv_xfers = svc_i3c_master_priv_xfers, 1882 + .i3c_xfers = svc_i3c_master_i3c_xfers, 1970 1883 .i2c_xfers = svc_i3c_master_i2c_xfers, 1971 1884 .request_ibi = svc_i3c_master_request_ibi, 1972 1885 .free_ibi = svc_i3c_master_free_ibi,
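
Note the driver-imposed limits in the DDR path above: the command byte takes
the first FIFO entry, payload lengths must be even, and a transfer has to fit
in the remaining FIFO space. A hedged client-side sanity check under those
assumptions (the FIFO depth below is an illustrative stand-in for the driver's
SVC_I3C_FIFO_SIZE):

#include <linux/i3c/device.h>

#define EXAMPLE_SVC_FIFO_SIZE	16	/* assumed depth, for illustration only */

/* Reject DDR payloads this controller cannot queue in a single FIFO fill. */
static bool example_svc_ddr_len_ok(const struct i3c_xfer *xfer)
{
	return !(xfer->len & 1) && xfer->len <= EXAMPLE_SVC_FIFO_SIZE - 2;
}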
+4 -4
drivers/net/mctp/mctp-i3c.c
··· 99 99 100 100 static int mctp_i3c_read(struct mctp_i3c_device *mi) 101 101 { 102 - struct i3c_priv_xfer xfer = { .rnw = 1, .len = mi->mrl }; 102 + struct i3c_xfer xfer = { .rnw = 1, .len = mi->mrl }; 103 103 struct net_device_stats *stats = &mi->mbus->ndev->stats; 104 104 struct mctp_i3c_internal_hdr *ihdr = NULL; 105 105 struct sk_buff *skb = NULL; ··· 127 127 128 128 /* Make sure netif_rx() is read in the same order as i3c. */ 129 129 mutex_lock(&mi->lock); 130 - rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); 130 + rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR); 131 131 if (rc < 0) 132 132 goto err; 133 133 ··· 360 360 static void mctp_i3c_xmit(struct mctp_i3c_bus *mbus, struct sk_buff *skb) 361 361 { 362 362 struct net_device_stats *stats = &mbus->ndev->stats; 363 - struct i3c_priv_xfer xfer = { .rnw = false }; 363 + struct i3c_xfer xfer = { .rnw = false }; 364 364 struct mctp_i3c_internal_hdr *ihdr = NULL; 365 365 struct mctp_i3c_device *mi = NULL; 366 366 unsigned int data_len; ··· 409 409 data[data_len] = pec; 410 410 411 411 xfer.data.out = data; 412 - rc = i3c_device_do_priv_xfers(mi->i3c, &xfer, 1); 412 + rc = i3c_device_do_xfers(mi->i3c, &xfer, 1, I3C_SDR); 413 413 if (rc == 0) { 414 414 stats->tx_bytes += data_len; 415 415 stats->tx_packets++;
+30 -12
include/linux/i3c/device.h
··· 27 27 * These are the standard error codes as defined by the I3C specification. 28 28 * When -EIO is returned by the i3c_device_do_priv_xfers() or 29 29 * i3c_device_send_hdr_cmds() one can check the error code in 30 - * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of 30 + * &struct_i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of 31 31 * what went wrong. 32 32 * 33 33 */ ··· 39 39 }; 40 40 41 41 /** 42 - * enum i3c_hdr_mode - HDR mode ids 42 + * enum i3c_xfer_mode - I3C xfer mode ids 43 43 * @I3C_HDR_DDR: DDR mode 44 44 * @I3C_HDR_TSP: TSP mode 45 45 * @I3C_HDR_TSL: TSL mode 46 + * @I3C_SDR: SDR mode (NOT HDR mode) 46 47 */ 47 - enum i3c_hdr_mode { 48 - I3C_HDR_DDR, 49 - I3C_HDR_TSP, 50 - I3C_HDR_TSL, 48 + enum i3c_xfer_mode { 49 + /* The below 3 value (I3C_HDR*) must match GETCAP1 Byte bit position */ 50 + I3C_HDR_DDR = 0, 51 + I3C_HDR_TSP = 1, 52 + I3C_HDR_TSL = 2, 53 + /* Use for default SDR transfer mode */ 54 + I3C_SDR = 31, 51 55 }; 52 56 53 57 /** 54 - * struct i3c_priv_xfer - I3C SDR private transfer 58 + * struct i3c_xfer - I3C data transfer 55 59 * @rnw: encodes the transfer direction. true for a read, false for a write 60 + * @cmd: Read/Write command in HDR mode, read: 0x80 - 0xff, write: 0x00 - 0x7f 56 61 * @len: transfer length in bytes of the transfer 57 62 * @actual_len: actual length in bytes are transferred by the controller 58 63 * @data: input/output buffer ··· 65 60 * @data.out: output buffer. Must point to a DMA-able buffer 66 61 * @err: I3C error code 67 62 */ 68 - struct i3c_priv_xfer { 69 - u8 rnw; 63 + struct i3c_xfer { 64 + union { 65 + u8 rnw; 66 + u8 cmd; 67 + }; 70 68 u16 len; 71 69 u16 actual_len; 72 70 union { ··· 78 70 } data; 79 71 enum i3c_error_code err; 80 72 }; 73 + 74 + /* keep back compatible */ 75 + #define i3c_priv_xfer i3c_xfer 81 76 82 77 /** 83 78 * enum i3c_dcr - I3C DCR values ··· 308 297 i3c_i2c_driver_unregister, \ 309 298 __i2cdrv) 310 299 311 - int i3c_device_do_priv_xfers(struct i3c_device *dev, 312 - struct i3c_priv_xfer *xfers, 313 - int nxfers); 300 + int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers, 301 + int nxfers, enum i3c_xfer_mode mode); 302 + 303 + static inline int i3c_device_do_priv_xfers(struct i3c_device *dev, 304 + struct i3c_xfer *xfers, 305 + int nxfers) 306 + { 307 + return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR); 308 + } 314 309 315 310 int i3c_device_do_setdasa(struct i3c_device *dev); 316 311 ··· 358 341 void i3c_device_free_ibi(struct i3c_device *dev); 359 342 int i3c_device_enable_ibi(struct i3c_device *dev); 360 343 int i3c_device_disable_ibi(struct i3c_device *dev); 344 + u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev); 361 345 362 346 #endif /* I3C_DEV_H */
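
On the client side of the new API, HDR transfers reuse the same struct but
fill .cmd instead of .rnw; per the kernel-doc above, commands 0x80-0xff are
reads and 0x00-0x7f are writes. A hedged sketch of a DDR read (the 0x80
command code is only an example; real command values are device-specific):

#include <linux/i3c/device.h>

/* Illustrative HDR-DDR read of 'len' bytes using the new .cmd field. */
static int example_ddr_read(struct i3c_device *i3cdev, u8 *buf, u16 len)
{
	struct i3c_xfer xfer = {
		.cmd = 0x80,	/* bit 7 set => read command in HDR mode */
		.len = len,
		.data.in = buf,
	};

	return i3c_device_do_xfers(i3cdev, &xfer, 1, I3C_HDR_DDR);
}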
+9 -1
include/linux/i3c/master.h
··· 418 418 * @send_ccc_cmd: send a CCC command 419 419 * This method is mandatory. 420 420 * @priv_xfers: do one or several private I3C SDR transfers 421 - * This method is mandatory. 421 + * This method is mandatory when i3c_xfers is not implemented. It 422 + * is deprecated. 423 + * @i3c_xfers: do one or several I3C SDR or HDR transfers 424 + * This method is mandatory when priv_xfers is not implemented but 425 + * should be implemented instead of priv_xfers. 422 426 * @attach_i2c_dev: called every time an I2C device is attached to the bus. 423 427 * This is a good place to attach master controller specific 424 428 * data to I2C devices. ··· 478 474 const struct i3c_ccc_cmd *cmd); 479 475 int (*send_ccc_cmd)(struct i3c_master_controller *master, 480 476 struct i3c_ccc_cmd *cmd); 477 + /* Deprecated, please use i3c_xfers() */ 481 478 int (*priv_xfers)(struct i3c_dev_desc *dev, 482 479 struct i3c_priv_xfer *xfers, 483 480 int nxfers); 481 + int (*i3c_xfers)(struct i3c_dev_desc *dev, 482 + struct i3c_xfer *xfers, 483 + int nxfers, enum i3c_xfer_mode mode); 484 484 int (*attach_i2c_dev)(struct i2c_dev_desc *dev); 485 485 void (*detach_i2c_dev)(struct i2c_dev_desc *dev); 486 486 int (*i2c_xfers)(struct i2c_dev_desc *dev,
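
From the controller side, i3c_dev_do_xfers_locked() already screens HDR
requests against info.hdr_cap and only falls back to priv_xfers for SDR, so a
driver moving to the new callback mainly needs to dispatch on the mode. A
minimal sketch assuming an SDR-only controller (names are hypothetical):

#include <linux/errno.h>
#include <linux/i3c/master.h>

static int example_master_i3c_xfers(struct i3c_dev_desc *dev,
				    struct i3c_xfer *xfers, int nxfers,
				    enum i3c_xfer_mode mode)
{
	if (mode != I3C_SDR)
		return -EOPNOTSUPP;

	/* ...queue xfers[0..nxfers-1] exactly as the old priv_xfers path did... */
	return 0;
}

Controllers that do support HDR additionally advertise it in their
i3c_device_info.hdr_cap, as the svc driver now does with
I3C_CCC_HDR_MODE(I3C_HDR_DDR), so the core lets those modes through.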