Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.3 362 lines 10 kB view raw
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "pcie-cadence.h"

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @dev: pointer to PCIe device
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @bus_range: first/last buses behind the PCIe host controller
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @max_regions: maximum number of regions supported by the hardware
 * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
 *                translation (nbits sets into the "no BAR match" register)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct device		*dev;
	struct resource		*cfg_res;
	struct resource		*bus_range;
	void __iomem		*cfg_base;
	u32			max_regions;
	u32			no_bar_nbits;
	u16			vendor_id;
	u16			device_id;
};

/*
 * cdns_pci_map_bus() - .map_bus hook for the generic config accessors.
 *
 * Returns the virtual address at which a config-space register of
 * (@bus, @devfn) can be read/written, or NULL if the access must fail.
 *
 * Accesses to the root bus go straight to the controller's local
 * registers (@reg_base).  Accesses to any downstream bus first reprogram
 * outbound AXI region 0 so that the shared @cfg_base window targets the
 * requested bus/devfn, then return an offset into that window.  The
 * "& 0xfff" masks keep the offset inside one 4 KB config space.
 */
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (busn == rc->bus_range->start) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 *
	 * Type 0 accesses are used only for the bus immediately below the
	 * root port; anything deeper must be a Type 1 access forwarded by
	 * an intermediate bridge.
	 */
	if (busn == rc->bus_range->start + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}

/* Config accessors: only .map_bus is custom, read/write are generic. */
static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

static const struct of_device_id cdns_pcie_host_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-host" },

	{ },
};

/*
 * cdns_pcie_host_init_root_port() - program the root port's static
 * configuration: RC BAR behaviour, IDs and class code.
 *
 * Vendor/device IDs are written only when the DT actually provided one
 * (0xffff is the "not set" sentinel filled in by the probe routine).
 * Always returns 0; kept int-returning to match its sibling init helper.
 */
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	/* Advertise as a standard PCI-to-PCI bridge, revision 0. */
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}

/*
 * cdns_pcie_host_init_address_translation() - program the controller's
 * outbound and inbound address translation registers.
 *
 * Outbound region 0 is reserved for config accesses (its PCI_ADDR0/DESC0
 * halves are rewritten per-access by cdns_pci_map_bus(); the static
 * halves are set here).  Regions 1..max_regions-1 are filled from the DT
 * "ranges" property; range types other than MEM/IO are skipped, and any
 * ranges beyond the hardware limit are silently dropped.  Finally the
 * "no BAR match" inbound region is opened for MSI and DMA.
 *
 * Returns 0 on success or a negative errno from the DT range parser.
 */
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct resource *cfg_res = rc->cfg_res;
	struct resource *mem_res = pcie->mem_res;
	struct resource *bus_range = rc->bus_range;
	struct device *dev = rc->dev;
	struct device_node *np = dev->of_node;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 addr0, addr1, desc1;
	u64 cpu_addr;
	int r, err;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	/*
	 * CPU-side address of the config window, expressed relative to the
	 * controller's "mem" aperture; low 8 bits are masked off by the
	 * register layout.
	 */
	cpu_addr = cfg_res->start - mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	err = of_pci_range_parser_init(&parser, np);
	if (err)
		return err;

	/* Region 0 is taken by config space, so start DT ranges at 1. */
	r = 1;
	for_each_of_pci_range(&parser, &range) {
		bool is_io;

		if (r >= rc->max_regions)
			break;

		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			is_io = false;
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			is_io = true;
		else
			continue;

		cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
					      range.cpu_addr,
					      range.pci_addr,
					      range.size);
		r++;
	}

	/*
	 * Set Root Port no BAR match Inbound Translation registers:
	 * needed for MSI and DMA.
	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
	 * inbound translation registers.
	 */
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
	addr1 = 0;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);

	return 0;
}

/*
 * cdns_pcie_host_init() - parse/request the DT PCI ranges into @resources
 * and run both hardware init helpers.
 *
 * On failure after the ranges were parsed, the resource list is freed
 * here; on success ownership of @resources passes to the caller.
 * Returns 0 on success or a negative errno.
 */
static int cdns_pcie_host_init(struct device *dev,
			       struct list_head *resources,
			       struct cdns_pcie_rc *rc)
{
	struct resource *bus_range = NULL;
	int err;

	/* Parse our PCI ranges and request their resources */
	err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
	if (err)
		return err;

	rc->bus_range = bus_range;
	rc->pcie.bus = bus_range->start;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		goto err_out;

	err = cdns_pcie_host_init_address_translation(rc);
	if (err)
		goto err_out;

	return 0;

 err_out:
	pci_free_resource_list(resources);
	return err;
}

/*
 * cdns_pcie_host_probe() - platform probe: allocate the host bridge,
 * read optional DT tuning properties, map the "reg"/"cfg"/"mem"
 * apertures, bring up the PHYs and PM runtime, then register the bridge.
 *
 * The error path unwinds in reverse order: resource list, PM usage
 * count, PM runtime, PHYs and the device links created for them.
 */
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct list_head resources;
	struct cdns_pcie_rc *rc;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;
	int phy_count;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;

	rc = pci_host_bridge_priv(bridge);
	rc->dev = dev;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	/* Optional DT overrides; the defaults below apply when absent. */
	rc->max_regions = 32;
	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);

	rc->no_bar_nbits = 32;
	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);

	rc->vendor_id = 0xffff;
	of_property_read_u16(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u16(np, "device-id", &rc->device_id);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base)) {
		dev_err(dev, "missing \"cfg\"\n");
		return PTR_ERR(rc->cfg_base);
	}
	rc->cfg_res = res;

	/* "mem" is only recorded, not mapped: used as the outbound base. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		/*
		 * NOTE(review): pm_runtime_get_sync() bumps the usage count
		 * even on failure, yet this path goes to err_get_sync which
		 * skips pm_runtime_put_sync() — confirm whether a
		 * put_noidle() is needed here, as done in later kernels.
		 */
		goto err_get_sync;
	}

	ret = cdns_pcie_host_init(dev, &resources, rc);
	if (ret)
		goto err_init;

	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->busnr = pcie->bus;
	bridge->ops = &cdns_pcie_host_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_host_probe;

	return 0;

 err_host_probe:
	pci_free_resource_list(&resources);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}

/*
 * cdns_pcie_shutdown() - .shutdown hook: drop the PM usage count,
 * disable runtime PM and power the PHYs down.  A failed put is only
 * logged at debug level since shutdown cannot be aborted anyway.
 */
static void cdns_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
}

static struct platform_driver cdns_pcie_host_driver = {
	.driver = {
		.name = "cdns-pcie-host",
		.of_match_table = cdns_pcie_host_of_match,
		.pm	= &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_host_probe,
	.shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);