// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

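/*
 * Two sets of config-space accessors are used: dw_pcie_ops serves the
 * root bus through direct DBI reads/writes, while dw_child_pcie_ops
 * reaches downstream devices through an outbound iATU window. Both are
 * defined near the end of this file.
 */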
static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

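/*
 * The MSI IRQs form a hierarchy: the irq_chip below sits on top and
 * delegates ack/mask/unmask to its parent, the controller-specific
 * "bottom" chip (dw_pci_msi_bottom_irq_chip) that actually touches the
 * DWC MSI registers. Mask/unmask also update the endpoint's standard
 * PCI MSI capability.
 */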
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

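	/*
	 * Each MSI controller services MAX_MSI_IRQS_PER_CTRL vectors and
	 * has its own STATUS register, laid out MSI_REG_CTRL_BLOCK_SIZE
	 * bytes apart. Demultiplex every pending bit to the corresponding
	 * IRQ in the MSI domain.
	 */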
	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

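/*
 * Compose the MSI message for an endpoint: the address is the DMA
 * address of the coherent buffer allocated in dw_pcie_msi_host_init()
 * (the MSI target the controller's MSI logic matches on), and the
 * payload is simply the hwirq number, which decodes back into a
 * controller/bit pair.
 */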
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

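/*
 * The "bottom" chip operations below program the per-controller
 * INTR0_MASK/INTR0_STATUS registers in DBI space. A hwirq maps to
 * controller hwirq / MAX_MSI_IRQS_PER_CTRL and bit
 * hwirq % MAX_MSI_IRQS_PER_CTRL within that controller's block.
 */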
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

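/*
 * Allocate a contiguous, power-of-two aligned block of vectors from
 * the msi_irq_in_use bitmap, as required to support Multi-MSI, and
 * bind each one to the bottom irq_chip with edge-triggered handling.
 */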
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

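/*
 * Build the two-level IRQ domain hierarchy: a linear "nexus" domain
 * that hands out controller vectors, with a generic PCI-MSI domain
 * stacked on top of it.
 */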
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

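/*
 * Some platforms wire each MSI controller to its own upstream IRQ
 * line, described in the devicetree as "msi0", "msi1", and so on.
 * Collect those here; -ENXIO tells the caller to fall back to the
 * single combined "msi" IRQ.
 */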
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

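/*
 * Default MSI setup: discover the upstream IRQ(s), create the IRQ
 * domains, install the chained handler(s) and allocate the coherent
 * buffer whose DMA address serves as the MSI target.
 */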
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note: until a better alternative is found, the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}

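/*
 * dw_pcie_host_init - map the host resources, set up MSI handling,
 * bring up the link and probe the host bridge for a DWC-based Root
 * Complex.
 */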
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case, the default assignment is
		 * handled in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

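/*
 * Accesses to devices below the root bus go through an outbound iATU
 * window: region 0 is reprogrammed on every access as Type0 config
 * (for the bus immediately below the Root Port) or Type1 config,
 * remapping the "config" space onto the target bus/device/function.
 */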
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

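/*
 * Distribute the bridge windows over the available iATU regions:
 * outbound region 0 stays reserved for config (and possibly I/O)
 * accesses, the remaining outbound regions carry the MEM and I/O
 * ranges, and the inbound regions map any dma-ranges.
 */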
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

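/*
 * Program the Root Complex registers: MSI controller state, RC BARs,
 * bus numbers, the command register, the iATU windows (unless the
 * platform provides its own child bus accessors) and the bridge class
 * code.
 */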
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);