// SPDX-License-Identifier: GPL-2.0
/*
 * PCI detection and setup code
 */

#include <linux/array_size.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/bitfield.h>
#include <trace/events/pci.h>
#include "pci.h"

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}
/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we consider PCI not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_next_device(&pci_bus_type, NULL);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static const struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = size & ~(size-1);

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | (size - 1)) & mask) != mask)
		return 0;

	return size;
}
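
/*
 * Example: sizing a 1 MB 32-bit memory BAR.  After the all-1s write the
 * device returns maxbase = 0xfff00000 (the low 20 address bits are
 * hardwired to 0).  With mask = 0xfffffff0:
 *
 *	mask & maxbase		= 0xfff00000
 *	size & ~(size - 1)	= 0x00100000	(lowest set bit)
 *
 * so the BAR decodes 0x00100000 bytes (1 MB).
 */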

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
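
/*
 * Example: for a raw BAR value of 0x0000000c, bit 0 is clear (memory
 * space), the type field is PCI_BASE_ADDRESS_MEM_TYPE_64 and the
 * prefetch bit is set, so decode_bar() returns IORESOURCE_MEM |
 * IORESOURCE_PREFETCH | IORESOURCE_MEM_64 (plus the low BAR attribute
 * bits).  A value of 0x00000001 selects I/O space and simply yields
 * IORESOURCE_IO.
 */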

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
 * @dev: the PCI device
 * @count: number of BARs to size
 * @pos: starting config space position
 * @sizes: array to store mask values
 * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
 *
 * Provided @sizes array must be sufficiently sized to store results for
 * @count u32 BARs.  Caller is responsible for disabling decode to specified
 * BAR range around calling this function.  This function is intended to avoid
 * disabling decode around sizing each BAR individually, which can result in
 * non-trivial overhead in virtualized environments with very large PCI BARs.
 */
static void __pci_size_bars(struct pci_dev *dev, int count,
			    unsigned int pos, u32 *sizes, bool rom)
{
	u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
	int i;

	for (i = 0; i < count; i++, pos += 4, sizes++) {
		pci_read_config_dword(dev, pos, &orig);
		pci_write_config_dword(dev, pos, mask);
		pci_read_config_dword(dev, pos, sizes);
		pci_write_config_dword(dev, pos, orig);
	}
}

void __pci_size_stdbars(struct pci_dev *dev, int count,
			unsigned int pos, u32 *sizes)
{
	__pci_size_bars(dev, count, pos, sizes, false);
}

static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
{
	__pci_size_bars(dev, 1, pos, sizes, true);
}

/**
 * __pci_read_base - Read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 * @sizes: array of one or more pre-read BAR masks
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos, u32 *sizes)
{
	u32 l = 0, sz;
	u64 l64, sz64, mask64;
	struct pci_bus_region region, inverted_region;
	const char *res_name = pci_resource_name(dev, res - dev->resource);

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	sz = sizes[0];

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (PCI_POSSIBLE_ERROR(sz))
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (PCI_POSSIBLE_ERROR(l))
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		sz = sizes[1];

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			resource_set_range(res, 0, 0);
			pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
				res_name, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			resource_set_range(res, 0, sz64);
			pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
				 res_name, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64 - 1;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *	resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
			 res_name, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		pci_info(dev, "%s %pR\n", res_name, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static __always_inline void pci_read_bases(struct pci_dev *dev,
					   unsigned int howmany, int rom)
{
	u32 rombar, stdbars[PCI_STD_NUM_BARS];
	unsigned int pos, reg;
	u16 orig_cmd;

	BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));

	if (dev->non_compliant_bars)
		return;

	/* Per PCIe r4.0, sec 9.3.4.1.11, the VF BARs are all RO Zero */
	if (dev->is_virtfn)
		return;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	__pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
	if (rom)
		__pci_size_rom(dev, rom, &rombar);

	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown,
				       res, reg, &stdbars[pos]);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
			IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
	}
}

static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
			       bool log)
{
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;

	if (base <= limit) {
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	} else {
		resource_set_range(res, 0, 0);
		res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
	}
}
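
/*
 * Example: with the default 4K granularity, io_base_lo = 0x50 and
 * io_limit_lo = 0x70 decode to base = 0x5000 and limit = 0x7000, so the
 * bridge forwards I/O in [0x5000, 0x7fff] (limit + io_granularity - 1).
 */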

static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
				 bool log)
{
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;

	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;

	res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;

	if (base <= limit) {
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	} else {
		resource_set_range(res, 0, 0);
		res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
	}
}

static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
				      bool log)
{
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;

	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM |
		IORESOURCE_PREFETCH;
	if (res->flags & PCI_PREF_RANGE_TYPE_64)
		res->flags |= IORESOURCE_MEM_64;

	if (base <= limit) {
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		if (log)
			pci_info(dev, "  bridge window %pR\n", res);
	} else {
		resource_set_range(res, 0, 0);
		res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
	}
}
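
/*
 * Example: mem_base_lo = 0xfff1 reports PCI_PREF_RANGE_TYPE_64 in its
 * low nibble, so the upper halves of base and limit come from
 * PCI_PREF_BASE_UPPER32/PCI_PREF_LIMIT_UPPER32, while the masked low
 * word supplies bits 31:20: (0xfff1 & PCI_PREF_RANGE_MASK) << 16 =
 * 0xfff00000.
 */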

static void pci_read_bridge_windows(struct pci_dev *bridge)
{
	u32 buses;
	u16 io;
	u32 pmem, tmp;
	struct resource res;

	pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
	res.flags = IORESOURCE_BUS;
	res.start = FIELD_GET(PCI_SECONDARY_BUS_MASK, buses);
	res.end = FIELD_GET(PCI_SUBORDINATE_BUS_MASK, buses);
	pci_info(bridge, "PCI bridge to %pR%s\n", &res,
		 bridge->transparent ? " (subtractive decode)" : "");

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io) {
		bridge->io_window = 1;
		pci_read_bridge_io(bridge, &res, true);
	}

	pci_read_bridge_mmio(bridge, &res, true);

	/*
	 * DECchip 21050 pass 2 errata: the bridge may miss an address
	 * disconnect boundary by one PCI data phase.  Workaround: do not
	 * use prefetching on this device.
	 */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;

	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
				       0xffe0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (!pmem)
		return;

	bridge->pref_window = 1;

	if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {

		/*
		 * Bridge claims to have a 64-bit prefetchable memory
		 * window; verify that the upper bits are actually
		 * writable.
		 */
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
		if (tmp)
			bridge->pref_64_window = 1;
	}

	pci_read_bridge_mmio_pref(bridge, &res, true);
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	pci_info(dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child->self,
			   child->resource[PCI_BUS_BRIDGE_IO_WINDOW], false);
	pci_read_bridge_mmio(child->self,
			     child->resource[PCI_BUS_BRIDGE_MEM_WINDOW], false);
	pci_read_bridge_mmio_pref(child->self,
				  child->resource[PCI_BUS_BRIDGE_PREF_MEM_WINDOW],
				  false);

	if (!dev->transparent)
		return;

	pci_bus_for_each_resource(child->parent, res) {
		if (!res || !res->flags)
			continue;

		pci_bus_add_resource(child, res);
		pci_info(dev, "  bridge window %pR (subtractive decode)\n", res);
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);
	pci_free_resource_list(&bridge->dma_ranges);

	/* Host bridges only have domain_nr set in the emulation case */
	if (bridge->domain_nr != PCI_DOMAIN_NR_NOT_SET)
		pci_bus_release_emul_domain_nr(bridge->domain_nr);

	kfree(bridge);
}

static const struct attribute_group *pci_host_bridge_groups[] = {
#ifdef CONFIG_PCI_IDE
	&pci_ide_attr_group,
#endif
	NULL
};

static const struct device_type pci_host_bridge_type = {
	.groups = pci_host_bridge_groups,
	.release = pci_release_host_bridge_dev,
};

static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	/*
	 * We assume we can manage these PCIe features.  Some systems may
	 * reserve these for use by the platform itself, e.g., an ACPI BIOS
	 * may implement its own AER handling and use _OSC to prevent the
	 * OS from interfering.
	 */
	bridge->native_aer = 1;
	bridge->native_pcie_hotplug = 1;
	bridge->native_shpc_hotplug = 1;
	bridge->native_pme = 1;
	bridge->native_ltr = 1;
	bridge->native_dpc = 1;
	bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
	bridge->native_cxl_error = 1;
	bridge->dev.type = &pci_host_bridge_type;
	pci_ide_init_host_bridge(bridge);

	device_initialize(&bridge->dev);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	pci_init_host_bridge(bridge);

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

static void devm_pci_alloc_host_bridge_release(void *data)
{
	pci_free_host_bridge(data);
}

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	int ret;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(priv);
	if (!bridge)
		return NULL;

	bridge->dev.parent = dev;

	ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release,
				       bridge);
	if (ret)
		return NULL;

	ret = devm_of_pci_bridge_init(dev, bridge);
	if (ret)
		return NULL;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	put_device(&bridge->dev);
}
EXPORT_SYMBOL(pci_free_host_bridge);

/* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCIE_SPEED_16_0GT,		/* 4 */
	PCIE_SPEED_32_0GT,		/* 5 */
	PCIE_SPEED_64_0GT,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
EXPORT_SYMBOL_GPL(pcie_link_speed);

const char *pci_speed_string(enum pci_bus_speed speed)
{
	/* Indexed by the pci_bus_speed enum */
	static const char *speed_strings[] = {
		"33 MHz PCI",		/* 0x00 */
		"66 MHz PCI",		/* 0x01 */
		"66 MHz PCI-X",		/* 0x02 */
		"100 MHz PCI-X",	/* 0x03 */
		"133 MHz PCI-X",	/* 0x04 */
		NULL,			/* 0x05 */
		NULL,			/* 0x06 */
		NULL,			/* 0x07 */
		NULL,			/* 0x08 */
		"66 MHz PCI-X 266",	/* 0x09 */
		"100 MHz PCI-X 266",	/* 0x0a */
		"133 MHz PCI-X 266",	/* 0x0b */
		"Unknown AGP",		/* 0x0c */
		"1x AGP",		/* 0x0d */
		"2x AGP",		/* 0x0e */
		"4x AGP",		/* 0x0f */
		"8x AGP",		/* 0x10 */
		"66 MHz PCI-X 533",	/* 0x11 */
		"100 MHz PCI-X 533",	/* 0x12 */
		"133 MHz PCI-X 533",	/* 0x13 */
		"2.5 GT/s PCIe",	/* 0x14 */
		"5.0 GT/s PCIe",	/* 0x15 */
		"8.0 GT/s PCIe",	/* 0x16 */
		"16.0 GT/s PCIe",	/* 0x17 */
		"32.0 GT/s PCIe",	/* 0x18 */
		"64.0 GT/s PCIe",	/* 0x19 */
	};

	if (speed < ARRAY_SIZE(speed_strings))
		return speed_strings[speed];
	return "Unknown";
}
EXPORT_SYMBOL_GPL(pci_speed_string);

void pcie_update_link_speed(struct pci_bus *bus,
			    enum pcie_link_change_reason reason)
{
	struct pci_dev *bridge = bus->self;
	u16 linksta, linksta2;

	pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
	pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);

	__pcie_update_link_speed(bus, reason, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
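
/*
 * Example: in AGP 3.0 mode (agp3 set) the low status bits encode 4x/8x
 * rather than 1x/2x/4x, so agpstat = 1 maps to index 1 + 2 = 3 (AGP_4X)
 * and agpstat = 2 to index 4 (AGP_8X).  agpstat = 4 would give index 5,
 * which is out of range and falls back to AGP_UNKNOWN.
 */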

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed =
			pcix_bus_speed[FIELD_GET(PCI_X_SSTATUS_FREQ, status)];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_update_link_speed(bus, PCIE_ADD_BUS);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/* If the host bridge driver sets an MSI domain on the bridge, use it */
	d = dev_get_msi_domain(bus->bridge);

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	if (!d)
		d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
{
	if (pci_acpi_preserve_config(host_bridge))
		return true;

	if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
		return of_pci_preserve_config(host_bridge->dev.parent->of_node);

	return false;
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *next, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset, next_offset;
	LIST_HEAD(resources);
	struct resource *res, *next_res;
	bool bus_registered = false;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	bus->sysdata = bridge->sysdata;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
		bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
	else
		bus->domain_nr = bridge->domain_nr;
	if (bus->domain_nr < 0) {
		err = bus->domain_nr;
		goto free;
	}
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* Ignore it if we already got here via a different bridge */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	/* Temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	err = device_add(&bridge->dev);
	if (err)
		goto free;

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);
	if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
	    !pci_host_of_has_msi_map(parent))
		bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	bus_registered = true;
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	if (bus->ops->add_bus) {
		err = bus->ops->add_bus(bus);
		if (WARN_ON(err < 0))
			dev_err(&bus->dev, "failed to add bus: %d\n", err);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
		dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");

	/* Check if the boot configuration by FW needs to be preserved */
	bridge->preserve_config = pci_preserve_config(bridge);

	/* Coalesce contiguous windows */
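	/*
	 * Example: [mem 0x80000000-0x8fffffff] followed by
	 * [mem 0x90000000-0x9fffffff] with identical flags and CPU-to-bus
	 * offsets satisfies res->end + 1 == next_res->start, so the pair
	 * collapses into [mem 0x80000000-0x9fffffff] and the first window
	 * is zeroed (and released just below).
	 */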
	resource_list_for_each_entry_safe(window, n, &resources) {
		if (list_is_last(&window->node, &resources))
			break;

		next = list_next_entry(window, node);
		offset = window->offset;
		res = window->res;
		next_offset = next->offset;
		next_res = next->res;

		if (res->flags != next_res->flags || offset != next_offset)
			continue;

		if (res->end + 1 == next_res->start) {
			next_res->start = res->start;
			res->flags = res->start = res->end = 0;
		}
	}

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		offset = window->offset;
		res = window->res;
		if (!res->flags && !res->start && !res->end) {
			release_resource(res);
			resource_list_destroy_entry(window);
			continue;
		}

		list_move_tail(&window->node, &bridge->windows);

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	of_pci_make_host_bridge_node(bridge);

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_del(&bridge->dev);
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
		pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
	if (bus_registered)
		put_device(&bus->dev);
	else
		kfree(bus);

	return err;
}

static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
{
	int pos;
	u32 status;

	/*
	 * If extended config space isn't accessible on a bridge's primary
	 * bus, we certainly can't access it on the secondary bus.
	 */
	if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
		return false;

	/*
	 * PCIe Root Ports and switch ports are PCIe on both sides, so if
	 * extended config space is accessible on the primary, it's also
	 * accessible on the secondary.
	 */
	if (pci_is_pcie(bridge) &&
	    (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
	     pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
	     pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM))
		return true;

	/*
	 * For the other bridge types:
	 *   - PCI-to-PCI bridges
	 *   - PCIe-to-PCI/PCI-X forward bridges
	 *   - PCI/PCI-X-to-PCIe reverse bridges
	 * extended config space on the secondary side is only accessible
	 * if the bridge supports PCI-X Mode 2.
	 */
	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (!pos)
		return false;

	pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status);
	return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ);
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	struct pci_host_bridge *host;
	int i;
	int ret;

	/* Allocate a new bus and inherit stuff from the parent */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	host = pci_find_host_bridge(parent);
	if (host->child_ops)
		child->ops = host->child_ops;
	else
		child->ops = parent->ops;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/* Set up the primary, secondary and subordinate bus numbers */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/*
	 * Check whether extended config space is accessible on the child
	 * bus.  Note that we currently assume it is always accessible on
	 * the root bus.
	 */
	if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
		child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
		pci_info(child, "extended config space not accessible\n");
	}

	/* Set up default resource pointers and names */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	if (WARN_ON(ret < 0)) {
		put_device(&child->dev);
		return NULL;
	}

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable Configuration RRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_RRS_SVE);
		pdev->config_rrs_sv = 1;
	}
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

void pbus_validate_busn(struct pci_bus *bus)
{
	struct pci_bus *upstream = bus->parent;
	struct pci_dev *bridge = bus->self;

	/* Check that all devices are accessible */
	while (upstream->parent) {
		if ((bus->busn_res.end > upstream->busn_res.end) ||
		    (bus->number > upstream->busn_res.end) ||
		    (bus->number < upstream->number) ||
		    (bus->busn_res.end < upstream->number)) {
			pci_info(bridge, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
				 &bus->busn_res);
			break;
		}
		upstream = upstream->parent;
	}
}

/**
 * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus
 * numbers from EA capability.
 * @dev: Bridge
 * @sec: updated with secondary bus number from EA
 * @sub: updated with subordinate bus number from EA
 *
 * If @dev is a bridge with EA capability that specifies valid secondary
 * and subordinate bus numbers, return true with the bus numbers in @sec
 * and @sub.  Otherwise return false.
 */
bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
	int ea, offset;
	u32 dw;
	u8 ea_sec, ea_sub;

	if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
		return false;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return false;

	offset = ea + PCI_EA_FIRST_ENT;
	pci_read_config_dword(dev, offset, &dw);
	ea_sec = FIELD_GET(PCI_EA_SEC_BUS_MASK, dw);
	ea_sub = FIELD_GET(PCI_EA_SUB_BUS_MASK, dw);
	if (ea_sec == 0 || ea_sub < ea_sec)
		return false;

	*sec = ea_sec;
	*sub = ea_sub;
	return true;
}

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below.  After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Return: New subordinate number covering all buses behind this bridge.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	u32 buses;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;
	bool fixed_buses;
	u8 fixed_sec, fixed_sub;
	int next_busnr;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = FIELD_GET(PCI_PRIMARY_BUS_MASK, buses);
	secondary = FIELD_GET(PCI_SECONDARY_BUS_MASK, buses);
	subordinate = FIELD_GET(PCI_SUBORDINATE_BUS_MASK, buses);

	pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		pci_warn(dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable Master-Abort Mode during probing to avoid reporting of
	 * bus errors in some architectures.
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if (pci_is_cardbus_bridge(dev)) {
		max = pci_cardbus_scan_bridge_extend(bus, dev, buses, max,
						     available_buses,
						     pass);
		goto out;
	}

	if ((secondary || subordinate) &&
	    !pcibios_assign_all_busses() && !broken) {
		unsigned int cmax, buses;

		/*
		 * Bus already configured by firmware, process it in the
		 * first pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we
		 * are rescanning the bus or the bus is reachable through
		 * more than one bridge. The second case can happen with
		 * the i450NX chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		buses = subordinate - secondary;
		cmax = pci_scan_child_bus_extend(child, buses);
		if (cmax > subordinate)
			pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);

		/* Subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {

		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)

				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
					buses & PCI_SEC_LATENCY_TIMER_MASK);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Read bus numbers from EA Capability (if present) */
		fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub);
		if (fixed_buses)
			next_busnr = fixed_sec;
		else
			next_busnr = max + 1;

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in this
		 * case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), next_busnr);
		if (!child) {
			child = pci_add_new_bus(bus, dev, next_busnr);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, next_busnr,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

		buses = (buses & PCI_SEC_LATENCY_TIMER_MASK) |
			FIELD_PREP(PCI_PRIMARY_BUS_MASK, child->primary) |
			FIELD_PREP(PCI_SECONDARY_BUS_MASK, child->busn_res.start) |
			FIELD_PREP(PCI_SUBORDINATE_BUS_MASK, child->busn_res.end);

		/* We need to blast all three values with a single write */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		child->bridge_ctl = bctl;
		max = pci_scan_child_bus_extend(child, available_buses);

		/*
		 * Set subordinate bus number to its real value.
		 * If fixed subordinate bus number exists from EA
		 * capability then use it.
		 */
		if (fixed_buses)
			max = fixed_sub;
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}
	scnprintf(child->name, sizeof(child->name), "PCI Bus %04x:%02x",
		  pci_domain_nr(bus), child->number);

	pbus_validate_busn(child);

out:
	/* Clear errors in the Secondary Status Register */
	pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);

	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}

/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Return: New subordinate number covering all buses behind this bridge.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	/* VFs are not allowed to use INTx, so skip the config reads */
	if (dev->is_virtfn) {
		dev->pin = 0;
		dev->irq = 0;
		return;
	}

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	u32 reg32;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;

	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT)
		pci_enable_rrs_sv(pdev);

	pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
	pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
		pdev->link_active_reporting = 1;

#ifdef CONFIG_PCIEASPM
	if (reg32 & PCI_EXP_LNKCAP_ASPM_L0S)
		pdev->aspm_l0s_support = 1;
	if (reg32 & PCI_EXP_LNKCAP_ASPM_L1)
		pdev->aspm_l1_support = 1;
#endif

	parent = pci_upstream_bridge(pdev);
	if (!parent)
		return;

	/*
	 * Some systems do not identify their upstream/downstream ports
	 * correctly so detect impossible configurations here and correct
	 * the port type accordingly.
	 */
	if (type == PCI_EXP_TYPE_DOWNSTREAM) {
		/*
		 * If pdev claims to be downstream port but the parent
		 * device is also downstream port assume pdev is actually
		 * upstream port.
		 */
		if (pcie_downstream_port(parent)) {
			pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n");
			pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
			pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM;
		}
	} else if (type == PCI_EXP_TYPE_UPSTREAM) {
		/*
		 * If pdev claims to be upstream port but the parent
		 * device is also upstream port assume pdev is actually
		 * downstream port.
		 */
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) {
			pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n");
			pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
			pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM;
		}
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	u16 vsec;

	/* Is the device part of a Thunderbolt controller? */
	vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT);
	if (vsec)
		dev->is_thunderbolt = 1;
}

static void set_pcie_cxl(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	u16 dvsec, cap;

	if (!pci_is_pcie(dev))
		return;

	/*
	 * Update the parent's CXL state because alternate protocol
	 * training may have changed it.
	 */
	bridge = pci_upstream_bridge(dev);
	if (bridge)
		set_pcie_cxl(bridge);

	dvsec = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
					  PCI_DVSEC_CXL_FLEXBUS_PORT);
	if (!dvsec)
		return;

	pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS,
			     &cap);

	dev->is_cxl = FIELD_GET(PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_CACHE, cap) ||
		      FIELD_GET(PCI_DVSEC_CXL_FLEXBUS_PORT_STATUS_MEM, cap);
}

static void set_pcie_untrusted(struct pci_dev *dev)
{
	struct pci_dev *parent = pci_upstream_bridge(dev);

	if (!parent)
		return;
	/*
	 * If the upstream bridge is untrusted we treat this device as
	 * untrusted as well.
	 */
	if (parent->untrusted) {
		dev->untrusted = true;
		return;
	}

	if (arch_pci_dev_is_removable(dev)) {
		pci_dbg(dev, "marking as untrusted\n");
		dev->untrusted = true;
	}
}

static void pci_set_removable(struct pci_dev *dev)
{
	struct pci_dev *parent = pci_upstream_bridge(dev);

	if (!parent)
		return;
	/*
	 * We (only) consider everything tunneled below an external_facing
	 * device to be removable by the user.  We're mainly concerned with
	 * consumer platforms with user accessible thunderbolt ports that are
	 * vulnerable to DMA attacks, and we expect those ports to be marked by
	 * the firmware as external_facing.  Devices in traditional hotplug
	 * slots can technically be removed, but the expectation is that unless
	 * the port is marked with external_facing, such devices are less
	 * accessible to user / may not be removed by end user, and thus not
	 * exposed as "removable" to userspace.
	 */
	if (dev_is_removable(&parent->dev)) {
		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
		return;
	}

	if (arch_pci_dev_is_removable(dev)) {
		pci_dbg(dev, "marking as removable\n");
		dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
	}
}

/**
 * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos, ret;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		ret = pci_read_config_dword(dev, pos, &tmp);
		if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size_ext - Get the configuration space size of the PCI device
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

#ifdef CONFIG_PCI_IOV
	/*
	 * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to
	 * implement a PCIe capability and therefore must implement extended
	 * config space.  We can skip the NO_EXTCFG test below and the
	 * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of
	 * the fact that the SR-IOV capability on the PF resides in extended
	 * config space and must be accessible and non-aliased to have enabled
	 * support for this VF.  This is a micro performance optimization for
	 * systems supporting many VFs.
	 */
	if (dev->is_virtfn)
		return PCI_CFG_SPACE_EXP_SIZE;
#endif

	if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
		return PCI_CFG_SPACE_SIZE;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

static u32 pci_class(struct pci_dev *dev)
{
	u32 class;

#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		return dev->physfn->sriov->class;
#endif
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	return class;
}

static void pci_subsystem_ids(struct pci_dev *dev, u16 *vendor, u16 *device)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn) {
		*vendor = dev->physfn->sriov->subsystem_vendor;
		*device = dev->physfn->sriov->subsystem_device;
		return;
	}
#endif
	pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, vendor);
	pci_read_config_word(dev, PCI_SUBSYSTEM_ID, device);
}

static u8 pci_hdr_type(struct pci_dev *dev)
{
	u8 hdr_type;

#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		return dev->physfn->sriov->hdr_type;
#endif
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	return hdr_type;
}
1924
1925#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1926
1927/**
1928 * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability
1929 * @dev: PCI device
1930 *
1931 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1932 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1933 */
1934static int pci_intx_mask_broken(struct pci_dev *dev)
1935{
1936 u16 orig, toggle, new;
1937
1938 pci_read_config_word(dev, PCI_COMMAND, &orig);
1939 toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1940 pci_write_config_word(dev, PCI_COMMAND, toggle);
1941 pci_read_config_word(dev, PCI_COMMAND, &new);
1942
1943 pci_write_config_word(dev, PCI_COMMAND, orig);
1944
1945 /*
1946 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1947 * r2.3, so strictly speaking, a device is not *broken* if it's not
1948 * writable. But we'll live with the misnomer for now.
1949 */
1950 if (new != toggle)
1951 return 1;
1952 return 0;
1953}
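
/*
 * Illustrative sketch (not part of this file): callers that want to
 * mask INTx at the device should honour the flag recorded above before
 * touching PCI_COMMAND_INTX_DISABLE; pci_intx() is the usual interface.
 * The helper name is hypothetical.
 */
#if 0
static void example_mask_intx(struct pci_dev *pdev)
{
	if (!pdev->broken_intx_masking)
		pci_intx(pdev, 0);	/* set PCI_COMMAND_INTX_DISABLE */
}
#endif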
1954
1955static void early_dump_pci_device(struct pci_dev *pdev)
1956{
1957 u32 value[PCI_CFG_SPACE_SIZE / sizeof(u32)];
1958 int i;
1959
1960 pci_info(pdev, "config space:\n");
1961
1962 for (i = 0; i < ARRAY_SIZE(value); i++)
1963 pci_read_config_dword(pdev, i * sizeof(u32), &value[i]);
1964
1965 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
1966 value, ARRAY_SIZE(value) * sizeof(u32), false);
1967}
1968
1969static const char *pci_type_str(struct pci_dev *dev)
1970{
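	/*
	 * Indexed by the PCI_EXP_FLAGS_TYPE device/port type encoding
	 * returned by pci_pcie_type(); encodings 0x2 and 0x3 are not
	 * defined, hence the "PCIe unknown" placeholders.
	 */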
1971 static const char * const str[] = {
1972 "PCIe Endpoint",
1973 "PCIe Legacy Endpoint",
1974 "PCIe unknown",
1975 "PCIe unknown",
1976 "PCIe Root Port",
1977 "PCIe Switch Upstream Port",
1978 "PCIe Switch Downstream Port",
1979 "PCIe to PCI/PCI-X bridge",
1980 "PCI/PCI-X to PCIe bridge",
1981 "PCIe Root Complex Integrated Endpoint",
1982 "PCIe Root Complex Event Collector",
1983 };
1984 int type;
1985
1986 if (pci_is_pcie(dev)) {
1987 type = pci_pcie_type(dev);
1988 if (type < ARRAY_SIZE(str))
1989 return str[type];
1990
1991 return "PCIe unknown";
1992 }
1993
1994 switch (dev->hdr_type) {
1995 case PCI_HEADER_TYPE_NORMAL:
1996 return "conventional PCI endpoint";
1997 case PCI_HEADER_TYPE_BRIDGE:
1998 return "conventional PCI bridge";
1999 case PCI_HEADER_TYPE_CARDBUS:
2000 return "CardBus bridge";
2001 default:
2002 return "conventional PCI";
2003 }
2004}
2005
2006/**
2007 * pci_setup_device - Fill in class and map information of a device
2008 * @dev: the device structure to fill
2009 *
2010 * Initialize the device structure with information about the device's
2011 * vendor, class, memory and IO-space addresses, IRQ lines, etc.
2012 * Called at initialisation of the PCI subsystem and by CardBus services.
2013 * Returns 0 on success and a negative errno if the device is of an
2014 * unknown type (not normal, bridge or CardBus).
2015 */
2016int pci_setup_device(struct pci_dev *dev)
2017{
2018 u32 class;
2019 u16 cmd;
2020 u8 hdr_type;
2021 int err, pos = 0;
2022 struct pci_bus_region region;
2023 struct resource *res;
2024
2025 hdr_type = pci_hdr_type(dev);
2026
2027 dev->sysdata = dev->bus->sysdata;
2028 dev->dev.parent = dev->bus->bridge;
2029 dev->dev.bus = &pci_bus_type;
2030 dev->hdr_type = FIELD_GET(PCI_HEADER_TYPE_MASK, hdr_type);
2031 dev->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr_type);
2032 dev->error_state = pci_channel_io_normal;
2033 set_pcie_port_type(dev);
2034
2035 err = pci_set_of_node(dev);
2036 if (err)
2037 return err;
2038 pci_set_acpi_fwnode(dev);
2039
2040 pci_dev_assign_slot(dev);
2041
2042 /*
2043 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
2044 * set this higher, assuming the system even supports it.
2045 */
2046 dev->dma_mask = 0xffffffff;
2047
2048 /*
2049 * Assume 64-bit addresses for MSI initially. Will be changed to 32-bit
2050 * if the MSI (rather than MSI-X) capability does not have
2051 * PCI_MSI_FLAGS_64BIT. Can also be overridden by the driver.
2052 */
2053 dev->msi_addr_mask = DMA_BIT_MASK(64);
2054
2055 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
2056 dev->bus->number, PCI_SLOT(dev->devfn),
2057 PCI_FUNC(dev->devfn));
2058
2059 class = pci_class(dev);
2060
2061 dev->revision = class & 0xff;
2062 dev->class = class >> 8; /* upper 3 bytes */
2063
2064 if (pci_early_dump)
2065 early_dump_pci_device(dev);
2066
2067 /* Need to have dev->class ready */
2068 dev->cfg_size = pci_cfg_space_size(dev);
2069
2070 /* Need to have dev->cfg_size ready */
2071 set_pcie_thunderbolt(dev);
2072
2073 set_pcie_cxl(dev);
2074
2075 set_pcie_untrusted(dev);
2076
2077 if (pci_is_pcie(dev))
2078 dev->supported_speeds = pcie_get_supported_speeds(dev);
2079
2080 /* "Unknown power state" */
2081 dev->current_state = PCI_UNKNOWN;
2082
2083 /* Early fixups, before probing the BARs */
2084 pci_fixup_device(pci_fixup_early, dev);
2085
2086 pci_set_removable(dev);
2087
2088 pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n",
2089 dev->vendor, dev->device, dev->hdr_type, dev->class,
2090 pci_type_str(dev));
2091
2092 /* Device class may be changed after fixup */
2093 class = dev->class >> 8;
2094
2095 if (dev->non_compliant_bars && !dev->mmio_always_on) {
2096 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2097 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
2098 pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
2099 cmd &= ~PCI_COMMAND_IO;
2100 cmd &= ~PCI_COMMAND_MEMORY;
2101 pci_write_config_word(dev, PCI_COMMAND, cmd);
2102 }
2103 }
2104
2105 dev->broken_intx_masking = pci_intx_mask_broken(dev);
2106
2107 switch (dev->hdr_type) { /* header type */
2108 case PCI_HEADER_TYPE_NORMAL: /* standard header */
2109 if (class == PCI_CLASS_BRIDGE_PCI)
2110 goto bad;
2111 pci_read_irq(dev);
2112 pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
2113
2114 pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
2115
2116 /*
2117 * Do the ugly legacy mode stuff here rather than in broken chip
2118 * quirk code. Legacy mode ATA controllers have fixed
2119 * addresses. These are not always echoed in BAR0-3, and
2120 * BAR0-3 in a few cases contain junk!
2121 */
2122 if (class == PCI_CLASS_STORAGE_IDE) {
2123 u8 progif;
2124 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
2125 if ((progif & 1) == 0) {
2126 region.start = 0x1F0;
2127 region.end = 0x1F7;
2128 res = &dev->resource[0];
2129 res->flags = LEGACY_IO_RESOURCE;
2130 pcibios_bus_to_resource(dev->bus, res, &region);
2131 pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
2132 res);
2133 region.start = 0x3F6;
2134 region.end = 0x3F6;
2135 res = &dev->resource[1];
2136 res->flags = LEGACY_IO_RESOURCE;
2137 pcibios_bus_to_resource(dev->bus, res, &region);
2138 pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
2139 res);
2140 }
2141 if ((progif & 4) == 0) {
2142 region.start = 0x170;
2143 region.end = 0x177;
2144 res = &dev->resource[2];
2145 res->flags = LEGACY_IO_RESOURCE;
2146 pcibios_bus_to_resource(dev->bus, res, &region);
2147 pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
2148 res);
2149 region.start = 0x376;
2150 region.end = 0x376;
2151 res = &dev->resource[3];
2152 res->flags = LEGACY_IO_RESOURCE;
2153 pcibios_bus_to_resource(dev->bus, res, &region);
2154 pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
2155 res);
2156 }
2157 }
2158 break;
2159
2160 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
2161 /*
2162 * The PCI-to-PCI bridge spec requires that a subtractive-decode
2163 * (i.e. transparent) bridge have a programming interface code
2164 * of 0x01.
2165 */
2166 pci_read_irq(dev);
2167 dev->transparent = ((dev->class & 0xff) == 1);
2168 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
2169 pci_read_bridge_windows(dev);
2170 set_pcie_hotplug_bridge(dev);
2171 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
2172 if (pos) {
2173 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
2174 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
2175 }
2176 break;
2177
2178 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
2179 if (class != PCI_CLASS_BRIDGE_CARDBUS)
2180 goto bad;
2181 pci_read_irq(dev);
2182 pci_read_bases(dev, 1, 0);
2183 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
2184 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
2185 break;
2186
2187 default: /* unknown header */
2188 pci_err(dev, "unknown header type %02x, ignoring device\n",
2189 dev->hdr_type);
2190 pci_release_of_node(dev);
2191 return -EIO;
2192
2193 bad:
2194 pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n",
2195 dev->class, dev->hdr_type);
2196 dev->class = PCI_CLASS_NOT_DEFINED << 8;
2197 }
2198
2199 /* We found a fine healthy device, go go go... */
2200 return 0;
2201}
2202
2203static void pci_configure_mps(struct pci_dev *dev)
2204{
2205 struct pci_dev *bridge = pci_upstream_bridge(dev);
2206 int mps, mpss, p_mps, rc;
2207
2208 if (!pci_is_pcie(dev))
2209 return;
2210
2211 /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
2212 if (dev->is_virtfn)
2213 return;
2214
2215 /*
2216 * For Root Complex Integrated Endpoints, program the maximum
2217 * supported value unless limited by the PCIE_BUS_PEER2PEER case.
2218 */
2219 if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
2220 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2221 mps = 128;
2222 else
2223 mps = 128 << dev->pcie_mpss;
2224 rc = pcie_set_mps(dev, mps);
2225 if (rc) {
2226 pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2227 mps);
2228 }
2229 return;
2230 }
2231
2232 if (!bridge || !pci_is_pcie(bridge))
2233 return;
2234
2235 mps = pcie_get_mps(dev);
2236 p_mps = pcie_get_mps(bridge);
2237
2238 if (mps == p_mps)
2239 return;
2240
2241 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
2242 pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2243 mps, pci_name(bridge), p_mps);
2244 return;
2245 }
2246
2247 /*
2248 * Fancier MPS configuration is done later by
2249 * pcie_bus_configure_settings()
2250 */
2251 if (pcie_bus_config != PCIE_BUS_DEFAULT)
2252 return;
2253
2254 mpss = 128 << dev->pcie_mpss;
2255 if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
2256 pcie_set_mps(bridge, mpss);
2257 pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
2258 mpss, p_mps, 128 << bridge->pcie_mpss);
2259 p_mps = pcie_get_mps(bridge);
2260 }
2261
2262 rc = pcie_set_mps(dev, p_mps);
2263 if (rc) {
2264 pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
2265 p_mps);
2266 return;
2267 }
2268
2269 pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
2270 p_mps, mps, mpss);
2271}
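
/*
 * For reference: dev->pcie_mpss is the Max_Payload_Size Supported
 * encoding from PCI_EXP_DEVCAP, so the byte count used above is
 * 128 << pcie_mpss: an encoding of 0 means 128 bytes, 2 means 512
 * bytes, and 5 means the architectural maximum of 4096 bytes.
 */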
2272
2273int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
2274{
2275 struct pci_host_bridge *host;
2276 u32 cap;
2277 u16 ctl;
2278 int ret;
2279
2280 /* PCI_EXP_DEVCTL_EXT_TAG is RsvdP in VFs */
2281 if (!pci_is_pcie(dev) || dev->is_virtfn)
2282 return 0;
2283
2284 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
2285 if (ret)
2286 return 0;
2287
2288 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
2289 return 0;
2290
2291 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
2292 if (ret)
2293 return 0;
2294
2295 host = pci_find_host_bridge(dev->bus);
2296 if (!host)
2297 return 0;
2298
2299 /*
2300 * If some device in the hierarchy doesn't handle Extended Tags
2301 * correctly, make sure they're disabled.
2302 */
2303 if (host->no_ext_tags) {
2304 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
2305 pci_info(dev, "disabling Extended Tags\n");
2306 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
2307 PCI_EXP_DEVCTL_EXT_TAG);
2308 }
2309 return 0;
2310 }
2311
2312 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
2313 pci_info(dev, "enabling Extended Tags\n");
2314 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
2315 PCI_EXP_DEVCTL_EXT_TAG);
2316 }
2317 return 0;
2318}
2319
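/*
 * Record status bits from the Device 3 Extended Capability, when the
 * device implements one, so later code can test them without
 * re-reading config space.
 */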
2320static void pci_dev3_init(struct pci_dev *pdev)
2321{
2322 u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
2323 u32 val = 0;
2324
2325 if (!cap)
2326 return;
2327 pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
2328 pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
2329}
2330
2331/**
2332 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
2333 * @dev: PCI device to query
2334 *
2335 * Returns true if the device has enabled relaxed ordering attribute.
2336 */
2337bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
2338{
2339 u16 v;
2340
2341 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
2342
2343 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
2344}
2345EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
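
/*
 * Illustrative sketch (not part of this file): a driver that can emit
 * Relaxed Ordering TLPs is expected to check this helper first. The
 * example_priv structure and its flag are hypothetical.
 */
#if 0
static void example_check_relaxed_ordering(struct pci_dev *pdev,
					   struct example_priv *priv)
{
	if (pcie_relaxed_ordering_enabled(pdev))
		priv->use_relaxed_ordering = true;
}
#endif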
2346
2347static void pci_configure_relaxed_ordering(struct pci_dev *dev)
2348{
2349 struct pci_dev *root;
2350
2351 /* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
2352 if (dev->is_virtfn)
2353 return;
2354
2355 if (!pcie_relaxed_ordering_enabled(dev))
2356 return;
2357
2358 /*
2359 * For now, we only deal with Relaxed Ordering issues with Root
2360 * Ports. Peer-to-Peer DMA is another can of worms.
2361 */
2362 root = pcie_find_root_port(dev);
2363 if (!root)
2364 return;
2365
2366 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
2367 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
2368 PCI_EXP_DEVCTL_RELAX_EN);
2369 pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n");
2370 }
2371}
2372
2373static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2374{
2375 struct pci_dev *bridge;
2376 unsigned int eetlp_max;
2377 int pcie_type;
2378 u32 cap;
2379
2380 if (!pci_is_pcie(dev))
2381 return;
2382
2383 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
2384 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
2385 return;
2386
2387 pcie_type = pci_pcie_type(dev);
2388
2389 eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
2390 /* 00b means 4 */
2391 eetlp_max = eetlp_max ?: 4;
2392
2393 if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2394 pcie_type == PCI_EXP_TYPE_RC_END)
2395 dev->eetlp_prefix_max = eetlp_max;
2396 else {
2397 bridge = pci_upstream_bridge(dev);
2398 if (bridge && bridge->eetlp_prefix_max)
2399 dev->eetlp_prefix_max = eetlp_max;
2400 }
2401}
2402
2403static void pci_configure_serr(struct pci_dev *dev)
2404{
2405 u16 control;
2406
2407 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
2408
2409 /*
2410 * A bridge will not forward ERR_ messages coming from an
2411 * endpoint unless SERR# forwarding is enabled.
2412 */
2413 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control);
2414 if (!(control & PCI_BRIDGE_CTL_SERR)) {
2415 control |= PCI_BRIDGE_CTL_SERR;
2416 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control);
2417 }
2418 }
2419}
2420
2421static void pci_configure_rcb(struct pci_dev *dev)
2422{
2423 struct pci_dev *rp;
2424 u16 rp_lnkctl;
2425
2426 /*
2427 * Per PCIe r7.0, sec 7.5.3.7, RCB is only meaningful in Root Ports
2428 * (where it is read-only), Endpoints, and Bridges. It may only be
2429 * set for Endpoints and Bridges if it is set in the Root Port. For
2430 * Endpoints, it is 'RsvdP' for Virtual Functions.
2431 */
2432 if (!pci_is_pcie(dev) ||
2433 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
2434 pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM ||
2435 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
2436 pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC ||
2437 dev->is_virtfn)
2438 return;
2439
2440 /* Root Port often not visible to virtualized guests */
2441 rp = pcie_find_root_port(dev);
2442 if (!rp)
2443 return;
2444
2445 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &rp_lnkctl);
2446 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
2447 PCI_EXP_LNKCTL_RCB,
2448 (rp_lnkctl & PCI_EXP_LNKCTL_RCB) ?
2449 PCI_EXP_LNKCTL_RCB : 0);
2450}
2451
2452static void pci_configure_device(struct pci_dev *dev)
2453{
2454 pci_configure_mps(dev);
2455 pci_configure_extended_tags(dev, NULL);
2456 pci_configure_relaxed_ordering(dev);
2457 pci_configure_ltr(dev);
2458 pci_configure_aspm_l1ss(dev);
2459 pci_configure_eetlp_prefix(dev);
2460 pci_configure_serr(dev);
2461 pci_configure_rcb(dev);
2462
2463 pci_acpi_program_hp_params(dev);
2464}
2465
2466static void pci_release_capabilities(struct pci_dev *dev)
2467{
2468 pci_aer_exit(dev);
2469 pci_rcec_exit(dev);
2470 pci_iov_release(dev);
2471 pci_free_cap_save_buffers(dev);
2472}
2473
2474/**
2475 * pci_release_dev - Free a PCI device structure when all users of it are
2476 * finished
2477 * @dev: device that's been disconnected
2478 *
2479 * Will be called only by the device core when all users of this PCI device are
2480 * done.
2481 */
2482static void pci_release_dev(struct device *dev)
2483{
2484 struct pci_dev *pci_dev;
2485
2486 pci_dev = to_pci_dev(dev);
2487 pci_release_capabilities(pci_dev);
2488 pci_release_of_node(pci_dev);
2489 pcibios_release_device(pci_dev);
2490 pci_bus_put(pci_dev->bus);
2491 kfree(pci_dev->driver_override);
2492 bitmap_free(pci_dev->dma_alias_mask);
2493 dev_dbg(dev, "device released\n");
2494 kfree(pci_dev);
2495}
2496
2497static const struct device_type pci_dev_type = {
2498 .groups = pci_dev_attr_groups,
2499};
2500
2501struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
2502{
2503 struct pci_dev *dev;
2504
2505 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
2506 if (!dev)
2507 return NULL;
2508
2509 INIT_LIST_HEAD(&dev->bus_list);
2510 dev->dev.type = &pci_dev_type;
2511 dev->bus = pci_bus_get(bus);
2512 dev->driver_exclusive_resource = (struct resource) {
2513 .name = "PCI Exclusive",
2514 .start = 0,
2515 .end = -1,
2516 };
2517
2518 spin_lock_init(&dev->pcie_cap_lock);
2519#ifdef CONFIG_PCI_MSI
2520 raw_spin_lock_init(&dev->msi_lock);
2521#endif
2522 return dev;
2523}
2524EXPORT_SYMBOL(pci_alloc_dev);
2525
2526static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
2527 int timeout)
2528{
2529 int delay = 1;
2530
2531 if (!pci_bus_rrs_vendor_id(*l))
2532 return true; /* not a Configuration RRS completion */
2533
2534 if (!timeout)
2535 return false; /* RRS, but caller doesn't want to wait */
2536
2537 /*
2538 * We got the reserved Vendor ID that indicates a completion with
2539 * Configuration Request Retry Status (RRS). Retry until we get a
2540 * valid Vendor ID or we time out.
2541 */
2542 while (pci_bus_rrs_vendor_id(*l)) {
2543 if (delay > timeout) {
2544 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2545 pci_domain_nr(bus), bus->number,
2546 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2547
2548 return false;
2549 }
2550 if (delay >= 1000)
2551 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2552 pci_domain_nr(bus), bus->number,
2553 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2554
2555 msleep(delay);
2556 delay *= 2;
2557
2558 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2559 return false;
2560 }
2561
2562 if (delay >= 1000)
2563 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2564 pci_domain_nr(bus), bus->number,
2565 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2566
2567 return true;
2568}
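
/*
 * Note the delay above doubles every iteration (1, 2, 4, ... ms), so a
 * device that needs about a second to exit RRS is polled roughly ten
 * times, and the total time slept never exceeds about twice the
 * requested timeout.
 */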
2569
2570bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2571 int timeout)
2572{
2573 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2574 return false;
2575
2576 /* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */
2577 if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 ||
2578 *l == 0x0000ffff || *l == 0xffff0000)
2579 return false;
2580
2581 if (pci_bus_rrs_vendor_id(*l))
2582 return pci_bus_wait_rrs(bus, devfn, l, timeout);
2583
2584 return true;
2585}
2586
2587bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2588 int timeout)
2589{
2590 return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
2591}
2592EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
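
/*
 * Note: pci_scan_device() below passes a 60 second timeout so that
 * devices which keep responding with Configuration RRS for a long time
 * after reset are still enumerated.
 */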
2593
2594/*
2595 * Read the config data for a PCI device, sanity-check it,
2596 * and fill in the dev structure.
2597 */
2598static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2599{
2600 struct pci_dev *dev;
2601 u32 l;
2602
2603 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2604 return NULL;
2605
2606 dev = pci_alloc_dev(bus);
2607 if (!dev)
2608 return NULL;
2609
2610 dev->devfn = devfn;
2611 dev->vendor = l & 0xffff;
2612 dev->device = (l >> 16) & 0xffff;
2613
2614 if (pci_setup_device(dev)) {
2615 pci_bus_put(dev->bus);
2616 kfree(dev);
2617 return NULL;
2618 }
2619
2620 return dev;
2621}
2622
2623void pcie_report_downtraining(struct pci_dev *dev)
2624{
2625 if (!pci_is_pcie(dev))
2626 return;
2627
2628 /* Look from the device up to avoid downstream ports with no devices */
2629 if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) &&
2630 (pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) &&
2631 (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM))
2632 return;
2633
2634 /* Multi-function PCIe devices share the same link/status */
2635 if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn)
2636 return;
2637
2638 /* Print link status only if the device is constrained by the fabric */
2639 __pcie_print_link_status(dev, false);
2640}
2641
2642static void pci_imm_ready_init(struct pci_dev *dev)
2643{
2644 u16 status;
2645
2646 pci_read_config_word(dev, PCI_STATUS, &status);
2647 if (status & PCI_STATUS_IMM_READY)
2648 dev->imm_ready = 1;
2649}
2650
2651static void pci_init_capabilities(struct pci_dev *dev)
2652{
2653 pci_ea_init(dev); /* Enhanced Allocation */
2654 pci_msi_init(dev); /* Disable MSI */
2655 pci_msix_init(dev); /* Disable MSI-X */
2656
2657 /* Buffers for saving PCIe and PCI-X capabilities */
2658 pci_allocate_cap_save_buffers(dev);
2659
2660 pci_imm_ready_init(dev); /* Immediate Readiness */
2661 pci_pm_init(dev); /* Power Management */
2662 pci_vpd_init(dev); /* Vital Product Data */
2663 pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
2664 pci_iov_init(dev); /* Single Root I/O Virtualization */
2665 pci_ats_init(dev); /* Address Translation Services */
2666 pci_pri_init(dev); /* Page Request Interface */
2667 pci_pasid_init(dev); /* Process Address Space ID */
2668 pci_acs_init(dev); /* Access Control Services */
2669 pci_ptm_init(dev); /* Precision Time Measurement */
2670 pci_aer_init(dev); /* Advanced Error Reporting */
2671 pci_dpc_init(dev); /* Downstream Port Containment */
2672 pci_rcec_init(dev); /* Root Complex Event Collector */
2673 pci_doe_init(dev); /* Data Object Exchange */
2674 pci_tph_init(dev); /* TLP Processing Hints */
2675 pci_rebar_init(dev); /* Resizable BAR */
2676 pci_dev3_init(dev); /* Device 3 capabilities */
2677 pci_ide_init(dev); /* Link Integrity and Data Encryption */
2678
2679 pcie_report_downtraining(dev);
2680 pci_init_reset_methods(dev);
2681}
2682
2683/*
2684 * This is the equivalent of pci_host_bridge_msi_domain() that acts on
2685 * devices. Firmware interfaces that can select the MSI domain on a
2686 * per-device basis should be called from here.
2687 */
2688static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2689{
2690 struct irq_domain *d;
2691
2692 /*
2693 * If a domain has been set through the pcibios_device_add()
2694 * callback, then this is the one (platform code knows best).
2695 */
2696 d = dev_get_msi_domain(&dev->dev);
2697 if (d)
2698 return d;
2699
2700 /*
2701 * Let's see if we have a firmware interface able to provide
2702 * the domain.
2703 */
2704 d = pci_msi_get_device_domain(dev);
2705 if (d)
2706 return d;
2707
2708 return NULL;
2709}
2710
2711static void pci_set_msi_domain(struct pci_dev *dev)
2712{
2713 struct irq_domain *d;
2714
2715 /*
2716 * If the platform or firmware interfaces cannot supply a
2717 * device-specific MSI domain, then inherit the default domain
2718 * from the host bridge itself.
2719 */
2720 d = pci_dev_msi_domain(dev);
2721 if (!d)
2722 d = dev_get_msi_domain(&dev->bus->dev);
2723
2724 dev_set_msi_domain(&dev->dev, d);
2725}
2726
2727void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2728{
2729 int ret;
2730
2731 pci_configure_device(dev);
2732
2733 device_initialize(&dev->dev);
2734 dev->dev.release = pci_release_dev;
2735
2736 set_dev_node(&dev->dev, pcibus_to_node(bus));
2737 dev->dev.dma_mask = &dev->dma_mask;
2738 dev->dev.dma_parms = &dev->dma_parms;
2739 dev->dev.coherent_dma_mask = 0xffffffffull;
2740
2741 dma_set_max_seg_size(&dev->dev, 65536);
2742 dma_set_seg_boundary(&dev->dev, 0xffffffff);
2743
2744 pcie_failed_link_retrain(dev);
2745
2746 /* Fix up broken headers */
2747 pci_fixup_device(pci_fixup_header, dev);
2748
2749 pci_reassigndev_resource_alignment(dev);
2750
2751 pci_init_capabilities(dev);
2752
2753 /*
2754 * Add the device to our list of discovered devices
2755 * and the bus list for fixup functions, etc.
2756 */
2757 down_write(&pci_bus_sem);
2758 list_add_tail(&dev->bus_list, &bus->devices);
2759 up_write(&pci_bus_sem);
2760
2761 ret = pcibios_device_add(dev);
2762 WARN_ON(ret < 0);
2763
2764 /* Set up MSI IRQ domain */
2765 pci_set_msi_domain(dev);
2766
2767 /* Notifier could use PCI capabilities */
2768 ret = device_add(&dev->dev);
2769 WARN_ON(ret < 0);
2770
2771 /* Establish pdev->tsm for newly added (e.g. new SR-IOV VFs) */
2772 pci_tsm_init(dev);
2773
2774 pci_npem_create(dev);
2775
2776 pci_doe_sysfs_init(dev);
2777}
2778
2779struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2780{
2781 struct pci_dev *dev;
2782
2783 dev = pci_get_slot(bus, devfn);
2784 if (dev) {
2785 pci_dev_put(dev);
2786 return dev;
2787 }
2788
2789 dev = pci_scan_device(bus, devfn);
2790 if (!dev)
2791 return NULL;
2792
2793 pci_device_add(dev, bus);
2794
2795 return dev;
2796}
2797EXPORT_SYMBOL(pci_scan_single_device);
2798
2799static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
2800{
2801 int pos;
2802 u16 cap = 0;
2803 unsigned int next_fn;
2804
2805 if (!dev)
2806 return -ENODEV;
2807
2808 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2809 if (!pos)
2810 return -ENODEV;
2811
2812 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2813 next_fn = PCI_ARI_CAP_NFN(cap);
2814 if (next_fn <= fn)
2815 return -ENODEV; /* protect against malformed list */
2816
2817 return next_fn;
2818}
2819
2820static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
2821{
2822 if (pci_ari_enabled(bus))
2823 return next_ari_fn(bus, dev, fn);
2824
2825 if (fn >= 7)
2826 return -ENODEV;
2827 /* Only multifunction devices may have more functions */
2828 if (dev && !dev->multifunction)
2829 return -ENODEV;
2830
2831 return fn + 1;
2832}
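
/*
 * For example, without ARI a multifunction device is probed at
 * functions 0 through 7; with ARI enabled the walk instead follows the
 * Next Function Number links from each function's ARI capability.
 */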
2833
2834static int only_one_child(struct pci_bus *bus)
2835{
2836 struct pci_dev *bridge = bus->self;
2837
2838 /*
2839 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2840 * we scan for all possible devices, not just Device 0.
2841 */
2842 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2843 return 0;
2844
2845 /*
2846 * A PCIe Downstream Port normally leads to a Link with only Device
2847 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2848 * only for Device 0 in that situation.
2849 */
2850 if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge))
2851 return 1;
2852
2853 return 0;
2854}
2855
2856/**
2857 * pci_scan_slot - Scan a PCI slot on a bus for devices
2858 * @bus: PCI bus to scan
2859 * @devfn: slot number to scan (must have zero function)
2860 *
2861 * Scan a PCI slot on the specified PCI bus for devices, adding
2862 * discovered devices to the @bus->devices list. New devices
2863 * will not have is_added set.
2864 *
2865 * Returns the number of new devices found.
2866 */
2867int pci_scan_slot(struct pci_bus *bus, int devfn)
2868{
2869 struct pci_dev *dev;
2870 int fn = 0, nr = 0;
2871
2872 if (only_one_child(bus) && (devfn > 0))
2873 return 0; /* Already scanned the entire slot */
2874
2875 do {
2876 dev = pci_scan_single_device(bus, devfn + fn);
2877 if (dev) {
2878 if (!pci_dev_is_added(dev))
2879 nr++;
2880 if (fn > 0)
2881 dev->multifunction = 1;
2882 } else if (fn == 0) {
2883 /*
2884 * Function 0 is required unless we are running on
2885 * a hypervisor that passes through individual PCI
2886 * functions.
2887 */
2888 if (!hypervisor_isolated_pci_functions())
2889 break;
2890 }
2891 fn = next_fn(bus, dev, fn);
2892 } while (fn >= 0);
2893
2894 /* Only one slot has a PCIe device */
2895 if (bus->self && nr)
2896 pcie_aspm_init_link_state(bus->self);
2897
2898 return nr;
2899}
2900EXPORT_SYMBOL(pci_scan_slot);
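
/*
 * Illustrative sketch (not part of this file): iterating over the 32
 * device numbers of a conventional bus is essentially what
 * pci_scan_child_bus_extend() below does. The helper name is
 * hypothetical.
 */
#if 0
static void example_scan_all_slots(struct pci_bus *bus)
{
	unsigned int devnr;

	for (devnr = 0; devnr < 32; devnr++)
		pci_scan_slot(bus, PCI_DEVFN(devnr, 0));
}
#endif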
2901
2902static int pcie_find_smpss(struct pci_dev *dev, void *data)
2903{
2904 u8 *smpss = data;
2905
2906 if (!pci_is_pcie(dev))
2907 return 0;
2908
2909 /*
2910 * We don't have a way to change MPS settings on devices that have
2911 * drivers attached. A hot-added device might support only the minimum
2912 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2913 * where devices may be hot-added, we limit the fabric MPS to 128 so
2914 * hot-added devices will work correctly.
2915 *
2916 * However, if we hot-add a device to a slot directly below a Root
2917 * Port, it's impossible for there to be other existing devices below
2918 * the port. We don't limit the MPS in this case because we can
2919 * reconfigure MPS on both the Root Port and the hot-added device,
2920 * and there are no other devices involved.
2921 *
2922 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2923 */
2924 if (dev->is_hotplug_bridge &&
2925 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2926 *smpss = 0;
2927
2928 if (*smpss > dev->pcie_mpss)
2929 *smpss = dev->pcie_mpss;
2930
2931 return 0;
2932}
2933
2934static void pcie_write_mps(struct pci_dev *dev, int mps)
2935{
2936 int rc;
2937
2938 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2939 mps = 128 << dev->pcie_mpss;
2940
2941 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2942 dev->bus->self)
2943
2944 /*
2945 * For "Performance", the assumption is made that
2946 * downstream communication will never be larger than
2947 * the MRRS. So, the MPS only needs to be configured
2948 * for the upstream communication. This being the case,
2949 * walk from the top down and set the MPS of the child
2950 * to that of the parent bus.
2951 *
2952 * Configure the device MPS with the smaller of the
2953 * device MPSS or the bridge MPS (which is assumed to be
2954 * properly configured at this point to the largest
2955 * allowable MPS based on its parent bus).
2956 */
2957 mps = min(mps, pcie_get_mps(dev->bus->self));
2958 }
2959
2960 rc = pcie_set_mps(dev, mps);
2961 if (rc)
2962 pci_err(dev, "Failed to set the MPS\n");
2963}
2964
2965static void pcie_write_mrrs(struct pci_dev *dev)
2966{
2967 int rc, mrrs;
2968
2969 /*
2970 * In the "safe" case, do not configure the MRRS. There appear to be
2971 * issues with setting MRRS to 0 on a number of devices.
2972 */
2973 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2974 return;
2975
2976 /*
2977 * For max performance, the MRRS must be set to the largest supported
2978 * value. However, it cannot be configured larger than the MPS the
2979 * device or the bus can support. This should already be properly
2980 * configured by a prior call to pcie_write_mps().
2981 */
2982 mrrs = pcie_get_mps(dev);
2983
2984 /*
2985 * MRRS is a R/W register. Invalid values can be written, but a
2986 * subsequent read will verify if the value is acceptable or not.
2987 * If the MRRS value provided is not acceptable (e.g., too large),
2988 * shrink the value until it is acceptable to the HW.
2989 */
2990 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2991 rc = pcie_set_readrq(dev, mrrs);
2992 if (!rc)
2993 break;
2994
2995 pci_warn(dev, "Failed to set the MRRS\n");
2996 mrrs /= 2;
2997 }
2998
2999 if (mrrs < 128)
3000 pci_err(dev, "Unable to set MRRS to a safe value; if problems are experienced, try running with pci=pcie_bus_safe\n");
3001}
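
/*
 * For example, if the MPS-derived starting value of 4096 is rejected by
 * the device, the loop above warns and retries with 2048, 1024, ...
 * down to 128; only if even 128 cannot be set does the error message
 * suggest falling back to pci=pcie_bus_safe.
 */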
3002
3003static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
3004{
3005 int mps, orig_mps;
3006
3007 if (!pci_is_pcie(dev))
3008 return 0;
3009
3010 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
3011 pcie_bus_config == PCIE_BUS_DEFAULT)
3012 return 0;
3013
3014 mps = 128 << *(u8 *)data;
3015 orig_mps = pcie_get_mps(dev);
3016
3017 pcie_write_mps(dev, mps);
3018 pcie_write_mrrs(dev);
3019
3020 pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
3021 pcie_get_mps(dev), 128 << dev->pcie_mpss,
3022 orig_mps, pcie_get_readrq(dev));
3023
3024 return 0;
3025}
3026
3027/*
3028 * pcie_bus_configure_settings() requires that pci_walk_bus() work in a top-down,
3029 * parents then children fashion. If this changes, then this code will not
3030 * work as designed.
3031 */
3032void pcie_bus_configure_settings(struct pci_bus *bus)
3033{
3034 u8 smpss = 0;
3035
3036 if (!bus->self)
3037 return;
3038
3039 if (!pci_is_pcie(bus->self))
3040 return;
3041
3042 /*
3043 * FIXME - Peer to peer DMA is possible, though the endpoint would need
3044 * to be aware of the MPS of the destination. To work around this,
3045 * simply force the MPS of the entire system to the smallest possible.
3046 */
3047 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
3048 smpss = 0;
3049
3050 if (pcie_bus_config == PCIE_BUS_SAFE) {
3051 smpss = bus->self->pcie_mpss;
3052
3053 pcie_find_smpss(bus->self, &smpss);
3054 pci_walk_bus(bus, pcie_find_smpss, &smpss);
3055 }
3056
3057 pcie_bus_configure_set(bus->self, &smpss);
3058 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
3059}
3060EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
3061
3062/*
3063 * Called after each bus is probed, but before its children are examined. This
3064 * is marked as __weak because multiple architectures define it.
3065 */
3066void __weak pcibios_fixup_bus(struct pci_bus *bus)
3067{
3068 /* nothing to do, expected to be removed in the future */
3069}
3070
3071/**
3072 * pci_scan_child_bus_extend() - Scan devices below a bus
3073 * @bus: Bus to scan for devices
3074 * @available_buses: Total number of buses available (%0 does not try to
3075 * extend beyond the minimal)
3076 *
3077 * Scans devices below @bus including subordinate buses. Returns the
3078 * new subordinate bus number covering all the found devices. Passing
3079 * @available_buses causes the remaining bus space to be distributed
3080 * equally between hotplug-capable bridges to allow future extension of the
3081 * hierarchy.
3082 */
3083static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
3084 unsigned int available_buses)
3085{
3086 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
3087 unsigned int start = bus->busn_res.start;
3088 unsigned int devnr, cmax, max = start;
3089 struct pci_dev *dev;
3090
3091 dev_dbg(&bus->dev, "scanning bus\n");
3092
3093 /* Go find them, Rover! */
3094 for (devnr = 0; devnr < PCI_MAX_NR_DEVS; devnr++)
3095 pci_scan_slot(bus, PCI_DEVFN(devnr, 0));
3096
3097 /* Reserve buses for SR-IOV capability */
3098 used_buses = pci_iov_bus_range(bus);
3099 max += used_buses;
3100
3101 /*
3102 * After performing arch-dependent fixup of the bus, look behind
3103 * all PCI-to-PCI bridges on this bus.
3104 */
3105 if (!bus->is_added) {
3106 dev_dbg(&bus->dev, "fixups for bus\n");
3107 pcibios_fixup_bus(bus);
3108 bus->is_added = 1;
3109 }
3110
3111 /*
3112 * Calculate how many hotplug bridges and normal bridges there
3113 * are on this bus. We will distribute the additional available
3114 * buses between hotplug bridges.
3115 */
3116 for_each_pci_bridge(dev, bus) {
3117 if (dev->is_hotplug_bridge)
3118 hotplug_bridges++;
3119 else
3120 normal_bridges++;
3121 }
3122
3123 /*
3124 * Scan bridges that are already configured. We don't touch them
3125 * unless they are misconfigured (which will be done in the second
3126 * scan below).
3127 */
3128 for_each_pci_bridge(dev, bus) {
3129 cmax = max;
3130 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
3131
3132 /*
3133 * Reserve one bus for each bridge now to avoid extending
3134 * hotplug bridges too much during the second scan below.
3135 */
3136 used_buses++;
3137 if (max - cmax > 1)
3138 used_buses += max - cmax - 1;
3139 }
3140
3141 /* Scan bridges that need to be reconfigured */
3142 for_each_pci_bridge(dev, bus) {
3143 unsigned int buses = 0;
3144
3145 if (!hotplug_bridges && normal_bridges == 1) {
3146 /*
3147 * There is only one bridge on the bus (upstream
3148 * port) so it gets all available buses which it
3149 * can then distribute to the possible hotplug
3150 * bridges below.
3151 */
3152 buses = available_buses;
3153 } else if (dev->is_hotplug_bridge) {
3154 /*
3155 * Distribute the extra buses between hotplug
3156 * bridges if any.
3157 */
3158 buses = available_buses / hotplug_bridges;
3159 buses = min(buses, available_buses - used_buses + 1);
3160 }
3161
3162 cmax = max;
3163 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
3164 /* One bus is already accounted so don't add it again */
3165 if (max - cmax > 1)
3166 used_buses += max - cmax - 1;
3167 }
3168
3169 /*
3170 * Make sure a hotplug bridge has at least the minimum requested
3171 * number of buses but allow it to grow up to the maximum available
3172 * bus number if there is room.
3173 */
3174 if (bus->self && bus->self->is_hotplug_bridge) {
3175 used_buses = max(available_buses, pci_hotplug_bus_size - 1);
3176 if (max - start < used_buses) {
3177 max = start + used_buses;
3178
3179 /* Do not allocate more buses than we have room left */
3180 if (max > bus->busn_res.end)
3181 max = bus->busn_res.end;
3182
3183 dev_dbg(&bus->dev, "%pR extended by %#02x\n",
3184 &bus->busn_res, max - start);
3185 }
3186 }
3187
3188 /*
3189 * We've scanned the bus and so we know all about what's on
3190 * the other side of any bridges that may be on this bus plus
3191 * any devices.
3192 *
3193 * Return how far we've got finding sub-buses.
3194 */
3195 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
3196 return max;
3197}
3198
3199/**
3200 * pci_scan_child_bus() - Scan devices below a bus
3201 * @bus: Bus to scan for devices
3202 *
3203 * Scans devices below @bus including subordinate buses. Returns the
3204 * new subordinate bus number covering all the found devices.
3205 */
3206unsigned int pci_scan_child_bus(struct pci_bus *bus)
3207{
3208 return pci_scan_child_bus_extend(bus, 0);
3209}
3210EXPORT_SYMBOL_GPL(pci_scan_child_bus);
3211
3212/**
3213 * pcibios_root_bridge_prepare - Platform-specific host bridge setup
3214 * @bridge: Host bridge to set up
3215 *
3216 * Default empty implementation. Replace with an architecture-specific setup
3217 * routine, if necessary.
3218 */
3219int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
3220{
3221 return 0;
3222}
3223
3224void __weak pcibios_add_bus(struct pci_bus *bus)
3225{
3226}
3227
3228void __weak pcibios_remove_bus(struct pci_bus *bus)
3229{
3230}
3231
3232struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
3233 struct pci_ops *ops, void *sysdata, struct list_head *resources)
3234{
3235 int error;
3236 struct pci_host_bridge *bridge;
3237
3238 bridge = pci_alloc_host_bridge(0);
3239 if (!bridge)
3240 return NULL;
3241
3242 bridge->dev.parent = parent;
3243
3244 list_splice_init(resources, &bridge->windows);
3245 bridge->sysdata = sysdata;
3246 bridge->busnr = bus;
3247 bridge->ops = ops;
3248
3249 error = pci_register_host_bridge(bridge);
3250 if (error < 0)
3251 goto err_out;
3252
3253 return bridge->bus;
3254
3255err_out:
3256 put_device(&bridge->dev);
3257 return NULL;
3258}
3259EXPORT_SYMBOL_GPL(pci_create_root_bus);
3260
3261int pci_host_probe(struct pci_host_bridge *bridge)
3262{
3263 struct pci_bus *bus, *child;
3264 int ret;
3265
3266 pci_lock_rescan_remove();
3267 ret = pci_scan_root_bus_bridge(bridge);
3268 pci_unlock_rescan_remove();
3269 if (ret < 0) {
3270 dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
3271 return ret;
3272 }
3273
3274 bus = bridge->bus;
3275
3276 /* If we must preserve the resource configuration, claim now */
3277 if (bridge->preserve_config)
3278 pci_bus_claim_resources(bus);
3279
3280 /*
3281 * Assign whatever was left unassigned. If we didn't claim above,
3282 * this will reassign everything.
3283 */
3284 pci_assign_unassigned_root_bus_resources(bus);
3285
3286 list_for_each_entry(child, &bus->children, node)
3287 pcie_bus_configure_settings(child);
3288
3289 pci_lock_rescan_remove();
3290 pci_bus_add_devices(bus);
3291 pci_unlock_rescan_remove();
3292
3293 /*
3294 * Ensure pm_runtime_enable() is called for the controller drivers
3295 * before calling pci_host_probe(). The PM framework expects that
3296 * if the parent device supports runtime PM, it will be enabled
3297 * before child runtime PM is enabled.
3298 */
3299 pm_runtime_set_active(&bridge->dev);
3300 pm_runtime_no_callbacks(&bridge->dev);
3301 devm_pm_runtime_enable(&bridge->dev);
3302
3303 return 0;
3304}
3305EXPORT_SYMBOL_GPL(pci_host_probe);
3306
3307int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
3308{
3309 struct resource *res = &b->busn_res;
3310 struct resource *parent_res, *conflict;
3311
3312 res->start = bus;
3313 res->end = bus_max;
3314 res->flags = IORESOURCE_BUS;
3315
3316 if (!pci_is_root_bus(b))
3317 parent_res = &b->parent->busn_res;
3318 else {
3319 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
3320 res->flags |= IORESOURCE_PCI_FIXED;
3321 }
3322
3323 conflict = request_resource_conflict(parent_res, res);
3324
3325 if (conflict)
3326 dev_info(&b->dev,
3327 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
3328 res, pci_is_root_bus(b) ? "domain " : "",
3329 parent_res, conflict->name, conflict);
3330
3331 return conflict == NULL;
3332}
3333
3334int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
3335{
3336 struct resource *res = &b->busn_res;
3337 struct resource old_res = *res;
3338 resource_size_t size;
3339 int ret;
3340
3341 if (res->start > bus_max)
3342 return -EINVAL;
3343
3344 size = bus_max - res->start + 1;
3345 ret = adjust_resource(res, res->start, size);
3346 dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n",
3347 &old_res, ret ? "cannot be" : "is", bus_max);
3348
3349 if (!ret && !res->parent)
3350 pci_bus_insert_busn_res(b, res->start, res->end);
3351
3352 return ret;
3353}
3354
3355void pci_bus_release_busn_res(struct pci_bus *b)
3356{
3357 struct resource *res = &b->busn_res;
3358 int ret;
3359
3360 if (!res->flags || !res->parent)
3361 return;
3362
3363 ret = release_resource(res);
3364 dev_info(&b->dev, "busn_res: %pR %s released\n",
3365 res, ret ? "cannot be" : "is");
3366}
3367
3368int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
3369{
3370 struct resource_entry *window;
3371 bool found = false;
3372 struct pci_bus *b;
3373 int max, bus, ret;
3374
3375 if (!bridge)
3376 return -EINVAL;
3377
3378 resource_list_for_each_entry(window, &bridge->windows)
3379 if (window->res->flags & IORESOURCE_BUS) {
3380 bridge->busnr = window->res->start;
3381 found = true;
3382 break;
3383 }
3384
3385 ret = pci_register_host_bridge(bridge);
3386 if (ret < 0)
3387 return ret;
3388
3389 b = bridge->bus;
3390 bus = bridge->busnr;
3391
3392 if (!found) {
3393 dev_info(&b->dev,
3394 "No busn resource found for root bus, will use [bus %02x-ff]\n",
3395 bus);
3396 pci_bus_insert_busn_res(b, bus, 255);
3397 }
3398
3399 max = pci_scan_child_bus(b);
3400
3401 if (!found)
3402 pci_bus_update_busn_res_end(b, max);
3403
3404 return 0;
3405}
3406EXPORT_SYMBOL(pci_scan_root_bus_bridge);
3407
3408struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
3409 struct pci_ops *ops, void *sysdata, struct list_head *resources)
3410{
3411 struct resource_entry *window;
3412 bool found = false;
3413 struct pci_bus *b;
3414 int max;
3415
3416 resource_list_for_each_entry(window, resources)
3417 if (window->res->flags & IORESOURCE_BUS) {
3418 found = true;
3419 break;
3420 }
3421
3422 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
3423 if (!b)
3424 return NULL;
3425
3426 if (!found) {
3427 dev_info(&b->dev,
3428 "No busn resource found for root bus, will use [bus %02x-ff]\n",
3429 bus);
3430 pci_bus_insert_busn_res(b, bus, 255);
3431 }
3432
3433 max = pci_scan_child_bus(b);
3434
3435 if (!found)
3436 pci_bus_update_busn_res_end(b, max);
3437
3438 return b;
3439}
3440EXPORT_SYMBOL(pci_scan_root_bus);
3441
3442struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
3443 void *sysdata)
3444{
3445 LIST_HEAD(resources);
3446 struct pci_bus *b;
3447
3448 pci_add_resource(&resources, &ioport_resource);
3449 pci_add_resource(&resources, &iomem_resource);
3450 pci_add_resource(&resources, &busn_resource);
3451 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
3452 if (b) {
3453 pci_scan_child_bus(b);
3454 } else {
3455 pci_free_resource_list(&resources);
3456 }
3457 return b;
3458}
3459EXPORT_SYMBOL(pci_scan_bus);
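
/*
 * Illustrative sketch (not part of this file): a legacy single-domain
 * platform could bring up bus 0 like this. my_platform_pci_ops is
 * hypothetical and must be supplied by the platform.
 */
#if 0
static void example_legacy_probe(void)
{
	struct pci_bus *b = pci_scan_bus(0, &my_platform_pci_ops, NULL);

	if (b)
		pci_bus_add_devices(b);
}
#endif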
3460
3461/**
3462 * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices
3463 * @bridge: PCI bridge for the bus to scan
3464 *
3465 * Scan a PCI bus and child buses for new devices, add them,
3466 * and enable them, resizing bridge MMIO/IO resources if necessary
3467 * and possible. The caller must ensure the child devices are already
3468 * removed for resizing to occur.
3469 *
3470 * Returns the max subordinate bus number discovered.
3471 */
3472unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
3473{
3474 unsigned int max;
3475 struct pci_bus *bus = bridge->subordinate;
3476
3477 max = pci_scan_child_bus(bus);
3478
3479 pci_assign_unassigned_bridge_resources(bridge);
3480
3481 pci_bus_add_devices(bus);
3482
3483 return max;
3484}
3485
3486/**
3487 * pci_rescan_bus - Scan a PCI bus for devices
3488 * @bus: PCI bus to scan
3489 *
3490 * Scan a PCI bus and child buses for new devices, add them,
3491 * and enable them.
3492 *
3493 * Returns the max subordinate bus number discovered.
3494 */
3495unsigned int pci_rescan_bus(struct pci_bus *bus)
3496{
3497 unsigned int max;
3498
3499 max = pci_scan_child_bus(bus);
3500 pci_assign_unassigned_bus_resources(bus);
3501 pci_bus_add_devices(bus);
3502
3503 return max;
3504}
3505EXPORT_SYMBOL_GPL(pci_rescan_bus);
3506
3507/*
3508 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
3509 * routines should always be executed under this mutex.
3510 */
3511DEFINE_MUTEX(pci_rescan_remove_lock);
3512
3513void pci_lock_rescan_remove(void)
3514{
3515 mutex_lock(&pci_rescan_remove_lock);
3516}
3517EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
3518
3519void pci_unlock_rescan_remove(void)
3520{
3521 mutex_unlock(&pci_rescan_remove_lock);
3522}
3523EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
3524
3525static int __init pci_sort_bf_cmp(const struct device *d_a,
3526 const struct device *d_b)
3527{
3528 const struct pci_dev *a = to_pci_dev(d_a);
3529 const struct pci_dev *b = to_pci_dev(d_b);
3530
3531 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
3532 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
3533
3534 if (a->bus->number < b->bus->number) return -1;
3535 else if (a->bus->number > b->bus->number) return 1;
3536
3537 if (a->devfn < b->devfn) return -1;
3538 else if (a->devfn > b->devfn) return 1;
3539
3540 return 0;
3541}
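
/*
 * For example, 0000:00:1f.2 sorts before 0000:01:00.0, which sorts
 * before 0001:00:00.0: domain first, then bus number, then devfn.
 */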
3542
3543void __init pci_sort_breadthfirst(void)
3544{
3545 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
3546}
3547
3548int pci_hp_add_bridge(struct pci_dev *dev)
3549{
3550 struct pci_bus *parent = dev->bus;
3551 int busnr, start = parent->busn_res.start;
3552 unsigned int available_buses = 0;
3553 int end = parent->busn_res.end;
3554
3555 for (busnr = start; busnr <= end; busnr++) {
3556 if (!pci_find_bus(pci_domain_nr(parent), busnr))
3557 break;
3558 }
3559 if (busnr-- > end) {
3560 pci_err(dev, "No bus number available for hot-added bridge\n");
3561 return -1;
3562 }
3563
3564 /* Scan bridges that are already configured */
3565 busnr = pci_scan_bridge(parent, dev, busnr, 0);
3566
3567 /*
3568 * Distribute the available bus numbers between hotplug-capable
3569 * bridges to make extending the chain later possible.
3570 */
3571 available_buses = end - busnr;
3572
3573 /* Scan bridges that need to be reconfigured */
3574 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
3575
3576 if (!dev->subordinate)
3577 return -1;
3578
3579 return 0;
3580}
3581EXPORT_SYMBOL_GPL(pci_hp_add_bridge);