// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)

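/*
 * For illustration: PCI_MAKE_VERSION(1, 4) yields 0x00010004, from which
 * PCI_MAJOR_VERSION() recovers 1 and PCI_MINOR_VERSION() recovers 4.
 */
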
enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for a 32-bit serial number as a string */
#define SLOT_NAME_SIZE 11

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE = 0x42490000,
	PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2 = PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3 = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3 = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32 dev:5;
		u32 func:3;
		u32 reserved:24;
	} bits;
	u32 slot;
} __packed;
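
/*
 * For example, the function at Linux devfn 0x21 (device 4, function 1)
 * encodes as bits.dev = 4 and bits.func = 1, i.e. slot = 0x24. The
 * devfn_to_wslot() and wslot_to_devfn() helpers below perform exactly
 * this conversion.
 */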

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16 v_id;	/* vendor ID */
	u16 d_id;	/* device ID */
	u8 rev;
	u8 prog_intf;
	u8 subclass;
	u8 base_class;
	u32 subsystem_id;
	union win_slot_encoding win_slot;
	u32 ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE = 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
};

struct pci_function_description2 {
	u16 v_id;	/* vendor ID */
	u16 d_id;	/* device ID */
	u8 rev;
	u8 prog_intf;
	u8 subclass;
	u8 base_class;
	u32 subsystem_id;
	union win_slot_encoding win_slot;
	u32 ser;	/* serial number */
	u32 flags;
	u16 virtual_numa_node;
	u16 reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8 vector;
	u8 delivery_mode;
	u16 vector_count;
	u32 reserved;
	u64 cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	Number of valid entries in @processor_array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8 vector;
	u8 delivery_mode;
	u16 vector_count;
	u16 processor_count;
	u16 processor_array[32];
} __packed;

/*
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 * Everything is the same as in 'hv_msi_desc2' except that the size of the
 * 'vector' field is larger to support bigger vector values, e.g. LPI
 * vectors on ARM.
 */
struct hv_msi_desc3 {
	u32 vector;
	u8 delivery_mode;
	u8 reserved;
	u16 vector_count;
	u16 processor_count;
	u16 processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16 reserved;
	u16 vector_count;
	u32 data;
	u64 address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};
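
/*
 * A minimal sketch of the request/completion pattern built on pci_packet
 * (the names local to this sketch are illustrative):
 *
 *	struct {
 *		struct pci_packet pkt;
 *		u8 buf[sizeof(struct pci_child_message)];
 *	} ctxt;
 *	struct hv_pci_compl comp_pkt;
 *
 *	ctxt.pkt.completion_func = hv_pci_generic_compl;
 *	ctxt.pkt.compl_ctxt = &comp_pkt;
 *	vmbus_sendpacket(chan, ctxt.pkt.message, size,
 *			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND,
 *			 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * The channel callback later recovers the pci_packet from the request ID
 * and invokes completion_func() with the host's pci_response.
 */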

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = (4 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;
	enum hv_pcibus_state state;
	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;		/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls;	/* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16 v_id;	/* vendor ID */
	u16 d_id;	/* device ID */
	u8 rev;
	u8 prog_intf;
	u8 subclass;
	u8 base_class;
	u32 subsystem_id;
	union win_slot_encoding win_slot;
	u32 ser;	/* serial number */
	u32 flags;
	u16 virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[];
};

enum hv_pcichild_state {
	hv_pcichild_init = 0,
	hv_pcichild_requirements,
	hv_pcichild_resourced,
	hv_pcichild_ejecting,
	hv_pcichild_maximum
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	enum hv_pcichild_state state;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER	handle_edge_irq
#define FLOW_NAME	"edge"

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare	pci_msi_prepare

/**
 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			    (hbus->hdev->dev_instance.b[4] << 16) |
			    (hbus->hdev->dev_instance.b[7] << 8) |
			    (hbus->hdev->dev_instance.b[6] & 0xf8) |
			    PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	/*
	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
	 * spurious interrupt storm. Not doing so does not seem to have a
	 * negative effect (yet?).
	 */

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the hv_do_hypercall() above
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but leave a
 * bit of room at the start to allow SPIs to be specified through ACPI, and
 * start at a power of two to satisfy the power-of-two multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE	0
#define FLOW_HANDLER	NULL
#define FLOW_NAME	NULL
#define hv_msi_prepare	NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 2;
	fwspec.param[0] = hwirq;
	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first cpu as the irq affinity that can be temporarily used for
 * composing MSI from the hypervisor. GIC will eventually set the right
 * affinity for the irq and the 'unmask' will retarget the interrupt to that
 * cpu.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc = hv_pci_vec_irq_domain_alloc,
	.free = hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * Once enabled, the IRQ domain should not be removed, since there is
	 * no way to ensure that all the corresponding devices are also gone
	 * and that no more interrupts will be generated.
	 */
	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
							  fn, &hv_pci_domain_ops,
							  chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for PCI device interrupts and are managed via GICD
 * registers, which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn:	The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot:	The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page. Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
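
/*
 * A minimal sketch of the select-then-access sequence for the MMIO
 * (non-hypercall) path, as implemented below:
 *
 *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
 *	mb();	(make sure the function is selected before the access)
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);
 */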

/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
		   PCI_INTERRUPT_PIN) {
		/*
		 * Interrupt Line and Interrupt Pin are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
				   CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
				  hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;	/* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
				     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus:	PCI Bus structure
 * @devfn:	Device/function
 * @where:	Offset from base
 * @size:	Byte/word/dword
 * @val:	Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus:	PCI Bus structure
 * @devfn:	Device/function
 * @where:	Offset from base
 * @size:	Byte/word/dword
 * @val:	Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one
 * or more of the first 64 blocks. This invalidation is delivered to the VF
 * driver via a callback that the VF driver supplied to this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
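
/*
 * For illustration, a VF driver might read back and rewrite block 0 as
 * follows (in practice these entry points are reached through a separately
 * exported interface rather than called directly; the buffer and block
 * number are illustrative):
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int returned;
 *	int ret;
 *
 *	ret = hv_read_config_block(pdev, buf, sizeof(buf), 0, &returned);
 *	if (!ret)
 *		ret = hv_write_config_block(pdev, buf, returned, 0);
 */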

struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				 unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	if (!int_desc->vector_count) {
		kfree(int_desc);
		return;
	}
	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 0, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}

/**
 * hv_msi_free() - Free the MSI.
 * @domain:	The interrupt domain pointer
 * @info:	Extra MSI-related context
 * @irq:	Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date. This callback sends a message that frees
 * the IRT entry and related tracking nonsense.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int irq)
{
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	struct tran_int_desc *int_desc;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

	pdev = msi_desc_to_pci_dev(msi);
	hbus = info->data;
	int_desc = irq_data_get_irq_chip_data(irq_data);
	if (!int_desc)
		return;

	irq_data->chip_data = NULL;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev) {
		kfree(int_desc);
		return;
	}

	hv_int_desc_free(hpdev, int_desc);
	put_pcichild(hpdev);
}

static void hv_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	if (data->parent_data->chip->irq_mask)
		irq_chip_mask_parent(data);
}

static void hv_irq_unmask(struct irq_data *data)
{
	hv_arch_irq_unmask(data);

	if (data->parent_data->chip->irq_unmask)
		irq_chip_unmask_parent(data);
	pci_msi_unmask_irq(data);
}

struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	if (resp_packet_size < sizeof(*int_resp)) {
		comp_pkt->comp_pkt.completion_status = -1;
		goto out;
	}
	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
out:
	complete(&comp_pkt->comp_pkt.host_event);
}

static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt,
	u32 slot, u8 vector, u16 vector_count)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = vector_count;
	int_pkt->int_desc.delivery_mode = DELIVERY_MODE;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}

/*
 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
 * not irrelevant because Hyper-V chooses the physical CPU to handle the
 * interrupts based on the vCPU specified in the message sent to the vPCI VSP in
 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
 * but assigning too many vPCI device interrupts to the same pCPU can cause a
 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
 * to spread out the pCPUs that it selects.
 *
 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
 * to always return the same dummy vCPU, because a second call to
 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
 * original dummy vCPU is used. This dummy vCPU must be round-robined so that
 * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
 * the same pCPU, even though the vCPUs will be spread out by later calls
 * to hv_irq_unmask(), but that is the best we can do now.
 *
 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
 * enhancement is planned for a future version. With that enhancement, the
 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
 * device will be spread across multiple pCPUs.
 */
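
/*
 * For illustration: with four online CPUs, successive multi-MSI
 * allocations receive dummy vCPUs 0, 1, 2, 3, 0, ... from
 * hv_compose_multi_msi_req_get_cpu() below, whereas
 * hv_compose_msi_req_get_cpu() always returns the first online CPU in
 * the affinity mask.
 */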
1742
1743/*
1744 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1745 * by subsequent retarget in hv_irq_unmask().
1746 */
1747static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1748{
1749 return cpumask_first_and(affinity, cpu_online_mask);
1750}
1751
1752/*
1753 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
1754 */
1755static int hv_compose_multi_msi_req_get_cpu(void)
1756{
1757 static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1758
1759 /* -1 means starting with CPU 0 */
1760 static int cpu_next = -1;
1761
1762 unsigned long flags;
1763 int cpu;
1764
1765 spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1766
1767 cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
1768 false);
1769 cpu = cpu_next;
1770
1771 spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1772
1773 return cpu;
1774}
1775
1776static u32 hv_compose_msi_req_v2(
1777 struct pci_create_interrupt2 *int_pkt, int cpu,
1778 u32 slot, u8 vector, u16 vector_count)
1779{
1780 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1781 int_pkt->wslot.slot = slot;
1782 int_pkt->int_desc.vector = vector;
1783 int_pkt->int_desc.vector_count = vector_count;
1784 int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1785 int_pkt->int_desc.processor_array[0] =
1786 hv_cpu_number_to_vp_number(cpu);
1787 int_pkt->int_desc.processor_count = 1;
1788
1789 return sizeof(*int_pkt);
1790}
1791
1792static u32 hv_compose_msi_req_v3(
1793 struct pci_create_interrupt3 *int_pkt, int cpu,
1794 u32 slot, u32 vector, u16 vector_count)
1795{
1796 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1797 int_pkt->wslot.slot = slot;
1798 int_pkt->int_desc.vector = vector;
1799 int_pkt->int_desc.reserved = 0;
1800 int_pkt->int_desc.vector_count = vector_count;
1801 int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1802 int_pkt->int_desc.processor_array[0] =
1803 hv_cpu_number_to_vp_number(cpu);
1804 int_pkt->int_desc.processor_count = 1;
1805
1806 return sizeof(*int_pkt);
1807}
1808
1809/**
1810 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1811 * @data: Everything about this MSI
1812 * @msg: Buffer that is filled in by this function
1813 *
1814 * This function unpacks the IRQ looking for target CPU set, IDT
1815 * vector and mode and sends a message to the parent partition
1816 * asking for a mapping for that tuple in this partition. The
1817 * response supplies a data value and address to which that data
1818 * should be written to trigger that interrupt.
1819 */
1820static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1821{
1822 struct hv_pcibus_device *hbus;
1823 struct vmbus_channel *channel;
1824 struct hv_pci_dev *hpdev;
1825 struct pci_bus *pbus;
1826 struct pci_dev *pdev;
1827 const struct cpumask *dest;
1828 struct compose_comp_ctxt comp;
1829 struct tran_int_desc *int_desc;
1830 struct msi_desc *msi_desc;
1831 /*
1832 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1833 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1834 */
1835 u16 vector_count;
1836 u32 vector;
1837 struct {
1838 struct pci_packet pci_pkt;
1839 union {
1840 struct pci_create_interrupt v1;
1841 struct pci_create_interrupt2 v2;
1842 struct pci_create_interrupt3 v3;
1843 } int_pkts;
1844 } __packed ctxt;
1845 bool multi_msi;
1846 u64 trans_id;
1847 u32 size;
1848 int ret;
1849 int cpu;
1850
1851 msi_desc = irq_data_get_msi_desc(data);
1852 multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1853 msi_desc->nvec_used > 1;
1854
1855 /* Reuse the previous allocation */
1856 if (data->chip_data && multi_msi) {
1857 int_desc = data->chip_data;
1858 msg->address_hi = int_desc->address >> 32;
1859 msg->address_lo = int_desc->address & 0xffffffff;
1860 msg->data = int_desc->data;
1861 return;
1862 }
1863
1864 pdev = msi_desc_to_pci_dev(msi_desc);
1865 dest = irq_data_get_effective_affinity_mask(data);
1866 pbus = pdev->bus;
1867 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1868 channel = hbus->hdev->channel;
1869 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1870 if (!hpdev)
1871 goto return_null_message;
1872
1873 /* Free any previous message that might have already been composed. */
1874 if (data->chip_data && !multi_msi) {
1875 int_desc = data->chip_data;
1876 data->chip_data = NULL;
1877 hv_int_desc_free(hpdev, int_desc);
1878 }
1879
1880 int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1881 if (!int_desc)
1882 goto drop_reference;
1883
1884 if (multi_msi) {
1885 /*
1886 * If this is not the first MSI of Multi MSI, we already have
1887 * a mapping. Can exit early.
1888 */
1889 if (msi_desc->irq != data->irq) {
1890 data->chip_data = int_desc;
1891 int_desc->address = msi_desc->msg.address_lo |
1892 (u64)msi_desc->msg.address_hi << 32;
1893 int_desc->data = msi_desc->msg.data +
1894 (data->irq - msi_desc->irq);
1895 msg->address_hi = msi_desc->msg.address_hi;
1896 msg->address_lo = msi_desc->msg.address_lo;
1897 msg->data = int_desc->data;
1898 put_pcichild(hpdev);
1899 return;
1900 }
1901 /*
1902 * The vector we select here is a dummy value. The correct
1903 * value gets sent to the hypervisor in unmask(). This needs
1904		 * to be aligned with the count, and also not zero. Multi-MSI
1905		 * counts are powers of 2 up to 32, so 32 will always work here.
1906 */
1907 vector = 32;
1908 vector_count = msi_desc->nvec_used;
1909 cpu = hv_compose_multi_msi_req_get_cpu();
1910 } else {
1911 vector = hv_msi_get_int_vector(data);
1912 vector_count = 1;
1913 cpu = hv_compose_msi_req_get_cpu(dest);
1914 }
1915
1916 /*
1917 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1918 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1919 * for better readability.
1920 */
1921 memset(&ctxt, 0, sizeof(ctxt));
1922 init_completion(&comp.comp_pkt.host_event);
1923 ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1924 ctxt.pci_pkt.compl_ctxt = ∁
1925
1926 switch (hbus->protocol_version) {
1927 case PCI_PROTOCOL_VERSION_1_1:
1928 size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1929 hpdev->desc.win_slot.slot,
1930 (u8)vector,
1931 vector_count);
1932 break;
1933
1934 case PCI_PROTOCOL_VERSION_1_2:
1935 case PCI_PROTOCOL_VERSION_1_3:
1936 size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1937 cpu,
1938 hpdev->desc.win_slot.slot,
1939 (u8)vector,
1940 vector_count);
1941 break;
1942
1943 case PCI_PROTOCOL_VERSION_1_4:
1944 size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1945 cpu,
1946 hpdev->desc.win_slot.slot,
1947 vector,
1948 vector_count);
1949 break;
1950
1951 default:
1952 /* As we only negotiate protocol versions known to this driver,
1953		 * this path should never hit. However, this is not a hot
1954 * path so we print a message to aid future updates.
1955 */
1956 dev_err(&hbus->hdev->device,
1957			"Unexpected vPCI protocol, update driver.\n");
1958 goto free_int_desc;
1959 }
1960
1961 ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1962 size, (unsigned long)&ctxt.pci_pkt,
1963 &trans_id, VM_PKT_DATA_INBAND,
1964 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1965 if (ret) {
1966 dev_err(&hbus->hdev->device,
1967			"Sending request for interrupt failed: %d\n",
1968			ret);
1969 goto free_int_desc;
1970 }
1971
1972 /*
1973 * Prevents hv_pci_onchannelcallback() from running concurrently
1974 * in the tasklet.
1975 */
1976 tasklet_disable_in_atomic(&channel->callback_event);
1977
1978 /*
1979 * Since this function is called with IRQ locks held, can't
1980 * do normal wait for completion; instead poll.
1981 */
1982 while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1983 unsigned long flags;
1984
1985 /* 0xFFFF means an invalid PCI VENDOR ID. */
1986 if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1987 dev_err_once(&hbus->hdev->device,
1988 "the device has gone\n");
1989 goto enable_tasklet;
1990 }
1991
1992 /*
1993 * Make sure that the ring buffer data structure doesn't get
1994 * freed while we dereference the ring buffer pointer. Test
1995 * for the channel's onchannel_callback being NULL within a
1996 * sched_lock critical section. See also the inline comments
1997 * in vmbus_reset_channel_cb().
1998 */
1999 spin_lock_irqsave(&channel->sched_lock, flags);
2000 if (unlikely(channel->onchannel_callback == NULL)) {
2001 spin_unlock_irqrestore(&channel->sched_lock, flags);
2002 goto enable_tasklet;
2003 }
2004 hv_pci_onchannelcallback(hbus);
2005 spin_unlock_irqrestore(&channel->sched_lock, flags);
2006
2007 if (hpdev->state == hv_pcichild_ejecting) {
2008 dev_err_once(&hbus->hdev->device,
2009 "the device is being ejected\n");
2010 goto enable_tasklet;
2011 }
2012
2013 udelay(100);
2014 }
2015
2016 tasklet_enable(&channel->callback_event);
2017
2018 if (comp.comp_pkt.completion_status < 0) {
2019 dev_err(&hbus->hdev->device,
2020			"Request for interrupt failed: 0x%x\n",
2021 comp.comp_pkt.completion_status);
2022 goto free_int_desc;
2023 }
2024
2025 /*
2026 * Record the assignment so that this can be unwound later. Using
2027 * irq_set_chip_data() here would be appropriate, but the lock it takes
2028 * is already held.
2029 */
2030 *int_desc = comp.int_desc;
2031 data->chip_data = int_desc;
2032
2033 /* Pass up the result. */
2034 msg->address_hi = comp.int_desc.address >> 32;
2035 msg->address_lo = comp.int_desc.address & 0xffffffff;
2036 msg->data = comp.int_desc.data;
2037
2038 put_pcichild(hpdev);
2039 return;
2040
2041enable_tasklet:
2042 tasklet_enable(&channel->callback_event);
2043 /*
2044 * The completion packet on the stack becomes invalid after 'return';
2045 * remove the ID from the VMbus requestor if the identifier is still
2046 * mapped to/associated with the packet. (The identifier could have
2047 * been 're-used', i.e., already removed and (re-)mapped.)
2048 *
2049 * Cf. hv_pci_onchannelcallback().
2050 */
2051 vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2052free_int_desc:
2053 kfree(int_desc);
2054drop_reference:
2055 put_pcichild(hpdev);
2056return_null_message:
2057 msg->address_hi = 0;
2058 msg->address_lo = 0;
2059 msg->data = 0;
2060}
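
/*
 * Illustration (not part of the driver): hv_compose_msi_msg() runs with IRQ
 * locks held, so it cannot sleep in wait_for_completion(); it polls with
 * try_wait_for_completion() and udelay() instead. A user-space sketch of
 * that pattern, with a hypothetical flag standing in for the completion:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static volatile bool done;	/* stands in for comp.comp_pkt.host_event */

static void pump_channel(int spin)	/* stands in for the channel callback */
{
	if (spin == 3)
		done = true;	/* pretend the host's response arrived */
}

int main(void)
{
	for (int spin = 0; !done; spin++) {
		if (spin > 1000) {	/* bail out, like the eject/gone checks */
			puts("gave up waiting");
			return 1;
		}
		pump_channel(spin);
		/* the driver udelay(100)s here between polls */
	}
	puts("completion arrived");
	return 0;
}
#endif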
2061
2062/* HW Interrupt Chip Descriptor */
2063static struct irq_chip hv_msi_irq_chip = {
2064 .name = "Hyper-V PCIe MSI",
2065 .irq_compose_msi_msg = hv_compose_msi_msg,
2066 .irq_set_affinity = irq_chip_set_affinity_parent,
2067#ifdef CONFIG_X86
2068 .irq_ack = irq_chip_ack_parent,
2069#elif defined(CONFIG_ARM64)
2070 .irq_eoi = irq_chip_eoi_parent,
2071#endif
2072 .irq_mask = hv_irq_mask,
2073 .irq_unmask = hv_irq_unmask,
2074};
2075
2076static struct msi_domain_ops hv_msi_ops = {
2077 .msi_prepare = hv_msi_prepare,
2078 .msi_free = hv_msi_free,
2079};
2080
2081/**
2082 * hv_pcie_init_irq_domain() - Initialize IRQ domain
2083 * @hbus: The root PCI bus
2084 *
2085 * This function creates an IRQ domain which will be used for
2086 * interrupts from devices that have been passed through. These
2087 * devices only support MSI and MSI-X, not line-based interrupts
2088 * or simulations of line-based interrupts through PCIe's
2089 * fabric-layer messages. Because interrupts are remapped, we
2090 * can support multi-message MSI here.
2091 *
2092 * Return: '0' on success and error value on failure
2093 */
2094static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2095{
2096 hbus->msi_info.chip = &hv_msi_irq_chip;
2097 hbus->msi_info.ops = &hv_msi_ops;
2098 hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
2099 MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
2100 MSI_FLAG_PCI_MSIX);
2101 hbus->msi_info.handler = FLOW_HANDLER;
2102 hbus->msi_info.handler_name = FLOW_NAME;
2103 hbus->msi_info.data = hbus;
2104 hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
2105 &hbus->msi_info,
2106 hv_pci_get_root_domain());
2107 if (!hbus->irq_domain) {
2108 dev_err(&hbus->hdev->device,
2109 "Failed to build an MSI IRQ domain\n");
2110 return -ENODEV;
2111 }
2112
2113 dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2114
2115 return 0;
2116}
2117
2118/**
2119 * get_bar_size() - Get the address space consumed by a BAR
2120 * @bar_val: Value that a BAR returned after -1 was written
2121 * to it.
2122 *
2123 * This function returns the size of the BAR, rounded up to 1
2124 * page. It has to be rounded up because the hypervisor's page
2125 * table entry that maps the BAR into the VM can't specify an
2126 * offset within a page. The invariant is that the hypervisor
2127 * must place any BARs smaller than page length at the
2128 * beginning of a page.
2129 *
2130 * Return: Size in bytes of the consumed MMIO space.
2131 */
2132static u64 get_bar_size(u64 bar_val)
2133{
2134 return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2135 PAGE_SIZE);
2136}
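
/*
 * Illustration (not part of the driver): after all-ones is written to a BAR,
 * the read-back value has the size-aligned low bits forced to zero, so the
 * size is the two's complement of the masked value. A standalone example of
 * the same arithmetic as get_bar_size(), assuming a 4 KiB page and the
 * standard 0xf memory-BAR flag mask:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_MEM_MASK	(~0xfULL)	/* like PCI_BASE_ADDRESS_MEM_MASK */

static uint64_t ex_round_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

static uint64_t ex_get_bar_size(uint64_t bar_val)
{
	return ex_round_up(1 + ~(bar_val & EX_MEM_MASK), EX_PAGE_SIZE);
}

int main(void)
{
	/* A 64 KiB BAR reads back as 0xffff0000 (upper half all-ones). */
	assert(ex_get_bar_size(0xffffffffffff0000ULL) == 0x10000);
	/* A 256-byte BAR still consumes a whole page of MMIO space. */
	assert(ex_get_bar_size(0xffffffffffffff00ULL) == EX_PAGE_SIZE);
	return 0;
}
#endif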
2137
2138/**
2139 * survey_child_resources() - Total all MMIO requirements
2140 * @hbus: Root PCI bus, as understood by this driver
2141 */
2142static void survey_child_resources(struct hv_pcibus_device *hbus)
2143{
2144 struct hv_pci_dev *hpdev;
2145 resource_size_t bar_size = 0;
2146 unsigned long flags;
2147 struct completion *event;
2148 u64 bar_val;
2149 int i;
2150
2151 /* If nobody is waiting on the answer, don't compute it. */
2152 event = xchg(&hbus->survey_event, NULL);
2153 if (!event)
2154 return;
2155
2156 /* If the answer has already been computed, go with it. */
2157 if (hbus->low_mmio_space || hbus->high_mmio_space) {
2158 complete(event);
2159 return;
2160 }
2161
2162 spin_lock_irqsave(&hbus->device_list_lock, flags);
2163
2164 /*
2165 * Due to an interesting quirk of the PCI spec, all memory regions
2166 * for a child device are a power of 2 in size and aligned in memory,
2167 * so it's sufficient to just add them up without tracking alignment.
2168 */
2169 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2170 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2171 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2172 dev_err(&hbus->hdev->device,
2173 "There's an I/O BAR in this list!\n");
2174
2175 if (hpdev->probed_bar[i] != 0) {
2176 /*
2177 * A probed BAR has all the upper bits set that
2178 * can be changed.
2179 */
2180
2181 bar_val = hpdev->probed_bar[i];
2182 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2183 bar_val |=
2184 ((u64)hpdev->probed_bar[++i] << 32);
2185 else
2186 bar_val |= 0xffffffff00000000ULL;
2187
2188 bar_size = get_bar_size(bar_val);
2189
2190 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2191 hbus->high_mmio_space += bar_size;
2192 else
2193 hbus->low_mmio_space += bar_size;
2194 }
2195 }
2196 }
2197
2198 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2199 complete(event);
2200}
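
/*
 * Illustration (not part of the driver): because every BAR is a power of 2
 * in size and naturally aligned, a largest-first layout never needs padding,
 * so the plain sum computed above is a valid estimate of the space required.
 * A standalone check with hypothetical BAR sizes (already sorted descending):
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t bars[] = { 0x100000, 0x4000, 0x4000, 0x1000 };
	uint64_t sum = 0, cursor = 0;
	int i;

	for (i = 0; i < 4; i++)
		sum += bars[i];

	for (i = 0; i < 4; i++) {
		assert((cursor & (bars[i] - 1)) == 0);	/* no padding needed */
		cursor += bars[i];
	}
	assert(cursor == sum);	/* simple sum == tightly packed layout */
	return 0;
}
#endif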
2201
2202/**
2203 * prepopulate_bars() - Fill in BARs with defaults
2204 * @hbus: Root PCI bus, as understood by this driver
2205 *
2206 * The core PCI driver code seems much, much happier if the BARs
2207 * for a device have values upon first scan. So fill them in.
2208 * The algorithm below works down from large sizes to small,
2209 * attempting to pack the assignments optimally. The assumption,
2210 * enforced in other parts of the code, is that the beginning of
2211 * the memory-mapped I/O space will be aligned on the largest
2212 * BAR size.
2213 */
2214static void prepopulate_bars(struct hv_pcibus_device *hbus)
2215{
2216 resource_size_t high_size = 0;
2217 resource_size_t low_size = 0;
2218 resource_size_t high_base = 0;
2219 resource_size_t low_base = 0;
2220 resource_size_t bar_size;
2221 struct hv_pci_dev *hpdev;
2222 unsigned long flags;
2223 u64 bar_val;
2224 u32 command;
2225 bool high;
2226 int i;
2227
2228 if (hbus->low_mmio_space) {
2229 low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2230 low_base = hbus->low_mmio_res->start;
2231 }
2232
2233 if (hbus->high_mmio_space) {
2234 high_size = 1ULL <<
2235 (63 - __builtin_clzll(hbus->high_mmio_space));
2236 high_base = hbus->high_mmio_res->start;
2237 }
2238
2239 spin_lock_irqsave(&hbus->device_list_lock, flags);
2240
2241 /*
2242 * Clear the memory enable bit, in case it's already set. This occurs
2243 * in the suspend path of hibernation, where the device is suspended,
2244 * resumed and suspended again: see hibernation_snapshot() and
2245 * hibernation_platform_enter().
2246 *
2247 * If the memory enable bit is already set, Hyper-V silently ignores
2248 * the BAR updates below, and the related PCI device driver cannot
2249 * work, because reading from the device register(s) always returns
2250 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2251 */
2252 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2253 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2254 command &= ~PCI_COMMAND_MEMORY;
2255 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2256 }
2257
2258 /* Pick addresses for the BARs. */
2259 do {
2260 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2261 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2262 bar_val = hpdev->probed_bar[i];
2263 if (bar_val == 0)
2264 continue;
2265 high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2266 if (high) {
2267 bar_val |=
2268 ((u64)hpdev->probed_bar[i + 1]
2269 << 32);
2270 } else {
2271 bar_val |= 0xffffffffULL << 32;
2272 }
2273 bar_size = get_bar_size(bar_val);
2274 if (high) {
2275 if (high_size != bar_size) {
2276 i++;
2277 continue;
2278 }
2279 _hv_pcifront_write_config(hpdev,
2280 PCI_BASE_ADDRESS_0 + (4 * i),
2281 4,
2282 (u32)(high_base & 0xffffff00));
2283 i++;
2284 _hv_pcifront_write_config(hpdev,
2285 PCI_BASE_ADDRESS_0 + (4 * i),
2286 4, (u32)(high_base >> 32));
2287 high_base += bar_size;
2288 } else {
2289 if (low_size != bar_size)
2290 continue;
2291 _hv_pcifront_write_config(hpdev,
2292 PCI_BASE_ADDRESS_0 + (4 * i),
2293 4,
2294 (u32)(low_base & 0xffffff00));
2295 low_base += bar_size;
2296 }
2297 }
2298 if (high_size <= 1 && low_size <= 1) {
2299 /*
2300 * No need to set the PCI_COMMAND_MEMORY bit as
2301 * the core PCI driver doesn't require the bit
2302 * to be pre-set. Actually here we intentionally
2303 * keep the bit off so that the PCI BAR probing
2304 * in the core PCI driver doesn't cause Hyper-V
2305 * to unnecessarily unmap/map the virtual BARs
2306 * from/to the physical BARs multiple times.
2307 * This reduces the VM boot time significantly
2308 * if the BAR sizes are huge.
2309 */
2310 break;
2311 }
2312 }
2313
2314 high_size >>= 1;
2315 low_size >>= 1;
2316 } while (high_size || low_size);
2317
2318 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2319}
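
/*
 * Illustration (not part of the driver): a user-space sketch of the packing
 * loop above. Starting at the largest power-of-2 size and halving each pass,
 * BARs are assigned addresses in descending size order, which keeps every
 * assignment naturally aligned. The sizes and base address are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t bar_size[] = { 0x1000, 0x100000, 0x4000 };
	uint64_t base = 0xfe000000;

	for (uint64_t pass = 0x100000; pass; pass >>= 1) {
		for (int i = 0; i < 3; i++) {
			if (bar_size[i] != pass)
				continue;
			printf("BAR%d (%#llx bytes) -> %#llx\n", i,
			       (unsigned long long)bar_size[i],
			       (unsigned long long)base);
			base += bar_size[i];	/* next BAR of this size */
		}
	}
	return 0;	/* assigns BAR1, then BAR2, then BAR0 */
}
#endif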
2320
2321/*
2322 * Assign entries in sysfs pci slot directory.
2323 *
2324 * Note that this function does not need to lock the children list
2325 * because it is called from pci_devices_present_work(), which
2326 * is serialized with hv_eject_device_work() because they run on the
2327 * same ordered workqueue. Therefore the hbus->children list will not
2328 * change even when pci_create_slot() sleeps.
2329 */
2330static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2331{
2332 struct hv_pci_dev *hpdev;
2333 char name[SLOT_NAME_SIZE];
2334 int slot_nr;
2335
2336 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2337 if (hpdev->pci_slot)
2338 continue;
2339
2340 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2341 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2342 hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2343 name, NULL);
2344 if (IS_ERR(hpdev->pci_slot)) {
2345			pr_warn("pci_create_slot %s failed\n", name);
2346 hpdev->pci_slot = NULL;
2347 }
2348 }
2349}
2350
2351/*
2352 * Remove entries in sysfs pci slot directory.
2353 */
2354static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2355{
2356 struct hv_pci_dev *hpdev;
2357
2358 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2359 if (!hpdev->pci_slot)
2360 continue;
2361 pci_destroy_slot(hpdev->pci_slot);
2362 hpdev->pci_slot = NULL;
2363 }
2364}
2365
2366/*
2367 * Set NUMA node for the devices on the bus
2368 */
2369static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2370{
2371 struct pci_dev *dev;
2372 struct pci_bus *bus = hbus->bridge->bus;
2373 struct hv_pci_dev *hv_dev;
2374
2375 list_for_each_entry(dev, &bus->devices, bus_list) {
2376 hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2377 if (!hv_dev)
2378 continue;
2379
2380 if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2381 hv_dev->desc.virtual_numa_node < num_possible_nodes())
2382 /*
2383 * The kernel may boot with some NUMA nodes offline
2384				 * (e.g. in a kdump kernel) or with NUMA disabled via
2385				 * "numa=off". In those cases, adjust the host-provided
2386 * NUMA node to a valid NUMA node used by the kernel.
2387 */
2388 set_dev_node(&dev->dev,
2389 numa_map_to_online_node(
2390 hv_dev->desc.virtual_numa_node));
2391
2392 put_pcichild(hv_dev);
2393 }
2394}
2395
2396/**
2397 * create_root_hv_pci_bus() - Expose a new root PCI bus
2398 * @hbus: Root PCI bus, as understood by this driver
2399 *
2400 * Return: 0 on success, -errno on failure
2401 */
2402static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2403{
2404 int error;
2405 struct pci_host_bridge *bridge = hbus->bridge;
2406
2407 bridge->dev.parent = &hbus->hdev->device;
2408 bridge->sysdata = &hbus->sysdata;
2409 bridge->ops = &hv_pcifront_ops;
2410
2411 error = pci_scan_root_bus_bridge(bridge);
2412 if (error)
2413 return error;
2414
2415 pci_lock_rescan_remove();
2416 hv_pci_assign_numa_node(hbus);
2417 pci_bus_assign_resources(bridge->bus);
2418 hv_pci_assign_slots(hbus);
2419 pci_bus_add_devices(bridge->bus);
2420 pci_unlock_rescan_remove();
2421 hbus->state = hv_pcibus_installed;
2422 return 0;
2423}
2424
2425struct q_res_req_compl {
2426 struct completion host_event;
2427 struct hv_pci_dev *hpdev;
2428};
2429
2430/**
2431 * q_resource_requirements() - Query Resource Requirements
2432 * @context: The completion context.
2433 * @resp: The response that came from the host.
2434 * @resp_packet_size: The size in bytes of resp.
2435 *
2436 * This function is invoked on completion of a Query Resource
2437 * Requirements packet.
2438 */
2439static void q_resource_requirements(void *context, struct pci_response *resp,
2440 int resp_packet_size)
2441{
2442 struct q_res_req_compl *completion = context;
2443 struct pci_q_res_req_response *q_res_req =
2444 (struct pci_q_res_req_response *)resp;
2445 s32 status;
2446 int i;
2447
2448 status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2449 if (status < 0) {
2450 dev_err(&completion->hpdev->hbus->hdev->device,
2451 "query resource requirements failed: %x\n",
2452 status);
2453 } else {
2454 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2455 completion->hpdev->probed_bar[i] =
2456 q_res_req->probed_bar[i];
2457 }
2458 }
2459
2460 complete(&completion->host_event);
2461}
2462
2463/**
2464 * new_pcichild_device() - Create a new child device
2465 * @hbus: The internal struct tracking this root PCI bus.
2466 * @desc: The information supplied so far from the host
2467 * about the device.
2468 *
2469 * This function creates the tracking structure for a new child
2470 * device and kicks off the process of figuring out what it is.
2471 *
2472 * Return: Pointer to the new tracking struct
2473 */
2474static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2475 struct hv_pcidev_description *desc)
2476{
2477 struct hv_pci_dev *hpdev;
2478 struct pci_child_message *res_req;
2479 struct q_res_req_compl comp_pkt;
2480 struct {
2481 struct pci_packet init_packet;
2482 u8 buffer[sizeof(struct pci_child_message)];
2483 } pkt;
2484 unsigned long flags;
2485 int ret;
2486
2487 hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2488 if (!hpdev)
2489 return NULL;
2490
2491 hpdev->hbus = hbus;
2492
2493 memset(&pkt, 0, sizeof(pkt));
2494 init_completion(&comp_pkt.host_event);
2495 comp_pkt.hpdev = hpdev;
2496 pkt.init_packet.compl_ctxt = &comp_pkt;
2497 pkt.init_packet.completion_func = q_resource_requirements;
2498 res_req = (struct pci_child_message *)&pkt.init_packet.message;
2499 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2500 res_req->wslot.slot = desc->win_slot.slot;
2501
2502 ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2503 sizeof(struct pci_child_message),
2504 (unsigned long)&pkt.init_packet,
2505 VM_PKT_DATA_INBAND,
2506 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2507 if (ret)
2508 goto error;
2509
2510 if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2511 goto error;
2512
2513 hpdev->desc = *desc;
2514 refcount_set(&hpdev->refs, 1);
2515 get_pcichild(hpdev);
2516 spin_lock_irqsave(&hbus->device_list_lock, flags);
2517
2518 list_add_tail(&hpdev->list_entry, &hbus->children);
2519 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2520 return hpdev;
2521
2522error:
2523 kfree(hpdev);
2524 return NULL;
2525}
2526
2527/**
2528 * get_pcichild_wslot() - Find device from slot
2529 * @hbus: Root PCI bus, as understood by this driver
2530 * @wslot: Location on the bus
2531 *
2532 * This function looks up a PCI device and returns the internal
2533 * representation of it. It acquires a reference on it, so that
2534 * the device won't be deleted while somebody is using it. The
2535 * caller is responsible for calling put_pcichild() to release
2536 * this reference.
2537 *
2538 * Return: Internal representation of a PCI device
2539 */
2540static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2541 u32 wslot)
2542{
2543 unsigned long flags;
2544 struct hv_pci_dev *iter, *hpdev = NULL;
2545
2546 spin_lock_irqsave(&hbus->device_list_lock, flags);
2547 list_for_each_entry(iter, &hbus->children, list_entry) {
2548 if (iter->desc.win_slot.slot == wslot) {
2549 hpdev = iter;
2550 get_pcichild(hpdev);
2551 break;
2552 }
2553 }
2554 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2555
2556 return hpdev;
2557}
2558
2559/**
2560 * pci_devices_present_work() - Handle new list of child devices
2561 * @work: Work struct embedded in struct hv_dr_work
2562 *
2563 * "Bus Relations" is the Windows term for "children of this
2564 * bus." The terminology is preserved here for people trying to
2565 * debug the interaction between Hyper-V and Linux. This
2566 * function is called when the parent partition reports a list
2567 * of functions that should be observed under this PCI Express
2568 * port (bus).
2569 *
2570 * This function updates the list, and must tolerate being
2571 * called multiple times with the same information. The typical
2572 * number of child devices is one, with very atypical cases
2573 * involving three or four, so the algorithms used here can be
2574 * simple and inefficient.
2575 *
2576 * It must also treat the omission of a previously observed device as
2577 * notification that the device no longer exists.
2578 *
2579 * Note that this function is serialized with hv_eject_device_work(),
2580 * because both are pushed to the ordered workqueue hbus->wq.
2581 */
2582static void pci_devices_present_work(struct work_struct *work)
2583{
2584 u32 child_no;
2585 bool found;
2586 struct hv_pcidev_description *new_desc;
2587 struct hv_pci_dev *hpdev;
2588 struct hv_pcibus_device *hbus;
2589 struct list_head removed;
2590 struct hv_dr_work *dr_wrk;
2591 struct hv_dr_state *dr = NULL;
2592 unsigned long flags;
2593
2594 dr_wrk = container_of(work, struct hv_dr_work, wrk);
2595 hbus = dr_wrk->bus;
2596 kfree(dr_wrk);
2597
2598 INIT_LIST_HEAD(&removed);
2599
2600 /* Pull this off the queue and process it if it was the last one. */
2601 spin_lock_irqsave(&hbus->device_list_lock, flags);
2602 while (!list_empty(&hbus->dr_list)) {
2603 dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2604 list_entry);
2605 list_del(&dr->list_entry);
2606
2607 /* Throw this away if the list still has stuff in it. */
2608 if (!list_empty(&hbus->dr_list)) {
2609 kfree(dr);
2610 continue;
2611 }
2612 }
2613 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2614
2615 if (!dr)
2616 return;
2617
2618 /* First, mark all existing children as reported missing. */
2619 spin_lock_irqsave(&hbus->device_list_lock, flags);
2620 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2621 hpdev->reported_missing = true;
2622 }
2623 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2624
2625 /* Next, add back any reported devices. */
2626 for (child_no = 0; child_no < dr->device_count; child_no++) {
2627 found = false;
2628 new_desc = &dr->func[child_no];
2629
2630 spin_lock_irqsave(&hbus->device_list_lock, flags);
2631 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2632 if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2633 (hpdev->desc.v_id == new_desc->v_id) &&
2634 (hpdev->desc.d_id == new_desc->d_id) &&
2635 (hpdev->desc.ser == new_desc->ser)) {
2636 hpdev->reported_missing = false;
2637 found = true;
2638 }
2639 }
2640 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2641
2642 if (!found) {
2643 hpdev = new_pcichild_device(hbus, new_desc);
2644 if (!hpdev)
2645 dev_err(&hbus->hdev->device,
2646 "couldn't record a child device.\n");
2647 }
2648 }
2649
2650 /* Move missing children to a list on the stack. */
2651 spin_lock_irqsave(&hbus->device_list_lock, flags);
2652 do {
2653 found = false;
2654 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2655 if (hpdev->reported_missing) {
2656 found = true;
2657 put_pcichild(hpdev);
2658 list_move_tail(&hpdev->list_entry, &removed);
2659 break;
2660 }
2661 }
2662 } while (found);
2663 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2664
2665 /* Delete everything that should no longer exist. */
2666 while (!list_empty(&removed)) {
2667 hpdev = list_first_entry(&removed, struct hv_pci_dev,
2668 list_entry);
2669 list_del(&hpdev->list_entry);
2670
2671 if (hpdev->pci_slot)
2672 pci_destroy_slot(hpdev->pci_slot);
2673
2674 put_pcichild(hpdev);
2675 }
2676
2677 switch (hbus->state) {
2678 case hv_pcibus_installed:
2679 /*
2680 * Tell the core to rescan bus
2681 * because there may have been changes.
2682 */
2683 pci_lock_rescan_remove();
2684 pci_scan_child_bus(hbus->bridge->bus);
2685 hv_pci_assign_numa_node(hbus);
2686 hv_pci_assign_slots(hbus);
2687 pci_unlock_rescan_remove();
2688 break;
2689
2690 case hv_pcibus_init:
2691 case hv_pcibus_probed:
2692 survey_child_resources(hbus);
2693 break;
2694
2695 default:
2696 break;
2697 }
2698
2699 kfree(dr);
2700}
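
/*
 * Illustration (not part of the driver): the reconciliation above is a
 * mark-and-sweep pass: mark every known child missing, unmark those the host
 * reported, create any new ones, then remove whatever is still marked. A
 * standalone sketch with hypothetical slot numbers:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

struct child { int slot; bool missing; };

int main(void)
{
	struct child known[] = { { 0 }, { 1 }, { 2 } };
	const int reported[] = { 0, 2, 7 };	/* slot 1 vanished, slot 7 is new */
	int i, r;

	for (i = 0; i < 3; i++)			/* mark all as missing */
		known[i].missing = true;

	for (r = 0; r < 3; r++) {		/* unmark reported children */
		bool found = false;

		for (i = 0; i < 3; i++) {
			if (known[i].slot == reported[r]) {
				known[i].missing = false;
				found = true;
			}
		}
		if (!found)
			printf("create new child at slot %d\n", reported[r]);
	}

	for (i = 0; i < 3; i++)			/* sweep what is still missing */
		if (known[i].missing)
			printf("remove child at slot %d\n", known[i].slot);
	return 0;
}
#endif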
2701
2702/**
2703 * hv_pci_start_relations_work() - Queue work to start device discovery
2704 * @hbus: Root PCI bus, as understood by this driver
2705 * @dr: The list of children returned from host
2706 *
2707 * Return: 0 on success, -errno on failure
2708 */
2709static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2710 struct hv_dr_state *dr)
2711{
2712 struct hv_dr_work *dr_wrk;
2713 unsigned long flags;
2714 bool pending_dr;
2715
2716 if (hbus->state == hv_pcibus_removing) {
2717 dev_info(&hbus->hdev->device,
2718 "PCI VMBus BUS_RELATIONS: ignored\n");
2719 return -ENOENT;
2720 }
2721
2722 dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2723 if (!dr_wrk)
2724 return -ENOMEM;
2725
2726 INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2727 dr_wrk->bus = hbus;
2728
2729 spin_lock_irqsave(&hbus->device_list_lock, flags);
2730 /*
2731	 * If pending_dr is true, we have already queued a work item,
2732	 * which will see the new dr. Otherwise, we need to
2733	 * queue a new work item.
2734 */
2735 pending_dr = !list_empty(&hbus->dr_list);
2736 list_add_tail(&dr->list_entry, &hbus->dr_list);
2737 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2738
2739 if (pending_dr)
2740 kfree(dr_wrk);
2741 else
2742 queue_work(hbus->wq, &dr_wrk->wrk);
2743
2744 return 0;
2745}
2746
2747/**
2748 * hv_pci_devices_present() - Handle list of new children
2749 * @hbus: Root PCI bus, as understood by this driver
2750 * @relations: Packet from host listing children
2751 *
2752 * Process a new list of devices on the bus. The list of devices is
2753 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
2754 * whenever a new list of devices for this bus appears.
2755 */
2756static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2757 struct pci_bus_relations *relations)
2758{
2759 struct hv_dr_state *dr;
2760 int i;
2761
2762 dr = kzalloc(struct_size(dr, func, relations->device_count),
2763 GFP_NOWAIT);
2764 if (!dr)
2765 return;
2766
2767 dr->device_count = relations->device_count;
2768 for (i = 0; i < dr->device_count; i++) {
2769 dr->func[i].v_id = relations->func[i].v_id;
2770 dr->func[i].d_id = relations->func[i].d_id;
2771 dr->func[i].rev = relations->func[i].rev;
2772 dr->func[i].prog_intf = relations->func[i].prog_intf;
2773 dr->func[i].subclass = relations->func[i].subclass;
2774 dr->func[i].base_class = relations->func[i].base_class;
2775 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2776 dr->func[i].win_slot = relations->func[i].win_slot;
2777 dr->func[i].ser = relations->func[i].ser;
2778 }
2779
2780 if (hv_pci_start_relations_work(hbus, dr))
2781 kfree(dr);
2782}
2783
2784/**
2785 * hv_pci_devices_present2() - Handle list of new children
2786 * @hbus: Root PCI bus, as understood by this driver
2787 * @relations: Packet from host listing children
2788 *
2789 * This function is the v2 version of hv_pci_devices_present()
2790 */
2791static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2792 struct pci_bus_relations2 *relations)
2793{
2794 struct hv_dr_state *dr;
2795 int i;
2796
2797 dr = kzalloc(struct_size(dr, func, relations->device_count),
2798 GFP_NOWAIT);
2799 if (!dr)
2800 return;
2801
2802 dr->device_count = relations->device_count;
2803 for (i = 0; i < dr->device_count; i++) {
2804 dr->func[i].v_id = relations->func[i].v_id;
2805 dr->func[i].d_id = relations->func[i].d_id;
2806 dr->func[i].rev = relations->func[i].rev;
2807 dr->func[i].prog_intf = relations->func[i].prog_intf;
2808 dr->func[i].subclass = relations->func[i].subclass;
2809 dr->func[i].base_class = relations->func[i].base_class;
2810 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2811 dr->func[i].win_slot = relations->func[i].win_slot;
2812 dr->func[i].ser = relations->func[i].ser;
2813 dr->func[i].flags = relations->func[i].flags;
2814 dr->func[i].virtual_numa_node =
2815 relations->func[i].virtual_numa_node;
2816 }
2817
2818 if (hv_pci_start_relations_work(hbus, dr))
2819 kfree(dr);
2820}
2821
2822/**
2823 * hv_eject_device_work() - Asynchronously handles ejection
2824 * @work: Work struct embedded in internal device struct
2825 *
2826 * This function handles ejecting a device. Windows will
2827 * attempt to gracefully eject a device, waiting 60 seconds to
2828 * hear back from the guest OS that this completed successfully.
2829 * If this timer expires, the device will be forcibly removed.
2830 */
2831static void hv_eject_device_work(struct work_struct *work)
2832{
2833 struct pci_eject_response *ejct_pkt;
2834 struct hv_pcibus_device *hbus;
2835 struct hv_pci_dev *hpdev;
2836 struct pci_dev *pdev;
2837 unsigned long flags;
2838 int wslot;
2839 struct {
2840 struct pci_packet pkt;
2841 u8 buffer[sizeof(struct pci_eject_response)];
2842 } ctxt;
2843
2844 hpdev = container_of(work, struct hv_pci_dev, wrk);
2845 hbus = hpdev->hbus;
2846
2847 WARN_ON(hpdev->state != hv_pcichild_ejecting);
2848
2849 /*
2850 * Ejection can come before or after the PCI bus has been set up, so
2851 * attempt to find it and tear down the bus state, if it exists. This
2852 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2853 * because hbus->bridge->bus may not exist yet.
2854 */
2855 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2856 pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2857 if (pdev) {
2858 pci_lock_rescan_remove();
2859 pci_stop_and_remove_bus_device(pdev);
2860 pci_dev_put(pdev);
2861 pci_unlock_rescan_remove();
2862 }
2863
2864 spin_lock_irqsave(&hbus->device_list_lock, flags);
2865 list_del(&hpdev->list_entry);
2866 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2867
2868 if (hpdev->pci_slot)
2869 pci_destroy_slot(hpdev->pci_slot);
2870
2871 memset(&ctxt, 0, sizeof(ctxt));
2872 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2873 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2874 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2875 vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2876 sizeof(*ejct_pkt), 0,
2877 VM_PKT_DATA_INBAND, 0);
2878
2879 /* For the get_pcichild() in hv_pci_eject_device() */
2880 put_pcichild(hpdev);
2881	/* For the two refs taken in new_pcichild_device() */
2882 put_pcichild(hpdev);
2883 put_pcichild(hpdev);
2884 /* hpdev has been freed. Do not use it any more. */
2885}
2886
2887/**
2888 * hv_pci_eject_device() - Handles device ejection
2889 * @hpdev: Internal device tracking struct
2890 *
2891 * This function is invoked when an ejection packet arrives. It
2892 * just schedules work so that we don't re-enter the packet
2893 * delivery code handling the ejection.
2894 */
2895static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2896{
2897 struct hv_pcibus_device *hbus = hpdev->hbus;
2898 struct hv_device *hdev = hbus->hdev;
2899
2900 if (hbus->state == hv_pcibus_removing) {
2901 dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2902 return;
2903 }
2904
2905 hpdev->state = hv_pcichild_ejecting;
2906 get_pcichild(hpdev);
2907 INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2908 queue_work(hbus->wq, &hpdev->wrk);
2909}
2910
2911/**
2912 * hv_pci_onchannelcallback() - Handles incoming packets
2913 * @context: Internal bus tracking struct
2914 *
2915 * This function is invoked whenever the host sends a packet to
2916 * this channel (which is private to this root PCI bus).
2917 */
2918static void hv_pci_onchannelcallback(void *context)
2919{
2920 const int packet_size = 0x100;
2921 int ret;
2922 struct hv_pcibus_device *hbus = context;
2923 struct vmbus_channel *chan = hbus->hdev->channel;
2924 u32 bytes_recvd;
2925 u64 req_id, req_addr;
2926 struct vmpacket_descriptor *desc;
2927 unsigned char *buffer;
2928 int bufferlen = packet_size;
2929 struct pci_packet *comp_packet;
2930 struct pci_response *response;
2931 struct pci_incoming_message *new_message;
2932 struct pci_bus_relations *bus_rel;
2933 struct pci_bus_relations2 *bus_rel2;
2934 struct pci_dev_inval_block *inval;
2935 struct pci_dev_incoming *dev_message;
2936 struct hv_pci_dev *hpdev;
2937 unsigned long flags;
2938
2939 buffer = kmalloc(bufferlen, GFP_ATOMIC);
2940 if (!buffer)
2941 return;
2942
2943 while (1) {
2944 ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
2945 &bytes_recvd, &req_id);
2946
2947 if (ret == -ENOBUFS) {
2948 kfree(buffer);
2949 /* Handle large packet */
2950 bufferlen = bytes_recvd;
2951 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2952 if (!buffer)
2953 return;
2954 continue;
2955 }
2956
2957 /* Zero length indicates there are no more packets. */
2958 if (ret || !bytes_recvd)
2959 break;
2960
2961 /*
2962 * All incoming packets must be at least as large as a
2963 * response.
2964 */
2965 if (bytes_recvd <= sizeof(struct pci_response))
2966 continue;
2967 desc = (struct vmpacket_descriptor *)buffer;
2968
2969 switch (desc->type) {
2970 case VM_PKT_COMP:
2971
2972 lock_requestor(chan, flags);
2973 req_addr = __vmbus_request_addr_match(chan, req_id,
2974 VMBUS_RQST_ADDR_ANY);
2975 if (req_addr == VMBUS_RQST_ERROR) {
2976 unlock_requestor(chan, flags);
2977 dev_err(&hbus->hdev->device,
2978 "Invalid transaction ID %llx\n",
2979 req_id);
2980 break;
2981 }
2982 comp_packet = (struct pci_packet *)req_addr;
2983 response = (struct pci_response *)buffer;
2984 /*
2985 * Call ->completion_func() within the critical section to make
2986 * sure that the packet pointer is still valid during the call:
2987 * here 'valid' means that there's a task still waiting for the
2988 * completion, and that the packet data is still on the waiting
2989 * task's stack. Cf. hv_compose_msi_msg().
2990 */
2991 comp_packet->completion_func(comp_packet->compl_ctxt,
2992 response,
2993 bytes_recvd);
2994 unlock_requestor(chan, flags);
2995 break;
2996
2997 case VM_PKT_DATA_INBAND:
2998
2999 new_message = (struct pci_incoming_message *)buffer;
3000 switch (new_message->message_type.type) {
3001 case PCI_BUS_RELATIONS:
3002
3003 bus_rel = (struct pci_bus_relations *)buffer;
3004 if (bytes_recvd < sizeof(*bus_rel) ||
3005 bytes_recvd <
3006 struct_size(bus_rel, func,
3007 bus_rel->device_count)) {
3008 dev_err(&hbus->hdev->device,
3009 "bus relations too small\n");
3010 break;
3011 }
3012
3013 hv_pci_devices_present(hbus, bus_rel);
3014 break;
3015
3016 case PCI_BUS_RELATIONS2:
3017
3018 bus_rel2 = (struct pci_bus_relations2 *)buffer;
3019 if (bytes_recvd < sizeof(*bus_rel2) ||
3020 bytes_recvd <
3021 struct_size(bus_rel2, func,
3022 bus_rel2->device_count)) {
3023 dev_err(&hbus->hdev->device,
3024 "bus relations v2 too small\n");
3025 break;
3026 }
3027
3028 hv_pci_devices_present2(hbus, bus_rel2);
3029 break;
3030
3031 case PCI_EJECT:
3032
3033 dev_message = (struct pci_dev_incoming *)buffer;
3034 if (bytes_recvd < sizeof(*dev_message)) {
3035 dev_err(&hbus->hdev->device,
3036 "eject message too small\n");
3037 break;
3038 }
3039 hpdev = get_pcichild_wslot(hbus,
3040 dev_message->wslot.slot);
3041 if (hpdev) {
3042 hv_pci_eject_device(hpdev);
3043 put_pcichild(hpdev);
3044 }
3045 break;
3046
3047 case PCI_INVALIDATE_BLOCK:
3048
3049 inval = (struct pci_dev_inval_block *)buffer;
3050 if (bytes_recvd < sizeof(*inval)) {
3051 dev_err(&hbus->hdev->device,
3052 "invalidate message too small\n");
3053 break;
3054 }
3055 hpdev = get_pcichild_wslot(hbus,
3056 inval->wslot.slot);
3057 if (hpdev) {
3058 if (hpdev->block_invalidate) {
3059 hpdev->block_invalidate(
3060 hpdev->invalidate_context,
3061 inval->block_mask);
3062 }
3063 put_pcichild(hpdev);
3064 }
3065 break;
3066
3067 default:
3068 dev_warn(&hbus->hdev->device,
3069 "Unimplemented protocol message %x\n",
3070 new_message->message_type.type);
3071 break;
3072 }
3073 break;
3074
3075 default:
3076 dev_err(&hbus->hdev->device,
3077 "unhandled packet type %d, tid %llx len %d\n",
3078 desc->type, req_id, bytes_recvd);
3079 break;
3080 }
3081 }
3082
3083 kfree(buffer);
3084}
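
/*
 * Illustration (not part of the driver): the receive loop above starts with
 * a fixed-size buffer and, on -ENOBUFS, reallocates to the exact size the
 * ring reports before retrying. A user-space model with a hypothetical
 * receive function that "knows" the next packet is 0x180 bytes:
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fake_recv(void *buf, size_t len, size_t *needed)
{
	*needed = 0x180;	/* a real ring buffer reports this */
	return len < *needed ? -ENOBUFS : 0;
}

int main(void)
{
	size_t buflen = 0x100, needed = 0;
	void *buf = malloc(buflen);

	while (buf && fake_recv(buf, buflen, &needed) == -ENOBUFS) {
		free(buf);	/* grow to the reported size and retry */
		buflen = needed;
		buf = malloc(buflen);
	}
	if (!buf)
		return 1;
	printf("received packet into %zu-byte buffer\n", buflen);
	free(buf);
	return 0;
}
#endif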
3085
3086/**
3087 * hv_pci_protocol_negotiation() - Set up protocol
3088 * @hdev: VMBus's tracking struct for this root PCI bus.
3089 * @version: Array of supported channel protocol versions in
3090 * the order of probing - highest go first.
3091 * @num_version: Number of elements in the version array.
3092 *
3093 * This driver is intended to support running on Windows 10
3094 * (server) and later versions. It will not run on earlier
3095 * versions, as they assume that many of the operations which
3096 * Linux needs accomplished with a spinlock held were done via
3097 * asynchronous messaging over VMBus. Windows 10 increases the
3098 * surface area of PCI emulation so that these actions can take
3099 * place by suspending a virtual processor for their duration.
3100 *
3101 * This function negotiates the channel protocol version,
3102 * failing if the host doesn't support the necessary protocol
3103 * level.
3104 */
3105static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3106 enum pci_protocol_version_t version[],
3107 int num_version)
3108{
3109 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3110 struct pci_version_request *version_req;
3111 struct hv_pci_compl comp_pkt;
3112 struct pci_packet *pkt;
3113 int ret;
3114 int i;
3115
3116 /*
3117 * Initiate the handshake with the host and negotiate
3118 * a version that the host can support. We start with the
3119 * highest version number and go down if the host cannot
3120 * support it.
3121 */
3122 pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3123 if (!pkt)
3124 return -ENOMEM;
3125
3126 init_completion(&comp_pkt.host_event);
3127 pkt->completion_func = hv_pci_generic_compl;
3128 pkt->compl_ctxt = &comp_pkt;
3129 version_req = (struct pci_version_request *)&pkt->message;
3130 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3131
3132 for (i = 0; i < num_version; i++) {
3133 version_req->protocol_version = version[i];
3134 ret = vmbus_sendpacket(hdev->channel, version_req,
3135 sizeof(struct pci_version_request),
3136 (unsigned long)pkt, VM_PKT_DATA_INBAND,
3137 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3138 if (!ret)
3139 ret = wait_for_response(hdev, &comp_pkt.host_event);
3140
3141 if (ret) {
3142 dev_err(&hdev->device,
3143				"PCI Pass-through VSP failed to request version: %d\n",
3144 ret);
3145 goto exit;
3146 }
3147
3148 if (comp_pkt.completion_status >= 0) {
3149 hbus->protocol_version = version[i];
3150 dev_info(&hdev->device,
3151 "PCI VMBus probing: Using version %#x\n",
3152 hbus->protocol_version);
3153 goto exit;
3154 }
3155
3156 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3157 dev_err(&hdev->device,
3158				"PCI Pass-through VSP failed version request: %#x\n",
3159 comp_pkt.completion_status);
3160 ret = -EPROTO;
3161 goto exit;
3162 }
3163
3164 reinit_completion(&comp_pkt.host_event);
3165 }
3166
3167 dev_err(&hdev->device,
3168		"PCI Pass-through VSP failed to find a supported version\n");
3169 ret = -EPROTO;
3170
3171exit:
3172 kfree(pkt);
3173 return ret;
3174}
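
/*
 * Illustration (not part of the driver): negotiation probes versions
 * highest-first and settles on the first one the host accepts. A standalone
 * model in which the host is assumed to support nothing newer than 1.2:
 */
#if 0
#include <stdio.h>

#define MK_VER(maj, min)	(((maj) << 16) | (min))

static int host_accepts(unsigned int v)	/* hypothetical host policy */
{
	return v <= MK_VER(1, 2);
}

int main(void)
{
	const unsigned int versions[] = {
		MK_VER(1, 4), MK_VER(1, 3), MK_VER(1, 2), MK_VER(1, 1),
	};

	for (int i = 0; i < 4; i++) {
		if (host_accepts(versions[i])) {
			printf("negotiated %u.%u\n",
			       versions[i] >> 16, versions[i] & 0xffff);
			return 0;	/* settles on 1.2 */
		}
	}
	printf("no supported version\n");
	return 1;
}
#endif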
3175
3176/**
3177 * hv_pci_free_bridge_windows() - Release memory regions for the
3178 * bus
3179 * @hbus: Root PCI bus, as understood by this driver
3180 */
3181static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3182{
3183 /*
3184 * Set the resources back to the way they looked when they
3185 * were allocated by setting IORESOURCE_BUSY again.
3186 */
3187
3188 if (hbus->low_mmio_space && hbus->low_mmio_res) {
3189 hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3190 vmbus_free_mmio(hbus->low_mmio_res->start,
3191 resource_size(hbus->low_mmio_res));
3192 }
3193
3194 if (hbus->high_mmio_space && hbus->high_mmio_res) {
3195 hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3196 vmbus_free_mmio(hbus->high_mmio_res->start,
3197 resource_size(hbus->high_mmio_res));
3198 }
3199}
3200
3201/**
3202 * hv_pci_allocate_bridge_windows() - Allocate memory regions
3203 * for the bus
3204 * @hbus: Root PCI bus, as understood by this driver
3205 *
3206 * This function calls vmbus_allocate_mmio(), which is itself a
3207 * bit of a compromise. Ideally, we might change the pnp layer
3208 * in the kernel such that it comprehends either PCI devices
3209 * which are "grandchildren of ACPI," with some intermediate bus
3210 * node (in this case, VMBus) or change it such that it
3211 * understands VMBus. The pnp layer, however, has been declared
3212 * deprecated, and not subject to change.
3213 *
3214 * The workaround, implemented here, is to ask VMBus to allocate
3215 * MMIO space for this bus. VMBus itself knows which ranges are
3216 * appropriate by looking at its own ACPI objects. Then, after
3217 * these ranges are claimed, they're modified to look like they
3218 * would have looked if the ACPI and pnp code had allocated
3219 * bridge windows. These descriptors have to exist in this form
3220 * in order to satisfy the code which will get invoked when the
3221 * endpoint PCI function driver calls request_mem_region() or
3222 * request_mem_region_exclusive().
3223 *
3224 * Return: 0 on success, -errno on failure
3225 */
3226static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3227{
3228 resource_size_t align;
3229 int ret;
3230
3231 if (hbus->low_mmio_space) {
3232 align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3233 ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3234 (u64)(u32)0xffffffff,
3235 hbus->low_mmio_space,
3236 align, false);
3237 if (ret) {
3238 dev_err(&hbus->hdev->device,
3239 "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3240 hbus->low_mmio_space);
3241 return ret;
3242 }
3243
3244 /* Modify this resource to become a bridge window. */
3245 hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3246 hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3247 pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3248 }
3249
3250 if (hbus->high_mmio_space) {
3251 align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3252 ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3253 0x100000000, -1,
3254 hbus->high_mmio_space, align,
3255 false);
3256 if (ret) {
3257 dev_err(&hbus->hdev->device,
3258 "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3259 hbus->high_mmio_space);
3260 goto release_low_mmio;
3261 }
3262
3263 /* Modify this resource to become a bridge window. */
3264 hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3265 hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3266 pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3267 }
3268
3269 return 0;
3270
3271release_low_mmio:
3272 if (hbus->low_mmio_res) {
3273 vmbus_free_mmio(hbus->low_mmio_res->start,
3274 resource_size(hbus->low_mmio_res));
3275 }
3276
3277 return ret;
3278}
3279
3280/**
3281 * hv_allocate_config_window() - Find MMIO space for PCI Config
3282 * @hbus: Root PCI bus, as understood by this driver
3283 *
3284 * This function claims memory-mapped I/O space for accessing
3285 * configuration space for the functions on this bus.
3286 *
3287 * Return: 0 on success, -errno on failure
3288 */
3289static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3290{
3291 int ret;
3292
3293 /*
3294 * Set up a region of MMIO space to use for accessing configuration
3295 * space.
3296 */
3297 ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3298 PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3299 if (ret)
3300 return ret;
3301
3302 /*
3303 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3304 * resource claims (those which cannot be overlapped) and the ranges
3305 * which are valid for the children of this bus, which are intended
3306 * to be overlapped by those children. Set the flag on this claim
3307 * meaning that this region can't be overlapped.
3308 */
3309
3310 hbus->mem_config->flags |= IORESOURCE_BUSY;
3311
3312 return 0;
3313}
3314
3315static void hv_free_config_window(struct hv_pcibus_device *hbus)
3316{
3317 vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3318}
3319
3320static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3321
3322/**
3323 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3324 * @hdev: VMBus's tracking struct for this root PCI bus
3325 *
3326 * Return: 0 on success, -errno on failure
3327 */
3328static int hv_pci_enter_d0(struct hv_device *hdev)
3329{
3330 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3331 struct pci_bus_d0_entry *d0_entry;
3332 struct hv_pci_compl comp_pkt;
3333 struct pci_packet *pkt;
3334 int ret;
3335
3336 /*
3337 * Tell the host that the bus is ready to use, and moved into the
3338 * powered-on state. This includes telling the host which region
3339 * of memory-mapped I/O space has been chosen for configuration space
3340 * access.
3341 */
3342 pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3343 if (!pkt)
3344 return -ENOMEM;
3345
3346 init_completion(&comp_pkt.host_event);
3347 pkt->completion_func = hv_pci_generic_compl;
3348 pkt->compl_ctxt = &comp_pkt;
3349 d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3350 d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3351 d0_entry->mmio_base = hbus->mem_config->start;
3352
3353 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3354 (unsigned long)pkt, VM_PKT_DATA_INBAND,
3355 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3356 if (!ret)
3357 ret = wait_for_response(hdev, &comp_pkt.host_event);
3358
3359 if (ret)
3360 goto exit;
3361
3362 if (comp_pkt.completion_status < 0) {
3363 dev_err(&hdev->device,
3364 "PCI Pass-through VSP failed D0 Entry with status %x\n",
3365 comp_pkt.completion_status);
3366 ret = -EPROTO;
3367 goto exit;
3368 }
3369
3370 ret = 0;
3371
3372exit:
3373 kfree(pkt);
3374 return ret;
3375}
3376
3377/**
3378 * hv_pci_query_relations() - Ask host to send list of child
3379 * devices
3380 * @hdev: VMBus's tracking struct for this root PCI bus
3381 *
3382 * Return: 0 on success, -errno on failure
3383 */
3384static int hv_pci_query_relations(struct hv_device *hdev)
3385{
3386 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3387 struct pci_message message;
3388 struct completion comp;
3389 int ret;
3390
3391 /* Ask the host to send along the list of child devices */
3392 init_completion(&comp);
3393 if (cmpxchg(&hbus->survey_event, NULL, &comp))
3394 return -ENOTEMPTY;
3395
3396 memset(&message, 0, sizeof(message));
3397 message.type = PCI_QUERY_BUS_RELATIONS;
3398
3399 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3400 0, VM_PKT_DATA_INBAND, 0);
3401 if (!ret)
3402 ret = wait_for_response(hdev, &comp);
3403
3404 return ret;
3405}
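
/*
 * Illustration (not part of the driver): the survey_event handoff pairs a
 * compare-and-swap here (at most one waiter registers) with an exchange in
 * survey_child_resources() (at most one completer claims the waiter). A
 * user-space sketch of the same single-shot handoff using C11 atomics:
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) survey_event;

int main(void)
{
	int completion = 0;
	int *expected = NULL;
	int *event;

	/* The waiter registers only if nobody else already has. */
	if (!atomic_compare_exchange_strong(&survey_event, &expected,
					    &completion))
		return 1;	/* like returning -ENOTEMPTY above */

	/* The completer claims the slot exactly once; later calls see NULL. */
	event = atomic_exchange(&survey_event, NULL);
	if (event)
		*event = 1;	/* like complete(event) */

	printf("survey %s\n", completion ? "completed" : "pending");
	return 0;
}
#endif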
3406
3407/**
3408 * hv_send_resources_allocated() - Report local resource choices
3409 * @hdev: VMBus's tracking struct for this root PCI bus
3410 *
3411 * The host OS is expecting to be sent a request as a message
3412 * which contains all the resources that the device will use.
3413 * The response contains those same resources, "translated",
3414 * which is to say, the values which should be used by the
3415 * hardware when it delivers an interrupt. (MMIO resources are
3416 * used in local terms.) This is nice for Windows, and lines up
3417 * with the FDO/PDO split, which doesn't exist in Linux. Linux
3418 * is deeply expecting to scan an emulated PCI configuration
3419 * space. So this message is sent here only to drive the state
3420 * machine on the host forward.
3421 *
3422 * Return: 0 on success, -errno on failure
3423 */
3424static int hv_send_resources_allocated(struct hv_device *hdev)
3425{
3426 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3427 struct pci_resources_assigned *res_assigned;
3428 struct pci_resources_assigned2 *res_assigned2;
3429 struct hv_pci_compl comp_pkt;
3430 struct hv_pci_dev *hpdev;
3431 struct pci_packet *pkt;
3432 size_t size_res;
3433 int wslot;
3434 int ret;
3435
3436 size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
3437 ? sizeof(*res_assigned) : sizeof(*res_assigned2);
3438
3439 pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
3440 if (!pkt)
3441 return -ENOMEM;
3442
3443 ret = 0;
3444
3445 for (wslot = 0; wslot < 256; wslot++) {
3446 hpdev = get_pcichild_wslot(hbus, wslot);
3447 if (!hpdev)
3448 continue;
3449
3450 memset(pkt, 0, sizeof(*pkt) + size_res);
3451 init_completion(&comp_pkt.host_event);
3452 pkt->completion_func = hv_pci_generic_compl;
3453 pkt->compl_ctxt = &comp_pkt;
3454
3455 if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
3456 res_assigned =
3457 (struct pci_resources_assigned *)&pkt->message;
3458 res_assigned->message_type.type =
3459 PCI_RESOURCES_ASSIGNED;
3460 res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
3461 } else {
3462 res_assigned2 =
3463 (struct pci_resources_assigned2 *)&pkt->message;
3464 res_assigned2->message_type.type =
3465 PCI_RESOURCES_ASSIGNED2;
3466 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
3467 }
3468 put_pcichild(hpdev);
3469
3470 ret = vmbus_sendpacket(hdev->channel, &pkt->message,
3471 size_res, (unsigned long)pkt,
3472 VM_PKT_DATA_INBAND,
3473 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3474 if (!ret)
3475 ret = wait_for_response(hdev, &comp_pkt.host_event);
3476 if (ret)
3477 break;
3478
3479 if (comp_pkt.completion_status < 0) {
3480 ret = -EPROTO;
3481 dev_err(&hdev->device,
3482				"resource allocation returned 0x%x\n",
3483 comp_pkt.completion_status);
3484 break;
3485 }
3486
3487 hbus->wslot_res_allocated = wslot;
3488 }
3489
3490 kfree(pkt);
3491 return ret;
3492}
3493
3494/**
3495 * hv_send_resources_released() - Report local resources
3496 * released
3497 * @hdev: VMBus's tracking struct for this root PCI bus
3498 *
3499 * Return: 0 on success, -errno on failure
3500 */
3501static int hv_send_resources_released(struct hv_device *hdev)
3502{
3503 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3504 struct pci_child_message pkt;
3505 struct hv_pci_dev *hpdev;
3506 int wslot;
3507 int ret;
3508
3509 for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
3510 hpdev = get_pcichild_wslot(hbus, wslot);
3511 if (!hpdev)
3512 continue;
3513
3514 memset(&pkt, 0, sizeof(pkt));
3515 pkt.message_type.type = PCI_RESOURCES_RELEASED;
3516 pkt.wslot.slot = hpdev->desc.win_slot.slot;
3517
3518 put_pcichild(hpdev);
3519
3520 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
3521 VM_PKT_DATA_INBAND, 0);
3522 if (ret)
3523 return ret;
3524
3525 hbus->wslot_res_allocated = wslot - 1;
3526 }
3527
3528 hbus->wslot_res_allocated = -1;
3529
3530 return 0;
3531}
3532
3533#define HVPCI_DOM_MAP_SIZE (64 * 1024)
3534static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
3535
3536/*
3537 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
3538 * as invalid for passthrough PCI devices of this driver.
3539 */
3540#define HVPCI_DOM_INVALID 0
3541
3542/**
3543 * hv_get_dom_num() - Get a valid PCI domain number
3544 * @dom: Requested domain number
3545 *
3546 * Check if the requested PCI domain number is in use, and return another
3547 * number if it is.
3548 *
3549 * Return: domain number on success, HVPCI_DOM_INVALID on failure
3550 */
3551static u16 hv_get_dom_num(u16 dom)
3552{
3553 unsigned int i;
3554
3555 if (test_and_set_bit(dom, hvpci_dom_map) == 0)
3556 return dom;
3557
3558 for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
3559 if (test_and_set_bit(i, hvpci_dom_map) == 0)
3560 return i;
3561 }
3562
3563 return HVPCI_DOM_INVALID;
3564}
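
/*
 * Illustration (not part of the driver): a user-space model of the domain
 * allocator above, using a small hypothetical 16-bit map instead of the
 * 64K-entry bitmap. The requested number is taken if free; otherwise the
 * first clear bit wins; bit 0 is pre-set so domain 0 stays invalid.
 */
#if 0
#include <stdio.h>

#define EX_DOM_MAX	16
static unsigned int dom_map = 1u << 0;	/* domain 0 reserved as invalid */

static int ex_get_dom(int dom)
{
	if (!(dom_map & (1u << dom))) {
		dom_map |= 1u << dom;
		return dom;
	}
	for (int i = 1; i < EX_DOM_MAX; i++) {	/* fall back to any free bit */
		if (!(dom_map & (1u << i))) {
			dom_map |= 1u << i;
			return i;
		}
	}
	return 0;	/* analogue of HVPCI_DOM_INVALID */
}

int main(void)
{
	printf("first request for 5  -> %d\n", ex_get_dom(5));	/* 5 */
	printf("second request for 5 -> %d\n", ex_get_dom(5));	/* 1: collision */
	return 0;
}
#endif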
3565
3566/**
3567 * hv_put_dom_num() - Mark the PCI domain number as free
3568 * @dom: Domain number to be freed
3569 */
3570static void hv_put_dom_num(u16 dom)
3571{
3572 clear_bit(dom, hvpci_dom_map);
3573}
3574
3575/**
3576 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
3577 * @hdev: VMBus's tracking struct for this root PCI bus
3578 * @dev_id: Identifies the device itself
3579 *
3580 * Return: 0 on success, -errno on failure
3581 */
3582static int hv_pci_probe(struct hv_device *hdev,
3583 const struct hv_vmbus_device_id *dev_id)
3584{
3585 struct pci_host_bridge *bridge;
3586 struct hv_pcibus_device *hbus;
3587 u16 dom_req, dom;
3588 char *name;
3589 bool enter_d0_retry = true;
3590 int ret;
3591
3592 bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
3593 if (!bridge)
3594 return -ENOMEM;
3595
3596 hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
3597 if (!hbus)
3598 return -ENOMEM;
3599
3600 hbus->bridge = bridge;
3601 hbus->state = hv_pcibus_init;
3602 hbus->wslot_res_allocated = -1;
3603
3604 /*
3605 * The PCI bus "domain" is what is called "segment" in ACPI and other
3606 * specs. Pull it from the instance ID, to get something usually
3607 * unique. In rare cases of collision, we will find another number
3608 * that is not in use.
3609 *
3610 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
3611 * together with this guest driver can guarantee that (1) The only
3612 * domain used by Gen1 VMs for something that looks like a physical
3613 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
3614 * (2) There will be no overlap between domains (after fixing possible
3615 * collisions) in the same VM.
3616 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
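	/*
	 * When the hypervisor sets this hint, config space is accessed via
	 * hypercalls rather than the mapped MMIO page.
	 */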
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

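	/*
	 * Set up the VMbus request-ID "requestor" for this channel: packets
	 * sent with vmbus_sendpacket_getid() get a transaction ID that can
	 * later be matched back to the request (see the teardown path in
	 * hv_pci_bus_exit()).
	 */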
	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

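	/*
	 * Name the IRQ domain's fwnode after the VMbus instance GUID so it
	 * is unique per root bus.
	 */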
	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

retry:
	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_enter_d0(hdev);
	/*
	 * In certain cases (e.g. kdump) the PCI device of interest was not
	 * cleanly shut down and its resources are still held on the host
	 * side, so the host could return an invalid device status.
	 * We need to explicitly request the host to release the resources
	 * and try to enter D0 again.
	 * Since the hv_pci_bus_exit() call releases the structures of all
	 * its child devices, we need to start the retry from the
	 * hv_pci_query_relations() call, requesting the host to send the
	 * synchronous child device relations message before this
	 * information is needed in the hv_send_resources_allocated()
	 * call later.
	 */
	if (ret == -EPROTO && enter_d0_retry) {
		enter_d0_retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * hv_pci_bus_exit() calls hv_send_resources_released()
		 * to free up the resources of its child devices.
		 * In the kdump kernel we need to set wslot_res_allocated
		 * to 255 so that it scans all child devices to release
		 * the resources allocated in the normal kernel before
		 * the panic happened.
		 */
		hbus->wslot_res_allocated = 255;
		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0)
			goto retry;

		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* Drop the two refs taken in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

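	/*
	 * Tell the host the bus is leaving D0. The completion packet lives
	 * on the stack, so the completion must arrive (or the request ID be
	 * revoked below) before this function returns.
	 */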
	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
				     sizeof(struct pci_message),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
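		/*
		 * Briefly disable the channel callback tasklet so that the
		 * state change below cannot race hv_pci_onchannelcallback().
		 */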
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), so it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend(). When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be closing at the same
	 * time: e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;
	int ret = 0;

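	/*
	 * Re-compose the MSI message for every descriptor with an associated
	 * Linux IRQ, which asks Hyper-V to re-create the corresponding
	 * interrupt-remapping state.
	 */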
	msi_lock_descs(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data)) {
			ret = -EINVAL;
			break;
		}

		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	msi_unlock_descs(&pdev->dev);

	return ret;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto out;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto out;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	return 0;
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name = "hv_pci",
	.id_table = hv_pci_id_table,
	.probe = hv_pci_probe,
	.remove = hv_pci_remove,
	.suspend = hv_pci_suspend,
	.resume = hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");