/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>
#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

/**
 * msi_msg - Representation of an MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16	msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:	TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};

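/*
 * Example (illustrative sketch, not taken from an in-tree driver): an
 * irq_chip's ->irq_compose_msi_msg() callback typically fills the
 * struct msi_msg declared above with the doorbell address and payload of
 * its interrupt controller. "my_irq_compose_msi_msg" and MY_DOORBELL_BASE
 * are made-up names used only for illustration.
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *data,
 *					   struct msi_msg *msg)
 *	{
 *		u64 doorbell = MY_DOORBELL_BASE;	// hypothetical doorbell address
 *
 *		msg->address_lo = lower_32_bits(doorbell);
 *		msg->address_hi = upper_32_bits(doorbell);
 *		msg->data = data->hwirq;		// payload identifies the hwirq
 *	}
 */
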
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @can_mask:	[PCI MSI/X] Masking supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True if the entry is a virtual MSI-X entry
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			union {
				u32 msi_mask;
				u32 msix_ctrl;
			};
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	can_mask	: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev) \
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev) \
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

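/*
 * Example (illustrative sketch, not taken from an in-tree driver): after MSI
 * vectors have been allocated for a PCI device (e.g. via
 * pci_alloc_irq_vectors()), the descriptors hanging off the device can be
 * walked with the iterator above. The message printed is made up.
 *
 *	struct msi_desc *desc;
 *
 *	for_each_pci_msi_entry(desc, pdev)
 *		dev_info(&pdev->dev, "entry %u -> irq %u (%u vectors)\n",
 *			 desc->msi_attrib.entry_nr, desc->irq,
 *			 desc->nvec_used);
 */
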
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

const struct attribute_group **msi_populate_sysfs(struct device *dev);
void msi_destroy_sysfs(struct device *dev,
		       const struct attribute_group **msi_irq_groups);

/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif

/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

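/*
 * Example (illustrative sketch, not taken from an in-tree driver): an
 * irq_chip for a PCI/MSI irq domain commonly wires its mask and unmask
 * operations straight to pci_msi_mask_irq()/pci_msi_unmask_irq() declared
 * above. The chip name is made up.
 *
 *	static struct irq_chip my_pci_msi_chip = {
 *		.name		= "MY-MSI",
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *		.irq_ack	= irq_chip_ack_parent,
 *	};
 */
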
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs and @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * was initially added as a wrapper around XEN's separate MSI universe,
 * which can't be wrapped into the regular irq domain concepts by mere
 * mortals. It allows msi_domain_alloc/free_irqs() to be used universally
 * without having to special case XEN all over the place.
 *
 * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL, even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set, to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

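/*
 * Example (illustrative sketch, not taken from an in-tree driver): a PCI/MSI
 * irq domain driver typically bundles an irq_chip and the flags declared
 * below into a struct msi_domain_info and hands it to
 * pci_msi_create_irq_domain(). "my_pci_msi_chip", the fwnode and the parent
 * domain are assumed to exist elsewhere in the driver.
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_pci_msi_chip,
 *	};
 *
 *	msi_domain = pci_msi_create_irq_domain(fwnode, &my_msi_domain_info,
 *					       parent_domain);
 */
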
/* Flags for msi_domain_info */
enum {
	/*
	 * Init non-implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non-implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */
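
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a platform
 * device driver can allocate device MSIs through the generic MSI layer
 * declared above. "my_write_msi_msg" and the vector count are made up; the
 * callback is invoked per descriptor so the driver can program the composed
 * message into its hardware.
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		// program msg->address_hi, msg->address_lo and msg->data into
 *		// the device doorbell for desc->platform.msi_index
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(&pdev->dev, 4, my_write_msi_msg);
 *	if (err)
 *		return err;
 *	...
 *	platform_msi_domain_free_irqs(&pdev->dev);
 */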