Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/intel-iommu.h at v4.19-rc7 (563 lines, 20 kB)
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */

#define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define	DMAR_GCMD_REG	0x18	/* Global command register */
#define	DMAR_GSTS_REG	0x1c	/* Global status register */
#define	DMAR_RTADDR_REG	0x20	/* Root entry table */
#define	DMAR_CCMD_REG	0x28	/* Context command reg */
#define	DMAR_FSTS_REG	0x34	/* Fault Status register */
#define	DMAR_FECTL_REG	0x38	/* Fault control register */
#define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
#define	DMAR_AFLOG_REG	0x58	/* Advanced Fault control */
#define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
#define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
#define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
#define	DMAR_PHMBASE_REG 0x70	/* pmrr high base addr */
#define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
#define	DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define	DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define	DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define	DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define	DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
#define	DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
#define	DMAR_PQH_REG	0xc0	/* Page request queue head register */
#define	DMAR_PQT_REG	0xc8	/* Page request queue tail register */
#define	DMAR_PQA_REG	0xd0	/* Page request queue address register */
#define	DMAR_PRS_REG	0xdc	/* Page request status register */
#define	DMAR_PECTL_REG	0xe0	/* Page request event control register */
#define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
#define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
#define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)
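/*
 * Illustrative sketch, not part of the upstream header: how a raw
 * DMAR_VER_REG value splits into its fields via the DMAR_VER_* helpers.
 * The value is assumed to have been fetched with
 * readl(iommu->reg + DMAR_VER_REG); the function name is hypothetical.
 */
static inline void example_decode_version(u32 ver, unsigned int *major,
					  unsigned int *minor)
{
	*major = DMAR_VER_MAJOR(ver);	/* bits 7:4, e.g. 0x10 -> 1 */
	*minor = DMAR_VER_MINOR(ver);	/* bits 3:0, e.g. 0x10 -> 0 */
}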
/*
 * Decoding Capability Register
 */
#define cap_5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
 * Extended Capability Register
 */

#define ecap_dit(e)		((e >> 41) & 0x1)
#define ecap_pasid(e)		((e >> 40) & 0x1)
#define ecap_pss(e)		((e >> 35) & 0x1f)
#define ecap_eafs(e)		((e >> 34) & 0x1)
#define ecap_nwfs(e)		((e >> 33) & 0x1)
#define ecap_srs(e)		((e >> 31) & 0x1)
#define ecap_ers(e)		((e >> 30) & 0x1)
#define ecap_prs(e)		((e >> 29) & 0x1)
#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
#define ecap_dis(e)		((e >> 27) & 0x1)
#define ecap_nest(e)		((e >> 26) & 0x1)
#define ecap_mts(e)		((e >> 25) & 0x1)
#define ecap_ecs(e)		((e >> 24) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	((e >> 6) & 0x1)
#define ecap_eim_support(e)	((e >> 4) & 0x1)
#define ecap_ir_support(e)	((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
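/*
 * Illustrative sketch, not part of the upstream header: pulling a field out
 * of a raw capability value. The value is assumed to come from
 * dmar_readq(iommu->reg + DMAR_CAP_REG); the function name is hypothetical.
 */
static inline bool example_has_1gb_pages(u64 cap)
{
	/* in the superpage field, bit 0 = 2MiB pages, bit 1 = 1GiB pages */
	return (cap_super_page_val(cap) & 2) != 0;
}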
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
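/*
 * Illustrative usage sketch, not part of the upstream header: the driver
 * spins with IOMMU_WAIT_OP() until the hardware acknowledges a global
 * command, e.g. after setting the translation-enable bit:
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
 *		      readl, (sts & DMA_GSTS_TES), sts);
 *
 * The macro panics if DMAR_OPERATION_TIMEOUT cycles elapse before the
 * condition becomes true.
 */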
#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am))

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am))
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g)	((u64)g)
#define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)
#define QI_PGRP_PRIV(priv)	(((u64)(priv)) << 32)
#define QI_PGRP_RESP_CODE(res)	((u64)(res))
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)
#define QI_PGRP_DID(did)	(((u64)(did)) << 16)
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)

#define QI_PSTRM_ADDR(addr)	(((u64)(addr)) & VTD_PAGE_MASK)
#define QI_PSTRM_DEVFN(devfn)	(((u64)(devfn)) << 4)
#define QI_PSTRM_RESP_CODE(res)	((u64)(res))
#define QI_PSTRM_IDX(idx)	(((u64)(idx)) << 55)
#define QI_PSTRM_PRIV(priv)	(((u64)(priv)) << 32)
#define QI_PSTRM_BUS(bus)	(((u64)(bus)) << 24)
#define QI_PSTRM_PASID(pasid)	(((u64)(pasid)) << 4)

#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_ALL_ALL		0
#define QI_GRAN_NONG_ALL	1
#define QI_GRAN_NONG_PASID	2
#define QI_GRAN_PSI_PASID	3

struct qi_desc {
	u64 low, high;
};
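/*
 * Illustrative sketch, not part of the upstream header: composing the two
 * 64-bit halves of a queued-invalidation descriptor for a domain-selective
 * IOTLB flush. A descriptor built like this would then be handed to
 * qi_submit_sync() (declared later in this header); the function name is
 * hypothetical.
 */
static inline struct qi_desc example_dsi_iotlb_desc(u16 did)
{
	struct qi_desc desc;

	/* low half: descriptor type, granularity and domain id */
	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
		   QI_IOTLB_TYPE;
	/* high half: address/mask fields, unused for a whole-domain flush */
	desc.high = 0;
	return desc;
}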
struct q_inval {
	raw_spinlock_t  q_lock;
	struct qi_desc  *desc;          /* invalidation queue */
	int             *desc_status;   /* desc status */
	int             free_head;      /* first free entry */
	int             free_tail;      /* last free entry */
	int             free_cnt;
};

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
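/*
 * Illustrative sketch, not part of the upstream header: since the generic
 * iommu_domain is embedded in dmar_domain, the driver recovers the
 * containing structure with container_of(), as the to_dmar_domain() helper
 * in drivers/iommu/intel-iommu.c does. The function name here is
 * hypothetical; container_of() is assumed visible via the include chain.
 */
static inline struct dmar_domain *example_to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}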
struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int	irq, pr_irq;
	u16		segment;     /* PCI segment# */
	unsigned char	name[13];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long	*domain_ids; /* bitmap of domains */
	struct dmar_domain ***domains; /* ptr to domains */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	/* These are large and need to be contiguous, so we allocate just
	 * one for now. We'll maybe want to rethink that if we truly give
	 * devices away to userspace processes (e.g. for DPDK) and don't
	 * want to trust that userspace will use *only* the PASID it was
	 * told to. But while it's all driver-arbitrated, we're fine. */
	struct pasid_state_entry *pasid_state_table;
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
	u32 pasid_max;
#endif
	struct q_inval  *qi;            /* Queued invalidation info */
	u32 *iommu_state; /* Store iommu states between suspend and resume.*/

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
	struct irq_domain *ir_msi_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	struct list_head table;	/* link to pasid table */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}
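/*
 * Illustrative sketch, not part of the upstream header: after the CPU
 * writes a page-table entry, __iommu_flush_cache() pushes it out to memory
 * when the IOMMU's page walker is not cache-coherent (ECAP bit 0 clear);
 * on coherent hardware it is a no-op. The function name and pte pointer
 * are hypothetical.
 */
static inline void example_publish_pte(struct intel_iommu *iommu, u64 *pte,
				       u64 val)
{
	*pte = val;
	__iommu_flush_cache(iommu, pte, sizeof(*pte));
}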
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			  unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);

#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_svm_init(struct intel_iommu *iommu);
int intel_svm_exit(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);

struct svm_dev_ops;

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct svm_dev_ops *ops;
	int users;
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	struct intel_iommu *iommu;
	int flags;
	int pasid;
	struct list_head devs;
	struct list_head list;
};

extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif

extern const struct attribute_group *intel_iommu_groups[];

#endif
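/*
 * Illustrative usage sketch, not part of the upstream header: a caller
 * holding an intel_iommu and a domain id can drop all of that domain's
 * IOTLB entries through the queued-invalidation interface:
 *
 *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 *
 * which internally builds a qi_desc much like the example above and
 * submits it with qi_submit_sync().
 */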