Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.5 (3681 lines, 85 kB)
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is a power-of-two number of 4KiB pages and
 * that the mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are a power-of-two multiple of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define AMD_IOMMU_PGSIZES	(~0xFFFUL)

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);
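/*
 * (Editor's note, illustrative only.) With the AMD_IOMMU_PGSIZES mask
 * above being ~0xFFFUL, bit n is set for every page size 2^n with
 * n >= 12, i.e. every power-of-two size from 4KiB up is advertised.
 * A quick userspace check of that property, kept out of the build:
 */
#if 0	/* illustrative only - build as a plain userspace program */
#include <assert.h>

int main(void)
{
	unsigned long pgsizes = ~0xFFFUL;	/* AMD_IOMMU_PGSIZES */

	assert(pgsizes & (1UL << 12));		/* 4KiB  advertised */
	assert(pgsizes & (1UL << 21));		/* 2MiB  advertised */
	assert(pgsizes & (1UL << 30));		/* 1GiB  advertised */
	assert(!(pgsizes & (1UL << 11)));	/* 2KiB  not advertised */

	return 0;
}
#endif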
/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

static struct dma_map_ops amd_iommu_dma_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;
	atomic_set(&dev_data->bind, 0);

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static void free_dev_data(struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_del(&dev_data->dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	kfree(dev_data);
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}

static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return calc_devid(pdev->bus->number, pdev->devfn);
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}

static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_EXT_CAP_ID_PRI,
		PCI_EXT_CAP_ID_PASID,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = get_dev_data(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}
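/*
 * (Editor's note, illustrative only.) The u16 device id used throughout
 * this driver is the PCI requestor id: judging by its users here,
 * calc_devid() in amd_iommu_types.h packs it as (bus << 8) | devfn,
 * with devfn being (slot << 3) | function. For example, PCI device
 * 00:14.0:
 *
 *	devfn = (0x14 << 3) | 0    = 0xa0
 *	devid = (0x00 << 8) | 0xa0 = 0x00a0
 *
 * The event-log code below reverses this with PCI_BUS(), PCI_SLOT()
 * and PCI_FUNC().
 */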
/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;
	u16 alias = amd_iommu_alias_table[devid];

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid ||
		    entry->target_dev == alias) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	u16 devid;

	if (!dev || !dev->dma_mask)
		return false;

	/* No device or no PCI device */
	if (dev->bus != &pci_bus_type)
		return false;

	devid = get_device_id(dev);

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static int iommu_init_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iommu_dev_data *dev_data;
	u16 alias;

	if (dev->archdata.iommu)
		return 0;

	dev_data = find_dev_data(get_device_id(dev));
	if (!dev_data)
		return -ENOMEM;

	alias = amd_iommu_alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		struct iommu_dev_data *alias_data;

		alias_data = find_dev_data(alias);
		if (alias_data == NULL) {
			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
			       dev_name(dev));
			free_dev_data(dev_data);
			return -ENOTSUPP;
		}
		dev_data->alias_data = alias_data;
	}

	if (pci_iommuv2_capable(pdev)) {
		struct amd_iommu *iommu;

		iommu              = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	return 0;
}

static void iommu_ignore_device(struct device *dev)
{
	u16 devid, alias;

	devid = get_device_id(dev);
	alias = amd_iommu_alias_table[devid];

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	/*
	 * Nothing to do here - we keep dev_data around for unplugged devices
	 * and reuse it when the device is re-plugged - not doing so would
	 * introduce a ton of races.
	 */
}
void __init amd_iommu_uninit_devices(void)
{
	struct iommu_dev_data *dev_data, *n;
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		iommu_uninit_device(&pdev->dev);
	}

	/* Free all of our dev_data structures */
	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
		free_dev_data(dev_data);
}

int __init amd_iommu_init_devices(void)
{
	struct pci_dev *pdev = NULL;
	int ret = 0;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		ret = iommu_init_device(&pdev->dev);
		if (ret == -ENOTSUPP)
			iommu_ignore_device(&pdev->dev);
		else if (ret)
			goto out_free;
	}

	return 0;

out_free:

	amd_iommu_uninit_devices();

	return ret;
}

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
DECLARE_STATS_COUNTER(complete_ppr);
DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);

static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
					&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
	amd_iommu_stats_add(&complete_ppr);
	amd_iommu_stats_add(&invalidate_iotlb);
	amd_iommu_stats_add(&invalidate_iotlb_all);
	amd_iommu_stats_add(&pri_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	int type, devid, domid, flags;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("AMD-Vi: No event written to event log\n");
			return;
		}
		udelay(1);
		goto retry;
	}

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}

	memset(__evt, 0, 4 * sizeof(u32));
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
	struct amd_iommu_fault fault;

	INC_STATS_COUNTER(pri_requests);

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address   = raw[1];
	fault.pasid     = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag       = PPR_TAG(raw[0]);
	fault.flags     = PPR_FLAGS(raw[0]);

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}
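/*
 * (Editor's sketch, not part of this file.) A consumer of the
 * ppr_notifier chain fed above would register a notifier_block and
 * receive the struct amd_iommu_fault; the registration helper
 * amd_iommu_register_ppr_notifier() is assumed here from the
 * amd_iommu_v2 driver's use of this chain.
 */
#if 0	/* illustrative only */
static int my_ppr_cb(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *fault = data;

	pr_info("PPR fault: devid=%04x pasid=%d addr=0x%llx flags=0x%x\n",
		(int)fault->device_id, (int)fault->pasid,
		(unsigned long long)fault->address,
		(unsigned int)fault->flags);

	return NOTIFY_DONE;
}

static struct notifier_block my_ppr_nb = {
	.notifier_call = my_ppr_cb,
};

/* registration: amd_iommu_register_ppr_notifier(&my_ppr_nb); */
#endif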
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	unsigned long flags;
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	/* enable ppr interrupts again */
	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect the hardware bug we need to clear the entry
		 * back to zero.
		 */
		raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/*
		 * Release iommu->lock because ppr-handling might need to
		 * re-acquire it
		 */
		spin_unlock_irqrestore(&iommu->lock, flags);

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, entry);

		spin_lock_irqsave(&iommu->lock, flags);

		/* Refresh ring-buffer information */
		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_poll_events(iommu);
		iommu_poll_ppr_log(iommu);
	}

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd,
			       u32 tail)
{
	u8 *target;

	target = iommu->cmd_buf + tail;
	tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(__pa(address));
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}
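/*
 * (Editor's note, illustrative only.) A COMPL_WAIT command with the
 * store bit set makes the IOMMU write the 64-bit value in data[2..3]
 * (here: 1) to the physical address encoded in data[0..1] once every
 * command queued before it has completed; wait_on_sem() above simply
 * polls that memory cell. Assuming CMD_COMPL_WAIT_STORE_MASK is bit 0
 * and the CMD_COMPL_WAIT type code is 0x01, build_completion_wait()
 * for a semaphore at physical address 0x1000 would produce:
 *
 *	data[0] = 0x00001001	low address bits | store bit
 *	data[1] = 0x10000000	high address bits | (0x01 << 28)
 *	data[2] = 0x00000001	semaphore value (low)
 *	data[3] = 0x00000000	semaphore value (high)
 */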
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid & PASID_MASK;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= (pasid & 0xff) << 16;
	cmd->data[0] |= (qdep  & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1]  = pasid & PASID_MASK;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}
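/*
 * (Editor's note, illustrative only.) A "flush the whole domain"
 * invalidation is encoded by the builders above via the magic address
 * CMD_INV_IOMMU_ALL_PAGES_ADDRESS with the size bit set. For example,
 * for domain id 42:
 *
 *	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 *			      42, 1);
 *
 * yields pages > 1, so address is replaced by the magic value and both
 * the size (S) and PDE bits end up set in data[2].
 */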
/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	u32 left, tail, head, next_tail;
	unsigned long flags;

	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
	spin_lock_irqsave(&iommu->lock, flags);

	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	left      = (head - next_tail) % iommu->cmd_buf_size;

	if (left <= 2) {
		struct iommu_cmd sync_cmd;
		volatile u64 sem = 0;
		int ret;

		build_completion_wait(&sync_cmd, (u64)&sem);
		copy_cmd_to_buffer(iommu, &sync_cmd, tail);

		spin_unlock_irqrestore(&iommu->lock, flags);

		if ((ret = wait_on_sem(&sem)) != 0)
			return ret;

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd, tail);

	/* We need to sync now to make sure all commands are processed */
	iommu->need_sync = sync;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	volatile u64 sem = 0;
	int ret;

	if (!iommu->need_sync)
		return 0;

	build_completion_wait(&cmd, (u64)&sem);

	ret = iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		return ret;

	return wait_on_sem(&sem);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}
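/*
 * (Editor's note, illustrative only.) The free-space computation in
 * iommu_queue_command_sync() above is plain modular ring arithmetic.
 * With a hypothetical 4KiB command buffer (256 slots of 16 bytes):
 *
 *	head = 0x0100, tail = 0x0ff0
 *	next_tail = (0x0ff0 + 16)     % 0x1000 = 0x0000
 *	left      = (0x0100 - 0x0000) % 0x1000 = 0x0100
 *
 * When left drops to <= 2 the ring is treated as full: the driver
 * parks a completion-wait command at the tail, busy-waits on its
 * semaphore until the IOMMU has drained the buffer, and retries.
 */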
/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;

	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep  = dev_data->ats.qdep;
	iommu = amd_iommu_rlookup_table[dev_data->devid];

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}
/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}


/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level   = domain->mode - 1;
	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
{
	int level;
	u64 *pte;

	if (address > PM_LEVEL_SIZE(domain->mode))
		return NULL;

	level = domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 0x07) {
			unsigned long pte_mask, __pte;

			/*
			 * If we have a series of large PTEs, make
			 * sure to return a pointer to the first one.
			 */
			pte_mask = PTE_PAGE_SIZE(*pte);
			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
			__pte    = ((unsigned long)pte) & pte_mask;

			return (u64 *)__pte;
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
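/*
 * (Editor's note, illustrative only.) Each page-table level decodes
 * 9 bits of the DMA address, and a 4KiB leaf page covers the low 12.
 * For a 3-level table (mode == PAGE_MODE_3_LEVEL) a 39-bit address is
 * therefore split as:
 *
 *	level 2: bits 38..30	index into pt_root
 *	level 1: bits 29..21
 *	level 0: bits 20..12	PTE for the 4KiB page
 *	offset : bits 11..0
 *
 * which is what PM_LEVEL_INDEX(level, address) computes in the walkers
 * above, and why increase_address_space() grows the address space by
 * 9 bits per added level.
 */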
/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  unsigned long page_size)
{
	u64 __pte, *pte;
	int i, count;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);
	count     = PAGE_SIZE_PTE_COUNT(page_size);
	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
	if (!pte)
		return -ENOMEM;

	for (i = 0; i < count; ++i)
		if (IOMMU_PTE_PRESENT(pte[i]))
			return -EBUSY;

	if (page_size > PAGE_SIZE) {
		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
	} else
		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;

	update_domain(dom);

	return 0;
}

static unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size)
{
	unsigned long long unmap_size, unmapped;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;

	while (unmapped < page_size) {

		pte = fetch_pte(dom, bus_addr);

		if (!pte) {
			/*
			 * No PTE for this address
			 * move forward in 4kb steps
			 */
			unmap_size = PAGE_SIZE;
		} else if (PM_PTE_LEVEL(*pte) == 0) {
			/* 4kb PTE found for this address */
			unmap_size = PAGE_SIZE;
			*pte       = 0ULL;
		} else {
			int count, i;

			/* Large PTE found which maps this address */
			unmap_size = PTE_PAGE_SIZE(*pte);
			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(!is_power_of_2(unmapped));

	return unmapped;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
				     PAGE_SIZE);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT,
				  dma_dom->aperture[0]->bitmap);
	}

	return 0;
}
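/*
 * (Editor's sketch, illustrative only.) dma_ops_unity_map() above maps
 * 4KiB at a time; a caller could equally cover a naturally aligned
 * region with one large PTE in a single call. Hypothetical values:
 */
#if 0	/* illustrative only - dma_dom, addr and ret are assumed locals */
	/* identity-map one 2MiB page, read/write */
	ret = iommu_map_page(&dma_dom->domain, addr, addr,
			     IOMMU_PROT_IR | IOMMU_PROT_IW,
			     2UL * 1024 * 1024);
#endif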
/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		int index = i / APERTURE_RANGE_PAGES;
		int page  = i % APERTURE_RANGE_PAGES;
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}
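/*
 * (Editor's note, illustrative only.) The aperture is a flat
 * page-number space chopped into fixed-size ranges, each with its own
 * bitmap. Assuming APERTURE_RANGE_SHIFT is 27 (128MiB ranges, so
 * APERTURE_RANGE_PAGES is 32768 4KiB pages), reserving page number
 * 40000 above lands in:
 *
 *	index = 40000 / 32768 = 1	(second aperture range)
 *	page  = 40000 % 32768 = 7232	(bit within that range's bitmap)
 */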
/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct dma_ops_domain *dma_dom,
			   bool populate, gfp_t gfp)
{
	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
	struct amd_iommu *iommu;
	unsigned long i, old_size;

#ifdef CONFIG_IOMMU_STRESS
	populate = false;
#endif

	if (index >= APERTURE_MAX_RANGES)
		return -ENOMEM;

	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
	if (!dma_dom->aperture[index])
		return -ENOMEM;

	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
	if (!dma_dom->aperture[index]->bitmap)
		goto out_free;

	dma_dom->aperture[index]->offset = dma_dom->aperture_size;

	if (populate) {
		unsigned long address = dma_dom->aperture_size;
		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
		u64 *pte, *pte_page;

		for (i = 0; i < num_ptes; ++i) {
			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
					&pte_page, gfp);
			if (!pte)
				goto out_free;

			dma_dom->aperture[index]->pte_pages[i] = pte_page;

			address += APERTURE_RANGE_SIZE / 64;
		}
	}

	old_size                = dma_dom->aperture_size;
	dma_dom->aperture_size += APERTURE_RANGE_SIZE;

	/* Reserve address range used for MSI messages */
	if (old_size < MSI_ADDR_BASE_LO &&
	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
		unsigned long spage;
		int pages;

		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;

		dma_ops_reserve_addresses(dma_dom, spage, pages);
	}

	/* Initialize the exclusion range if necessary */
	for_each_iommu(iommu) {
		if (iommu->exclusion_start &&
		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
		    && iommu->exclusion_start < dma_dom->aperture_size) {
			unsigned long startpage;
			int pages = iommu_num_pages(iommu->exclusion_start,
						    iommu->exclusion_length,
						    PAGE_SIZE);
			startpage = iommu->exclusion_start >> PAGE_SHIFT;
			dma_ops_reserve_addresses(dma_dom, startpage, pages);
		}
	}

	/*
	 * Check for areas already mapped as present in the new aperture
	 * range and mark those pages as reserved in the allocator. Such
	 * mappings may already exist as a result of requested unity
	 * mappings for devices.
	 */
	for (i = dma_dom->aperture[index]->offset;
	     i < dma_dom->aperture_size;
	     i += PAGE_SIZE) {
		u64 *pte = fetch_pte(&dma_dom->domain, i);
		if (!pte || !IOMMU_PTE_PRESENT(*pte))
			continue;

		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
	}

	update_domain(&dma_dom->domain);

	return 0;

out_free:
	update_domain(&dma_dom->domain);

	free_page((unsigned long)dma_dom->aperture[index]->bitmap);

	kfree(dma_dom->aperture[index]);
	dma_dom->aperture[index] = NULL;

	return -ENOMEM;
}

static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	next_bit >>= PAGE_SHIFT;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;

	for (;i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);
		if (address != -1) {
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		next_bit = 0;
	}

	return address;
}

static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
	dom->next_address = 0;
	dom->need_flush   = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
				     dma_mask, dom->next_address);

	if (address == -1) {
		dom->next_address = 0;
		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
					     dma_mask, 0);
		dom->need_flush = true;
	}

	if (unlikely(address == -1))
		address = DMA_ERROR_CODE;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	unsigned i = address >> APERTURE_RANGE_SHIFT;
	struct aperture_range *range = dom->aperture[i];

	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);

#ifdef CONFIG_IOMMU_STRESS
	if (i < 4)
		return;
#endif

	if (address >= dom->next_address)
		dom->need_flush = true;

	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

	bitmap_clear(range->bitmap, address, pages);

}
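/*
 * (Editor's note, illustrative only.) dma_ops_alloc_addresses() is a
 * next-fit allocator: it first searches from dom->next_address to the
 * end of the aperture, and only on failure wraps around to 0 and
 * retries. The wrap sets dom->need_flush, because addresses freed
 * earlier may now be handed out again, so stale IOTLB entries for them
 * must be flushed before a device can safely use the new mapping.
 */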
/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

/*
 * This function adds a protection domain to the global protection domain list
 */
static void add_domain_to_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_add(&domain->list, &amd_iommu_pd_list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function removes a protection domain from the global
 * protection domain list
 */
static void del_domain_from_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_del(&domain->list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void domain_id_free(int id)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

static void free_pagetable(struct protection_domain *domain)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = domain->pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);

	domain->pt_root = NULL;
}

static void free_gcr3_tbl_level1(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = __va(tbl[i] & PAGE_MASK);

		free_page((unsigned long)ptr);
	}
}

static void free_gcr3_tbl_level2(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = __va(tbl[i] & PAGE_MASK);

		free_gcr3_tbl_level1(ptr);
	}
}

static void free_gcr3_table(struct protection_domain *domain)
{
	if (domain->glx == 2)
		free_gcr3_tbl_level2(domain->gcr3_tbl);
	else if (domain->glx == 1)
		free_gcr3_tbl_level1(domain->gcr3_tbl);
	else if (domain->glx != 0)
		BUG();

	free_page((unsigned long)domain->gcr3_tbl);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	int i;

	if (!dom)
		return;

	del_domain_from_list(&dom->domain);

	free_pagetable(&dom->domain);

	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
		if (!dom->aperture[i])
			continue;
		free_page((unsigned long)dom->aperture[i]->bitmap);
		kfree(dom->aperture[i]);
	}

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	INIT_LIST_HEAD(&dma_dom->domain.dev_list);
	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	add_domain_to_list(&dma_dom->domain);

	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
		goto free_dma_dom;

	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->aperture[0]->bitmap[0] = 1;
	dma_dom->next_address = 0;


	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
	return domain->flags & PD_DMA_OPS_MASK;
}

static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
	u64 pte_root = 0;
	u64 flags = 0;

	if (domain->mode != PAGE_MODE_NONE)
		pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	flags = amd_iommu_dev_table[devid].data[1];

	if (ats)
		flags |= DTE_FLAG_IOTLB;

	if (domain->flags & PD_IOMMUV2_MASK) {
		u64 gcr3 = __pa(domain->gcr3_tbl);
		u64 glx  = domain->glx;
		u64 tmp;

		pte_root |= DTE_FLAG_GV;
		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;

		/* First mask out possible old values for GCR3 table */
		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
		flags    &= ~tmp;

		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
		flags    &= ~tmp;

		/* Encode GCR3 table into DTE */
		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
		pte_root |= tmp;

		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
		flags    |= tmp;

		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
		flags    |= tmp;
	}

	flags &= ~(0xffffUL);
	flags |= domain->id;

	amd_iommu_dev_table[devid].data[1]  = flags;
	amd_iommu_dev_table[devid].data[0]  = pte_root;
}
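/*
 * (Editor's note, illustrative only.) set_dte_entry() above packs one
 * device table entry: data[0] carries the page-table root pointer, the
 * paging mode and the V/TV/IR/IW permission bits, while data[1] carries
 * the 16-bit domain id in its low bits plus the IOTLB-enable flag for
 * ATS-capable devices. For IOMMUv2 domains the GCR3 table pointer is
 * scattered across both words via the DTE_GCR3_VAL_A/B/C fragments.
 */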
static void clear_dte_entry(u16 devid)
{
	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] = 0;

	amd_iommu_apply_erratum_63(devid);
}

static void do_attach(struct iommu_dev_data *dev_data,
		      struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	bool ats;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	ats   = dev_data->ats.enabled;

	/* Update data structures */
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);
	set_dte_entry(dev_data->devid, domain, ats);

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt                 += 1;

	/* Flush the DTE entry */
	device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	/* decrease reference counters */
	dev_data->domain->dev_iommu[iommu->index] -= 1;
	dev_data->domain->dev_cnt                 -= 1;

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(dev_data->devid);

	/* Flush the DTE entry */
	device_flush_dte(dev_data);
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes the assignment visible
 * to the hardware
 */
static int __attach_device(struct iommu_dev_data *dev_data,
			   struct protection_domain *domain)
{
	int ret;

	/* lock domain */
	spin_lock(&domain->lock);

	if (dev_data->alias_data != NULL) {
		struct iommu_dev_data *alias_data = dev_data->alias_data;

		/* Some sanity checks */
		ret = -EBUSY;
		if (alias_data->domain != NULL &&
		    alias_data->domain != domain)
			goto out_unlock;

		if (dev_data->domain != NULL &&
		    dev_data->domain != domain)
			goto out_unlock;

		/* Do real assignment */
		if (alias_data->domain == NULL)
			do_attach(alias_data, domain);

		atomic_inc(&alias_data->bind);
	}

	if (dev_data->domain == NULL)
		do_attach(dev_data, domain);

	atomic_inc(&dev_data->bind);

	ret = 0;

out_unlock:

	/* ready */
	spin_unlock(&domain->lock);

	return ret;
}


static void pdev_iommuv2_disable(struct pci_dev *pdev)
{
	pci_disable_ats(pdev);
	pci_disable_pri(pdev);
	pci_disable_pasid(pdev);
}

/* FIXME: Change generic reset-function to do the same */
static int pri_reset_while_enabled(struct pci_dev *pdev)
{
	u16 control;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
	control |= PCI_PRI_CTRL_RESET;
	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);

	return 0;
}
static int pdev_iommuv2_enable(struct pci_dev *pdev)
{
	bool reset_enable;
	int reqs, ret;

	/* FIXME: Hardcode number of outstanding requests for now */
	reqs = 32;
	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
		reqs = 1;
	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);

	/* Only allow access to user-accessible pages */
	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		goto out_err;

	/* First reset the PRI state of the device */
	ret = pci_reset_pri(pdev);
	if (ret)
		goto out_err;

	/* Enable PRI */
	ret = pci_enable_pri(pdev, reqs);
	if (ret)
		goto out_err;

	if (reset_enable) {
		ret = pri_reset_while_enabled(pdev);
		if (ret)
			goto out_err;
	}

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		goto out_err;

	return 0;

out_err:
	pci_disable_pri(pdev);
	pci_disable_pasid(pdev);

	return ret;
}

/* FIXME: Move this to PCI code */
#define PCI_PRI_TLP_OFF		(1 << 15)

bool pci_pri_tlp_required(struct pci_dev *pdev)
{
	u16 status;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (!pos)
		return false;

	pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);

	return (status & PCI_PRI_TLP_OFF) ? true : false;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes the assignment visible
 * to the hardware
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iommu_dev_data *dev_data;
	unsigned long flags;
	int ret;

	dev_data = get_dev_data(dev);

	if (domain->flags & PD_IOMMUV2_MASK) {
		if (!dev_data->iommu_v2 || !dev_data->passthrough)
			return -EINVAL;

		if (pdev_iommuv2_enable(pdev) != 0)
			return -EINVAL;

		dev_data->ats.enabled = true;
		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
	} else if (amd_iommu_iotlb_sup &&
		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
		dev_data->ats.enabled = true;
		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
	}

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	ret = __attach_device(dev_data, domain);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	domain_flush_tlb_pde(domain);

	return ret;
}
2164 */ 2165 if (dev_data->passthrough && 2166 (dev_data->domain == NULL && domain != pt_domain)) 2167 __attach_device(dev_data, pt_domain); 2168} 2169 2170/* 2171 * Removes a device from a protection domain (with devtable_lock held) 2172 */ 2173static void detach_device(struct device *dev) 2174{ 2175 struct protection_domain *domain; 2176 struct iommu_dev_data *dev_data; 2177 unsigned long flags; 2178 2179 dev_data = get_dev_data(dev); 2180 domain = dev_data->domain; 2181 2182 /* lock device table */ 2183 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 2184 __detach_device(dev_data); 2185 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2186 2187 if (domain->flags & PD_IOMMUV2_MASK) 2188 pdev_iommuv2_disable(to_pci_dev(dev)); 2189 else if (dev_data->ats.enabled) 2190 pci_disable_ats(to_pci_dev(dev)); 2191 2192 dev_data->ats.enabled = false; 2193} 2194 2195/* 2196 * Find out the protection domain structure for a given PCI device. This 2197 * will give us the pointer to the page table root for example. 2198 */ 2199static struct protection_domain *domain_for_device(struct device *dev) 2200{ 2201 struct iommu_dev_data *dev_data; 2202 struct protection_domain *dom = NULL; 2203 unsigned long flags; 2204 2205 dev_data = get_dev_data(dev); 2206 2207 if (dev_data->domain) 2208 return dev_data->domain; 2209 2210 if (dev_data->alias_data != NULL) { 2211 struct iommu_dev_data *alias_data = dev_data->alias_data; 2212 2213 read_lock_irqsave(&amd_iommu_devtable_lock, flags); 2214 if (alias_data->domain != NULL) { 2215 __attach_device(dev_data, alias_data->domain); 2216 dom = alias_data->domain; 2217 } 2218 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2219 } 2220 2221 return dom; 2222} 2223 2224static int device_change_notifier(struct notifier_block *nb, 2225 unsigned long action, void *data) 2226{ 2227 struct dma_ops_domain *dma_domain; 2228 struct protection_domain *domain; 2229 struct iommu_dev_data *dev_data; 2230 struct device *dev = data; 2231 struct amd_iommu *iommu; 2232 unsigned long flags; 2233 u16 devid; 2234 2235 if (!check_device(dev)) 2236 return 0; 2237 2238 devid = get_device_id(dev); 2239 iommu = amd_iommu_rlookup_table[devid]; 2240 dev_data = get_dev_data(dev); 2241 2242 switch (action) { 2243 case BUS_NOTIFY_UNBOUND_DRIVER: 2244 2245 domain = domain_for_device(dev); 2246 2247 if (!domain) 2248 goto out; 2249 if (dev_data->passthrough) 2250 break; 2251 detach_device(dev); 2252 break; 2253 case BUS_NOTIFY_ADD_DEVICE: 2254 2255 iommu_init_device(dev); 2256 2257 domain = domain_for_device(dev); 2258 2259 /* allocate a protection domain if a device is added */ 2260 dma_domain = find_protection_domain(devid); 2261 if (dma_domain) 2262 goto out; 2263 dma_domain = dma_ops_domain_alloc(); 2264 if (!dma_domain) 2265 goto out; 2266 dma_domain->target_dev = devid; 2267 2268 spin_lock_irqsave(&iommu_pd_list_lock, flags); 2269 list_add_tail(&dma_domain->list, &iommu_pd_list); 2270 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 2271 2272 dev_data = get_dev_data(dev); 2273 2274 if (!dev_data->passthrough) 2275 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2276 else 2277 dev->archdata.dma_ops = &nommu_dma_ops; 2278 2279 break; 2280 case BUS_NOTIFY_DEL_DEVICE: 2281 2282 iommu_uninit_device(dev); 2283 2284 default: 2285 goto out; 2286 } 2287 2288 iommu_completion_wait(iommu); 2289 2290out: 2291 return 0; 2292} 2293 2294static struct notifier_block device_nb = { 2295 .notifier_call = device_change_notifier, 2296}; 2297 2298void amd_iommu_init_notifier(void) 2299{ 2300 
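	/* Hook into the PCI bus so device add/remove and driver-unbind events reach device_change_notifier() above */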
bus_register_notifier(&pci_bus_type, &device_nb); 2301} 2302 2303/***************************************************************************** 2304 * 2305 * The next functions belong to the dma_ops mapping/unmapping code. 2306 * 2307 *****************************************************************************/ 2308 2309/* 2310 * In the dma_ops path we only have the struct device. This function 2311 * finds the corresponding IOMMU, the protection domain and the 2312 * requestor id for a given device. 2313 * If the device is not yet associated with a domain, this is also done 2314 * in this function. 2315 */ 2316static struct protection_domain *get_domain(struct device *dev) 2317{ 2318 struct protection_domain *domain; 2319 struct dma_ops_domain *dma_dom; 2320 u16 devid = get_device_id(dev); 2321 2322 if (!check_device(dev)) 2323 return ERR_PTR(-EINVAL); 2324 2325 domain = domain_for_device(dev); 2326 if (domain != NULL && !dma_ops_domain(domain)) 2327 return ERR_PTR(-EBUSY); 2328 2329 if (domain != NULL) 2330 return domain; 2331 2332 /* Device not bound yet - bind it */ 2333 dma_dom = find_protection_domain(devid); 2334 if (!dma_dom) 2335 dma_dom = amd_iommu_rlookup_table[devid]->default_dom; 2336 attach_device(dev, &dma_dom->domain); 2337 DUMP_printk("Using protection domain %d for device %s\n", 2338 dma_dom->domain.id, dev_name(dev)); 2339 2340 return &dma_dom->domain; 2341} 2342 2343static void update_device_table(struct protection_domain *domain) 2344{ 2345 struct iommu_dev_data *dev_data; 2346 2347 list_for_each_entry(dev_data, &domain->dev_list, list) 2348 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); 2349} 2350 2351static void update_domain(struct protection_domain *domain) 2352{ 2353 if (!domain->updated) 2354 return; 2355 2356 update_device_table(domain); 2357 2358 domain_flush_devices(domain); 2359 domain_flush_tlb_pde(domain); 2360 2361 domain->updated = false; 2362} 2363 2364/* 2365 * This function fetches the PTE for a given address in the aperture 2366 */ 2367static u64* dma_ops_get_pte(struct dma_ops_domain *dom, 2368 unsigned long address) 2369{ 2370 struct aperture_range *aperture; 2371 u64 *pte, *pte_page; 2372 2373 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; 2374 if (!aperture) 2375 return NULL; 2376 2377 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; 2378 if (!pte) { 2379 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, 2380 GFP_ATOMIC); 2381 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; 2382 } else 2383 pte += PM_LEVEL_INDEX(0, address); 2384 2385 update_domain(&dom->domain); 2386 2387 return pte; 2388} 2389 2390/* 2391 * This is the generic map function. It maps one 4KiB page at paddr to 2392 * the given address in the DMA address space for the domain.
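 * The PTE written below is the page's physical address plus the present
 * and FC bits; the read (IR) and write (IW) permission bits are derived
 * from the DMA direction.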
2393 */ 2394static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, 2395 unsigned long address, 2396 phys_addr_t paddr, 2397 int direction) 2398{ 2399 u64 *pte, __pte; 2400 2401 WARN_ON(address > dom->aperture_size); 2402 2403 paddr &= PAGE_MASK; 2404 2405 pte = dma_ops_get_pte(dom, address); 2406 if (!pte) 2407 return DMA_ERROR_CODE; 2408 2409 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; 2410 2411 if (direction == DMA_TO_DEVICE) 2412 __pte |= IOMMU_PTE_IR; 2413 else if (direction == DMA_FROM_DEVICE) 2414 __pte |= IOMMU_PTE_IW; 2415 else if (direction == DMA_BIDIRECTIONAL) 2416 __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; 2417 2418 WARN_ON(*pte); 2419 2420 *pte = __pte; 2421 2422 return (dma_addr_t)address; 2423} 2424 2425/* 2426 * The generic unmapping function for one page in the DMA address space. 2427 */ 2428static void dma_ops_domain_unmap(struct dma_ops_domain *dom, 2429 unsigned long address) 2430{ 2431 struct aperture_range *aperture; 2432 u64 *pte; 2433 2434 if (address >= dom->aperture_size) 2435 return; 2436 2437 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; 2438 if (!aperture) 2439 return; 2440 2441 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; 2442 if (!pte) 2443 return; 2444 2445 pte += PM_LEVEL_INDEX(0, address); 2446 2447 WARN_ON(!*pte); 2448 2449 *pte = 0ULL; 2450} 2451 2452/* 2453 * This function contains common code for mapping of a physically 2454 * contiguous memory region into DMA address space. It is used by all 2455 * mapping functions provided with this IOMMU driver. 2456 * Must be called with the domain lock held. 2457 */ 2458static dma_addr_t __map_single(struct device *dev, 2459 struct dma_ops_domain *dma_dom, 2460 phys_addr_t paddr, 2461 size_t size, 2462 int dir, 2463 bool align, 2464 u64 dma_mask) 2465{ 2466 dma_addr_t offset = paddr & ~PAGE_MASK; 2467 dma_addr_t address, start, ret; 2468 unsigned int pages; 2469 unsigned long align_mask = 0; 2470 int i; 2471 2472 pages = iommu_num_pages(paddr, size, PAGE_SIZE); 2473 paddr &= PAGE_MASK; 2474 2475 INC_STATS_COUNTER(total_map_requests); 2476 2477 if (pages > 1) 2478 INC_STATS_COUNTER(cross_page); 2479 2480 if (align) 2481 align_mask = (1UL << get_order(size)) - 1; 2482 2483retry: 2484 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, 2485 dma_mask); 2486 if (unlikely(address == DMA_ERROR_CODE)) { 2487 /* 2488 * setting next_address here will let the address 2489 * allocator only scan the newly allocated range in the 2490 * first run. This is a small optimization.
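 * If the aperture cannot be enlarged either, the mapping request
 * fails with DMA_ERROR_CODE.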
2491 */ 2492 dma_dom->next_address = dma_dom->aperture_size; 2493 2494 if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) 2495 goto out; 2496 2497 /* 2498 * aperture was successfully enlarged by 128 MB, try 2499 * allocation again 2500 */ 2501 goto retry; 2502 } 2503 2504 start = address; 2505 for (i = 0; i < pages; ++i) { 2506 ret = dma_ops_domain_map(dma_dom, start, paddr, dir); 2507 if (ret == DMA_ERROR_CODE) 2508 goto out_unmap; 2509 2510 paddr += PAGE_SIZE; 2511 start += PAGE_SIZE; 2512 } 2513 address += offset; 2514 2515 ADD_STATS_COUNTER(alloced_io_mem, size); 2516 2517 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { 2518 domain_flush_tlb(&dma_dom->domain); 2519 dma_dom->need_flush = false; 2520 } else if (unlikely(amd_iommu_np_cache)) 2521 domain_flush_pages(&dma_dom->domain, address, size); 2522 2523out: 2524 return address; 2525 2526out_unmap: 2527 2528 for (--i; i >= 0; --i) { 2529 start -= PAGE_SIZE; 2530 dma_ops_domain_unmap(dma_dom, start); 2531 } 2532 2533 dma_ops_free_addresses(dma_dom, address, pages); 2534 2535 return DMA_ERROR_CODE; 2536} 2537 2538/* 2539 * Does the reverse of the __map_single function. Must be called with 2540 * the domain lock held too 2541 */ 2542static void __unmap_single(struct dma_ops_domain *dma_dom, 2543 dma_addr_t dma_addr, 2544 size_t size, 2545 int dir) 2546{ 2547 dma_addr_t flush_addr; 2548 dma_addr_t i, start; 2549 unsigned int pages; 2550 2551 if ((dma_addr == DMA_ERROR_CODE) || 2552 (dma_addr + size > dma_dom->aperture_size)) 2553 return; 2554 2555 flush_addr = dma_addr; 2556 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 2557 dma_addr &= PAGE_MASK; 2558 start = dma_addr; 2559 2560 for (i = 0; i < pages; ++i) { 2561 dma_ops_domain_unmap(dma_dom, start); 2562 start += PAGE_SIZE; 2563 } 2564 2565 SUB_STATS_COUNTER(alloced_io_mem, size); 2566 2567 dma_ops_free_addresses(dma_dom, dma_addr, pages); 2568 2569 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 2570 domain_flush_pages(&dma_dom->domain, flush_addr, size); 2571 dma_dom->need_flush = false; 2572 } 2573} 2574 2575/* 2576 * The exported map_single function for dma_ops. 2577 */ 2578static dma_addr_t map_page(struct device *dev, struct page *page, 2579 unsigned long offset, size_t size, 2580 enum dma_data_direction dir, 2581 struct dma_attrs *attrs) 2582{ 2583 unsigned long flags; 2584 struct protection_domain *domain; 2585 dma_addr_t addr; 2586 u64 dma_mask; 2587 phys_addr_t paddr = page_to_phys(page) + offset; 2588 2589 INC_STATS_COUNTER(cnt_map_single); 2590 2591 domain = get_domain(dev); 2592 if (PTR_ERR(domain) == -EINVAL) 2593 return (dma_addr_t)paddr; 2594 else if (IS_ERR(domain)) 2595 return DMA_ERROR_CODE; 2596 2597 dma_mask = *dev->dma_mask; 2598 2599 spin_lock_irqsave(&domain->lock, flags); 2600 2601 addr = __map_single(dev, domain->priv, paddr, size, dir, false, 2602 dma_mask); 2603 if (addr == DMA_ERROR_CODE) 2604 goto out; 2605 2606 domain_flush_complete(domain); 2607 2608out: 2609 spin_unlock_irqrestore(&domain->lock, flags); 2610 2611 return addr; 2612} 2613 2614/* 2615 * The exported unmap_single function for dma_ops. 
2616 */ 2617static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 2618 enum dma_data_direction dir, struct dma_attrs *attrs) 2619{ 2620 unsigned long flags; 2621 struct protection_domain *domain; 2622 2623 INC_STATS_COUNTER(cnt_unmap_single); 2624 2625 domain = get_domain(dev); 2626 if (IS_ERR(domain)) 2627 return; 2628 2629 spin_lock_irqsave(&domain->lock, flags); 2630 2631 __unmap_single(domain->priv, dma_addr, size, dir); 2632 2633 domain_flush_complete(domain); 2634 2635 spin_unlock_irqrestore(&domain->lock, flags); 2636} 2637 2638/* 2639 * This is a special map_sg function which is used when we have to map a 2640 * device which is not handled by an AMD IOMMU in the system. 2641 */ 2642static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, 2643 int nelems, int dir) 2644{ 2645 struct scatterlist *s; 2646 int i; 2647 2648 for_each_sg(sglist, s, nelems, i) { 2649 s->dma_address = (dma_addr_t)sg_phys(s); 2650 s->dma_length = s->length; 2651 } 2652 2653 return nelems; 2654} 2655 2656/* 2657 * The exported map_sg function for dma_ops (handles scatter-gather 2658 * lists). 2659 */ 2660static int map_sg(struct device *dev, struct scatterlist *sglist, 2661 int nelems, enum dma_data_direction dir, 2662 struct dma_attrs *attrs) 2663{ 2664 unsigned long flags; 2665 struct protection_domain *domain; 2666 int i; 2667 struct scatterlist *s; 2668 phys_addr_t paddr; 2669 int mapped_elems = 0; 2670 u64 dma_mask; 2671 2672 INC_STATS_COUNTER(cnt_map_sg); 2673 2674 domain = get_domain(dev); 2675 if (PTR_ERR(domain) == -EINVAL) 2676 return map_sg_no_iommu(dev, sglist, nelems, dir); 2677 else if (IS_ERR(domain)) 2678 return 0; 2679 2680 dma_mask = *dev->dma_mask; 2681 2682 spin_lock_irqsave(&domain->lock, flags); 2683 2684 for_each_sg(sglist, s, nelems, i) { 2685 paddr = sg_phys(s); 2686 2687 s->dma_address = __map_single(dev, domain->priv, 2688 paddr, s->length, dir, false, 2689 dma_mask); 2690 2691 if (s->dma_address) { 2692 s->dma_length = s->length; 2693 mapped_elems++; 2694 } else 2695 goto unmap; 2696 } 2697 2698 domain_flush_complete(domain); 2699 2700out: 2701 spin_unlock_irqrestore(&domain->lock, flags); 2702 2703 return mapped_elems; 2704unmap: 2705 for_each_sg(sglist, s, mapped_elems, i) { 2706 if (s->dma_address) 2707 __unmap_single(domain->priv, s->dma_address, 2708 s->dma_length, dir); 2709 s->dma_address = s->dma_length = 0; 2710 } 2711 2712 mapped_elems = 0; 2713 2714 goto out; 2715} 2716 2717/* 2718 * The exported unmap_sg function for dma_ops (handles scatter-gather 2719 * lists). 2720 */ 2721static void unmap_sg(struct device *dev, struct scatterlist *sglist, 2722 int nelems, enum dma_data_direction dir, 2723 struct dma_attrs *attrs) 2724{ 2725 unsigned long flags; 2726 struct protection_domain *domain; 2727 struct scatterlist *s; 2728 int i; 2729 2730 INC_STATS_COUNTER(cnt_unmap_sg); 2731 2732 domain = get_domain(dev); 2733 if (IS_ERR(domain)) 2734 return; 2735 2736 spin_lock_irqsave(&domain->lock, flags); 2737 2738 for_each_sg(sglist, s, nelems, i) { 2739 __unmap_single(domain->priv, s->dma_address, 2740 s->dma_length, dir); 2741 s->dma_address = s->dma_length = 0; 2742 } 2743 2744 domain_flush_complete(domain); 2745 2746 spin_unlock_irqrestore(&domain->lock, flags); 2747} 2748 2749/* 2750 * The exported alloc_coherent function for dma_ops.
2751 */ 2752static void *alloc_coherent(struct device *dev, size_t size, 2753 dma_addr_t *dma_addr, gfp_t flag, 2754 struct dma_attrs *attrs) 2755{ 2756 unsigned long flags; 2757 void *virt_addr; 2758 struct protection_domain *domain; 2759 phys_addr_t paddr; 2760 u64 dma_mask = dev->coherent_dma_mask; 2761 2762 INC_STATS_COUNTER(cnt_alloc_coherent); 2763 2764 domain = get_domain(dev); 2765 if (PTR_ERR(domain) == -EINVAL) { 2766 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2767 *dma_addr = __pa(virt_addr); 2768 return virt_addr; 2769 } else if (IS_ERR(domain)) 2770 return NULL; 2771 2772 dma_mask = dev->coherent_dma_mask; 2773 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2774 flag |= __GFP_ZERO; 2775 2776 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2777 if (!virt_addr) 2778 return NULL; 2779 2780 paddr = virt_to_phys(virt_addr); 2781 2782 if (!dma_mask) 2783 dma_mask = *dev->dma_mask; 2784 2785 spin_lock_irqsave(&domain->lock, flags); 2786 2787 *dma_addr = __map_single(dev, domain->priv, paddr, 2788 size, DMA_BIDIRECTIONAL, true, dma_mask); 2789 2790 if (*dma_addr == DMA_ERROR_CODE) { 2791 spin_unlock_irqrestore(&domain->lock, flags); 2792 goto out_free; 2793 } 2794 2795 domain_flush_complete(domain); 2796 2797 spin_unlock_irqrestore(&domain->lock, flags); 2798 2799 return virt_addr; 2800 2801out_free: 2802 2803 free_pages((unsigned long)virt_addr, get_order(size)); 2804 2805 return NULL; 2806} 2807 2808/* 2809 * The exported free_coherent function for dma_ops. 2810 */ 2811static void free_coherent(struct device *dev, size_t size, 2812 void *virt_addr, dma_addr_t dma_addr, 2813 struct dma_attrs *attrs) 2814{ 2815 unsigned long flags; 2816 struct protection_domain *domain; 2817 2818 INC_STATS_COUNTER(cnt_free_coherent); 2819 2820 domain = get_domain(dev); 2821 if (IS_ERR(domain)) 2822 goto free_mem; 2823 2824 spin_lock_irqsave(&domain->lock, flags); 2825 2826 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 2827 2828 domain_flush_complete(domain); 2829 2830 spin_unlock_irqrestore(&domain->lock, flags); 2831 2832free_mem: 2833 free_pages((unsigned long)virt_addr, get_order(size)); 2834} 2835 2836/* 2837 * This function is called by the DMA layer to find out if we can handle a 2838 * particular device. It is part of the dma_ops. 2839 */ 2840static int amd_iommu_dma_supported(struct device *dev, u64 mask) 2841{ 2842 return check_device(dev); 2843} 2844 2845/* 2846 * The function for pre-allocating protection domains. 2847 * 2848 * If the driver core informs the DMA layer when a driver grabs a device 2849 * we don't need to preallocate the protection domains anymore. 2850 * For now we have to. 2851 */ 2852static void __init prealloc_protection_domains(void) 2853{ 2854 struct iommu_dev_data *dev_data; 2855 struct dma_ops_domain *dma_dom; 2856 struct pci_dev *dev = NULL; 2857 u16 devid; 2858 2859 for_each_pci_dev(dev) { 2860 2861 /* Do we handle this device? */ 2862 if (!check_device(&dev->dev)) 2863 continue; 2864 2865 dev_data = get_dev_data(&dev->dev); 2866 if (!amd_iommu_force_isolation && dev_data->iommu_v2) { 2867 /* Make sure passthrough domain is allocated */ 2868 alloc_passthrough_domain(); 2869 dev_data->passthrough = true; 2870 attach_device(&dev->dev, pt_domain); 2871 pr_info("AMD-Vi: Using passthrough domain for device %s\n", 2872 dev_name(&dev->dev)); 2873 } 2874 2875 /* Is there already any domain for it?
*/ 2876 if (domain_for_device(&dev->dev)) 2877 continue; 2878 2879 devid = get_device_id(&dev->dev); 2880 2881 dma_dom = dma_ops_domain_alloc(); 2882 if (!dma_dom) 2883 continue; 2884 init_unity_mappings_for_device(dma_dom, devid); 2885 dma_dom->target_dev = devid; 2886 2887 attach_device(&dev->dev, &dma_dom->domain); 2888 2889 list_add_tail(&dma_dom->list, &iommu_pd_list); 2890 } 2891} 2892 2893static struct dma_map_ops amd_iommu_dma_ops = { 2894 .alloc = alloc_coherent, 2895 .free = free_coherent, 2896 .map_page = map_page, 2897 .unmap_page = unmap_page, 2898 .map_sg = map_sg, 2899 .unmap_sg = unmap_sg, 2900 .dma_supported = amd_iommu_dma_supported, 2901}; 2902 2903static unsigned device_dma_ops_init(void) 2904{ 2905 struct iommu_dev_data *dev_data; 2906 struct pci_dev *pdev = NULL; 2907 unsigned unhandled = 0; 2908 2909 for_each_pci_dev(pdev) { 2910 if (!check_device(&pdev->dev)) { 2911 2912 iommu_ignore_device(&pdev->dev); 2913 2914 unhandled += 1; 2915 continue; 2916 } 2917 2918 dev_data = get_dev_data(&pdev->dev); 2919 2920 if (!dev_data->passthrough) 2921 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops; 2922 else 2923 pdev->dev.archdata.dma_ops = &nommu_dma_ops; 2924 } 2925 2926 return unhandled; 2927} 2928 2929/* 2930 * The function which clues the AMD IOMMU driver into dma_ops. 2931 */ 2932 2933void __init amd_iommu_init_api(void) 2934{ 2935 bus_set_iommu(&pci_bus_type, &amd_iommu_ops); 2936} 2937 2938int __init amd_iommu_init_dma_ops(void) 2939{ 2940 struct amd_iommu *iommu; 2941 int ret, unhandled; 2942 2943 /* 2944 * First allocate a default protection domain for every IOMMU we 2945 * found in the system. Devices not assigned to any other 2946 * protection domain will be assigned to the default one. 2947 */ 2948 for_each_iommu(iommu) { 2949 iommu->default_dom = dma_ops_domain_alloc(); 2950 if (iommu->default_dom == NULL) 2951 return -ENOMEM; 2952 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; 2953 ret = iommu_init_unity_mappings(iommu); 2954 if (ret) 2955 goto free_domains; 2956 } 2957 2958 /* 2959 * Pre-allocate the protection domains for each device. 2960 */ 2961 prealloc_protection_domains(); 2962 2963 iommu_detected = 1; 2964 swiotlb = 0; 2965 2966 /* Make the DMA ops finally visible to the drivers */ 2967 unhandled = device_dma_ops_init(); 2968 if (unhandled && max_pfn > MAX_DMA32_PFN) { 2969 /* There are unhandled devices - initialize swiotlb for them */ 2970 swiotlb = 1; 2971 } 2972 2973 amd_iommu_stats_init(); 2974 2975 return 0; 2976 2977free_domains: 2978 2979 for_each_iommu(iommu) { 2980 if (iommu->default_dom) 2981 dma_ops_domain_free(iommu->default_dom); 2982 } 2983 2984 return ret; 2985} 2986 2987/***************************************************************************** 2988 * 2989 * The following functions belong to the exported interface of AMD IOMMU 2990 * 2991 * This interface allows access to lower level functions of the IOMMU 2992 * like protection domain handling and assignment of devices to domains 2993 * which is not possible with the dma_ops interface.
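 * These functions back the generic iommu_ops callbacks (amd_iommu_ops
 * below) as well as the IOMMUv2 helpers exported further down in this file.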
2994 * 2995 *****************************************************************************/ 2996 2997static void cleanup_domain(struct protection_domain *domain) 2998{ 2999 struct iommu_dev_data *dev_data, *next; 3000 unsigned long flags; 3001 3002 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 3003 3004 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { 3005 __detach_device(dev_data); 3006 atomic_set(&dev_data->bind, 0); 3007 } 3008 3009 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 3010} 3011 3012static void protection_domain_free(struct protection_domain *domain) 3013{ 3014 if (!domain) 3015 return; 3016 3017 del_domain_from_list(domain); 3018 3019 if (domain->id) 3020 domain_id_free(domain->id); 3021 3022 kfree(domain); 3023} 3024 3025static struct protection_domain *protection_domain_alloc(void) 3026{ 3027 struct protection_domain *domain; 3028 3029 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 3030 if (!domain) 3031 return NULL; 3032 3033 spin_lock_init(&domain->lock); 3034 mutex_init(&domain->api_lock); 3035 domain->id = domain_id_alloc(); 3036 if (!domain->id) 3037 goto out_err; 3038 INIT_LIST_HEAD(&domain->dev_list); 3039 3040 add_domain_to_list(domain); 3041 3042 return domain; 3043 3044out_err: 3045 kfree(domain); 3046 3047 return NULL; 3048} 3049 3050static int __init alloc_passthrough_domain(void) 3051{ 3052 if (pt_domain != NULL) 3053 return 0; 3054 3055 /* allocate passthrough domain */ 3056 pt_domain = protection_domain_alloc(); 3057 if (!pt_domain) 3058 return -ENOMEM; 3059 3060 pt_domain->mode = PAGE_MODE_NONE; 3061 3062 return 0; 3063} 3064static int amd_iommu_domain_init(struct iommu_domain *dom) 3065{ 3066 struct protection_domain *domain; 3067 3068 domain = protection_domain_alloc(); 3069 if (!domain) 3070 goto out_free; 3071 3072 domain->mode = PAGE_MODE_3_LEVEL; 3073 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); 3074 if (!domain->pt_root) 3075 goto out_free; 3076 3077 domain->iommu_domain = dom; 3078 3079 dom->priv = domain; 3080 3081 return 0; 3082 3083out_free: 3084 protection_domain_free(domain); 3085 3086 return -ENOMEM; 3087} 3088 3089static void amd_iommu_domain_destroy(struct iommu_domain *dom) 3090{ 3091 struct protection_domain *domain = dom->priv; 3092 3093 if (!domain) 3094 return; 3095 3096 if (domain->dev_cnt > 0) 3097 cleanup_domain(domain); 3098 3099 BUG_ON(domain->dev_cnt != 0); 3100 3101 if (domain->mode != PAGE_MODE_NONE) 3102 free_pagetable(domain); 3103 3104 if (domain->flags & PD_IOMMUV2_MASK) 3105 free_gcr3_table(domain); 3106 3107 protection_domain_free(domain); 3108 3109 dom->priv = NULL; 3110} 3111 3112static void amd_iommu_detach_device(struct iommu_domain *dom, 3113 struct device *dev) 3114{ 3115 struct iommu_dev_data *dev_data = dev->archdata.iommu; 3116 struct amd_iommu *iommu; 3117 u16 devid; 3118 3119 if (!check_device(dev)) 3120 return; 3121 3122 devid = get_device_id(dev); 3123 3124 if (dev_data->domain != NULL) 3125 detach_device(dev); 3126 3127 iommu = amd_iommu_rlookup_table[devid]; 3128 if (!iommu) 3129 return; 3130 3131 iommu_completion_wait(iommu); 3132} 3133 3134static int amd_iommu_attach_device(struct iommu_domain *dom, 3135 struct device *dev) 3136{ 3137 struct protection_domain *domain = dom->priv; 3138 struct iommu_dev_data *dev_data; 3139 struct amd_iommu *iommu; 3140 int ret; 3141 3142 if (!check_device(dev)) 3143 return -EINVAL; 3144 3145 dev_data = dev->archdata.iommu; 3146 3147 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3148 if (!iommu) 3149 return -EINVAL; 3150 3151 
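	/* If the device is already attached to a domain, detach it first */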
if (dev_data->domain) 3152 detach_device(dev); 3153 3154 ret = attach_device(dev, domain); 3155 3156 iommu_completion_wait(iommu); 3157 3158 return ret; 3159} 3160 3161static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, 3162 phys_addr_t paddr, size_t page_size, int iommu_prot) 3163{ 3164 struct protection_domain *domain = dom->priv; 3165 int prot = 0; 3166 int ret; 3167 3168 if (domain->mode == PAGE_MODE_NONE) 3169 return -EINVAL; 3170 3171 if (iommu_prot & IOMMU_READ) 3172 prot |= IOMMU_PROT_IR; 3173 if (iommu_prot & IOMMU_WRITE) 3174 prot |= IOMMU_PROT_IW; 3175 3176 mutex_lock(&domain->api_lock); 3177 ret = iommu_map_page(domain, iova, paddr, prot, page_size); 3178 mutex_unlock(&domain->api_lock); 3179 3180 return ret; 3181} 3182 3183static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, 3184 size_t page_size) 3185{ 3186 struct protection_domain *domain = dom->priv; 3187 size_t unmap_size; 3188 3189 if (domain->mode == PAGE_MODE_NONE) 3190 return -EINVAL; 3191 3192 mutex_lock(&domain->api_lock); 3193 unmap_size = iommu_unmap_page(domain, iova, page_size); 3194 mutex_unlock(&domain->api_lock); 3195 3196 domain_flush_tlb_pde(domain); 3197 3198 return unmap_size; 3199} 3200 3201static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 3202 unsigned long iova) 3203{ 3204 struct protection_domain *domain = dom->priv; 3205 unsigned long offset_mask; 3206 phys_addr_t paddr; 3207 u64 *pte, __pte; 3208 3209 if (domain->mode == PAGE_MODE_NONE) 3210 return iova; 3211 3212 pte = fetch_pte(domain, iova); 3213 3214 if (!pte || !IOMMU_PTE_PRESENT(*pte)) 3215 return 0; 3216 3217 if (PM_PTE_LEVEL(*pte) == 0) 3218 offset_mask = PAGE_SIZE - 1; 3219 else 3220 offset_mask = PTE_PAGE_SIZE(*pte) - 1; 3221 3222 __pte = *pte & PM_ADDR_MASK; 3223 paddr = (__pte & ~offset_mask) | (iova & offset_mask); 3224 3225 return paddr; 3226} 3227 3228static int amd_iommu_domain_has_cap(struct iommu_domain *domain, 3229 unsigned long cap) 3230{ 3231 switch (cap) { 3232 case IOMMU_CAP_CACHE_COHERENCY: 3233 return 1; 3234 } 3235 3236 return 0; 3237} 3238 3239static int amd_iommu_device_group(struct device *dev, unsigned int *groupid) 3240{ 3241 struct iommu_dev_data *dev_data = dev->archdata.iommu; 3242 struct pci_dev *pdev = to_pci_dev(dev); 3243 u16 devid; 3244 3245 if (!dev_data) 3246 return -ENODEV; 3247 3248 if (pdev->is_virtfn || !iommu_group_mf) 3249 devid = dev_data->devid; 3250 else 3251 devid = calc_devid(pdev->bus->number, 3252 PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); 3253 3254 *groupid = amd_iommu_alias_table[devid]; 3255 3256 return 0; 3257} 3258 3259static struct iommu_ops amd_iommu_ops = { 3260 .domain_init = amd_iommu_domain_init, 3261 .domain_destroy = amd_iommu_domain_destroy, 3262 .attach_dev = amd_iommu_attach_device, 3263 .detach_dev = amd_iommu_detach_device, 3264 .map = amd_iommu_map, 3265 .unmap = amd_iommu_unmap, 3266 .iova_to_phys = amd_iommu_iova_to_phys, 3267 .domain_has_cap = amd_iommu_domain_has_cap, 3268 .device_group = amd_iommu_device_group, 3269 .pgsize_bitmap = AMD_IOMMU_PGSIZES, 3270}; 3271 3272/***************************************************************************** 3273 * 3274 * The next functions do a basic initialization of IOMMU for pass through 3275 * mode 3276 * 3277 * In passthrough mode the IOMMU is initialized and enabled but not used for 3278 * DMA-API translation. 
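 * (This is the mode selected by iommu=pt on the kernel command line.)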
3279 * 3280 *****************************************************************************/ 3281 3282int __init amd_iommu_init_passthrough(void) 3283{ 3284 struct iommu_dev_data *dev_data; 3285 struct pci_dev *dev = NULL; 3286 struct amd_iommu *iommu; 3287 u16 devid; 3288 int ret; 3289 3290 ret = alloc_passthrough_domain(); 3291 if (ret) 3292 return ret; 3293 3294 for_each_pci_dev(dev) { 3295 if (!check_device(&dev->dev)) 3296 continue; 3297 3298 dev_data = get_dev_data(&dev->dev); 3299 dev_data->passthrough = true; 3300 3301 devid = get_device_id(&dev->dev); 3302 3303 iommu = amd_iommu_rlookup_table[devid]; 3304 if (!iommu) 3305 continue; 3306 3307 attach_device(&dev->dev, pt_domain); 3308 } 3309 3310 amd_iommu_stats_init(); 3311 3312 pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); 3313 3314 return 0; 3315} 3316 3317/* IOMMUv2 specific functions */ 3318int amd_iommu_register_ppr_notifier(struct notifier_block *nb) 3319{ 3320 return atomic_notifier_chain_register(&ppr_notifier, nb); 3321} 3322EXPORT_SYMBOL(amd_iommu_register_ppr_notifier); 3323 3324int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb) 3325{ 3326 return atomic_notifier_chain_unregister(&ppr_notifier, nb); 3327} 3328EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); 3329 3330void amd_iommu_domain_direct_map(struct iommu_domain *dom) 3331{ 3332 struct protection_domain *domain = dom->priv; 3333 unsigned long flags; 3334 3335 spin_lock_irqsave(&domain->lock, flags); 3336 3337 /* Update data structure */ 3338 domain->mode = PAGE_MODE_NONE; 3339 domain->updated = true; 3340 3341 /* Make changes visible to IOMMUs */ 3342 update_domain(domain); 3343 3344 /* Page-table is not visible to IOMMU anymore, so free it */ 3345 free_pagetable(domain); 3346 3347 spin_unlock_irqrestore(&domain->lock, flags); 3348} 3349EXPORT_SYMBOL(amd_iommu_domain_direct_map); 3350 3351int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) 3352{ 3353 struct protection_domain *domain = dom->priv; 3354 unsigned long flags; 3355 int levels, ret; 3356 3357 if (pasids <= 0 || pasids > (PASID_MASK + 1)) 3358 return -EINVAL; 3359 3360 /* Number of GCR3 table levels required */ 3361 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9) 3362 levels += 1; 3363 3364 if (levels > amd_iommu_max_glx_val) 3365 return -EINVAL; 3366 3367 spin_lock_irqsave(&domain->lock, flags); 3368 3369 /* 3370 * Save us all the sanity checks of whether the devices already in the 3371 * domain support IOMMUv2. Just force that the domain has no 3372 * devices attached when it is switched into IOMMUv2 mode.
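 *
 * A worked example for the level computation above: pasids <= 512 fits a
 * single GCR3 table (levels = 0), while e.g. 65536 PASIDs need one extra
 * level (levels = 1), since each table level resolves 9 PASID bits.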
3373 */ 3374 ret = -EBUSY; 3375 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) 3376 goto out; 3377 3378 ret = -ENOMEM; 3379 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); 3380 if (domain->gcr3_tbl == NULL) 3381 goto out; 3382 3383 domain->glx = levels; 3384 domain->flags |= PD_IOMMUV2_MASK; 3385 domain->updated = true; 3386 3387 update_domain(domain); 3388 3389 ret = 0; 3390 3391out: 3392 spin_unlock_irqrestore(&domain->lock, flags); 3393 3394 return ret; 3395} 3396EXPORT_SYMBOL(amd_iommu_domain_enable_v2); 3397 3398static int __flush_pasid(struct protection_domain *domain, int pasid, 3399 u64 address, bool size) 3400{ 3401 struct iommu_dev_data *dev_data; 3402 struct iommu_cmd cmd; 3403 int i, ret; 3404 3405 if (!(domain->flags & PD_IOMMUV2_MASK)) 3406 return -EINVAL; 3407 3408 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); 3409 3410 /* 3411 * IOMMU TLB needs to be flushed before Device TLB to 3412 * prevent device TLB refill from IOMMU TLB 3413 */ 3414 for (i = 0; i < amd_iommus_present; ++i) { 3415 if (domain->dev_iommu[i] == 0) 3416 continue; 3417 3418 ret = iommu_queue_command(amd_iommus[i], &cmd); 3419 if (ret != 0) 3420 goto out; 3421 } 3422 3423 /* Wait until IOMMU TLB flushes are complete */ 3424 domain_flush_complete(domain); 3425 3426 /* Now flush device TLBs */ 3427 list_for_each_entry(dev_data, &domain->dev_list, list) { 3428 struct amd_iommu *iommu; 3429 int qdep; 3430 3431 BUG_ON(!dev_data->ats.enabled); 3432 3433 qdep = dev_data->ats.qdep; 3434 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3435 3436 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, 3437 qdep, address, size); 3438 3439 ret = iommu_queue_command(iommu, &cmd); 3440 if (ret != 0) 3441 goto out; 3442 } 3443 3444 /* Wait until all device TLBs are flushed */ 3445 domain_flush_complete(domain); 3446 3447 ret = 0; 3448 3449out: 3450 3451 return ret; 3452} 3453 3454static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, 3455 u64 address) 3456{ 3457 INC_STATS_COUNTER(invalidate_iotlb); 3458 3459 return __flush_pasid(domain, pasid, address, false); 3460} 3461 3462int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, 3463 u64 address) 3464{ 3465 struct protection_domain *domain = dom->priv; 3466 unsigned long flags; 3467 int ret; 3468 3469 spin_lock_irqsave(&domain->lock, flags); 3470 ret = __amd_iommu_flush_page(domain, pasid, address); 3471 spin_unlock_irqrestore(&domain->lock, flags); 3472 3473 return ret; 3474} 3475EXPORT_SYMBOL(amd_iommu_flush_page); 3476 3477static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) 3478{ 3479 INC_STATS_COUNTER(invalidate_iotlb_all); 3480 3481 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 3482 true); 3483} 3484 3485int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid) 3486{ 3487 struct protection_domain *domain = dom->priv; 3488 unsigned long flags; 3489 int ret; 3490 3491 spin_lock_irqsave(&domain->lock, flags); 3492 ret = __amd_iommu_flush_tlb(domain, pasid); 3493 spin_unlock_irqrestore(&domain->lock, flags); 3494 3495 return ret; 3496} 3497EXPORT_SYMBOL(amd_iommu_flush_tlb); 3498 3499static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc) 3500{ 3501 int index; 3502 u64 *pte; 3503 3504 while (true) { 3505 3506 index = (pasid >> (9 * level)) & 0x1ff; 3507 pte = &root[index]; 3508 3509 if (level == 0) 3510 break; 3511 3512 if (!(*pte & GCR3_VALID)) { 3513 if (!alloc) 3514 return NULL; 3515 3516 root = (void *)get_zeroed_page(GFP_ATOMIC); 3517 
if (root == NULL) 3518 return NULL; 3519 3520 *pte = __pa(root) | GCR3_VALID; 3521 } 3522 3523 root = __va(*pte & PAGE_MASK); 3524 3525 level -= 1; 3526 } 3527 3528 return pte; 3529} 3530 3531static int __set_gcr3(struct protection_domain *domain, int pasid, 3532 unsigned long cr3) 3533{ 3534 u64 *pte; 3535 3536 if (domain->mode != PAGE_MODE_NONE) 3537 return -EINVAL; 3538 3539 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); 3540 if (pte == NULL) 3541 return -ENOMEM; 3542 3543 *pte = (cr3 & PAGE_MASK) | GCR3_VALID; 3544 3545 return __amd_iommu_flush_tlb(domain, pasid); 3546} 3547 3548static int __clear_gcr3(struct protection_domain *domain, int pasid) 3549{ 3550 u64 *pte; 3551 3552 if (domain->mode != PAGE_MODE_NONE) 3553 return -EINVAL; 3554 3555 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); 3556 if (pte == NULL) 3557 return 0; 3558 3559 *pte = 0; 3560 3561 return __amd_iommu_flush_tlb(domain, pasid); 3562} 3563 3564int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, 3565 unsigned long cr3) 3566{ 3567 struct protection_domain *domain = dom->priv; 3568 unsigned long flags; 3569 int ret; 3570 3571 spin_lock_irqsave(&domain->lock, flags); 3572 ret = __set_gcr3(domain, pasid, cr3); 3573 spin_unlock_irqrestore(&domain->lock, flags); 3574 3575 return ret; 3576} 3577EXPORT_SYMBOL(amd_iommu_domain_set_gcr3); 3578 3579int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid) 3580{ 3581 struct protection_domain *domain = dom->priv; 3582 unsigned long flags; 3583 int ret; 3584 3585 spin_lock_irqsave(&domain->lock, flags); 3586 ret = __clear_gcr3(domain, pasid); 3587 spin_unlock_irqrestore(&domain->lock, flags); 3588 3589 return ret; 3590} 3591EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3); 3592 3593int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, 3594 int status, int tag) 3595{ 3596 struct iommu_dev_data *dev_data; 3597 struct amd_iommu *iommu; 3598 struct iommu_cmd cmd; 3599 3600 INC_STATS_COUNTER(complete_ppr); 3601 3602 dev_data = get_dev_data(&pdev->dev); 3603 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3604 3605 build_complete_ppr(&cmd, dev_data->devid, pasid, status, 3606 tag, dev_data->pri_tlp); 3607 3608 return iommu_queue_command(iommu, &cmd); 3609} 3610EXPORT_SYMBOL(amd_iommu_complete_ppr); 3611 3612struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) 3613{ 3614 struct protection_domain *domain; 3615 3616 domain = get_domain(&pdev->dev); 3617 if (IS_ERR(domain)) 3618 return NULL; 3619 3620 /* Only return IOMMUv2 domains */ 3621 if (!(domain->flags & PD_IOMMUV2_MASK)) 3622 return NULL; 3623 3624 return domain->iommu_domain; 3625} 3626EXPORT_SYMBOL(amd_iommu_get_v2_domain); 3627 3628void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum) 3629{ 3630 struct iommu_dev_data *dev_data; 3631 3632 if (!amd_iommu_v2_supported()) 3633 return; 3634 3635 dev_data = get_dev_data(&pdev->dev); 3636 dev_data->errata |= (1 << erratum); 3637} 3638EXPORT_SYMBOL(amd_iommu_enable_device_erratum); 3639 3640int amd_iommu_device_info(struct pci_dev *pdev, 3641 struct amd_iommu_device_info *info) 3642{ 3643 int max_pasids; 3644 int pos; 3645 3646 if (pdev == NULL || info == NULL) 3647 return -EINVAL; 3648 3649 if (!amd_iommu_v2_supported()) 3650 return -EINVAL; 3651 3652 memset(info, 0, sizeof(*info)); 3653 3654 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS); 3655 if (pos) 3656 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; 3657 3658 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); 3659 if 
(pos) 3660 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP; 3661 3662 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); 3663 if (pos) { 3664 int features; 3665 3666 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1)); 3667 max_pasids = min(max_pasids, (1 << 20)); 3668 3669 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP; 3670 info->max_pasids = min(pci_max_pasids(pdev), max_pasids); 3671 3672 features = pci_pasid_features(pdev); 3673 if (features & PCI_PASID_CAP_EXEC) 3674 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP; 3675 if (features & PCI_PASID_CAP_PRIV) 3676 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP; 3677 } 3678 3679 return 0; 3680} 3681EXPORT_SYMBOL(amd_iommu_device_info);
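/*
 * Editor's note - an illustrative sketch, not part of the driver: roughly
 * how an IOMMUv2-capable device driver might use the exported interface
 * above. "my_pdev", "dom", "pasid", "mm" and "my_ppr_nb" are hypothetical
 * placeholders and all error handling is omitted.
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (amd_iommu_device_info(my_pdev, &info) != 0 ||
 *	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP))
 *		return -ENODEV;
 *
 *	amd_iommu_domain_direct_map(dom);		  (drop the page-table)
 *	amd_iommu_domain_enable_v2(dom, info.max_pasids); (needs dev_cnt == 0)
 *	... then attach the device to dom and, for each process using it:
 *	amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 *	amd_iommu_register_ppr_notifier(&my_ppr_nb);
 */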