Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.28-rc3 · 762 lines · 18 kB
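The file below implements early detection and parsing of the DMA remapping (DMAR) ACPI table and the queued-invalidation interface for the Intel IOMMU; these routines serve both DMA-remapping and interrupt-remapping.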
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via DMA remapping reporting (DMAR)
 * ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>

#undef PREFIX
#define PREFIX "DMAR:"

/* No locks are needed as the DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units is not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so a scan of the list will find
	 * it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

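/*
 * Resolve one ACPI device scope entry to a pci_dev. The entry names a
 * starting bus and is followed by an array of acpi_dmar_pci_path
 * (device, function) pairs; each hop is looked up with pci_get_slot(),
 * and for bridges the walk continues on the subordinate bus until the
 * path is exhausted.
 */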
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR table;
		 * just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) || (scope->entry_type ==
	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		/* print the name before dropping the reference */
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

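/*
 * Device scopes are resolved in a second pass, separate from the early
 * DRHD parse above: the scope entries of each unit are bound to pci_dev
 * pointers here, called later from dmar_dev_scope_init(), once PCI
 * devices can actually be looked up.
 */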
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	static int include_all;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (!dmaru->include_all)
		ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	else {
		/* Only allow one INCLUDE_ALL */
		if (include_all) {
			printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
				"device scope is allowed\n");
			ret = -EINVAL;
		}
		include_all = 1;
	}

	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = (struct acpi_dmar_hardware_unit *)header;
		printk(KERN_INFO PREFIX
			"DRHD (flags: 0x%08x) base: 0x%016Lx\n",
			drhd->flags, (unsigned long long)drhd->address);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = (struct acpi_dmar_reserved_memory *)header;

		printk(KERN_INFO PREFIX
			"RMRR base: 0x%016Lx end: 0x%016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we can find the DMAR table, then there are DMAR devices */
	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

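/*
 * The DMAR table is an acpi_table_dmar header followed by a variable
 * number of remapping structures, each of which begins with an
 * acpi_dmar_header giving its type and length; parse_dmar_table()
 * below walks them by length until the end of the table.
 */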
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping could have been made
	 * with the fixed map.
	 */
	dmar_table_detect();

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		/* haw: host address width, reported as width + 1 bits */
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type\n");
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = NULL;

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
						drhd->devices_cnt, dev))
			return drhd;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");
#endif

#ifdef CONFIG_INTR_REMAP
	parse_ioapics_under_ir();
#endif
	return 0;
}

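/*
 * Early detection hook: probes for a DMAR table and, if appropriate,
 * flags iommu_detected so the DMA API selects the Intel IOMMU. The
 * table reference is dropped at the end (dmar_tbl = NULL) because this
 * early mapping may come from the fixed map; parse_dmar_table()
 * re-acquires it later.
 */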
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();

	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * For now we will disable DMA-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *)dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
				"Queued invalidation will be enabled to support "
				"x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	dmar_tbl = NULL;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* the registers might span more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;
error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

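/*
 * The invalidation queue is a circular ring of QI_LENGTH descriptors
 * with a parallel software status array (QI_FREE/QI_IN_USE/QI_DONE).
 * free_head is where the next descriptor is placed; free_tail is the
 * oldest slot not yet reclaimed. Each submission consumes two slots:
 * the real descriptor plus a wait descriptor whose status write by the
 * hardware satisfies the poll loop in qi_submit_sync() below.
 */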
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return;

	hw = qi->desc;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	spin_lock(&iommu->register_lock);
	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled to prevent the interrupt
		 * context from queueing another cmd while a cmd is already
		 * submitted and waiting for completion on this cpu. This is
		 * to avoid a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		     u64 type, int non_present_entry_flush)
{
	struct qi_desc desc;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);

	return 0;
}

int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		   unsigned int size_order, u64 type,
		   int non_present_entry_flush)
{
	u8 dw = 0, dr = 0;
	struct qi_desc desc;
	int ih = 0;

	if (non_present_entry_flush) {
		if (!cap_caching_mode(iommu->cap))
			return 1;
		else
			did = 0;
	}

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);

	return 0;
}

/*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt-remapping. It is also used by DMA-remapping, where it
 * replaces register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 cmd, sts;
	unsigned long flags;
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already set up and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_KERNEL));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* status array must start out all QI_FREE (0), so zero it */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_KERNEL);
	if (!qi->desc_status) {
		free_page((unsigned long)qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	spin_lock_irqsave(&iommu->register_lock, flags);
	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	cmd = iommu->gcmd | DMA_GCMD_QIE;
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure the hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	return 0;
}