Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/vt-d, trivial: simplify code with existing macros

Simplify vt-d related code with existing macros and introduce a new
macro for_each_active_drhd_unit() to enumerate all active DRHD units.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>

authored by

Jiang Liu and committed by
Joerg Roedel
7c919779 2fe2c602

+29 -68
+3 -4
drivers/iommu/dmar.c
··· 1305 1305 int __init enable_drhd_fault_handling(void) 1306 1306 { 1307 1307 struct dmar_drhd_unit *drhd; 1308 + struct intel_iommu *iommu; 1308 1309 1309 1310 /* 1310 1311 * Enable fault control interrupt. 1311 1312 */ 1312 - for_each_drhd_unit(drhd) { 1313 - int ret; 1314 - struct intel_iommu *iommu = drhd->iommu; 1313 + for_each_iommu(iommu, drhd) { 1315 1314 u32 fault_status; 1316 - ret = dmar_set_interrupt(iommu); 1315 + int ret = dmar_set_interrupt(iommu); 1317 1316 1318 1317 if (ret) { 1319 1318 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
+12 -43
drivers/iommu/intel-iommu.c
··· 628 628 struct dmar_drhd_unit *drhd = NULL; 629 629 int i; 630 630 631 - for_each_drhd_unit(drhd) { 632 - if (drhd->ignored) 633 - continue; 631 + for_each_active_drhd_unit(drhd) { 634 632 if (segment != drhd->segment) 635 633 continue; 636 634 ··· 2468 2470 goto error; 2469 2471 } 2470 2472 2471 - for_each_drhd_unit(drhd) { 2472 - if (drhd->ignored) 2473 - continue; 2474 - 2475 - iommu = drhd->iommu; 2473 + for_each_active_iommu(iommu, drhd) { 2476 2474 g_iommus[iommu->seq_id] = iommu; 2477 2475 2478 2476 ret = iommu_init_domains(iommu); ··· 2492 2498 /* 2493 2499 * Start from the sane iommu hardware state. 2494 2500 */ 2495 - for_each_drhd_unit(drhd) { 2496 - if (drhd->ignored) 2497 - continue; 2498 - 2499 - iommu = drhd->iommu; 2500 - 2501 + for_each_active_iommu(iommu, drhd) { 2501 2502 /* 2502 2503 * If the queued invalidation is already initialized by us 2503 2504 * (for example, while enabling interrupt-remapping) then ··· 2512 2523 dmar_disable_qi(iommu); 2513 2524 } 2514 2525 2515 - for_each_drhd_unit(drhd) { 2516 - if (drhd->ignored) 2517 - continue; 2518 - 2519 - iommu = drhd->iommu; 2520 - 2526 + for_each_active_iommu(iommu, drhd) { 2521 2527 if (dmar_enable_qi(iommu)) { 2522 2528 /* 2523 2529 * Queued Invalidate not enabled, use Register Based ··· 2595 2611 * global invalidate iotlb 2596 2612 * enable translation 2597 2613 */ 2598 - for_each_drhd_unit(drhd) { 2614 + for_each_iommu(iommu, drhd) { 2599 2615 if (drhd->ignored) { 2600 2616 /* 2601 2617 * we always have to disable PMRs or DMA may fail on 2602 2618 * this device 2603 2619 */ 2604 2620 if (force_on) 2605 - iommu_disable_protect_mem_regions(drhd->iommu); 2621 + iommu_disable_protect_mem_regions(iommu); 2606 2622 continue; 2607 2623 } 2608 - iommu = drhd->iommu; 2609 2624 2610 2625 iommu_flush_write_buffer(iommu); 2611 2626 ··· 2626 2643 2627 2644 return 0; 2628 2645 error: 2629 - for_each_drhd_unit(drhd) { 2630 - if (drhd->ignored) 2631 - continue; 2632 - iommu = drhd->iommu; 2646 + 
for_each_active_iommu(iommu, drhd) 2633 2647 free_iommu(iommu); 2634 - } 2635 2648 kfree(g_iommus); 2636 2649 return ret; 2637 2650 } ··· 3275 3296 } 3276 3297 } 3277 3298 3278 - for_each_drhd_unit(drhd) { 3299 + for_each_active_drhd_unit(drhd) { 3279 3300 int i; 3280 - if (drhd->ignored || drhd->include_all) 3301 + if (drhd->include_all) 3281 3302 continue; 3282 3303 3283 3304 for (i = 0; i < drhd->devices_cnt; i++) ··· 3626 3647 { 3627 3648 int ret = 0; 3628 3649 struct dmar_drhd_unit *drhd; 3650 + struct intel_iommu *iommu; 3629 3651 3630 3652 /* VT-d is required for a TXT/tboot launch, so enforce that */ 3631 3653 force_on = tboot_force_iommu(); ··· 3640 3660 /* 3641 3661 * Disable translation if already enabled prior to OS handover. 3642 3662 */ 3643 - for_each_drhd_unit(drhd) { 3644 - struct intel_iommu *iommu; 3645 - 3646 - if (drhd->ignored) 3647 - continue; 3648 - 3649 - iommu = drhd->iommu; 3663 + for_each_active_iommu(iommu, drhd) 3650 3664 if (iommu->gcmd & DMA_GCMD_TE) 3651 3665 iommu_disable_translation(iommu); 3652 - } 3653 3666 3654 3667 if (dmar_dev_scope_init() < 0) { 3655 3668 if (force_on) ··· 3885 3912 unsigned long i; 3886 3913 unsigned long ndomains; 3887 3914 3888 - for_each_drhd_unit(drhd) { 3889 - if (drhd->ignored) 3890 - continue; 3891 - iommu = drhd->iommu; 3892 - 3915 + for_each_active_iommu(iommu, drhd) { 3893 3916 ndomains = cap_ndoms(iommu->cap); 3894 3917 for_each_set_bit(i, iommu->domain_ids, ndomains) { 3895 3918 if (iommu->domains[i] == domain) {
+10 -21
drivers/iommu/intel_irq_remapping.c
··· 520 520 static int __init intel_irq_remapping_supported(void) 521 521 { 522 522 struct dmar_drhd_unit *drhd; 523 + struct intel_iommu *iommu; 523 524 524 525 if (disable_irq_remap) 525 526 return 0; ··· 539 538 if (!dmar_ir_support()) 540 539 return 0; 541 540 542 - for_each_drhd_unit(drhd) { 543 - struct intel_iommu *iommu = drhd->iommu; 544 - 541 + for_each_iommu(iommu, drhd) 545 542 if (!ecap_ir_support(iommu->ecap)) 546 543 return 0; 547 - } 548 544 549 545 return 1; 550 546 } ··· 549 551 static int __init intel_enable_irq_remapping(void) 550 552 { 551 553 struct dmar_drhd_unit *drhd; 554 + struct intel_iommu *iommu; 552 555 bool x2apic_present; 553 556 int setup = 0; 554 557 int eim = 0; ··· 572 573 "Use 'intremap=no_x2apic_optout' to override BIOS request.\n"); 573 574 } 574 575 575 - for_each_drhd_unit(drhd) { 576 - struct intel_iommu *iommu = drhd->iommu; 577 - 576 + for_each_iommu(iommu, drhd) { 578 577 /* 579 578 * If the queued invalidation is already initialized, 580 579 * shouldn't disable it. ··· 597 600 /* 598 601 * check for the Interrupt-remapping support 599 602 */ 600 - for_each_drhd_unit(drhd) { 601 - struct intel_iommu *iommu = drhd->iommu; 602 - 603 + for_each_iommu(iommu, drhd) { 603 604 if (!ecap_ir_support(iommu->ecap)) 604 605 continue; 605 606 ··· 611 616 /* 612 617 * Enable queued invalidation for all the DRHD's. 613 618 */ 614 - for_each_drhd_unit(drhd) { 615 - int ret; 616 - struct intel_iommu *iommu = drhd->iommu; 617 - ret = dmar_enable_qi(iommu); 619 + for_each_iommu(iommu, drhd) { 620 + int ret = dmar_enable_qi(iommu); 618 621 619 622 if (ret) { 620 623 printk(KERN_ERR "DRHD %Lx: failed to enable queued, " ··· 625 632 /* 626 633 * Setup Interrupt-remapping for all the DRHD's now. 
627 634 */ 628 - for_each_drhd_unit(drhd) { 629 - struct intel_iommu *iommu = drhd->iommu; 630 - 635 + for_each_iommu(iommu, drhd) { 631 636 if (!ecap_ir_support(iommu->ecap)) 632 637 continue; 633 638 ··· 769 778 static int __init parse_ioapics_under_ir(void) 770 779 { 771 780 struct dmar_drhd_unit *drhd; 781 + struct intel_iommu *iommu; 772 782 int ir_supported = 0; 773 783 int ioapic_idx; 774 784 775 - for_each_drhd_unit(drhd) { 776 - struct intel_iommu *iommu = drhd->iommu; 777 - 785 + for_each_iommu(iommu, drhd) 778 786 if (ecap_ir_support(iommu->ecap)) { 779 787 if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) 780 788 return -1; 781 789 782 790 ir_supported = 1; 783 791 } 784 - } 785 792 786 793 if (!ir_supported) 787 794 return 0;
+4
include/linux/dmar.h
··· 53 53 #define for_each_drhd_unit(drhd) \ 54 54 list_for_each_entry(drhd, &dmar_drhd_units, list) 55 55 56 + #define for_each_active_drhd_unit(drhd) \ 57 + list_for_each_entry(drhd, &dmar_drhd_units, list) \ 58 + if (drhd->ignored) {} else 59 + 56 60 #define for_each_active_iommu(i, drhd) \ 57 61 list_for_each_entry(drhd, &dmar_drhd_units, list) \ 58 62 if (i=drhd->iommu, drhd->ignored) {} else