Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)

Replace all uses of the DMA_64BIT_MASK macro with DMA_BIT_MASK(64)

Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Yang Hongyang and committed by
Linus Torvalds
6a35528a 8a59f5d2

+140 -143
+4 -4
arch/arm/mach-iop13xx/setup.c
··· 307 307 } 308 308 }; 309 309 310 - static u64 iop13xx_adma_dmamask = DMA_64BIT_MASK; 310 + static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); 311 311 static struct iop_adma_platform_data iop13xx_adma_0_data = { 312 312 .hw_id = 0, 313 313 .pool_size = PAGE_SIZE, ··· 331 331 .resource = iop13xx_adma_0_resources, 332 332 .dev = { 333 333 .dma_mask = &iop13xx_adma_dmamask, 334 - .coherent_dma_mask = DMA_64BIT_MASK, 334 + .coherent_dma_mask = DMA_BIT_MASK(64), 335 335 .platform_data = (void *) &iop13xx_adma_0_data, 336 336 }, 337 337 }; ··· 343 343 .resource = iop13xx_adma_1_resources, 344 344 .dev = { 345 345 .dma_mask = &iop13xx_adma_dmamask, 346 - .coherent_dma_mask = DMA_64BIT_MASK, 346 + .coherent_dma_mask = DMA_BIT_MASK(64), 347 347 .platform_data = (void *) &iop13xx_adma_1_data, 348 348 }, 349 349 }; ··· 355 355 .resource = iop13xx_adma_2_resources, 356 356 .dev = { 357 357 .dma_mask = &iop13xx_adma_dmamask, 358 - .coherent_dma_mask = DMA_64BIT_MASK, 358 + .coherent_dma_mask = DMA_BIT_MASK(64), 359 359 .platform_data = (void *) &iop13xx_adma_2_data, 360 360 }, 361 361 };
+5 -5
arch/arm/mach-iop13xx/tpmi.c
··· 151 151 } 152 152 }; 153 153 154 - u64 iop13xx_tpmi_mask = DMA_64BIT_MASK; 154 + u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64); 155 155 static struct platform_device iop13xx_tpmi_0_device = { 156 156 .name = "iop-tpmi", 157 157 .id = 0, ··· 159 159 .resource = iop13xx_tpmi_0_resources, 160 160 .dev = { 161 161 .dma_mask = &iop13xx_tpmi_mask, 162 - .coherent_dma_mask = DMA_64BIT_MASK, 162 + .coherent_dma_mask = DMA_BIT_MASK(64), 163 163 }, 164 164 }; 165 165 ··· 170 170 .resource = iop13xx_tpmi_1_resources, 171 171 .dev = { 172 172 .dma_mask = &iop13xx_tpmi_mask, 173 - .coherent_dma_mask = DMA_64BIT_MASK, 173 + .coherent_dma_mask = DMA_BIT_MASK(64), 174 174 }, 175 175 }; 176 176 ··· 181 181 .resource = iop13xx_tpmi_2_resources, 182 182 .dev = { 183 183 .dma_mask = &iop13xx_tpmi_mask, 184 - .coherent_dma_mask = DMA_64BIT_MASK, 184 + .coherent_dma_mask = DMA_BIT_MASK(64), 185 185 }, 186 186 }; 187 187 ··· 192 192 .resource = iop13xx_tpmi_3_resources, 193 193 .dev = { 194 194 .dma_mask = &iop13xx_tpmi_mask, 195 - .coherent_dma_mask = DMA_64BIT_MASK, 195 + .coherent_dma_mask = DMA_BIT_MASK(64), 196 196 }, 197 197 }; 198 198
+4 -4
arch/arm/mach-kirkwood/common.c
··· 559 559 .resource = kirkwood_xor00_resources, 560 560 .dev = { 561 561 .dma_mask = &kirkwood_xor_dmamask, 562 - .coherent_dma_mask = DMA_64BIT_MASK, 562 + .coherent_dma_mask = DMA_BIT_MASK(64), 563 563 .platform_data = (void *)&kirkwood_xor00_data, 564 564 }, 565 565 }; ··· 585 585 .resource = kirkwood_xor01_resources, 586 586 .dev = { 587 587 .dma_mask = &kirkwood_xor_dmamask, 588 - .coherent_dma_mask = DMA_64BIT_MASK, 588 + .coherent_dma_mask = DMA_BIT_MASK(64), 589 589 .platform_data = (void *)&kirkwood_xor01_data, 590 590 }, 591 591 }; ··· 657 657 .resource = kirkwood_xor10_resources, 658 658 .dev = { 659 659 .dma_mask = &kirkwood_xor_dmamask, 660 - .coherent_dma_mask = DMA_64BIT_MASK, 660 + .coherent_dma_mask = DMA_BIT_MASK(64), 661 661 .platform_data = (void *)&kirkwood_xor10_data, 662 662 }, 663 663 }; ··· 683 683 .resource = kirkwood_xor11_resources, 684 684 .dev = { 685 685 .dma_mask = &kirkwood_xor_dmamask, 686 - .coherent_dma_mask = DMA_64BIT_MASK, 686 + .coherent_dma_mask = DMA_BIT_MASK(64), 687 687 .platform_data = (void *)&kirkwood_xor11_data, 688 688 }, 689 689 };
+2 -2
arch/arm/mach-orion5x/common.c
··· 486 486 .resource = orion5x_xor0_resources, 487 487 .dev = { 488 488 .dma_mask = &orion5x_xor_dmamask, 489 - .coherent_dma_mask = DMA_64BIT_MASK, 489 + .coherent_dma_mask = DMA_BIT_MASK(64), 490 490 .platform_data = (void *)&orion5x_xor0_data, 491 491 }, 492 492 }; ··· 512 512 .resource = orion5x_xor1_resources, 513 513 .dev = { 514 514 .dma_mask = &orion5x_xor_dmamask, 515 - .coherent_dma_mask = DMA_64BIT_MASK, 515 + .coherent_dma_mask = DMA_BIT_MASK(64), 516 516 .platform_data = (void *)&orion5x_xor1_data, 517 517 }, 518 518 };
+3 -3
arch/arm/plat-iop/adma.c
··· 143 143 .resource = iop3xx_dma_0_resources, 144 144 .dev = { 145 145 .dma_mask = &iop3xx_adma_dmamask, 146 - .coherent_dma_mask = DMA_64BIT_MASK, 146 + .coherent_dma_mask = DMA_BIT_MASK(64), 147 147 .platform_data = (void *) &iop3xx_dma_0_data, 148 148 }, 149 149 }; ··· 155 155 .resource = iop3xx_dma_1_resources, 156 156 .dev = { 157 157 .dma_mask = &iop3xx_adma_dmamask, 158 - .coherent_dma_mask = DMA_64BIT_MASK, 158 + .coherent_dma_mask = DMA_BIT_MASK(64), 159 159 .platform_data = (void *) &iop3xx_dma_1_data, 160 160 }, 161 161 }; ··· 167 167 .resource = iop3xx_aau_resources, 168 168 .dev = { 169 169 .dma_mask = &iop3xx_adma_dmamask, 170 - .coherent_dma_mask = DMA_64BIT_MASK, 170 + .coherent_dma_mask = DMA_BIT_MASK(64), 171 171 .platform_data = (void *) &iop3xx_aau_data, 172 172 }, 173 173 };
+1 -1
arch/ia64/sn/pci/pci_dma.c
··· 349 349 350 350 u64 sn_dma_get_required_mask(struct device *dev) 351 351 { 352 - return DMA_64BIT_MASK; 352 + return DMA_BIT_MASK(64); 353 353 } 354 354 EXPORT_SYMBOL_GPL(sn_dma_get_required_mask); 355 355
+1 -1
arch/powerpc/platforms/cell/iommu.c
··· 644 644 645 645 static int dma_fixed_dma_supported(struct device *dev, u64 mask) 646 646 { 647 - return mask == DMA_64BIT_MASK; 647 + return mask == DMA_BIT_MASK(64); 648 648 } 649 649 650 650 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+2 -2
drivers/ata/ahci.c
··· 2405 2405 int rc; 2406 2406 2407 2407 if (using_dac && 2408 - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2409 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 2408 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 2409 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2410 2410 if (rc) { 2411 2411 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2412 2412 if (rc) {
+2 -2
drivers/ata/sata_mv.c
··· 3913 3913 { 3914 3914 int rc; 3915 3915 3916 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3917 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3916 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3917 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3918 3918 if (rc) { 3919 3919 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3920 3920 if (rc) {
+2 -2
drivers/ata/sata_qstor.c
··· 584 584 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT); 585 585 586 586 if (have_64bit_bus && 587 - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 588 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 587 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 588 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 589 589 if (rc) { 590 590 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 591 591 if (rc) {
+2 -2
drivers/ata/sata_sil24.c
··· 1297 1297 host->iomap = iomap; 1298 1298 1299 1299 /* configure and activate the device */ 1300 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1301 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1300 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 1301 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1302 1302 if (rc) { 1303 1303 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1304 1304 if (rc) {
+2 -2
drivers/block/DAC960.c
··· 1372 1372 dma_addr_t CommandMailboxDMA; 1373 1373 DAC960_V2_CommandStatus_T CommandStatus; 1374 1374 1375 - if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK)) 1376 - Controller->BounceBufferLimit = DMA_64BIT_MASK; 1375 + if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64))) 1376 + Controller->BounceBufferLimit = DMA_BIT_MASK(64); 1377 1377 else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK)) 1378 1378 Controller->BounceBufferLimit = DMA_32BIT_MASK; 1379 1379 else
+1 -1
drivers/block/cciss.c
··· 3637 3637 hba[i]->pdev = pdev; 3638 3638 3639 3639 /* configure PCI DMA stuff */ 3640 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 3640 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 3641 3641 dac = 1; 3642 3642 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 3643 3643 dac = 0;
+2 -2
drivers/block/sx8.c
··· 1586 1586 goto err_out; 1587 1587 1588 1588 #ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ 1589 - rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 1589 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1590 1590 if (!rc) { 1591 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1591 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1592 1592 if (rc) { 1593 1593 printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", 1594 1594 pci_name(pdev));
+1 -1
drivers/block/umem.c
··· 829 829 dev_printk(KERN_INFO, &dev->dev, 830 830 "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); 831 831 832 - if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && 832 + if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) && 833 833 pci_set_dma_mask(dev, DMA_32BIT_MASK)) { 834 834 dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n"); 835 835 return -ENOMEM;
+2 -2
drivers/dma/ioat.c
··· 98 98 if (err) 99 99 goto err_request_regions; 100 100 101 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 101 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 102 102 if (err) 103 103 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 104 104 if (err) 105 105 goto err_set_dma_mask; 106 106 107 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 107 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 108 108 if (err) 109 109 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 110 110 if (err)
+1 -1
drivers/idle/i7300_idle.c
··· 178 178 179 179 static struct device dummy_dma_dev = { 180 180 .init_name = "fallback device", 181 - .coherent_dma_mask = DMA_64BIT_MASK, 181 + .coherent_dma_mask = DMA_BIT_MASK(64), 182 182 .dma_mask = &dummy_dma_dev.coherent_dma_mask, 183 183 }; 184 184
+1 -1
drivers/infiniband/hw/amso1100/c2.c
··· 989 989 } 990 990 991 991 if ((sizeof(dma_addr_t) > 4)) { 992 - ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK); 992 + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 993 993 if (ret < 0) { 994 994 printk(KERN_ERR PFX "64b DMA configuration failed\n"); 995 995 goto bail2;
+2 -2
drivers/infiniband/hw/ipath/ipath_driver.c
··· 470 470 goto bail_disable; 471 471 } 472 472 473 - ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 473 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 474 474 if (ret) { 475 475 /* 476 476 * if the 64 bit setup fails, try 32 bit. Some systems ··· 496 496 } 497 497 } 498 498 else { 499 - ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 499 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 500 500 if (ret) 501 501 dev_info(&pdev->dev, 502 502 "Unable to set DMA consistent mask "
+2 -2
drivers/infiniband/hw/mthca/mthca_main.c
··· 1016 1016 1017 1017 pci_set_master(pdev); 1018 1018 1019 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 1019 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1020 1020 if (err) { 1021 1021 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1022 1022 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); ··· 1025 1025 goto err_free_res; 1026 1026 } 1027 1027 } 1028 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1028 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1029 1029 if (err) { 1030 1030 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1031 1031 "consistent PCI DMA mask.\n");
+2 -2
drivers/infiniband/hw/nes/nes.c
··· 478 478 } 479 479 480 480 if ((sizeof(dma_addr_t) > 4)) { 481 - ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK); 481 + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 482 482 if (ret < 0) { 483 483 printk(KERN_ERR PFX "64b DMA mask configuration failed\n"); 484 484 goto bail2; 485 485 } 486 - ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK); 486 + ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); 487 487 if (ret) { 488 488 printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n"); 489 489 goto bail2;
+2 -2
drivers/message/fusion/mptbase.c
··· 1534 1534 1535 1535 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1536 1536 1537 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) 1538 - && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { 1537 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1538 + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1539 1539 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1540 1540 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1541 1541 ioc->name));
+2 -2
drivers/message/i2o/memory.c
··· 185 185 int dma_64 = 0; 186 186 187 187 mutex_lock(&mem_lock); 188 - if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { 188 + if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) { 189 189 dma_64 = 1; 190 190 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 191 191 mutex_unlock(&mem_lock); ··· 196 196 addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL); 197 197 198 198 if ((sizeof(dma_addr_t) > 4) && dma_64) 199 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 199 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 200 200 printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); 201 201 mutex_unlock(&mem_lock); 202 202
+1 -1
drivers/message/i2o/pci.c
··· 397 397 } 398 398 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 399 399 if (sizeof(dma_addr_t) > 4) { 400 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 400 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 401 401 printk(KERN_INFO "%s: 64-bit DMA unavailable\n", 402 402 c->name); 403 403 else {
+2 -2
drivers/net/8139cp.c
··· 1929 1929 1930 1930 /* Configure DMA attributes. */ 1931 1931 if ((sizeof(dma_addr_t) > 4) && 1932 - !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) && 1933 - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1932 + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) && 1933 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 1934 1934 pci_using_dac = 1; 1935 1935 } else { 1936 1936 pci_using_dac = 0;
+1 -1
drivers/net/acenic.c
··· 1161 1161 /* 1162 1162 * Configure DMA attributes. 1163 1163 */ 1164 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1164 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 1165 1165 ap->pci_using_dac = 1; 1166 1166 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 1167 1167 ap->pci_using_dac = 0;
+1 -1
drivers/net/bnx2.c
··· 7527 7527 if (CHIP_NUM(bp) == CHIP_NUM_5708) 7528 7528 persist_dma_mask = dma_mask = DMA_40BIT_MASK; 7529 7529 else 7530 - persist_dma_mask = dma_mask = DMA_64BIT_MASK; 7530 + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 7531 7531 7532 7532 /* Configure DMA attributes. */ 7533 7533 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+2 -2
drivers/net/bnx2x_main.c
··· 10979 10979 goto err_out_release; 10980 10980 } 10981 10981 10982 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) { 10982 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 10983 10983 bp->flags |= USING_DAC_FLAG; 10984 - if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) { 10984 + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 10985 10985 printk(KERN_ERR PFX "pci_set_consistent_dma_mask" 10986 10986 " failed, aborting\n"); 10987 10987 rc = -EIO;
+2 -2
drivers/net/cassini.c
··· 5074 5074 5075 5075 5076 5076 /* Configure DMA attributes. */ 5077 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5077 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 5078 5078 pci_using_dac = 1; 5079 5079 err = pci_set_consistent_dma_mask(pdev, 5080 - DMA_64BIT_MASK); 5080 + DMA_BIT_MASK(64)); 5081 5081 if (err < 0) { 5082 5082 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " 5083 5083 "for consistent allocations\n");
+2 -2
drivers/net/chelsio/cxgb2.c
··· 1056 1056 goto out_disable_pdev; 1057 1057 } 1058 1058 1059 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1059 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 1060 1060 pci_using_dac = 1; 1061 1061 1062 - if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { 1062 + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1063 1063 CH_ERR("%s: unable to obtain 64-bit DMA for " 1064 1064 "consistent allocations\n", pci_name(pdev)); 1065 1065 err = -ENODEV;
+2 -2
drivers/net/cxgb3/cxgb3_main.c
··· 3038 3038 goto out_release_regions; 3039 3039 } 3040 3040 3041 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3041 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3042 3042 pci_using_dac = 1; 3043 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3043 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3044 3044 if (err) { 3045 3045 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 3046 3046 "coherent allocations\n");
+2 -2
drivers/net/e1000/e1000_main.c
··· 962 962 if (err) 963 963 return err; 964 964 965 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && 966 - !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { 965 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 966 + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 967 967 pci_using_dac = 1; 968 968 } else { 969 969 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+2 -2
drivers/net/e1000e/netdev.c
··· 4763 4763 return err; 4764 4764 4765 4765 pci_using_dac = 0; 4766 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 4766 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 4767 4767 if (!err) { 4768 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 4768 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4769 4769 if (!err) 4770 4770 pci_using_dac = 1; 4771 4771 } else {
+2 -2
drivers/net/igb/igb_main.c
··· 1154 1154 return err; 1155 1155 1156 1156 pci_using_dac = 0; 1157 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 1157 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1158 1158 if (!err) { 1159 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1159 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1160 1160 if (!err) 1161 1161 pci_using_dac = 1; 1162 1162 } else {
+2 -2
drivers/net/ioc3-eth.c
··· 1226 1226 int err, pci_using_dac; 1227 1227 1228 1228 /* Configure DMA attributes. */ 1229 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 1229 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1230 1230 if (!err) { 1231 1231 pci_using_dac = 1; 1232 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1232 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1233 1233 if (err < 0) { 1234 1234 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " 1235 1235 "for consistent allocations\n", pci_name(pdev));
+2 -2
drivers/net/ixgb/ixgb_main.c
··· 365 365 if (err) 366 366 return err; 367 367 368 - if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && 369 - !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { 368 + if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && 369 + !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { 370 370 pci_using_dac = 1; 371 371 } else { 372 372 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+2 -2
drivers/net/ixgbe/ixgbe_main.c
··· 4509 4509 if (err) 4510 4510 return err; 4511 4511 4512 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && 4513 - !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { 4512 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 4513 + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 4514 4514 pci_using_dac = 1; 4515 4515 } else { 4516 4516 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+2 -2
drivers/net/mlx4/main.c
··· 1076 1076 1077 1077 pci_set_master(pdev); 1078 1078 1079 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 1079 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1080 1080 if (err) { 1081 1081 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1082 1082 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); ··· 1085 1085 goto err_release_bar2; 1086 1086 } 1087 1087 } 1088 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 1088 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1089 1089 if (err) { 1090 1090 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1091 1091 "consistent PCI DMA mask.\n");
+2 -2
drivers/net/myri10ge/myri10ge.c
··· 3792 3792 3793 3793 pci_set_master(pdev); 3794 3794 dac_enabled = 1; 3795 - status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 3795 + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3796 3796 if (status != 0) { 3797 3797 dac_enabled = 0; 3798 3798 dev_err(&pdev->dev, ··· 3804 3804 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3805 3805 goto abort_with_enabled; 3806 3806 } 3807 - (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3807 + (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3808 3808 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3809 3809 &mgp->cmd_bus, GFP_KERNEL); 3810 3810 if (mgp->cmd == NULL)
+1 -1
drivers/net/ns83820.c
··· 1973 1973 1974 1974 /* See if we can set the dma mask early on; failure is fatal. */ 1975 1975 if (sizeof(dma_addr_t) == 8 && 1976 - !pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) { 1976 + !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { 1977 1977 using_dac = 1; 1978 1978 } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { 1979 1979 using_dac = 0;
+2 -2
drivers/net/qla3xxx.c
··· 3934 3934 3935 3935 pci_set_master(pdev); 3936 3936 3937 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3937 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3938 3938 pci_using_dac = 1; 3939 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3939 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3940 3940 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3941 3941 pci_using_dac = 0; 3942 3942 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+2 -2
drivers/net/qlge/qlge_main.c
··· 3726 3726 } 3727 3727 3728 3728 pci_set_master(pdev); 3729 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3729 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3730 3730 set_bit(QL_DMA64, &qdev->flags); 3731 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3731 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3732 3732 } else { 3733 3733 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3734 3734 if (!err)
+1 -1
drivers/net/r8169.c
··· 2046 2046 tp->cp_cmd = PCIMulRW | RxChkSum; 2047 2047 2048 2048 if ((sizeof(dma_addr_t) > 4) && 2049 - !pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) { 2049 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { 2050 2050 tp->cp_cmd |= PCIDAC; 2051 2051 dev->features |= NETIF_F_HIGHDMA; 2052 2052 } else {
+2 -2
drivers/net/s2io.c
··· 7775 7775 return ret; 7776 7776 } 7777 7777 7778 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 7778 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 7779 7779 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n"); 7780 7780 dma_flag = TRUE; 7781 7781 if (pci_set_consistent_dma_mask 7782 - (pdev, DMA_64BIT_MASK)) { 7782 + (pdev, DMA_BIT_MASK(64))) { 7783 7783 DBG_PRINT(ERR_DBG, 7784 7784 "Unable to obtain 64bit DMA for \ 7785 7785 consistent allocations\n");
+2 -2
drivers/net/skge.c
··· 3912 3912 3913 3913 pci_set_master(pdev); 3914 3914 3915 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3915 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3916 3916 using_dac = 1; 3917 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3917 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3918 3918 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 3919 3919 using_dac = 0; 3920 3920 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+2 -2
drivers/net/sky2.c
··· 4374 4374 pci_set_master(pdev); 4375 4375 4376 4376 if (sizeof(dma_addr_t) > sizeof(u32) && 4377 - !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 4377 + !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) { 4378 4378 using_dac = 1; 4379 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 4379 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4380 4380 if (err < 0) { 4381 4381 dev_err(&pdev->dev, "unable to obtain 64 bit DMA " 4382 4382 "for consistent allocations\n");
+1 -1
drivers/net/sungem.c
··· 3042 3042 */ 3043 3043 if (pdev->vendor == PCI_VENDOR_ID_SUN && 3044 3044 pdev->device == PCI_DEVICE_ID_SUN_GEM && 3045 - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3045 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3046 3046 pci_using_dac = 1; 3047 3047 } else { 3048 3048 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+2 -2
drivers/net/tehuti.c
··· 1941 1941 if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */ 1942 1942 goto err_pci; /* it's not a problem though */ 1943 1943 1944 - if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && 1945 - !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { 1944 + if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && 1945 + !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { 1946 1946 pci_using_dac = 1; 1947 1947 } else { 1948 1948 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
+1 -4
drivers/net/tehuti.h
··· 31 31 #include <linux/vmalloc.h> 32 32 #include <linux/firmware.h> 33 33 #include <asm/byteorder.h> 34 + #include <linux/dma-mapping.h> 34 35 35 36 /* Compile Time Switches */ 36 37 /* start */ ··· 98 97 99 98 #define READ_REG(pp, reg) readl(pp->pBdxRegs + reg) 100 99 #define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg) 101 - 102 - #ifndef DMA_64BIT_MASK 103 - # define DMA_64BIT_MASK 0xffffffffffffffffULL 104 - #endif 105 100 106 101 #ifndef DMA_32BIT_MASK 107 102 # define DMA_32BIT_MASK 0x00000000ffffffffULL
+2 -2
drivers/net/tg3.c
··· 13232 13232 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { 13233 13233 persist_dma_mask = dma_mask = DMA_40BIT_MASK; 13234 13234 #ifdef CONFIG_HIGHMEM 13235 - dma_mask = DMA_64BIT_MASK; 13235 + dma_mask = DMA_BIT_MASK(64); 13236 13236 #endif 13237 13237 } else 13238 - persist_dma_mask = dma_mask = DMA_64BIT_MASK; 13238 + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 13239 13239 13240 13240 /* Configure DMA attributes. */ 13241 13241 if (dma_mask > DMA_32BIT_MASK) {
+1 -1
drivers/net/usb/usbnet.c
··· 1180 1180 #if 0 1181 1181 // dma_supported() is deeply broken on almost all architectures 1182 1182 // possible with some EHCI controllers 1183 - if (dma_supported (&udev->dev, DMA_64BIT_MASK)) 1183 + if (dma_supported (&udev->dev, DMA_BIT_MASK(64))) 1184 1184 net->features |= NETIF_F_HIGHDMA; 1185 1185 #endif 1186 1186
+3 -3
drivers/net/wireless/b43/dma.c
··· 772 772 773 773 tmp = b43_read32(dev, SSB_TMSHIGH); 774 774 if (tmp & SSB_TMSHIGH_DMA64) 775 - return DMA_64BIT_MASK; 775 + return DMA_BIT_MASK(64); 776 776 mmio_base = b43_dmacontroller_base(0, 0); 777 777 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); 778 778 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); ··· 788 788 return B43_DMA_30BIT; 789 789 if (dmamask == DMA_32BIT_MASK) 790 790 return B43_DMA_32BIT; 791 - if (dmamask == DMA_64BIT_MASK) 791 + if (dmamask == DMA_BIT_MASK(64)) 792 792 return B43_DMA_64BIT; 793 793 B43_WARN_ON(1); 794 794 return B43_DMA_30BIT; ··· 999 999 err = ssb_dma_set_mask(dev->dev, mask); 1000 1000 if (!err) 1001 1001 break; 1002 - if (mask == DMA_64BIT_MASK) { 1002 + if (mask == DMA_BIT_MASK(64)) { 1003 1003 mask = DMA_32BIT_MASK; 1004 1004 fallback = 1; 1005 1005 continue;
+3 -3
drivers/net/wireless/b43legacy/dma.c
··· 846 846 847 847 tmp = b43legacy_read32(dev, SSB_TMSHIGH); 848 848 if (tmp & SSB_TMSHIGH_DMA64) 849 - return DMA_64BIT_MASK; 849 + return DMA_BIT_MASK(64); 850 850 mmio_base = b43legacy_dmacontroller_base(0, 0); 851 851 b43legacy_write32(dev, 852 852 mmio_base + B43legacy_DMA32_TXCTL, ··· 865 865 return B43legacy_DMA_30BIT; 866 866 if (dmamask == DMA_32BIT_MASK) 867 867 return B43legacy_DMA_32BIT; 868 - if (dmamask == DMA_64BIT_MASK) 868 + if (dmamask == DMA_BIT_MASK(64)) 869 869 return B43legacy_DMA_64BIT; 870 870 B43legacy_WARN_ON(1); 871 871 return B43legacy_DMA_30BIT; ··· 1042 1042 err = ssb_dma_set_mask(dev->dev, mask); 1043 1043 if (!err) 1044 1044 break; 1045 - if (mask == DMA_64BIT_MASK) { 1045 + if (mask == DMA_BIT_MASK(64)) { 1046 1046 mask = DMA_32BIT_MASK; 1047 1047 fallback = 1; 1048 1048 continue;
+1 -1
drivers/pci/intel-iommu.c
··· 57 57 58 58 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) 59 59 #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) 60 - #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) 60 + #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 61 61 62 62 /* global iommu list, set NULL for ignored DMAR units */ 63 63 static struct intel_iommu **g_iommus;
+2 -2
drivers/scsi/3w-9xxx.c
··· 2016 2016 pci_set_master(pdev); 2017 2017 pci_try_set_mwi(pdev); 2018 2018 2019 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) 2020 - || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) 2019 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 2020 + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) 2021 2021 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) 2022 2022 || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { 2023 2023 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+2 -2
drivers/scsi/aacraid/aachba.c
··· 1402 1402 } 1403 1403 1404 1404 if(dev->dac_support != 0) { 1405 - if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && 1406 - !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { 1405 + if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) && 1406 + !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) { 1407 1407 if (!dev->in_reset) 1408 1408 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", 1409 1409 dev->name, dev->id);
+1 -1
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
··· 195 195 const u64 required_mask = dma_get_required_mask(dev); 196 196 197 197 if (required_mask > DMA_39BIT_MASK && 198 - dma_set_mask(dev, DMA_64BIT_MASK) == 0) 198 + dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) 199 199 ahd->flags |= AHD_64BIT_ADDRESSING; 200 200 else if (required_mask > DMA_32BIT_MASK && 201 201 dma_set_mask(dev, DMA_39BIT_MASK) == 0)
+2 -2
drivers/scsi/aic94xx/aic94xx_init.c
··· 790 790 goto Err_remove; 791 791 792 792 err = -ENODEV; 793 - if (!pci_set_dma_mask(dev, DMA_64BIT_MASK) 794 - && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK)) 793 + if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64)) 794 + && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) 795 795 ; 796 796 else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK) 797 797 && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))
+1 -1
drivers/scsi/arcmsr/arcmsr_hba.c
··· 393 393 acb = (struct AdapterControlBlock *)host->hostdata; 394 394 memset(acb, 0, sizeof (struct AdapterControlBlock)); 395 395 396 - error = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 396 + error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 397 397 if (error) { 398 398 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 399 399 if (error) {
+1 -1
drivers/scsi/dpt_i2o.c
··· 1014 1014 * See if we should enable dma64 mode. 1015 1015 */ 1016 1016 if (sizeof(dma_addr_t) > 4 && 1017 - pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) { 1017 + pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) { 1018 1018 if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK) 1019 1019 dma64 = 1; 1020 1020 }
+1 -1
drivers/scsi/gdth.c
··· 5030 5030 } 5031 5031 } else { 5032 5032 shp->max_cmd_len = 16; 5033 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5033 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 5034 5034 printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum); 5035 5035 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 5036 5036 printk(KERN_WARNING "GDT-PCI %d: "
+1 -1
drivers/scsi/hptiop.c
··· 958 958 pci_set_master(pcidev); 959 959 960 960 /* Enable 64bit DMA if possible */ 961 - if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) { 961 + if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { 962 962 if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) { 963 963 printk(KERN_ERR "hptiop: fail to set dma_mask\n"); 964 964 goto disable_pci_device;
+1 -1
drivers/scsi/ips.c
··· 7048 7048 * are guaranteed to be < 4G. 7049 7049 */ 7050 7050 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && 7051 - !pci_set_dma_mask(ha->pcidev, DMA_64BIT_MASK)) { 7051 + !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) { 7052 7052 (ha)->flags |= IPS_HA_ENH_SG; 7053 7053 } else { 7054 7054 if (pci_set_dma_mask(ha->pcidev, DMA_32BIT_MASK) != 0) {
+1 -1
drivers/scsi/lpfc/lpfc_init.c
··· 2660 2660 pci_save_state(pdev); 2661 2661 pci_try_set_mwi(pdev); 2662 2662 2663 - if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 2663 + if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 2664 2664 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 2665 2665 goto out_idr_remove; 2666 2666
+1 -1
drivers/scsi/megaraid.c
··· 4793 4793 4794 4794 /* Set the Mode of addressing to 64 bit if we can */ 4795 4795 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) { 4796 - pci_set_dma_mask(pdev, DMA_64BIT_MASK); 4796 + pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 4797 4797 adapter->has_64bit_addr = 1; 4798 4798 } else { 4799 4799 pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+1 -1
drivers/scsi/megaraid/megaraid_mbox.c
··· 900 900 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || 901 901 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && 902 902 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { 903 - if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) { 903 + if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) { 904 904 con_log(CL_ANN, (KERN_WARNING 905 905 "megaraid: DMA mask for 64-bit failed\n")); 906 906
+1 -1
drivers/scsi/megaraid/megaraid_sas.c
··· 2497 2497 * All our contollers are capable of performing 64-bit DMA 2498 2498 */ 2499 2499 if (IS_DMA64) { 2500 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) { 2500 + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 2501 2501 2502 2502 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) 2503 2503 goto fail_set_dma_mask;
+2 -2
drivers/scsi/mvsas.c
··· 875 875 { 876 876 int rc; 877 877 878 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 879 - rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 878 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 879 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 880 880 if (rc) { 881 881 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 882 882 if (rc) {
+1 -1
drivers/scsi/qla1280.c
··· 4275 4275 ha->devnum = devnum; /* specifies microcode load address */ 4276 4276 4277 4277 #ifdef QLA_64BIT_PTR 4278 - if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 4278 + if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 4279 4279 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) { 4280 4280 printk(KERN_WARNING "scsi(%li): Unable to set a " 4281 4281 "suitable DMA mask - aborting\n", ha->host_no);
+2 -2
drivers/scsi/qla2xxx/qla_os.c
··· 1176 1176 /* Assume a 32bit DMA mask. */ 1177 1177 ha->flags.enable_64bit_addressing = 0; 1178 1178 1179 - if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) { 1179 + if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 1180 1180 /* Any upper-dword bits set? */ 1181 1181 if (MSD(dma_get_required_mask(&ha->pdev->dev)) && 1182 - !pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1182 + !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 1183 1183 /* Ok, a 64bit DMA mask is applicable. */ 1184 1184 ha->flags.enable_64bit_addressing = 1; 1185 1185 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+2 -2
drivers/scsi/qla4xxx/ql4_os.c
··· 1369 1369 int retval; 1370 1370 1371 1371 /* Update our PCI device dma_mask for full 64 bit mask */ 1372 - if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) { 1373 - if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) { 1372 + if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 1373 + if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 1374 1374 dev_dbg(&ha->pdev->dev, 1375 1375 "Failed to set 64 bit PCI consistent mask; " 1376 1376 "using 32 bit.\n");
+2 -2
drivers/scsi/stex.c
··· 1395 1395 { 1396 1396 int ret; 1397 1397 1398 - if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) 1399 - && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) 1398 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1399 + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) 1400 1400 return 0; 1401 1401 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1402 1402 if (!ret)
+1 -1
drivers/scsi/sym53c8xx_2/sym_hipd.h
··· 1094 1094 (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \ 1095 1095 } while (0) 1096 1096 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 1097 - #define DMA_DAC_MASK DMA_64BIT_MASK 1097 + #define DMA_DAC_MASK DMA_BIT_MASK(64) 1098 1098 int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s); 1099 1099 static inline void 1100 1100 sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
+2 -2
drivers/sn/ioc3.c
··· 617 617 pci_set_master(pdev); 618 618 619 619 #ifdef USE_64BIT_DMA 620 - ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 620 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 621 621 if (!ret) { 622 - ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 622 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 623 623 if (ret < 0) { 624 624 printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA " 625 625 "for consistent allocations\n",
+2 -2
drivers/staging/altpciechdma/altpciechdma.c
··· 849 849 #if 1 /* @todo For now, disable 64-bit, because I do not understand the implications (DAC!) */ 850 850 /* query for DMA transfer */ 851 851 /* @see Documentation/PCI/PCI-DMA-mapping.txt */ 852 - if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) { 853 - pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK); 852 + if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) { 853 + pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)); 854 854 /* use 64-bit DMA */ 855 855 printk(KERN_DEBUG "Using a 64-bit DMA mask.\n"); 856 856 } else
+2 -2
drivers/staging/slicoss/slicoss.c
··· 371 371 printk(KERN_DEBUG "%s\n", slic_proc_version); 372 372 } 373 373 374 - err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK); 374 + err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 375 375 if (err) { 376 - err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK); 376 + err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); 377 377 if (err) 378 378 goto err_out_disable_pci; 379 379 }
+2 -2
drivers/staging/sxg/sxg.c
··· 934 934 935 935 pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id); 936 936 937 - if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) { 938 - DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n"); 937 + if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) { 938 + DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n"); 939 939 } else { 940 940 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) { 941 941 DBG_ERROR
+1 -1
drivers/usb/host/ehci-hcd.c
··· 622 622 ehci_writel(ehci, 0, &ehci->regs->segment); 623 623 #if 0 624 624 // this is deeply broken on almost all architectures 625 - if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK)) 625 + if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64))) 626 626 ehci_info(ehci, "enabled 64bit DMA\n"); 627 627 #endif 628 628 }
+2 -2
drivers/uwb/whci.c
··· 160 160 pci_enable_msi(pci); 161 161 pci_set_master(pci); 162 162 err = -ENXIO; 163 - if (!pci_set_dma_mask(pci, DMA_64BIT_MASK)) 164 - pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK); 163 + if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64))) 164 + pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); 165 165 else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK)) 166 166 pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK); 167 167 else