[PATCH] ib_verbs: Use explicit if-else statements to avoid errors with do-while macros

At least on PPC, the "op ? op : dma" construct causes a compile failure
because the dma_* helpers there are do { } while (0) macros, and a
statement macro cannot be used as an operand of the ternary operator.

This turns all of them into proper if/else statements to avoid the problem.
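To make the failure concrete, here is a minimal standalone sketch (an
editor's illustration with stand-in names, not the real dma_* helpers):
a do { } while (0) macro expands to a statement, and both arms of ?:
must be expressions, so the ternary form cannot compile:

    #include <stdio.h>

    /* Statement-style macro, like dma_unmap_single() on PPC. */
    #define dma_unmap_stub(addr) \
        do { printf("dma unmap %ld\n", (long)(addr)); } while (0)

    static void ops_unmap_stub(long addr)
    {
        printf("ops unmap %ld\n", addr);
    }

    int main(void)
    {
        int have_ops = 0;

        /* have_ops ? ops_unmap_stub(1) : dma_unmap_stub(1);
         *
         * ...fails to compile: dma_unmap_stub() expands to a
         * do { } while (0) statement, which is not an expression. */

        /* The explicit if/else form this patch switches to is valid: */
        if (have_ops)
            ops_unmap_stub(1);
        else
            dma_unmap_stub(1);

        return 0;
    }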

Signed-off-by: Ben Collins <bcollins@ubuntu.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ben Collins, committed by Linus Torvalds (d1998ef3, cc016448)

+39 -31
include/rdma/ib_verbs.h
···
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->mapping_error(dev, dma_addr) :
-                dma_mapping_error(dma_addr);
+        if (dev->dma_ops)
+                return dev->dma_ops->mapping_error(dev, dma_addr);
+        return dma_mapping_error(dma_addr);
 }

 /**
···
                                     void *cpu_addr, size_t size,
                                     enum dma_data_direction direction)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
-                dma_map_single(dev->dma_device, cpu_addr, size, direction);
+        if (dev->dma_ops)
+                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
+        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }

 /**
···
                                        u64 addr, size_t size,
                                        enum dma_data_direction direction)
 {
-        dev->dma_ops ?
-                dev->dma_ops->unmap_single(dev, addr, size, direction) :
+        if (dev->dma_ops)
+                dev->dma_ops->unmap_single(dev, addr, size, direction);
+        else
                 dma_unmap_single(dev->dma_device, addr, size, direction);
 }
···
                                           size_t size,
                                           enum dma_data_direction direction)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->map_page(dev, page, offset, size, direction) :
-                dma_map_page(dev->dma_device, page, offset, size, direction);
+        if (dev->dma_ops)
+                return dev->dma_ops->map_page(dev, page, offset, size, direction);
+        return dma_map_page(dev->dma_device, page, offset, size, direction);
 }

 /**
···
                                      u64 addr, size_t size,
                                      enum dma_data_direction direction)
 {
-        dev->dma_ops ?
-                dev->dma_ops->unmap_page(dev, addr, size, direction) :
+        if (dev->dma_ops)
+                dev->dma_ops->unmap_page(dev, addr, size, direction);
+        else
                 dma_unmap_page(dev->dma_device, addr, size, direction);
 }
···
                                 struct scatterlist *sg, int nents,
                                 enum dma_data_direction direction)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->map_sg(dev, sg, nents, direction) :
-                dma_map_sg(dev->dma_device, sg, nents, direction);
+        if (dev->dma_ops)
+                return dev->dma_ops->map_sg(dev, sg, nents, direction);
+        return dma_map_sg(dev->dma_device, sg, nents, direction);
 }

 /**
···
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
 {
-        dev->dma_ops ?
-                dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+        if (dev->dma_ops)
+                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
+        else
                 dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
···
 static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                     struct scatterlist *sg)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+        if (dev->dma_ops)
+                return dev->dma_ops->dma_address(dev, sg);
+        return sg_dma_address(sg);
 }

 /**
···
 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                          struct scatterlist *sg)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+        if (dev->dma_ops)
+                return dev->dma_ops->dma_len(dev, sg);
+        return sg_dma_len(sg);
 }

 /**
···
                                               size_t size,
                                               enum dma_data_direction dir)
 {
-        dev->dma_ops ?
-                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+        if (dev->dma_ops)
+                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
+        else
                 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
···
                                                  size_t size,
                                                  enum dma_data_direction dir)
 {
-        dev->dma_ops ?
-                dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+        if (dev->dma_ops)
+                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
+        else
                 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
···
                                            u64 *dma_handle,
                                            gfp_t flag)
 {
-        return dev->dma_ops ?
-                dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
-                dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+        if (dev->dma_ops)
+                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+        return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }

 /**
···
                                         size_t size, void *cpu_addr,
                                         u64 dma_handle)
 {
-        dev->dma_ops ?
-                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
+        if (dev->dma_ops)
+                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+        else
                 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }
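Background aside (general C, not from the patch): the dma_* helpers are
wrapped in do { } while (0) in the first place so that a multi-statement
macro behaves as a single statement after an unbraced if, with a dangling
else still binding correctly. The trade-off is that the macro is then a
statement and can never sit where an expression is required, which is
exactly what the ternary form demanded. A self-contained sketch with
illustrative names:

    #include <stdio.h>

    /* Two statements glued into one by do { } while (0). */
    #define flush_and_sync() \
        do { printf("flush\n"); printf("sync\n"); } while (0)

    int main(void)
    {
        int dirty = 1;

        if (dirty)
            flush_and_sync();   /* expands to one statement, so... */
        else
            printf("clean\n");  /* ...this else still binds correctly */

        /* But a statement cannot be a ternary operand:
         *
         *     dirty ? flush_and_sync() : printf("clean\n");
         *
         * would fail to compile, the same breakage the hunks above fix. */
        return 0;
    }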