[PATCH] ib_verbs: Use explicit if-else statements to avoid errors with do-while macros

At least on PPC, the "op ? op : dma" construct causes a compile failure
because the dma_* functions are do{}while(0) macros.

This turns all of them into proper if/else to avoid this problem.

Signed-off-by: Ben Collins <bcollins@ubuntu.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ben Collins and committed by Linus Torvalds (commit d1998ef3, cc016448).

+39 -31
include/rdma/ib_verbs.h
@@ -1456,9 +1456,9 @@
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->mapping_error(dev, dma_addr) :
-		dma_mapping_error(dma_addr);
+	if (dev->dma_ops)
+		return dev->dma_ops->mapping_error(dev, dma_addr);
+	return dma_mapping_error(dma_addr);
 }
 
 /**
@@ -1472,9 +1472,9 @@
 				  void *cpu_addr, size_t size,
 				  enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
-		dma_map_single(dev->dma_device, cpu_addr, size, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
 /**
@@ -1488,8 +1488,9 @@
 				     u64 addr, size_t size,
 				     enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_single(dev, addr, size, direction) :
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_single(dev, addr, size, direction);
+	else
 		dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
@@ -1508,9 +1507,9 @@
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_page(dev, page, offset, size, direction) :
-		dma_map_page(dev->dma_device, page, offset, size, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_page(dev, page, offset, size, direction);
+	return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
 /**
@@ -1524,8 +1523,9 @@
 				   u64 addr, size_t size,
 				   enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_page(dev, addr, size, direction) :
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_page(dev, addr, size, direction);
+	else
 		dma_unmap_page(dev->dma_device, addr, size, direction);
 }
 
@@ -1541,9 +1539,9 @@
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction direction)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->map_sg(dev, sg, nents, direction) :
-		dma_map_sg(dev->dma_device, sg, nents, direction);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg(dev, sg, nents, direction);
+	return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
 
 /**
@@ -1557,8 +1555,9 @@
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction direction)
 {
-	dev->dma_ops ?
-		dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
+	else
 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
@@ -1571,8 +1568,9 @@
 static inline u64 ib_sg_dma_address(struct ib_device *dev,
 				    struct scatterlist *sg)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_address(dev, sg);
+	return sg_dma_address(sg);
 }
 
 /**
@@ -1584,8 +1580,9 @@
 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
 					 struct scatterlist *sg)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_len(dev, sg);
+	return sg_dma_len(sg);
 }
 
 /**
@@ -1601,8 +1596,9 @@
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	dev->dma_ops ?
-		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
+	else
 		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
@@ -1619,8 +1613,9 @@
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	dev->dma_ops ?
-		dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
+	else
 		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
@@ -1637,9 +1630,9 @@
 					   u64 *dma_handle,
 					   gfp_t flag)
 {
-	return dev->dma_ops ?
-		dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
-		dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	if (dev->dma_ops)
+		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }
 
 /**
@@ -1653,8 +1646,9 @@
 					size_t size, void *cpu_addr,
 					u64 dma_handle)
 {
-	dev->dma_ops ?
-		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
+	if (dev->dma_ops)
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	else
 		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }