iop-adma.c: replace remaining __FUNCTION__ occurrences

__FUNCTION__ is gcc-specific; use the C99-standard __func__ instead.
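For context (an illustrative sketch, not part of the patch): C99 requires __func__ as a predefined identifier, behaving as if each function declared a local static string holding its own name, while __FUNCTION__ is a gcc extension kept only for backward compatibility, so the substitution is behavior-preserving:

	#include <stdio.h>

	/* C99 says every function behaves as if it began with:
	 *     static const char __func__[] = "function-name";
	 * __FUNCTION__ is a gcc extension with the same value.
	 */
	static void demo(void)
	{
		printf("%s\n", __func__);	/* prints "demo" */
	}

	int main(void)
	{
		demo();
		return 0;
	}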

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Authored by Harvey Harrison, committed by Dan Williams (3d9b525b, 9c98718e)

+16 -16
drivers/dma/iop-adma.c
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -140,7 +140,7 @@
 	int busy = iop_chan_is_busy(iop_chan);
 	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 	/* free completed slots from the chain starting with
 	 * the oldest descriptor
 	 */
@@ -438,7 +438,7 @@
 	spin_unlock_bh(&iop_chan->lock);
 
 	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
-		__FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+		__func__, sw_desc->async_tx.cookie, sw_desc->idx);
 
 	return cookie;
 }
@@ -520,7 +520,7 @@
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
@@ -548,7 +548,7 @@
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
@@ -580,7 +580,7 @@
 	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__FUNCTION__, len);
+		__func__, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
@@ -614,7 +614,7 @@
 
 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
-		__FUNCTION__, src_cnt, len, flags);
+		__func__, src_cnt, len, flags);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
@@ -648,7 +648,7 @@
 		return NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
-		__FUNCTION__, src_cnt, len);
+		__func__, src_cnt, len);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
@@ -659,7 +659,7 @@
 		iop_desc_set_zero_sum_byte_count(grp_start, len);
 		grp_start->xor_check_result = result;
 		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
-			__FUNCTION__, grp_start->xor_check_result);
+			__func__, grp_start->xor_check_result);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
 		while (src_cnt--)
@@ -700,7 +700,7 @@
 	iop_chan->last_used = NULL;
 
 	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
-		__FUNCTION__, iop_chan->slots_allocated);
+		__func__, iop_chan->slots_allocated);
 	spin_unlock_bh(&iop_chan->lock);
 
 	/* one is ok since we left it on there on purpose */
@@ -753,7 +753,7 @@
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -766,7 +766,7 @@
 {
 	struct iop_adma_chan *chan = data;
 
-	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
 	tasklet_schedule(&chan->irq_tasklet);
 
@@ -823,7 +823,7 @@
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -906,7 +906,7 @@
 	int err = 0;
 	struct iop_adma_chan *iop_chan;
 
-	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(device->common.dev, "%s\n", __func__);
 
 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1159,7 +1159,7 @@
 	}
 
 	dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
-		__FUNCTION__, adev->dma_desc_pool_virt,
+		__func__, adev->dma_desc_pool_virt,
 		(void *) adev->dma_desc_pool);
 
 	adev->id = plat_data->hw_id;
@@ -1289,7 +1289,7 @@
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
@@ -1346,7 +1346,7 @@
 	dma_cookie_t cookie;
 	int slot_cnt, slots_per_op;
 
-	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
 	spin_lock_bh(&iop_chan->lock);
 	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);