Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

md: replace remaining __FUNCTION__ occurrences

__FUNCTION__ is gcc-specific, use __func__

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
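
Background: __func__ is a predefined identifier in standard C (C99 6.4.2.2),
while __FUNCTION__ is a GCC extension kept only for backwards compatibility.
A minimal sketch of what the standard guarantees (the function name show_name
below is illustrative, not taken from the patch):

	#include <stdio.h>

	static void show_name(void)
	{
		/* C99 specifies that __func__ behaves as if the function
		 * contained the declaration
		 *     static const char __func__[] = "show_name";
		 * __FUNCTION__ is a non-standard GCC alias for the same. */
		printf("%s\n", __func__);
	}

	int main(void)
	{
		show_name();	/* prints "show_name" */
		return 0;
	}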

Authored by Harvey Harrison, committed by Linus Torvalds
e46b272b 9a7b2b0f

2 files changed, 25 insertions(+), 25 deletions(-)
drivers/md/dm-uevent.c (+11 -11)
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -78,7 +78,7 @@
 
 	event = dm_uevent_alloc(md);
 	if (!event) {
-		DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
+		DMERR("%s: dm_uevent_alloc() failed", __func__);
 		goto err_nomem;
 	}
 
@@ -86,32 +86,32 @@
 
 	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
 		DMERR("%s: add_uevent_var() for DM_TARGET failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
 		DMERR("%s: add_uevent_var() for DM_ACTION failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
 			   dm_next_uevent_seq(md))) {
 		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
-		DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
+		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
 		goto err_add;
 	}
 
 	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
 			   nr_valid_paths)) {
 		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
-		      __FUNCTION__);
+		      __func__);
 		goto err_add;
 	}
 
@@ -146,25 +146,25 @@
 		if (dm_copy_name_and_uuid(event->md, event->name,
 					  event->uuid)) {
 			DMERR("%s: dm_copy_name_and_uuid() failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
 			DMERR("%s: add_uevent_var() for DM_NAME failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
 			DMERR("%s: add_uevent_var() for DM_UUID failed",
-			      __FUNCTION__);
+			      __func__);
 			goto uevent_free;
 		}
 
 		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
 		if (r)
-			DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
+			DMERR("%s: kobject_uevent_env failed", __func__);
 uevent_free:
 		dm_uevent_free(event);
 	}
@@ -187,7 +187,7 @@
 	struct dm_uevent *event;
 
 	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
-		DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
+		DMERR("%s: Invalid event_type %d", __func__, event_type);
 		goto out;
 	}
 
drivers/md/raid5.c (+14 -14)
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,7 +433,7 @@
 
 		bi->bi_bdev = rdev->bdev;
 		pr_debug("%s: for %llu schedule op %ld on disc %d\n",
-			__FUNCTION__, (unsigned long long)sh->sector,
+			__func__, (unsigned long long)sh->sector,
 			bi->bi_rw, i);
 		atomic_inc(&sh->count);
 		bi->bi_sector = sh->sector + rdev->data_offset;
@@ -520,7 +520,7 @@
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	/* clear completed biofills */
@@ -569,7 +569,7 @@
 	raid5_conf_t *conf = sh->raid_conf;
 	int i;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = sh->disks; i--; ) {
@@ -600,7 +600,7 @@
 	int target = sh->ops.target;
 	struct r5dev *tgt = &sh->dev[target];
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(R5_UPTODATE, &tgt->flags);
@@ -625,7 +625,7 @@
 	int i;
 
 	pr_debug("%s: stripe %llu block: %d\n",
-		__FUNCTION__, (unsigned long long)sh->sector, target);
+		__func__, (unsigned long long)sh->sector, target);
 	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 
 	for (i = disks; i--; )
@@ -653,7 +653,7 @@
 {
 	struct stripe_head *sh = stripe_head_ref;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
@@ -670,7 +670,7 @@
 	/* existing parity data subtracted */
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -699,7 +699,7 @@
 	 */
 	int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -744,7 +744,7 @@
 {
 	struct stripe_head *sh = stripe_head_ref;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
@@ -757,7 +757,7 @@
 	struct stripe_head *sh = stripe_head_ref;
 	int disks = sh->disks, i, pd_idx = sh->pd_idx;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -787,7 +787,7 @@
 	unsigned long flags;
 	dma_async_tx_callback callback;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	/* check if prexor is active which means only process blocks
@@ -837,7 +837,7 @@
 	struct stripe_head *sh = stripe_head_ref;
 	int pd_idx = sh->pd_idx;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
@@ -859,7 +859,7 @@
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-	pr_debug("%s: stripe %llu\n", __FUNCTION__,
+	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
 
 	for (i = disks; i--; ) {
@@ -1759,7 +1759,7 @@
 			locked++;
 
 	pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
-		__FUNCTION__, (unsigned long long)sh->sector,
+		__func__, (unsigned long long)sh->sector,
 		locked, sh->ops.pending);
 
 	return locked;
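
Every hunk above follows the same idiom: the message never hard-codes the
function name; "%s" plus __func__ supplies it, so the string stays correct if
the function is renamed or the line is copied elsewhere. A self-contained
sketch of the idiom, with plain fprintf standing in for the kernel's
DMERR/pr_debug macros (fetch_block and the message text are illustrative
only, not from the patch):

	#include <stdio.h>

	static void fetch_block(unsigned long long sector)
	{
		/* __func__ expands to "fetch_block" here; the identical
		 * logging line works unchanged in any other function. */
		fprintf(stderr, "%s: stripe %llu\n", __func__, sector);
	}

	int main(void)
	{
		fetch_block(42);	/* prints "fetch_block: stripe 42" */
		return 0;
	}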