Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: Switch printk users from %pf and %pF to %ps and %pS, respectively

%pF and %pf are functionally equivalent to %pS and %ps conversion
specifiers. The former are deprecated, therefore switch the current users
to use the preferred variant.

The changes have been produced by the following command:

git grep -l '%p[fF]' | grep -v '^\(tools\|Documentation\)/' | \
while read i; do perl -i -pe 's/%pf/%ps/g; s/%pF/%pS/g;' $i; done

And verifying the result.

Link: http://lkml.kernel.org/r/20190325193229.23390-1-sakari.ailus@linux.intel.com
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: sparclinux@vger.kernel.org
Cc: linux-um@lists.infradead.org
Cc: xen-devel@lists.xenproject.org
Cc: linux-acpi@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: drbd-dev@lists.linbit.com
Cc: linux-block@vger.kernel.org
Cc: linux-mmc@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Cc: linux-pci@vger.kernel.org
Cc: linux-scsi@vger.kernel.org
Cc: linux-btrfs@vger.kernel.org
Cc: linux-f2fs-devel@lists.sourceforge.net
Cc: linux-mm@kvack.org
Cc: ceph-devel@vger.kernel.org
Cc: netdev@vger.kernel.org
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Acked-by: David Sterba <dsterba@suse.com> (for btrfs)
Acked-by: Mike Rapoport <rppt@linux.ibm.com> (for mm/memblock.c)
Acked-by: Bjorn Helgaas <bhelgaas@google.com> (for drivers/pci)
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>

Authored by Sakari Ailus and committed by Petr Mladek.
d75f773c c4703acd

+106 -106
+10 -10
arch/alpha/kernel/pci_iommu.c
··· 237 237 ok = 0; 238 238 239 239 /* If both conditions above are met, we are fine. */ 240 - DBGA("pci_dac_dma_supported %s from %pf\n", 240 + DBGA("pci_dac_dma_supported %s from %ps\n", 241 241 ok ? "yes" : "no", __builtin_return_address(0)); 242 242 243 243 return ok; ··· 269 269 && paddr + size <= __direct_map_size) { 270 270 ret = paddr + __direct_map_base; 271 271 272 - DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n", 272 + DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n", 273 273 cpu_addr, size, ret, __builtin_return_address(0)); 274 274 275 275 return ret; ··· 280 280 if (dac_allowed) { 281 281 ret = paddr + alpha_mv.pci_dac_offset; 282 282 283 - DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n", 283 + DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n", 284 284 cpu_addr, size, ret, __builtin_return_address(0)); 285 285 286 286 return ret; ··· 317 317 ret = arena->dma_base + dma_ofs * PAGE_SIZE; 318 318 ret += (unsigned long)cpu_addr & ~PAGE_MASK; 319 319 320 - DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n", 320 + DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n", 321 321 cpu_addr, size, npages, ret, __builtin_return_address(0)); 322 322 323 323 return ret; ··· 384 384 && dma_addr < __direct_map_base + __direct_map_size) { 385 385 /* Nothing to do. 
*/ 386 386 387 - DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n", 387 + DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n", 388 388 dma_addr, size, __builtin_return_address(0)); 389 389 390 390 return; 391 391 } 392 392 393 393 if (dma_addr > 0xffffffff) { 394 - DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n", 394 + DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n", 395 395 dma_addr, size, __builtin_return_address(0)); 396 396 return; 397 397 } ··· 423 423 424 424 spin_unlock_irqrestore(&arena->lock, flags); 425 425 426 - DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n", 426 + DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n", 427 427 dma_addr, size, npages, __builtin_return_address(0)); 428 428 } 429 429 ··· 446 446 cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order); 447 447 if (! cpu_addr) { 448 448 printk(KERN_INFO "pci_alloc_consistent: " 449 - "get_free_pages failed from %pf\n", 449 + "get_free_pages failed from %ps\n", 450 450 __builtin_return_address(0)); 451 451 /* ??? Really atomic allocation? Otherwise we could play 452 452 with vmalloc and sg if we can't find contiguous memory. */ ··· 465 465 goto try_again; 466 466 } 467 467 468 - DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n", 468 + DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", 469 469 size, cpu_addr, *dma_addrp, __builtin_return_address(0)); 470 470 471 471 return cpu_addr; ··· 485 485 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); 486 486 free_pages((unsigned long)cpu_addr, get_order(size)); 487 487 488 - DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n", 488 + DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n", 489 489 dma_addr, size, __builtin_return_address(0)); 490 490 } 491 491
+1 -1
arch/arm/mach-imx/pm-imx6.c
··· 631 631 static int imx6_pm_stby_poweroff_probe(void) 632 632 { 633 633 if (pm_power_off) { 634 - pr_warn("%s: pm_power_off already claimed %p %pf!\n", 634 + pr_warn("%s: pm_power_off already claimed %p %ps!\n", 635 635 __func__, pm_power_off, pm_power_off); 636 636 return -EBUSY; 637 637 }
+1 -1
arch/arm/mm/alignment.c
··· 133 133 static int alignment_proc_show(struct seq_file *m, void *v) 134 134 { 135 135 seq_printf(m, "User:\t\t%lu\n", ai_user); 136 - seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc); 136 + seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc); 137 137 seq_printf(m, "Skipped:\t%lu\n", ai_skipped); 138 138 seq_printf(m, "Half:\t\t%lu\n", ai_half); 139 139 seq_printf(m, "Word:\t\t%lu\n", ai_word);
+1 -1
arch/arm/nwfpe/fpmodule.c
··· 147 147 #ifdef CONFIG_DEBUG_USER 148 148 if (flags & debug) 149 149 printk(KERN_DEBUG 150 - "NWFPE: %s[%d] takes exception %08x at %pf from %08lx\n", 150 + "NWFPE: %s[%d] takes exception %08x at %ps from %08lx\n", 151 151 current->comm, current->pid, flags, 152 152 __builtin_return_address(0), GET_USERREG()->ARM_pc); 153 153 #endif
+1 -1
arch/microblaze/mm/pgtable.c
··· 75 75 p >= memory_start && p < virt_to_phys(high_memory) && 76 76 !(p >= __virt_to_phys((phys_addr_t)__bss_stop) && 77 77 p < __virt_to_phys((phys_addr_t)__bss_stop))) { 78 - pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n", 78 + pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n", 79 79 (unsigned long)p, __builtin_return_address(0)); 80 80 return NULL; 81 81 }
+1 -1
arch/sparc/kernel/ds.c
··· 876 876 877 877 static void ds_conn_reset(struct ds_info *dp) 878 878 { 879 - printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n", 879 + printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n", 880 880 dp->id, __builtin_return_address(0)); 881 881 } 882 882
+1 -1
arch/um/kernel/sysrq.c
··· 20 20 21 21 static void _print_addr(void *data, unsigned long address, int reliable) 22 22 { 23 - pr_info(" [<%08lx>] %s%pF\n", address, reliable ? "" : "? ", 23 + pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ", 24 24 (void *)address); 25 25 } 26 26
+1 -1
arch/x86/include/asm/trace/exceptions.h
··· 30 30 __entry->error_code = error_code; 31 31 ), 32 32 33 - TP_printk("address=%pf ip=%pf error_code=0x%lx", 33 + TP_printk("address=%ps ip=%ps error_code=0x%lx", 34 34 (void *)__entry->address, (void *)__entry->ip, 35 35 __entry->error_code) ); 36 36
+1 -1
arch/x86/kernel/irq_64.c
··· 58 58 if (regs->sp >= estack_top && regs->sp <= estack_bottom) 59 59 return; 60 60 61 - WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n", 61 + WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pS)\n", 62 62 current->comm, curbase, regs->sp, 63 63 irq_stack_top, irq_stack_bottom, 64 64 estack_top, estack_bottom, (void *)regs->ip);
+2 -2
arch/x86/mm/extable.c
··· 145 145 unsigned long error_code, 146 146 unsigned long fault_addr) 147 147 { 148 - if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n", 148 + if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", 149 149 (unsigned int)regs->cx, regs->ip, (void *)regs->ip)) 150 150 show_stack_regs(regs); 151 151 ··· 162 162 unsigned long error_code, 163 163 unsigned long fault_addr) 164 164 { 165 - if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n", 165 + if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", 166 166 (unsigned int)regs->cx, (unsigned int)regs->dx, 167 167 (unsigned int)regs->ax, regs->ip, (void *)regs->ip)) 168 168 show_stack_regs(regs);
+1 -1
arch/x86/xen/multicalls.c
··· 105 105 for (i = 0; i < b->mcidx; i++) { 106 106 if (b->entries[i].result < 0) { 107 107 #if MC_DEBUG 108 - pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n", 108 + pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n", 109 109 i + 1, 110 110 b->debug[i].op, 111 111 b->debug[i].args[0],
+1 -1
drivers/acpi/device_pm.c
··· 414 414 if (adev->wakeup.flags.notifier_present) { 415 415 pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup()); 416 416 if (adev->wakeup.context.func) { 417 - acpi_handle_debug(handle, "Running %pF for %s\n", 417 + acpi_handle_debug(handle, "Running %pS for %s\n", 418 418 adev->wakeup.context.func, 419 419 dev_name(adev->wakeup.context.dev)); 420 420 adev->wakeup.context.func(&adev->wakeup.context);
+3 -3
drivers/base/power/main.c
··· 205 205 if (!pm_print_times_enabled) 206 206 return 0; 207 207 208 - dev_info(dev, "calling %pF @ %i, parent: %s\n", cb, 208 + dev_info(dev, "calling %pS @ %i, parent: %s\n", cb, 209 209 task_pid_nr(current), 210 210 dev->parent ? dev_name(dev->parent) : "none"); 211 211 return ktime_get(); ··· 223 223 rettime = ktime_get(); 224 224 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); 225 225 226 - dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error, 226 + dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error, 227 227 (unsigned long long)nsecs >> 10); 228 228 } 229 229 ··· 2062 2062 void __suspend_report_result(const char *function, void *fn, int ret) 2063 2063 { 2064 2064 if (ret) 2065 - printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); 2065 + printk(KERN_ERR "%s(): %pS returns %d\n", function, fn, ret); 2066 2066 } 2067 2067 EXPORT_SYMBOL_GPL(__suspend_report_result); 2068 2068
+6 -6
drivers/base/syscore.c
··· 62 62 list_for_each_entry_reverse(ops, &syscore_ops_list, node) 63 63 if (ops->suspend) { 64 64 if (initcall_debug) 65 - pr_info("PM: Calling %pF\n", ops->suspend); 65 + pr_info("PM: Calling %pS\n", ops->suspend); 66 66 ret = ops->suspend(); 67 67 if (ret) 68 68 goto err_out; 69 69 WARN_ONCE(!irqs_disabled(), 70 - "Interrupts enabled after %pF\n", ops->suspend); 70 + "Interrupts enabled after %pS\n", ops->suspend); 71 71 } 72 72 73 73 trace_suspend_resume(TPS("syscore_suspend"), 0, false); 74 74 return 0; 75 75 76 76 err_out: 77 - pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); 77 + pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend); 78 78 79 79 list_for_each_entry_continue(ops, &syscore_ops_list, node) 80 80 if (ops->resume) ··· 100 100 list_for_each_entry(ops, &syscore_ops_list, node) 101 101 if (ops->resume) { 102 102 if (initcall_debug) 103 - pr_info("PM: Calling %pF\n", ops->resume); 103 + pr_info("PM: Calling %pS\n", ops->resume); 104 104 ops->resume(); 105 105 WARN_ONCE(!irqs_disabled(), 106 - "Interrupts enabled after %pF\n", ops->resume); 106 + "Interrupts enabled after %pS\n", ops->resume); 107 107 } 108 108 trace_suspend_resume(TPS("syscore_resume"), 0, false); 109 109 } ··· 122 122 list_for_each_entry_reverse(ops, &syscore_ops_list, node) 123 123 if (ops->shutdown) { 124 124 if (initcall_debug) 125 - pr_info("PM: Calling %pF\n", ops->shutdown); 125 + pr_info("PM: Calling %pS\n", ops->shutdown); 126 126 ops->shutdown(); 127 127 } 128 128
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 6116 6116 6117 6117 err = cmd->fn(connection, &pi); 6118 6118 if (err) { 6119 - drbd_err(connection, "%pf failed\n", cmd->fn); 6119 + drbd_err(connection, "%ps failed\n", cmd->fn); 6120 6120 goto reconnect; 6121 6121 } 6122 6122
+5 -5
drivers/block/floppy.c
··· 1693 1693 /* we don't even know which FDC is the culprit */ 1694 1694 pr_info("DOR0=%x\n", fdc_state[0].dor); 1695 1695 pr_info("floppy interrupt on bizarre fdc %d\n", fdc); 1696 - pr_info("handler=%pf\n", handler); 1696 + pr_info("handler=%ps\n", handler); 1697 1697 is_alive(__func__, "bizarre fdc"); 1698 1698 return IRQ_NONE; 1699 1699 } ··· 1752 1752 debugt(__func__, ""); 1753 1753 result(); /* get the status ready for set_fdc */ 1754 1754 if (FDCS->reset) { 1755 - pr_info("reset set in interrupt, calling %pf\n", cont->error); 1755 + pr_info("reset set in interrupt, calling %ps\n", cont->error); 1756 1756 cont->error(); /* a reset just after a reset. BAD! */ 1757 1757 } 1758 1758 cont->redo(); ··· 1793 1793 pr_info("\n"); 1794 1794 pr_info("floppy driver state\n"); 1795 1795 pr_info("-------------------\n"); 1796 - pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n", 1796 + pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%ps\n", 1797 1797 jiffies, interruptjiffies, jiffies - interruptjiffies, 1798 1798 lasthandler); 1799 1799 ··· 1812 1812 pr_info("status=%x\n", fd_inb(FD_STATUS)); 1813 1813 pr_info("fdc_busy=%lu\n", fdc_busy); 1814 1814 if (do_floppy) 1815 - pr_info("do_floppy=%pf\n", do_floppy); 1815 + pr_info("do_floppy=%ps\n", do_floppy); 1816 1816 if (work_pending(&floppy_work)) 1817 - pr_info("floppy_work.func=%pf\n", floppy_work.func); 1817 + pr_info("floppy_work.func=%ps\n", floppy_work.func); 1818 1818 if (delayed_work_pending(&fd_timer)) 1819 1819 pr_info("delayed work.function=%p expires=%ld\n", 1820 1820 fd_timer.work.func,
+1 -1
drivers/cpufreq/cpufreq.c
··· 432 432 mutex_lock(&cpufreq_transition_notifier_list.mutex); 433 433 434 434 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) 435 - pr_info("%pF\n", nb->notifier_call); 435 + pr_info("%pS\n", nb->notifier_call); 436 436 437 437 mutex_unlock(&cpufreq_transition_notifier_list.mutex); 438 438 }
+1 -1
drivers/mmc/core/quirks.h
··· 159 159 (f->ext_csd_rev == EXT_CSD_REV_ANY || 160 160 f->ext_csd_rev == card->ext_csd.rev) && 161 161 rev >= f->rev_start && rev <= f->rev_end) { 162 - dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup); 162 + dev_dbg(&card->dev, "calling %ps\n", f->vendor_fixup); 163 163 f->vendor_fixup(card, f->data); 164 164 } 165 165 }
+1 -1
drivers/nvdimm/bus.c
··· 581 581 struct device_driver *drv = &nd_drv->drv; 582 582 583 583 if (!nd_drv->type) { 584 - pr_debug("driver type bitmask not set (%pf)\n", 584 + pr_debug("driver type bitmask not set (%ps)\n", 585 585 __builtin_return_address(0)); 586 586 return -EINVAL; 587 587 }
+1 -1
drivers/nvdimm/dimm_devs.c
··· 53 53 54 54 rc = nvdimm_check_config_data(ndd->dev); 55 55 if (rc) 56 - dev_dbg(ndd->dev, "%pf: %s error: %d\n", 56 + dev_dbg(ndd->dev, "%ps: %s error: %d\n", 57 57 __builtin_return_address(0), __func__, rc); 58 58 return rc; 59 59 }
+7 -7
drivers/pci/pci-driver.c
··· 578 578 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 579 579 && pci_dev->current_state != PCI_UNKNOWN) { 580 580 WARN_ONCE(pci_dev->current_state != prev, 581 - "PCI PM: Device state not saved by %pF\n", 581 + "PCI PM: Device state not saved by %pS\n", 582 582 drv->suspend); 583 583 } 584 584 } ··· 605 605 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 606 606 && pci_dev->current_state != PCI_UNKNOWN) { 607 607 WARN_ONCE(pci_dev->current_state != prev, 608 - "PCI PM: Device state not saved by %pF\n", 608 + "PCI PM: Device state not saved by %pS\n", 609 609 drv->suspend_late); 610 610 goto Fixup; 611 611 } ··· 773 773 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 774 774 && pci_dev->current_state != PCI_UNKNOWN) { 775 775 WARN_ONCE(pci_dev->current_state != prev, 776 - "PCI PM: State of device not saved by %pF\n", 776 + "PCI PM: State of device not saved by %pS\n", 777 777 pm->suspend); 778 778 } 779 779 } ··· 821 821 if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 822 822 && pci_dev->current_state != PCI_UNKNOWN) { 823 823 WARN_ONCE(pci_dev->current_state != prev, 824 - "PCI PM: State of device not saved by %pF\n", 824 + "PCI PM: State of device not saved by %pS\n", 825 825 pm->suspend_noirq); 826 826 goto Fixup; 827 827 } ··· 1260 1260 * log level. 
1261 1261 */ 1262 1262 if (error == -EBUSY || error == -EAGAIN) { 1263 - dev_dbg(dev, "can't suspend now (%pf returned %d)\n", 1263 + dev_dbg(dev, "can't suspend now (%ps returned %d)\n", 1264 1264 pm->runtime_suspend, error); 1265 1265 return error; 1266 1266 } else if (error) { 1267 - dev_err(dev, "can't suspend (%pf returned %d)\n", 1267 + dev_err(dev, "can't suspend (%ps returned %d)\n", 1268 1268 pm->runtime_suspend, error); 1269 1269 return error; 1270 1270 } ··· 1276 1276 && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 1277 1277 && pci_dev->current_state != PCI_UNKNOWN) { 1278 1278 WARN_ONCE(pci_dev->current_state != prev, 1279 - "PCI PM: State of device not saved by %pF\n", 1279 + "PCI PM: State of device not saved by %pS\n", 1280 1280 pm->runtime_suspend); 1281 1281 return 0; 1282 1282 }
+2 -2
drivers/pci/quirks.c
··· 36 36 void (*fn)(struct pci_dev *dev)) 37 37 { 38 38 if (initcall_debug) 39 - pci_info(dev, "calling %pF @ %i\n", fn, task_pid_nr(current)); 39 + pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current)); 40 40 41 41 return ktime_get(); 42 42 } ··· 51 51 delta = ktime_sub(rettime, calltime); 52 52 duration = (unsigned long long) ktime_to_ns(delta) >> 10; 53 53 if (initcall_debug || duration > 10000) 54 - pci_info(dev, "%pF took %lld usecs\n", fn, duration); 54 + pci_info(dev, "%pS took %lld usecs\n", fn, duration); 55 55 } 56 56 57 57 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+1 -1
drivers/pnp/quirks.c
··· 458 458 for (f = pnp_fixups; *f->id; f++) { 459 459 if (!compare_pnp_id(dev->id, f->id)) 460 460 continue; 461 - pnp_dbg(&dev->dev, "%s: calling %pF\n", f->id, 461 + pnp_dbg(&dev->dev, "%s: calling %pS\n", f->id, 462 462 f->quirk_function); 463 463 f->quirk_function(dev); 464 464 }
+1 -1
drivers/scsi/esp_scsi.c
··· 1031 1031 1032 1032 static void esp_schedule_reset(struct esp *esp) 1033 1033 { 1034 - esp_log_reset("esp_schedule_reset() from %pf\n", 1034 + esp_log_reset("esp_schedule_reset() from %ps\n", 1035 1035 __builtin_return_address(0)); 1036 1036 esp->flags |= ESP_FLAG_RESETTING; 1037 1037 esp_event(esp, ESP_EVENT_RESET);
+2 -2
fs/btrfs/tests/free-space-tree-tests.c
··· 539 539 ret = run_test(test_func, 0, sectorsize, nodesize, alignment); 540 540 if (ret) { 541 541 test_err( 542 - "%pf failed with extents, sectorsize=%u, nodesize=%u, alignment=%u", 542 + "%ps failed with extents, sectorsize=%u, nodesize=%u, alignment=%u", 543 543 test_func, sectorsize, nodesize, alignment); 544 544 test_ret = ret; 545 545 } ··· 547 547 ret = run_test(test_func, 1, sectorsize, nodesize, alignment); 548 548 if (ret) { 549 549 test_err( 550 - "%pf failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u", 550 + "%ps failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u", 551 551 test_func, sectorsize, nodesize, alignment); 552 552 test_ret = ret; 553 553 }
+1 -1
fs/f2fs/f2fs.h
··· 1337 1337 1338 1338 #ifdef CONFIG_F2FS_FAULT_INJECTION 1339 1339 #define f2fs_show_injection_info(type) \ 1340 - printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n", \ 1340 + printk_ratelimited("%sF2FS-fs : inject %s in %s of %pS\n", \ 1341 1341 KERN_INFO, f2fs_fault_name[type], \ 1342 1342 __func__, __builtin_return_address(0)) 1343 1343 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+1 -1
fs/pstore/inode.c
··· 115 115 116 116 rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off); 117 117 118 - seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %pf <- %pF\n", 118 + seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %ps <- %pS\n", 119 119 pstore_ftrace_decode_cpu(rec), 120 120 pstore_ftrace_read_timestamp(rec), 121 121 rec->ip, rec->parent_ip, (void *)rec->ip,
+1 -1
include/trace/events/btrfs.h
··· 1345 1345 __entry->normal_work = &work->normal_work; 1346 1346 ), 1347 1347 1348 - TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p " 1348 + TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p " 1349 1349 "ordered_free=%p", 1350 1350 __entry->work, __entry->normal_work, __entry->wq, 1351 1351 __entry->func, __entry->ordered_func, __entry->ordered_free)
+2 -2
include/trace/events/cpuhp.h
··· 30 30 __entry->fun = fun; 31 31 ), 32 32 33 - TP_printk("cpu: %04u target: %3d step: %3d (%pf)", 33 + TP_printk("cpu: %04u target: %3d step: %3d (%ps)", 34 34 __entry->cpu, __entry->target, __entry->idx, __entry->fun) 35 35 ); 36 36 ··· 58 58 __entry->fun = fun; 59 59 ), 60 60 61 - TP_printk("cpu: %04u target: %3d step: %3d (%pf)", 61 + TP_printk("cpu: %04u target: %3d step: %3d (%ps)", 62 62 __entry->cpu, __entry->target, __entry->idx, __entry->fun) 63 63 ); 64 64
+1 -1
include/trace/events/preemptirq.h
··· 27 27 __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext); 28 28 ), 29 29 30 - TP_printk("caller=%pF parent=%pF", 30 + TP_printk("caller=%pS parent=%pS", 31 31 (void *)((unsigned long)(_stext) + __entry->caller_offs), 32 32 (void *)((unsigned long)(_stext) + __entry->parent_offs)) 33 33 );
+2 -2
include/trace/events/rcu.h
··· 491 491 __entry->qlen = qlen; 492 492 ), 493 493 494 - TP_printk("%s rhp=%p func=%pf %ld/%ld", 494 + TP_printk("%s rhp=%p func=%ps %ld/%ld", 495 495 __entry->rcuname, __entry->rhp, __entry->func, 496 496 __entry->qlen_lazy, __entry->qlen) 497 497 ); ··· 587 587 __entry->func = rhp->func; 588 588 ), 589 589 590 - TP_printk("%s rhp=%p func=%pf", 590 + TP_printk("%s rhp=%p func=%ps", 591 591 __entry->rcuname, __entry->rhp, __entry->func) 592 592 ); 593 593
+1 -1
include/trace/events/sunrpc.h
··· 102 102 __entry->flags = task->tk_flags; 103 103 ), 104 104 105 - TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf", 105 + TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%ps", 106 106 __entry->task_id, __entry->client_id, 107 107 __entry->flags, 108 108 __entry->runstate,
+2 -2
include/trace/events/vmscan.h
··· 226 226 __entry->priority = priority; 227 227 ), 228 228 229 - TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d", 229 + TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d", 230 230 __entry->shrink, 231 231 __entry->shr, 232 232 __entry->nid, ··· 265 265 __entry->total_scan = total_scan; 266 266 ), 267 267 268 - TP_printk("%pF %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", 268 + TP_printk("%pS %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", 269 269 __entry->shrink, 270 270 __entry->shr, 271 271 __entry->nid,
+2 -2
include/trace/events/workqueue.h
··· 60 60 __entry->cpu = pwq->pool->cpu; 61 61 ), 62 62 63 - TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", 63 + TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u", 64 64 __entry->work, __entry->function, __entry->workqueue, 65 65 __entry->req_cpu, __entry->cpu) 66 66 ); ··· 102 102 __entry->function = work->func; 103 103 ), 104 104 105 - TP_printk("work struct %p: function %pf", __entry->work, __entry->function) 105 + TP_printk("work struct %p: function %ps", __entry->work, __entry->function) 106 106 ); 107 107 108 108 /**
+1 -1
include/trace/events/xen.h
··· 73 73 __entry->fn = fn; 74 74 __entry->data = data; 75 75 ), 76 - TP_printk("callback %pf, data %p", 76 + TP_printk("callback %ps, data %p", 77 77 __entry->fn, __entry->data) 78 78 ); 79 79
+3 -3
init/main.c
··· 826 826 { 827 827 ktime_t *calltime = (ktime_t *)data; 828 828 829 - printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current)); 829 + printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current)); 830 830 *calltime = ktime_get(); 831 831 } 832 832 ··· 840 840 rettime = ktime_get(); 841 841 delta = ktime_sub(rettime, *calltime); 842 842 duration = (unsigned long long) ktime_to_ns(delta) >> 10; 843 - printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", 843 + printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n", 844 844 fn, ret, duration); 845 845 } 846 846 ··· 897 897 strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); 898 898 local_irq_enable(); 899 899 } 900 - WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf); 900 + WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf); 901 901 902 902 add_latent_entropy(); 903 903 return ret;
+2 -2
kernel/async.c
··· 119 119 120 120 /* 1) run (and print duration) */ 121 121 if (initcall_debug && system_state < SYSTEM_RUNNING) { 122 - pr_debug("calling %lli_%pF @ %i\n", 122 + pr_debug("calling %lli_%pS @ %i\n", 123 123 (long long)entry->cookie, 124 124 entry->func, task_pid_nr(current)); 125 125 calltime = ktime_get(); ··· 128 128 if (initcall_debug && system_state < SYSTEM_RUNNING) { 129 129 rettime = ktime_get(); 130 130 delta = ktime_sub(rettime, calltime); 131 - pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n", 131 + pr_debug("initcall %lli_%pS returned 0 after %lld usecs\n", 132 132 (long long)entry->cookie, 133 133 entry->func, 134 134 (long long)ktime_to_ns(delta) >> 10);
+1 -1
kernel/events/uprobes.c
··· 2028 2028 if (uc->handler) { 2029 2029 rc = uc->handler(uc, regs); 2030 2030 WARN(rc & ~UPROBE_HANDLER_MASK, 2031 - "bad rc=0x%x from %pf()\n", rc, uc->handler); 2031 + "bad rc=0x%x from %ps()\n", rc, uc->handler); 2032 2032 } 2033 2033 2034 2034 if (uc->ret_handler)
+1 -1
kernel/fail_function.c
··· 210 210 { 211 211 struct fei_attr *attr = list_entry(v, struct fei_attr, list); 212 212 213 - seq_printf(m, "%pf\n", attr->kp.addr); 213 + seq_printf(m, "%ps\n", attr->kp.addr); 214 214 return 0; 215 215 } 216 216
+1 -1
kernel/irq/debugfs.c
··· 152 152 153 153 raw_spin_lock_irq(&desc->lock); 154 154 data = irq_desc_get_irq_data(desc); 155 - seq_printf(m, "handler: %pf\n", desc->handle_irq); 155 + seq_printf(m, "handler: %ps\n", desc->handle_irq); 156 156 seq_printf(m, "device: %s\n", desc->dev_name); 157 157 seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors); 158 158 irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
+1 -1
kernel/irq/handle.c
··· 149 149 res = action->handler(irq, action->dev_id); 150 150 trace_irq_handler_exit(irq, action, res); 151 151 152 - if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n", 152 + if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pS enabled interrupts\n", 153 153 irq, action->handler)) 154 154 local_irq_disable(); 155 155
+1 -1
kernel/irq/manage.c
··· 778 778 ret = 0; 779 779 break; 780 780 default: 781 - pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", 781 + pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", 782 782 flags, irq_desc_get_irq(desc), chip->irq_set_type); 783 783 } 784 784 if (unmask)
+2 -2
kernel/irq/spurious.c
··· 212 212 */ 213 213 raw_spin_lock_irqsave(&desc->lock, flags); 214 214 for_each_action_of_desc(desc, action) { 215 - printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler); 215 + printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler); 216 216 if (action->thread_fn) 217 - printk(KERN_CONT " threaded [<%p>] %pf", 217 + printk(KERN_CONT " threaded [<%p>] %ps", 218 218 action->thread_fn, action->thread_fn); 219 219 printk(KERN_CONT "\n"); 220 220 }
+1 -1
kernel/rcu/tree.c
··· 2870 2870 * Use rcu:rcu_callback trace event to find the previous 2871 2871 * time callback was passed to __call_rcu(). 2872 2872 */ 2873 - WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n", 2873 + WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2874 2874 head, head->func); 2875 2875 WRITE_ONCE(head->func, rcu_leak_callback); 2876 2876 return;
+1 -1
kernel/stop_machine.c
··· 513 513 } 514 514 preempt_count_dec(); 515 515 WARN_ONCE(preempt_count(), 516 - "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg); 516 + "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg); 517 517 goto repeat; 518 518 } 519 519 }
+1 -1
kernel/time/sched_clock.c
··· 231 231 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000)) 232 232 enable_sched_clock_irqtime(); 233 233 234 - pr_debug("Registered %pF as sched_clock source\n", read); 234 + pr_debug("Registered %pS as sched_clock source\n", read); 235 235 } 236 236 237 237 void __init generic_sched_clock_init(void)
+1 -1
kernel/time/timer.c
··· 1328 1328 lock_map_release(&lockdep_map); 1329 1329 1330 1330 if (count != preempt_count()) { 1331 - WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", 1331 + WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n", 1332 1332 fn, count, preempt_count()); 1333 1333 /* 1334 1334 * Restore the preempt count. That gives us a decent
+6 -6
kernel/workqueue.c
··· 2277 2277 2278 2278 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2279 2279 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2280 - " last function: %pf\n", 2280 + " last function: %ps\n", 2281 2281 current->comm, preempt_count(), task_pid_nr(current), 2282 2282 worker->current_func); 2283 2283 debug_show_held_locks(current); ··· 2596 2596 worker = current_wq_worker(); 2597 2597 2598 2598 WARN_ONCE(current->flags & PF_MEMALLOC, 2599 - "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf", 2599 + "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2600 2600 current->pid, current->comm, target_wq->name, target_func); 2601 2601 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 2602 2602 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2603 - "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf", 2603 + "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2604 2604 worker->current_pwq->wq->name, worker->current_func, 2605 2605 target_wq->name, target_func); 2606 2606 } ··· 4582 4582 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 4583 4583 4584 4584 if (fn || name[0] || desc[0]) { 4585 - printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 4585 + printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 4586 4586 if (strcmp(name, desc)) 4587 4587 pr_cont(" (%s)", desc); 4588 4588 pr_cont("\n"); ··· 4607 4607 pr_cont("%s BAR(%d)", comma ? "," : "", 4608 4608 task_pid_nr(barr->task)); 4609 4609 } else { 4610 - pr_cont("%s %pf", comma ? "," : "", work->func); 4610 + pr_cont("%s %ps", comma ? "," : "", work->func); 4611 4611 } 4612 4612 } 4613 4613 ··· 4639 4639 if (worker->current_pwq != pwq) 4640 4640 continue; 4641 4641 4642 - pr_cont("%s %d%s:%pf", comma ? "," : "", 4642 + pr_cont("%s %d%s:%ps", comma ? "," : "", 4643 4643 task_pid_nr(worker->task), 4644 4644 worker == pwq->wq->rescuer ? "(RESCUER)" : "", 4645 4645 worker->current_func);
+1 -1
lib/error-inject.c
··· 189 189 { 190 190 struct ei_entry *ent = list_entry(v, struct ei_entry, list); 191 191 192 - seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr, 192 + seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr, 193 193 error_type_string(ent->etype)); 194 194 return 0; 195 195 }
+2 -2
lib/percpu-refcount.c
··· 151 151 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count); 152 152 153 153 WARN_ONCE(atomic_long_read(&ref->count) <= 0, 154 - "percpu ref (%pf) <= 0 (%ld) after switching to atomic", 154 + "percpu ref (%ps) <= 0 (%ld) after switching to atomic", 155 155 ref->release, atomic_long_read(&ref->count)); 156 156 157 157 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ ··· 333 333 spin_lock_irqsave(&percpu_ref_switch_lock, flags); 334 334 335 335 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, 336 - "%s called more than once on %pf!", __func__, ref->release); 336 + "%s called more than once on %ps!", __func__, ref->release); 337 337 338 338 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 339 339 __percpu_ref_switch_mode(ref, confirm_kill);
+7 -7
mm/memblock.c
··· 701 701 { 702 702 phys_addr_t end = base + size - 1; 703 703 704 - memblock_dbg("memblock_add: [%pa-%pa] %pF\n", 704 + memblock_dbg("memblock_add: [%pa-%pa] %pS\n", 705 705 &base, &end, (void *)_RET_IP_); 706 706 707 707 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); ··· 820 820 { 821 821 phys_addr_t end = base + size - 1; 822 822 823 - memblock_dbg(" memblock_free: [%pa-%pa] %pF\n", 823 + memblock_dbg(" memblock_free: [%pa-%pa] %pS\n", 824 824 &base, &end, (void *)_RET_IP_); 825 825 826 826 kmemleak_free_part_phys(base, size); ··· 831 831 { 832 832 phys_addr_t end = base + size - 1; 833 833 834 - memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n", 834 + memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n", 835 835 &base, &end, (void *)_RET_IP_); 836 836 837 837 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); ··· 1466 1466 { 1467 1467 void *ptr; 1468 1468 1469 - memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1469 + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1470 1470 __func__, (u64)size, (u64)align, nid, &min_addr, 1471 1471 &max_addr, (void *)_RET_IP_); 1472 1472 ··· 1502 1502 { 1503 1503 void *ptr; 1504 1504 1505 - memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1505 + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1506 1506 __func__, (u64)size, (u64)align, nid, &min_addr, 1507 1507 &max_addr, (void *)_RET_IP_); 1508 1508 ··· 1538 1538 { 1539 1539 void *ptr; 1540 1540 1541 - memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n", 1541 + memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1542 1542 __func__, (u64)size, (u64)align, nid, &min_addr, 1543 1543 &max_addr, (void *)_RET_IP_); 1544 1544 ptr = memblock_alloc_internal(size, align, ··· 1567 1567 phys_addr_t cursor, end; 1568 1568 1569 1569 end = base + size - 1; 1570 - memblock_dbg("%s: [%pa-%pa] %pF\n", 1570 + memblock_dbg("%s: [%pa-%pa] %pS\n", 1571 1571 __func__, &base, &end, (void *)_RET_IP_); 1572 1572 kmemleak_free_part_phys(base, size); 1573 1573 cursor = PFN_UP(base);
+1 -1
mm/memory.c
··· 519 519 dump_page(page, "bad pte"); 520 520 pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", 521 521 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); 522 - pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", 522 + pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n", 523 523 vma->vm_file, 524 524 vma->vm_ops ? vma->vm_ops->fault : NULL, 525 525 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
+1 -1
mm/vmscan.c
··· 493 493 494 494 total_scan += delta; 495 495 if (total_scan < 0) { 496 - pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", 496 + pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n", 497 497 shrinker->scan_objects, total_scan); 498 498 total_scan = freeable; 499 499 next_deferred = nr;
+1 -1
net/ceph/osd_client.c
··· 2398 2398 2399 2399 static void __complete_request(struct ceph_osd_request *req) 2400 2400 { 2401 - dout("%s req %p tid %llu cb %pf result %d\n", __func__, req, 2401 + dout("%s req %p tid %llu cb %ps result %d\n", __func__, req, 2402 2402 req->r_tid, req->r_callback, req->r_result); 2403 2403 2404 2404 if (req->r_callback)
+1 -1
net/core/net-procfs.c
··· 258 258 else 259 259 seq_printf(seq, "%04x", ntohs(pt->type)); 260 260 261 - seq_printf(seq, " %-8s %pf\n", 261 + seq_printf(seq, " %-8s %ps\n", 262 262 pt->dev ? pt->dev->name : "", pt->func); 263 263 } 264 264
+2 -2
net/core/netpoll.c
··· 149 149 * indicate that we are clearing the Tx path only. 150 150 */ 151 151 work = napi->poll(napi, 0); 152 - WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll); 152 + WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll); 153 153 trace_napi_poll(napi, work, 0); 154 154 155 155 clear_bit(NAPI_STATE_NPSVC, &napi->state); ··· 346 346 } 347 347 348 348 WARN_ONCE(!irqs_disabled(), 349 - "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", 349 + "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n", 350 350 dev->name, dev->netdev_ops->ndo_start_xmit); 351 351 352 352 }