Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux

Pull printk updates from Petr Mladek:

- Allow creating nbcon console drivers with an unsafe write_atomic()
callback that can only be called by the final nbcon_atomic_flush_unsafe().
Otherwise, the driver would rely on the kthread.

It is going to be used as a best-effort approach for an
experimental nbcon netconsole driver, see

https://lore.kernel.org/r/20251121-nbcon-v1-2-503d17b2b4af@debian.org

Note that a safe .write_atomic() callback is supposed to work in NMI
context. But some networking drivers are not safe even in IRQ
context:

https://lore.kernel.org/r/oc46gdpmmlly5o44obvmoatfqo5bhpgv7pabpvb6sjuqioymcg@gjsma3ghoz35

In an ideal world, all networking drivers would be fixed first and
the atomic flush would be blocked only in NMI context. But it brings
the question how reliable networking drivers are when the system is
in a bad state. They might block flushing more reliable serial
consoles which are more suitable for serious debugging anyway.

- Allow using the last 4 bytes of the printk ring buffer.

- Prevent queuing IRQ work and block printk kthreads when consoles are
suspended. Otherwise, they create unnecessary churn or even block
the suspend.

- Release console_lock() between each record in the kthread used for
legacy consoles on RT. It might significantly speed up the boot.

- Release nbcon context between each record in the atomic flush. It
prevents stalls of the related printk kthread after it has lost the
ownership in the middle of a record.

- Add support for NBCON consoles into KDB

- Add %ptSp modifier for printing struct timespec64 and use it where
possible

- Misc code clean up

* tag 'printk-for-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux: (48 commits)
printk: Use console_is_usable on console_unblank
arch: um: kmsg_dump: Use console_is_usable
drivers: serial: kgdboc: Drop checks for CON_ENABLED and CON_BOOT
lib/vsprintf: Unify FORMAT_STATE_NUM handlers
printk: Avoid irq_work for printk_deferred() on suspend
printk: Avoid scheduling irq_work on suspend
printk: Allow printk_trigger_flush() to flush all types
tracing: Switch to use %ptSp
scsi: snic: Switch to use %ptSp
scsi: fnic: Switch to use %ptSp
s390/dasd: Switch to use %ptSp
ptp: ocp: Switch to use %ptSp
pps: Switch to use %ptSp
PCI: epf-test: Switch to use %ptSp
net: dsa: sja1105: Switch to use %ptSp
mmc: mmc_test: Switch to use %ptSp
media: av7110: Switch to use %ptSp
ipmi: Switch to use %ptSp
igb: Switch to use %ptSp
e1000e: Switch to use %ptSp
...

+682 -391
+9 -2
Documentation/core-api/printk-formats.rst
··· 547 547 %pt[RT]s YYYY-mm-dd HH:MM:SS 548 548 %pt[RT]d YYYY-mm-dd 549 549 %pt[RT]t HH:MM:SS 550 - %pt[RT][dt][r][s] 550 + %ptSp <seconds>.<nanoseconds> 551 + %pt[RST][dt][r][s] 551 552 552 553 For printing date and time as represented by:: 553 554 554 - R struct rtc_time structure 555 + R content of struct rtc_time 556 + S content of struct timespec64 555 557 T time64_t type 556 558 557 559 in human readable format. ··· 564 562 The %pt[RT]s (space) will override ISO 8601 separator by using ' ' (space) 565 563 instead of 'T' (Capital T) between date and time. It won't have any effect 566 564 when date or time is omitted. 565 + 566 + The %ptSp is equivalent to %lld.%09ld for the content of the struct timespec64. 567 + When the other specifiers are given, it becomes the respective equivalent of 568 + %ptT[dt][r][s].%09ld. In other words, the seconds are being printed in 569 + the human readable format followed by a dot and nanoseconds. 567 570 568 571 Passed by reference. 569 572
+1
MAINTAINERS
··· 20608 20608 R: Sergey Senozhatsky <senozhatsky@chromium.org> 20609 20609 S: Maintained 20610 20610 T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git 20611 + F: Documentation/core-api/printk-basics.rst 20611 20612 F: include/linux/printk.h 20612 20613 F: kernel/printk/ 20613 20614
+1 -1
arch/um/kernel/kmsg_dump.c
··· 31 31 * expected to output the crash information. 32 32 */ 33 33 if (strcmp(con->name, "ttynull") != 0 && 34 - (console_srcu_read_flags(con) & CON_ENABLED)) { 34 + console_is_usable(con, console_srcu_read_flags(con), true)) { 35 35 break; 36 36 } 37 37 }
+1 -2
drivers/char/ipmi/ipmi_si_intf.c
··· 275 275 struct timespec64 t; 276 276 277 277 ktime_get_ts64(&t); 278 - dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", 279 - msg, t.tv_sec, t.tv_nsec); 278 + dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t); 280 279 } 281 280 #else 282 281 #define debug_timestamp(smi_info, x)
+2 -4
drivers/char/ipmi/ipmi_ssif.c
··· 1083 1083 struct timespec64 t; 1084 1084 1085 1085 ktime_get_real_ts64(&t); 1086 - dev_dbg(&ssif_info->client->dev, 1087 - "**Enqueue %02x %02x: %lld.%6.6ld\n", 1088 - msg->data[0], msg->data[1], 1089 - (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC); 1086 + dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n", 1087 + msg->data[0], msg->data[1], &t); 1090 1088 } 1091 1089 return IPMI_CC_NO_ERROR; 1092 1090 }
+1 -1
drivers/dma-buf/sync_debug.c
··· 59 59 struct timespec64 ts64 = 60 60 ktime_to_timespec64(fence->timestamp); 61 61 62 - seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); 62 + seq_printf(s, "@%ptSp", &ts64); 63 63 } 64 64 65 65 seq_printf(s, ": %lld", fence->seqno);
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
··· 217 217 drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n"); 218 218 drm_printf(&p, "kernel: " UTS_RELEASE "\n"); 219 219 drm_printf(&p, "module: " KBUILD_MODNAME "\n"); 220 - drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, 221 - coredump->reset_time.tv_nsec); 220 + drm_printf(&p, "time: %ptSp\n", &coredump->reset_time); 222 221 223 222 if (coredump->reset_task_info.task.pid) 224 223 drm_printf(&p, "process_name: %s PID: %d\n",
+2 -4
drivers/gpu/drm/drm_vblank.c
··· 794 794 ts_vblank_time = ktime_to_timespec64(*vblank_time); 795 795 796 796 drm_dbg_vbl(dev, 797 - "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n", 798 - pipe, hpos, vpos, 799 - (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000, 800 - (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000, 797 + "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n", 798 + pipe, hpos, vpos, &ts_etime, &ts_vblank_time, 801 799 duration_ns / 1000, i); 802 800 803 801 return true;
+1 -2
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
··· 82 82 drm_printf(p, "kernel: " UTS_RELEASE "\n"); 83 83 drm_printf(p, "module: " KBUILD_MODNAME "\n"); 84 84 drm_printf(p, "dpu devcoredump\n"); 85 - drm_printf(p, "time: %lld.%09ld\n", 86 - state->time.tv_sec, state->time.tv_nsec); 85 + drm_printf(p, "time: %ptSp\n", &state->time); 87 86 88 87 list_for_each_entry_safe(block, tmp, &state->blocks, node) { 89 88 drm_printf(p, "====================%s================\n", block->name);
+1 -2
drivers/gpu/drm/msm/msm_gpu.c
··· 197 197 drm_printf(&p, "---\n"); 198 198 drm_printf(&p, "kernel: " UTS_RELEASE "\n"); 199 199 drm_printf(&p, "module: " KBUILD_MODNAME "\n"); 200 - drm_printf(&p, "time: %lld.%09ld\n", 201 - state->time.tv_sec, state->time.tv_nsec); 200 + drm_printf(&p, "time: %ptSp\n", &state->time); 202 201 if (state->comm) 203 202 drm_printf(&p, "comm: %s\n", state->comm); 204 203 if (state->cmd)
+2 -2
drivers/gpu/drm/xe/xe_devcoredump.c
··· 106 106 drm_puts(&p, "module: " KBUILD_MODNAME "\n"); 107 107 108 108 ts = ktime_to_timespec64(ss->snapshot_time); 109 - drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); 109 + drm_printf(&p, "Snapshot time: %ptSp\n", &ts); 110 110 ts = ktime_to_timespec64(ss->boot_time); 111 - drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec); 111 + drm_printf(&p, "Uptime: %ptSp\n", &ts); 112 112 drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid); 113 113 xe_device_snapshot_print(xe, &p); 114 114
+8 -12
drivers/mmc/core/mmc_test.c
··· 586 586 rate = mmc_test_rate(tot, &ts); 587 587 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ 588 588 589 - pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " 590 - "%llu.%09u seconds (%u kB/s, %u KiB/s, " 591 - "%u.%02u IOPS, sg_len %d)\n", 592 - mmc_hostname(test->card->host), count, sectors, count, 593 - sectors >> 1, (sectors & 1 ? ".5" : ""), 594 - (u64)ts.tv_sec, (u32)ts.tv_nsec, 595 - rate / 1000, rate / 1024, iops / 100, iops % 100, 596 - test->area.sg_len); 589 + pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %ptSp seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n", 590 + mmc_hostname(test->card->host), count, sectors, count, 591 + sectors >> 1, (sectors & 1 ? ".5" : ""), &ts, 592 + rate / 1000, rate / 1024, iops / 100, iops % 100, 593 + test->area.sg_len); 597 594 598 595 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); 599 596 } ··· 3071 3074 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); 3072 3075 3073 3076 list_for_each_entry(tr, &gr->tr_lst, link) { 3074 - seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n", 3075 - tr->count, tr->sectors, 3076 - (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, 3077 - tr->rate, tr->iops / 100, tr->iops % 100); 3077 + seq_printf(sf, "%u %d %ptSp %u %u.%02u\n", 3078 + tr->count, tr->sectors, &tr->ts, tr->rate, 3079 + tr->iops / 100, tr->iops % 100); 3078 3080 } 3079 3081 } 3080 3082
+3 -5
drivers/net/dsa/sja1105/sja1105_tas.c
··· 775 775 base_time_ts = ns_to_timespec64(base_time); 776 776 now_ts = ns_to_timespec64(now); 777 777 778 - dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n", 779 - base_time_ts.tv_sec, base_time_ts.tv_nsec, 780 - now_ts.tv_sec, now_ts.tv_nsec); 778 + dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n", 779 + &base_time_ts, &now_ts); 781 780 782 781 break; 783 782 ··· 797 798 if (now < tas_data->oper_base_time) { 798 799 /* TAS has not started yet */ 799 800 diff = ns_to_timespec64(tas_data->oper_base_time - now); 800 - dev_dbg(ds->dev, "time to start: [%lld.%09ld]", 801 - diff.tv_sec, diff.tv_nsec); 801 + dev_dbg(ds->dev, "time to start: [%ptSp]", &diff); 802 802 break; 803 803 } 804 804
+2 -5
drivers/net/ethernet/intel/e1000e/ptp.c
··· 229 229 systim_overflow_work.work); 230 230 struct e1000_hw *hw = &adapter->hw; 231 231 struct timespec64 ts; 232 - u64 ns; 233 232 234 233 /* Update the timecounter */ 235 - ns = timecounter_read(&adapter->tc); 234 + ts = ns_to_timespec64(timecounter_read(&adapter->tc)); 236 235 237 - ts = ns_to_timespec64(ns); 238 - e_dbg("SYSTIM overflow check at %lld.%09lu\n", 239 - (long long) ts.tv_sec, ts.tv_nsec); 236 + e_dbg("SYSTIM overflow check at %ptSp\n", &ts); 240 237 241 238 schedule_delayed_work(&adapter->systim_overflow_work, 242 239 E1000_SYSTIM_OVERFLOW_PERIOD);
+2 -5
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 840 840 struct igb_adapter *igb = 841 841 container_of(work, struct igb_adapter, ptp_overflow_work.work); 842 842 struct timespec64 ts; 843 - u64 ns; 844 843 845 844 /* Update the timecounter */ 846 - ns = timecounter_read(&igb->tc); 845 + ts = ns_to_timespec64(timecounter_read(&igb->tc)); 847 846 848 - ts = ns_to_timespec64(ns); 849 - pr_debug("igb overflow check at %lld.%09lu\n", 850 - (long long) ts.tv_sec, ts.tv_nsec); 847 + pr_debug("igb overflow check at %ptSp\n", &ts); 851 848 852 849 schedule_delayed_work(&igb->ptp_overflow_work, 853 850 IGB_SYSTIM_OVERFLOW_PERIOD);
+2 -3
drivers/pci/endpoint/functions/pci-epf-test.c
··· 331 331 rate = div64_u64(size * NSEC_PER_SEC, ns * 1000); 332 332 333 333 dev_info(&epf_test->epf->dev, 334 - "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n", 335 - op, size, dma ? "YES" : "NO", 336 - (u64)ts.tv_sec, (u32)ts.tv_nsec, rate); 334 + "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n", 335 + op, size, dma ? "YES" : "NO", &ts, rate); 337 336 } 338 337 339 338 static void pci_epf_test_copy(struct pci_epf_test *epf_test,
+1 -2
drivers/pps/generators/pps_gen_parport.c
··· 80 80 /* check if we are late */ 81 81 if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) { 82 82 local_irq_restore(flags); 83 - pr_err("we are late this time %lld.%09ld\n", 84 - (s64)ts1.tv_sec, ts1.tv_nsec); 83 + pr_err("we are late this time %ptSp\n", &ts1); 85 84 goto done; 86 85 } 87 86
+1 -2
drivers/pps/kapi.c
··· 163 163 /* check event type */ 164 164 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 165 165 166 - dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n", 167 - (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec); 166 + dev_dbg(&pps->dev, "PPS event at %ptSp\n", &ts->ts_real); 168 167 169 168 timespec_to_pps_ktime(&ts_real, ts->ts_real); 170 169
+5 -8
drivers/ptp/ptp_ocp.c
··· 4287 4287 ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC; 4288 4288 sys_ts = ns_to_timespec64(ns); 4289 4289 4290 - seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC", 4291 - ts.tv_sec, ts.tv_nsec, &ts); 4292 - seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS", 4293 - sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts, 4294 - bp->utc_tai_offset); 4290 + seq_printf(s, "%7s: %ptSp == %ptS TAI\n", "PHC", &ts, &ts); 4291 + seq_printf(s, "%7s: %ptSp == %ptS UTC offset %d\n", "SYS", 4292 + &sys_ts, &sys_ts, bp->utc_tai_offset); 4295 4293 seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "", 4296 4294 timespec64_to_ns(&ts) - ns, 4297 4295 post_ns - pre_ns); ··· 4497 4499 ptp_clock_index(bp->ptp)); 4498 4500 4499 4501 if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL)) 4500 - dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n", 4501 - ts.tv_sec, ts.tv_nsec, 4502 - bp->sync ? "in-sync" : "UNSYNCED"); 4502 + dev_info(&bp->pdev->dev, "Time: %ptSp, %s\n", 4503 + &ts, bp->sync ? "in-sync" : "UNSYNCED"); 4503 4504 } 4504 4505 4505 4506 static void
+1 -2
drivers/s390/block/dasd.c
··· 974 974 static void dasd_stats_seq_print(struct seq_file *m, 975 975 struct dasd_profile_info *data) 976 976 { 977 - seq_printf(m, "start_time %lld.%09ld\n", 978 - (s64)data->starttod.tv_sec, data->starttod.tv_nsec); 977 + seq_printf(m, "start_time %ptSp\n", &data->starttod); 979 978 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 980 979 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 981 980 seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+24 -31
drivers/scsi/fnic/fnic_trace.c
··· 138 138 */ 139 139 len += scnprintf(fnic_dbgfs_prt->buffer + len, 140 140 (trace_max_pages * PAGE_SIZE * 3) - len, 141 - "%16llu.%09lu %-50s %8x %8x %16llx %16llx " 142 - "%16llx %16llx %16llx\n", (u64)val.tv_sec, 143 - val.tv_nsec, str, tbp->host_no, tbp->tag, 141 + "%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n", 142 + &val, str, tbp->host_no, tbp->tag, 144 143 tbp->data[0], tbp->data[1], tbp->data[2], 145 144 tbp->data[3], tbp->data[4]); 146 145 rd_idx++; ··· 179 180 */ 180 181 len += scnprintf(fnic_dbgfs_prt->buffer + len, 181 182 (trace_max_pages * PAGE_SIZE * 3) - len, 182 - "%16llu.%09lu %-50s %8x %8x %16llx %16llx " 183 - "%16llx %16llx %16llx\n", (u64)val.tv_sec, 184 - val.tv_nsec, str, tbp->host_no, tbp->tag, 183 + "%ptSp %-50s %8x %8x %16llx %16llx %16llx %16llx %16llx\n", 184 + &val, str, tbp->host_no, tbp->tag, 185 185 tbp->data[0], tbp->data[1], tbp->data[2], 186 186 tbp->data[3], tbp->data[4]); 187 187 rd_idx++; ··· 213 215 { 214 216 int len = 0; 215 217 int buf_size = debug->buf_size; 216 - struct timespec64 val1, val2; 218 + struct timespec64 val, val1, val2; 217 219 int i = 0; 218 220 219 - ktime_get_real_ts64(&val1); 221 + ktime_get_real_ts64(&val); 220 222 len = scnprintf(debug->debug_buffer + len, buf_size - len, 221 223 "------------------------------------------\n" 222 224 "\t\tTime\n" 223 225 "------------------------------------------\n"); 224 226 227 + val1 = timespec64_sub(val, stats->stats_timestamps.last_reset_time); 228 + val2 = timespec64_sub(val, stats->stats_timestamps.last_read_time); 225 229 len += scnprintf(debug->debug_buffer + len, buf_size - len, 226 - "Current time : [%lld:%ld]\n" 227 - "Last stats reset time: [%lld:%09ld]\n" 228 - "Last stats read time: [%lld:%ld]\n" 229 - "delta since last reset: [%lld:%ld]\n" 230 - "delta since last read: [%lld:%ld]\n", 231 - (s64)val1.tv_sec, val1.tv_nsec, 232 - (s64)stats->stats_timestamps.last_reset_time.tv_sec, 233 - stats->stats_timestamps.last_reset_time.tv_nsec, 234 - 
(s64)stats->stats_timestamps.last_read_time.tv_sec, 235 - stats->stats_timestamps.last_read_time.tv_nsec, 236 - (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec, 237 - timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec, 238 - (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec, 239 - timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec); 230 + "Current time : [%ptSp]\n" 231 + "Last stats reset time: [%ptSp]\n" 232 + "Last stats read time: [%ptSp]\n" 233 + "delta since last reset: [%ptSp]\n" 234 + "delta since last read: [%ptSp]\n", 235 + &val, 236 + &stats->stats_timestamps.last_reset_time, 237 + &stats->stats_timestamps.last_read_time, 238 + &val1, &val2); 240 239 241 - stats->stats_timestamps.last_read_time = val1; 240 + stats->stats_timestamps.last_read_time = val; 242 241 243 242 len += scnprintf(debug->debug_buffer + len, buf_size - len, 244 243 "------------------------------------------\n" ··· 411 416 jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2); 412 417 413 418 len += scnprintf(debug->debug_buffer + len, buf_size - len, 414 - "Last ISR time: %llu (%8llu.%09lu)\n" 415 - "Last ACK time: %llu (%8llu.%09lu)\n" 419 + "Last ISR time: %llu (%ptSp)\n" 420 + "Last ACK time: %llu (%ptSp)\n" 416 421 "Max ISR jiffies: %llu\n" 417 422 "Max ISR time (ms) (0 denotes < 1 ms): %llu\n" 418 423 "Corr. 
work done: %llu\n" ··· 432 437 "Number of rport not ready: %lld\n" 433 438 "Number of receive frame errors: %lld\n" 434 439 "Port speed (in Mbps): %lld\n", 435 - (u64)stats->misc_stats.last_isr_time, 436 - (s64)val1.tv_sec, val1.tv_nsec, 437 - (u64)stats->misc_stats.last_ack_time, 438 - (s64)val2.tv_sec, val2.tv_nsec, 440 + (u64)stats->misc_stats.last_isr_time, &val1, 441 + (u64)stats->misc_stats.last_ack_time, &val2, 439 442 (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies), 440 443 (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms), 441 444 (u64)atomic64_read(&stats->misc_stats.corr_work_done), ··· 850 857 len = *orig_len; 851 858 852 859 len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len, 853 - "%ptTs.%09lu ns%8x %c%8x\t", 854 - &tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec, 860 + "%ptSs ns%8x %c%8x\t", 861 + &tdata->time_stamp, 855 862 tdata->host_no, tdata->frame_type, tdata->frame_len); 856 863 857 864 fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
+4 -6
drivers/scsi/snic/snic_debugfs.c
··· 282 282 jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms); 283 283 284 284 seq_printf(sfp, 285 - "Last ISR Time : %llu (%8llu.%09lu)\n" 286 - "Last Ack Time : %llu (%8llu.%09lu)\n" 285 + "Last ISR Time : %llu (%ptSp)\n" 286 + "Last Ack Time : %llu (%ptSp)\n" 287 287 "Ack ISRs : %llu\n" 288 288 "IO Cmpl ISRs : %llu\n" 289 289 "Err Notify ISRs : %llu\n" ··· 298 298 "Queue Ramp Down : %lld\n" 299 299 "Queue Last Queue Depth : %lld\n" 300 300 "Target Not Ready : %lld\n", 301 - (u64) stats->misc.last_isr_time, 302 - last_isr_tms.tv_sec, last_isr_tms.tv_nsec, 303 - (u64)stats->misc.last_ack_time, 304 - last_ack_tms.tv_sec, last_ack_tms.tv_nsec, 301 + (u64) stats->misc.last_isr_time, &last_isr_tms, 302 + (u64) stats->misc.last_ack_time, &last_ack_tms, 305 303 (u64) atomic64_read(&stats->misc.ack_isr_cnt), 306 304 (u64) atomic64_read(&stats->misc.cmpl_isr_cnt), 307 305 (u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
+2 -3
drivers/scsi/snic/snic_trc.c
··· 56 56 jiffies_to_timespec64(td->ts, &tmspec); 57 57 58 58 len += snprintf(buf, buf_sz, 59 - "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n", 60 - tmspec.tv_sec, 61 - tmspec.tv_nsec, 59 + "%ptSp %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n", 60 + &tmspec, 62 61 td->fn, 63 62 td->hno, 64 63 td->tag,
+1 -1
drivers/staging/media/av7110/av7110.c
··· 321 321 struct timespec64 ts; 322 322 323 323 ktime_get_real_ts64(&ts); 324 - pr_info("%s(): %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec); 324 + pr_info("%s(): %ptSp\n", s, &ts); 325 325 #endif 326 326 } 327 327
-1
drivers/tty/serial/kgdboc.c
··· 577 577 console_list_lock(); 578 578 for_each_console(con) { 579 579 if (con->write && con->read && 580 - (con->flags & (CON_BOOT | CON_ENABLED)) && 581 580 (!opt || !opt[0] || strcmp(con->name, opt) == 0)) 582 581 break; 583 582 }
+2 -3
fs/ceph/dir.c
··· 2155 2155 " rfiles: %20lld\n" 2156 2156 " rsubdirs: %20lld\n" 2157 2157 "rbytes: %20lld\n" 2158 - "rctime: %10lld.%09ld\n", 2158 + "rctime: %ptSp\n", 2159 2159 ci->i_files + ci->i_subdirs, 2160 2160 ci->i_files, 2161 2161 ci->i_subdirs, ··· 2163 2163 ci->i_rfiles, 2164 2164 ci->i_rsubdirs, 2165 2165 ci->i_rbytes, 2166 - ci->i_rctime.tv_sec, 2167 - ci->i_rctime.tv_nsec); 2166 + &ci->i_rctime); 2168 2167 } 2169 2168 2170 2169 if (*ppos >= dfi->dir_info_len)
+16 -33
fs/ceph/inode.c
··· 879 879 { 880 880 struct ceph_client *cl = ceph_inode_to_client(inode); 881 881 struct ceph_inode_info *ci = ceph_inode(inode); 882 + struct timespec64 iatime = inode_get_atime(inode); 882 883 struct timespec64 ictime = inode_get_ctime(inode); 884 + struct timespec64 imtime = inode_get_mtime(inode); 883 885 int warn = 0; 884 886 885 887 if (issued & (CEPH_CAP_FILE_EXCL| ··· 891 889 CEPH_CAP_XATTR_EXCL)) { 892 890 if (ci->i_version == 0 || 893 891 timespec64_compare(ctime, &ictime) > 0) { 894 - doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n", 895 - ictime.tv_sec, ictime.tv_nsec, 896 - ctime->tv_sec, ctime->tv_nsec); 892 + doutc(cl, "ctime %ptSp -> %ptSp inc w/ cap\n", &ictime, ctime); 897 893 inode_set_ctime_to_ts(inode, *ctime); 898 894 } 899 895 if (ci->i_version == 0 || 900 896 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 901 897 /* the MDS did a utimes() */ 902 - doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n", 903 - inode_get_mtime_sec(inode), 904 - inode_get_mtime_nsec(inode), 905 - mtime->tv_sec, mtime->tv_nsec, 906 - ci->i_time_warp_seq, (int)time_warp_seq); 898 + doutc(cl, "mtime %ptSp -> %ptSp tw %d -> %d\n", &imtime, mtime, 899 + ci->i_time_warp_seq, (int)time_warp_seq); 907 900 908 901 inode_set_mtime_to_ts(inode, *mtime); 909 902 inode_set_atime_to_ts(inode, *atime); 910 903 ci->i_time_warp_seq = time_warp_seq; 911 904 } else if (time_warp_seq == ci->i_time_warp_seq) { 912 - struct timespec64 ts; 913 - 914 905 /* nobody did utimes(); take the max */ 915 - ts = inode_get_mtime(inode); 916 - if (timespec64_compare(mtime, &ts) > 0) { 917 - doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n", 918 - ts.tv_sec, ts.tv_nsec, 919 - mtime->tv_sec, mtime->tv_nsec); 906 + if (timespec64_compare(mtime, &imtime) > 0) { 907 + doutc(cl, "mtime %ptSp -> %ptSp inc\n", &imtime, mtime); 920 908 inode_set_mtime_to_ts(inode, *mtime); 921 909 } 922 - ts = inode_get_atime(inode); 923 - if (timespec64_compare(atime, &ts) > 0) { 924 - doutc(cl, 
"atime %lld.%09ld -> %lld.%09ld inc\n", 925 - ts.tv_sec, ts.tv_nsec, 926 - atime->tv_sec, atime->tv_nsec); 910 + if (timespec64_compare(atime, &iatime) > 0) { 911 + doutc(cl, "atime %ptSp -> %ptSp inc\n", &iatime, atime); 927 912 inode_set_atime_to_ts(inode, *atime); 928 913 } 929 914 } else if (issued & CEPH_CAP_FILE_EXCL) { ··· 2692 2703 if (ia_valid & ATTR_ATIME) { 2693 2704 struct timespec64 atime = inode_get_atime(inode); 2694 2705 2695 - doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n", 2696 - inode, ceph_vinop(inode), 2697 - atime.tv_sec, atime.tv_nsec, 2698 - attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 2706 + doutc(cl, "%p %llx.%llx atime %ptSp -> %ptSp\n", 2707 + inode, ceph_vinop(inode), &atime, &attr->ia_atime); 2699 2708 if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) { 2700 2709 ci->i_time_warp_seq++; 2701 2710 inode_set_atime_to_ts(inode, attr->ia_atime); ··· 2767 2780 if (ia_valid & ATTR_MTIME) { 2768 2781 struct timespec64 mtime = inode_get_mtime(inode); 2769 2782 2770 - doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n", 2771 - inode, ceph_vinop(inode), 2772 - mtime.tv_sec, mtime.tv_nsec, 2773 - attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 2783 + doutc(cl, "%p %llx.%llx mtime %ptSp -> %ptSp\n", 2784 + inode, ceph_vinop(inode), &mtime, &attr->ia_mtime); 2774 2785 if (!do_sync && (issued & CEPH_CAP_FILE_EXCL)) { 2775 2786 ci->i_time_warp_seq++; 2776 2787 inode_set_mtime_to_ts(inode, attr->ia_mtime); ··· 2789 2804 2790 2805 /* these do nothing */ 2791 2806 if (ia_valid & ATTR_CTIME) { 2807 + struct timespec64 ictime = inode_get_ctime(inode); 2792 2808 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 2793 2809 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 2794 - doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n", 2795 - inode, ceph_vinop(inode), 2796 - inode_get_ctime_sec(inode), 2797 - inode_get_ctime_nsec(inode), 2798 - attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2810 + doutc(cl, "%p %llx.%llx ctime %ptSp -> %ptSp 
(%s)\n", 2811 + inode, ceph_vinop(inode), &ictime, &attr->ia_ctime, 2799 2812 only ? "ctime only" : "ignored"); 2800 2813 if (only) { 2801 2814 /*
+2 -4
fs/ceph/xattr.c
··· 249 249 static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val, 250 250 size_t size) 251 251 { 252 - return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec, 253 - ci->i_rctime.tv_nsec); 252 + return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_rctime); 254 253 } 255 254 256 255 /* dir pin */ ··· 306 307 static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val, 307 308 size_t size) 308 309 { 309 - return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec, 310 - ci->i_snap_btime.tv_nsec); 310 + return ceph_fmt_xattr(val, size, "%ptSp", &ci->i_snap_btime); 311 311 } 312 312 313 313 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
+68
include/linux/console.h
··· 19 19 #include <linux/irq_work.h> 20 20 #include <linux/rculist.h> 21 21 #include <linux/rcuwait.h> 22 + #include <linux/smp.h> 22 23 #include <linux/types.h> 23 24 #include <linux/vesa.h> 24 25 ··· 186 185 * printing callbacks must not be called. 187 186 * @CON_NBCON: Console can operate outside of the legacy style console_lock 188 187 * constraints. 188 + * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is 189 + * therefore only used by nbcon_atomic_flush_unsafe(). 189 190 */ 190 191 enum cons_flags { 191 192 CON_PRINTBUFFER = BIT(0), ··· 199 196 CON_EXTENDED = BIT(6), 200 197 CON_SUSPENDED = BIT(7), 201 198 CON_NBCON = BIT(8), 199 + CON_NBCON_ATOMIC_UNSAFE = BIT(9), 202 200 }; 203 201 204 202 /** ··· 606 602 extern void nbcon_cpu_emergency_enter(void); 607 603 extern void nbcon_cpu_emergency_exit(void); 608 604 extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt); 605 + extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt, 606 + char *buf, unsigned int len); 609 607 extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt); 610 608 extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt); 611 609 extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt); 610 + extern bool nbcon_allow_unsafe_takeover(void); 611 + extern bool nbcon_kdb_try_acquire(struct console *con, 612 + struct nbcon_write_context *wctxt); 613 + extern void nbcon_kdb_release(struct nbcon_write_context *wctxt); 614 + 615 + /* 616 + * Check if the given console is currently capable and allowed to print 617 + * records. Note that this function does not consider the current context, 618 + * which can also play a role in deciding if @con can be used to print 619 + * records. 
620 + */ 621 + static inline bool console_is_usable(struct console *con, short flags, bool use_atomic) 622 + { 623 + if (!(flags & CON_ENABLED)) 624 + return false; 625 + 626 + if ((flags & CON_SUSPENDED)) 627 + return false; 628 + 629 + if (flags & CON_NBCON) { 630 + if (use_atomic) { 631 + /* The write_atomic() callback is optional. */ 632 + if (!con->write_atomic) 633 + return false; 634 + 635 + /* 636 + * An unsafe write_atomic() callback is only usable 637 + * when unsafe takeovers are allowed. 638 + */ 639 + if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover()) 640 + return false; 641 + } 642 + 643 + /* 644 + * For the !use_atomic case, @printk_kthreads_running is not 645 + * checked because the write_thread() callback is also used 646 + * via the legacy loop when the printer threads are not 647 + * available. 648 + */ 649 + } else { 650 + if (!con->write) 651 + return false; 652 + } 653 + 654 + /* 655 + * Console drivers may assume that per-cpu resources have been 656 + * allocated. So unless they're explicitly marked as being able to 657 + * cope (CON_ANYTIME) don't call them until this CPU is officially up. 
658 + */ 659 + if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME)) 660 + return false; 661 + 662 + return true; 663 + } 664 + 612 665 #else 613 666 static inline void nbcon_cpu_emergency_enter(void) { } 614 667 static inline void nbcon_cpu_emergency_exit(void) { } 615 668 static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; } 669 + static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt, 670 + char *buf, unsigned int len) { } 616 671 static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; } 617 672 static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; } 618 673 static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { } 674 + static inline bool nbcon_kdb_try_acquire(struct console *con, 675 + struct nbcon_write_context *wctxt) { return false; } 676 + static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { } 677 + static inline bool console_is_usable(struct console *con, short flags, 678 + bool use_atomic) { return false; } 619 679 #endif 620 680 621 681 extern int console_set_on_cmdline;
+16
include/linux/kdb.h
··· 14 14 */ 15 15 16 16 #include <linux/list.h> 17 + #include <linux/smp.h> 17 18 18 19 /* Shifted versions of the command enable bits are be used if the command 19 20 * has no arguments (see kdb_check_flags). This allows commands, such as ··· 208 207 /* Dynamic kdb shell command registration */ 209 208 extern int kdb_register(kdbtab_t *cmd); 210 209 extern void kdb_unregister(kdbtab_t *cmd); 210 + 211 + /* Return true when KDB as locked for printing a message on this CPU. */ 212 + static inline 213 + bool kdb_printf_on_this_cpu(void) 214 + { 215 + /* 216 + * We can use raw_smp_processor_id() here because the task could 217 + * not get migrated when KDB has locked for printing on this CPU. 218 + */ 219 + return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id()); 220 + } 221 + 211 222 #else /* ! CONFIG_KGDB_KDB */ 212 223 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } 213 224 static inline void kdb_init(int level) {} 214 225 static inline int kdb_register(kdbtab_t *cmd) { return 0; } 215 226 static inline void kdb_unregister(kdbtab_t *cmd) {} 227 + 228 + static inline bool kdb_printf_on_this_cpu(void) { return false; } 229 + 216 230 #endif /* CONFIG_KGDB_KDB */ 217 231 enum { 218 232 KDB_NOT_INITIALIZED,
+32 -15
kernel/debug/kdb/kdb_io.c
··· 589 589 */ 590 590 cookie = console_srcu_read_lock(); 591 591 for_each_console_srcu(c) { 592 - if (!(console_srcu_read_flags(c) & CON_ENABLED)) 592 + short flags = console_srcu_read_flags(c); 593 + 594 + if (!console_is_usable(c, flags, true)) 593 595 continue; 594 596 if (c == dbg_io_ops->cons) 595 597 continue; 596 - if (!c->write) 597 - continue; 598 - /* 599 - * Set oops_in_progress to encourage the console drivers to 600 - * disregard their internal spin locks: in the current calling 601 - * context the risk of deadlock is a bigger problem than risks 602 - * due to re-entering the console driver. We operate directly on 603 - * oops_in_progress rather than using bust_spinlocks() because 604 - * the calls bust_spinlocks() makes on exit are not appropriate 605 - * for this calling context. 606 - */ 607 - ++oops_in_progress; 608 - c->write(c, msg, msg_len); 609 - --oops_in_progress; 598 + 599 + if (flags & CON_NBCON) { 600 + struct nbcon_write_context wctxt = { }; 601 + 602 + /* 603 + * Do not continue if the console is NBCON and the context 604 + * can't be acquired. 605 + */ 606 + if (!nbcon_kdb_try_acquire(c, &wctxt)) 607 + continue; 608 + 609 + nbcon_write_context_set_buf(&wctxt, (char *)msg, msg_len); 610 + 611 + c->write_atomic(c, &wctxt); 612 + nbcon_kdb_release(&wctxt); 613 + } else { 614 + /* 615 + * Set oops_in_progress to encourage the console drivers to 616 + * disregard their internal spin locks: in the current calling 617 + * context the risk of deadlock is a bigger problem than risks 618 + * due to re-entering the console driver. We operate directly on 619 + * oops_in_progress rather than using bust_spinlocks() because 620 + * the calls bust_spinlocks() makes on exit are not appropriate 621 + * for this calling context. 622 + */ 623 + ++oops_in_progress; 624 + c->write(c, msg, msg_len); 625 + --oops_in_progress; 626 + } 610 627 touch_nmi_watchdog(); 611 628 } 612 629 console_srcu_read_unlock(cookie);
+5 -48
kernel/printk/internal.h
··· 3 3 * internal.h - printk internal definitions 4 4 */ 5 5 #include <linux/console.h> 6 - #include <linux/percpu.h> 7 6 #include <linux/types.h> 8 7 9 8 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) ··· 111 112 void nbcon_kthread_stop(struct console *con); 112 113 void nbcon_kthreads_wake(void); 113 114 114 - /* 115 - * Check if the given console is currently capable and allowed to print 116 - * records. Note that this function does not consider the current context, 117 - * which can also play a role in deciding if @con can be used to print 118 - * records. 119 - */ 120 - static inline bool console_is_usable(struct console *con, short flags, bool use_atomic) 121 - { 122 - if (!(flags & CON_ENABLED)) 123 - return false; 124 - 125 - if ((flags & CON_SUSPENDED)) 126 - return false; 127 - 128 - if (flags & CON_NBCON) { 129 - /* The write_atomic() callback is optional. */ 130 - if (use_atomic && !con->write_atomic) 131 - return false; 132 - 133 - /* 134 - * For the !use_atomic case, @printk_kthreads_running is not 135 - * checked because the write_thread() callback is also used 136 - * via the legacy loop when the printer threads are not 137 - * available. 138 - */ 139 - } else { 140 - if (!con->write) 141 - return false; 142 - } 143 - 144 - /* 145 - * Console drivers may assume that per-cpu resources have been 146 - * allocated. So unless they're explicitly marked as being able to 147 - * cope (CON_ANYTIME) don't call them until this CPU is officially up. 
148 - */ 149 - if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME)) 150 - return false; 151 - 152 - return true; 153 - } 154 - 155 115 /** 156 116 * nbcon_kthread_wake - Wake up a console printing thread 157 117 * @con: Console to operate on ··· 162 204 static inline void nbcon_kthread_wake(struct console *con) { } 163 205 static inline void nbcon_kthreads_wake(void) { } 164 206 165 - static inline bool console_is_usable(struct console *con, short flags, 166 - bool use_atomic) { return false; } 167 - 168 207 #endif /* CONFIG_PRINTK */ 169 208 170 209 extern bool have_boot_console; ··· 185 230 bool legacy_offload; 186 231 }; 187 232 233 + extern bool console_irqwork_blocked; 234 + 188 235 /* 189 236 * Identify which console flushing methods should be used in the context of 190 237 * the caller. ··· 198 241 switch (nbcon_get_default_prio()) { 199 242 case NBCON_PRIO_NORMAL: 200 243 if (have_nbcon_console && !have_boot_console) { 201 - if (printk_kthreads_running) 244 + if (printk_kthreads_running && !console_irqwork_blocked) 202 245 ft->nbcon_offload = true; 203 246 else 204 247 ft->nbcon_atomic = true; ··· 208 251 if (have_legacy_console || have_boot_console) { 209 252 if (!is_printk_legacy_deferred()) 210 253 ft->legacy_direct = true; 211 - else 254 + else if (!console_irqwork_blocked) 212 255 ft->legacy_offload = true; 213 256 } 214 257 break; ··· 221 264 if (have_legacy_console || have_boot_console) { 222 265 if (!is_printk_legacy_deferred()) 223 266 ft->legacy_direct = true; 224 - else 267 + else if (!console_irqwork_blocked) 225 268 ft->legacy_offload = true; 226 269 } 227 270 break;
+151 -23
kernel/printk/nbcon.c
··· 10 10 #include <linux/export.h> 11 11 #include <linux/init.h> 12 12 #include <linux/irqflags.h> 13 + #include <linux/kdb.h> 13 14 #include <linux/kthread.h> 14 15 #include <linux/minmax.h> 15 16 #include <linux/panic.h> ··· 118 117 * context. The new owner is supposed to reprint the entire interrupted record 119 118 * from scratch. 120 119 */ 120 + 121 + /* Counter of active nbcon emergency contexts. */ 122 + static atomic_t nbcon_cpu_emergency_cnt = ATOMIC_INIT(0); 121 123 122 124 /** 123 125 * nbcon_state_set - Helper function to set the console state ··· 253 249 * since all non-panic CPUs are stopped during panic(), it 254 250 * is safer to have them avoid gaining console ownership. 255 251 * 256 - * If this acquire is a reacquire (and an unsafe takeover 252 + * One exception is when kdb has locked for printing on this CPU. 253 + * 254 + * Second exception is a reacquire (and an unsafe takeover 257 255 * has not previously occurred) then it is allowed to attempt 258 256 * a direct acquire in panic. This gives console drivers an 259 257 * opportunity to perform any necessary cleanup if they were 260 258 * interrupted by the panic CPU while printing. 261 259 */ 262 260 if (panic_on_other_cpu() && 261 + !kdb_printf_on_this_cpu() && 263 262 (!is_reacquire || cur->unsafe_takeover)) { 264 263 return -EPERM; 265 264 } ··· 857 850 return nbcon_context_can_proceed(ctxt, &cur); 858 851 } 859 852 860 - static void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt, 861 - char *buf, unsigned int len) 853 + void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt, 854 + char *buf, unsigned int len) 862 855 { 863 856 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); 864 857 struct console *con = ctxt->console; ··· 1170 1163 if (kthread_should_stop()) 1171 1164 return true; 1172 1165 1166 + /* 1167 + * Block the kthread when the system is in an emergency or panic mode. 
1168 + * It increases the chance that these contexts would be able to show 1169 + * the messages directly. And it reduces the risk of interrupted writes 1170 + * where the context with a higher priority takes over the nbcon console 1171 + * ownership in the middle of a message. 1172 + */ 1173 + if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) || 1174 + unlikely(panic_in_progress())) 1175 + return false; 1176 + 1173 1177 cookie = console_srcu_read_lock(); 1174 1178 1175 1179 flags = console_srcu_read_flags(con); ··· 1231 1213 do { 1232 1214 if (kthread_should_stop()) 1233 1215 return 0; 1216 + 1217 + /* 1218 + * Block the kthread when the system is in an emergency or panic 1219 + * mode. See nbcon_kthread_should_wakeup() for more details. 1220 + */ 1221 + if (unlikely(atomic_read(&nbcon_cpu_emergency_cnt)) || 1222 + unlikely(panic_in_progress())) 1223 + goto wait_for_event; 1234 1224 1235 1225 backlog = false; 1236 1226 ··· 1300 1274 int cookie; 1301 1275 1302 1276 if (!printk_kthreads_running) 1277 + return; 1278 + 1279 + /* 1280 + * It is not allowed to call this function when console irq_work 1281 + * is blocked. 1282 + */ 1283 + if (WARN_ON_ONCE(console_irqwork_blocked)) 1303 1284 return; 1304 1285 1305 1286 cookie = console_srcu_read_lock(); ··· 1437 1404 return NBCON_PRIO_NORMAL; 1438 1405 } 1439 1406 1407 + /* 1408 + * Track if it is allowed to perform unsafe hostile takeovers of console 1409 + * ownership. When true, console drivers might perform unsafe actions while 1410 + * printing. It is externally available via nbcon_allow_unsafe_takeover(). 
1411 + */ 1412 + static bool panic_nbcon_allow_unsafe_takeover; 1413 + 1414 + /** 1415 + * nbcon_allow_unsafe_takeover - Check if unsafe console takeovers are allowed 1416 + * 1417 + * Return: True, when it is permitted to perform unsafe console printing 1418 + * 1419 + * This is also used by console_is_usable() to determine if it is allowed to 1420 + * call write_atomic() callbacks flagged as unsafe (CON_NBCON_ATOMIC_UNSAFE). 1421 + */ 1422 + bool nbcon_allow_unsafe_takeover(void) 1423 + { 1424 + return panic_on_this_cpu() && panic_nbcon_allow_unsafe_takeover; 1425 + } 1426 + 1440 1427 /** 1441 1428 * nbcon_legacy_emit_next_record - Print one record for an nbcon console 1442 1429 * in legacy contexts ··· 1527 1474 * write_atomic() callback 1528 1475 * @con: The nbcon console to flush 1529 1476 * @stop_seq: Flush up until this record 1530 - * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers 1531 1477 * 1532 1478 * Return: 0 if @con was flushed up to @stop_seq Otherwise, error code on 1533 1479 * failure. ··· 1545 1493 * returned, it cannot be expected that the unfinalized record will become 1546 1494 * available. 
1547 1495 */ 1548 - static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq, 1549 - bool allow_unsafe_takeover) 1496 + static int __nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq) 1550 1497 { 1551 1498 struct nbcon_write_context wctxt = { }; 1552 1499 struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt); ··· 1554 1503 ctxt->console = con; 1555 1504 ctxt->spinwait_max_us = 2000; 1556 1505 ctxt->prio = nbcon_get_default_prio(); 1557 - ctxt->allow_unsafe_takeover = allow_unsafe_takeover; 1558 - 1559 - if (!nbcon_context_try_acquire(ctxt, false)) 1560 - return -EPERM; 1506 + ctxt->allow_unsafe_takeover = nbcon_allow_unsafe_takeover(); 1561 1507 1562 1508 while (nbcon_seq_read(con) < stop_seq) { 1509 + if (!nbcon_context_try_acquire(ctxt, false)) 1510 + return -EPERM; 1511 + 1563 1512 /* 1564 1513 * nbcon_emit_next_record() returns false when the console was 1565 1514 * handed over or taken over. In both cases the context is no ··· 1567 1516 */ 1568 1517 if (!nbcon_emit_next_record(&wctxt, true)) 1569 1518 return -EAGAIN; 1519 + 1520 + nbcon_context_release(ctxt); 1570 1521 1571 1522 if (!ctxt->backlog) { 1572 1523 /* Are there reserved but not yet finalized records? */ ··· 1578 1525 } 1579 1526 } 1580 1527 1581 - nbcon_context_release(ctxt); 1582 1528 return err; 1583 1529 } 1584 1530 ··· 1586 1534 * write_atomic() callback 1587 1535 * @con: The nbcon console to flush 1588 1536 * @stop_seq: Flush up until this record 1589 - * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers 1590 1537 * 1591 1538 * This will stop flushing before @stop_seq if another context has ownership. 1592 1539 * That context is then responsible for the flushing. Likewise, if new records 1593 1540 * are added while this context was flushing and there is no other context 1594 1541 * to handle the printing, this context must also flush those records. 
1595 1542 */ 1596 - static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq, 1597 - bool allow_unsafe_takeover) 1543 + static void nbcon_atomic_flush_pending_con(struct console *con, u64 stop_seq) 1598 1544 { 1599 1545 struct console_flush_type ft; 1600 1546 unsigned long flags; ··· 1607 1557 */ 1608 1558 local_irq_save(flags); 1609 1559 1610 - err = __nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover); 1560 + err = __nbcon_atomic_flush_pending_con(con, stop_seq); 1611 1561 1612 1562 local_irq_restore(flags); 1613 1563 ··· 1639 1589 * __nbcon_atomic_flush_pending - Flush all nbcon consoles using their 1640 1590 * write_atomic() callback 1641 1591 * @stop_seq: Flush up until this record 1642 - * @allow_unsafe_takeover: True, to allow unsafe hostile takeovers 1643 1592 */ 1644 - static void __nbcon_atomic_flush_pending(u64 stop_seq, bool allow_unsafe_takeover) 1593 + static void __nbcon_atomic_flush_pending(u64 stop_seq) 1645 1594 { 1646 1595 struct console *con; 1647 1596 int cookie; ··· 1658 1609 if (nbcon_seq_read(con) >= stop_seq) 1659 1610 continue; 1660 1611 1661 - nbcon_atomic_flush_pending_con(con, stop_seq, allow_unsafe_takeover); 1612 + nbcon_atomic_flush_pending_con(con, stop_seq); 1662 1613 } 1663 1614 console_srcu_read_unlock(cookie); 1664 1615 } ··· 1674 1625 */ 1675 1626 void nbcon_atomic_flush_pending(void) 1676 1627 { 1677 - __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), false); 1628 + __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb)); 1678 1629 } 1679 1630 1680 1631 /** ··· 1686 1637 */ 1687 1638 void nbcon_atomic_flush_unsafe(void) 1688 1639 { 1689 - __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb), true); 1640 + panic_nbcon_allow_unsafe_takeover = true; 1641 + __nbcon_atomic_flush_pending(prb_next_reserve_seq(prb)); 1642 + panic_nbcon_allow_unsafe_takeover = false; 1690 1643 } 1691 1644 1692 1645 /** ··· 1706 1655 1707 1656 preempt_disable(); 1708 1657 1658 + 
atomic_inc(&nbcon_cpu_emergency_cnt); 1659 + 1709 1660 cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting(); 1710 1661 (*cpu_emergency_nesting)++; 1711 1662 } ··· 1722 1669 unsigned int *cpu_emergency_nesting; 1723 1670 1724 1671 cpu_emergency_nesting = nbcon_get_cpu_emergency_nesting(); 1725 - 1726 1672 if (!WARN_ON_ONCE(*cpu_emergency_nesting == 0)) 1727 1673 (*cpu_emergency_nesting)--; 1674 + 1675 + /* 1676 + * Wake up kthreads because there might be some pending messages 1677 + * added by other CPUs with normal priority since the last flush 1678 + * in the emergency context. 1679 + */ 1680 + if (!WARN_ON_ONCE(atomic_read(&nbcon_cpu_emergency_cnt) == 0)) { 1681 + if (atomic_dec_return(&nbcon_cpu_emergency_cnt) == 0) { 1682 + struct console_flush_type ft; 1683 + 1684 + printk_get_console_flush_type(&ft); 1685 + if (ft.nbcon_offload) 1686 + nbcon_kthreads_wake(); 1687 + } 1688 + } 1728 1689 1729 1690 preempt_enable(); 1730 1691 } ··· 1911 1844 * using the legacy loop. 1912 1845 */ 1913 1846 if (ft.nbcon_atomic) { 1914 - __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb), false); 1847 + __nbcon_atomic_flush_pending_con(con, prb_next_reserve_seq(prb)); 1915 1848 } else if (ft.legacy_direct) { 1916 1849 if (console_trylock()) 1917 1850 console_unlock(); 1918 1851 } else if (ft.legacy_offload) { 1919 - printk_trigger_flush(); 1852 + defer_console_output(); 1920 1853 } 1921 1854 } 1922 1855 console_srcu_read_unlock(cookie); 1923 1856 } 1924 1857 EXPORT_SYMBOL_GPL(nbcon_device_release); 1858 + 1859 + /** 1860 + * nbcon_kdb_try_acquire - Try to acquire nbcon console and enter unsafe 1861 + * section 1862 + * @con: The nbcon console to acquire 1863 + * @wctxt: The nbcon write context to be used on success 1864 + * 1865 + * Context: Under console_srcu_read_lock() for emitting a single kdb message 1866 + * using the given con->write_atomic() callback. Can be called 1867 + * only when the console is usable at the moment. 
+ 1869 + * Return: True if the console was acquired. False otherwise. 1870 + * 1871 + * kdb emits messages on consoles registered for printk() without 1872 + * storing them into the ring buffer. It has to acquire the console 1873 + * ownership so that it could call con->write_atomic() callback in a safe way. 1874 + * 1875 + * This function acquires the nbcon console using priority NBCON_PRIO_EMERGENCY 1876 + * and marks it unsafe for handover/takeover. 1877 + */ 1878 + bool nbcon_kdb_try_acquire(struct console *con, 1879 + struct nbcon_write_context *wctxt) 1880 + { 1881 + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); 1882 + 1883 + memset(ctxt, 0, sizeof(*ctxt)); 1884 + ctxt->console = con; 1885 + ctxt->prio = NBCON_PRIO_EMERGENCY; 1886 + 1887 + if (!nbcon_context_try_acquire(ctxt, false)) 1888 + return false; 1889 + 1890 + if (!nbcon_context_enter_unsafe(ctxt)) 1891 + return false; 1892 + 1893 + return true; 1894 + } 1895 + 1896 + /** 1897 + * nbcon_kdb_release - Exit unsafe section and release the nbcon console 1898 + * 1899 + * @wctxt: The nbcon write context initialized by a successful 1900 + * nbcon_kdb_try_acquire() 1901 + */ 1902 + void nbcon_kdb_release(struct nbcon_write_context *wctxt) 1903 + { 1904 + struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); 1905 + 1906 + if (!nbcon_context_exit_unsafe(ctxt)) 1907 + return; 1908 + 1909 + nbcon_context_release(ctxt); 1910 + 1911 + /* 1912 + * Flush any new printk() messages added when the console was blocked. 1913 + * Only the console used by the given write context was blocked. 1914 + * The console was locked only when the write_atomic() callback 1915 + * was usable. 1916 + */ 1917 + __nbcon_atomic_flush_pending_con(ctxt->console, prb_next_reserve_seq(prb)); 1918 + }
+200 -106
kernel/printk/printk.c
··· 462 462 /* See printk_legacy_allow_panic_sync() for details. */ 463 463 bool legacy_allow_panic_sync; 464 464 465 + /* Avoid using irq_work when suspending. */ 466 + bool console_irqwork_blocked; 467 + 465 468 #ifdef CONFIG_PRINTK 466 469 DECLARE_WAIT_QUEUE_HEAD(log_wait); 467 470 static DECLARE_WAIT_QUEUE_HEAD(legacy_wait); ··· 2393 2390 /* If called from the scheduler, we can not call up(). */ 2394 2391 if (level == LOGLEVEL_SCHED) { 2395 2392 level = LOGLEVEL_DEFAULT; 2396 - ft.legacy_offload |= ft.legacy_direct; 2393 + ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked; 2397 2394 ft.legacy_direct = false; 2398 2395 } 2399 2396 ··· 2429 2426 2430 2427 if (ft.legacy_offload) 2431 2428 defer_console_output(); 2432 - else 2429 + else if (!console_irqwork_blocked) 2433 2430 wake_up_klogd(); 2434 2431 2435 2432 return printed_len; ··· 2733 2730 { 2734 2731 struct console *con; 2735 2732 2733 + if (console_suspend_enabled) 2734 + pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); 2735 + 2736 + /* 2737 + * Flush any console backlog and then avoid queueing irq_work until 2738 + * console_resume_all(). Until then deferred printing is no longer 2739 + * triggered, NBCON consoles transition to atomic flushing, and 2740 + * any klogd waiters are not triggered. 
2741 + */ 2742 + pr_flush(1000, true); 2743 + console_irqwork_blocked = true; 2744 + 2736 2745 if (!console_suspend_enabled) 2737 2746 return; 2738 - pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); 2739 - pr_flush(1000, true); 2740 2747 2741 2748 console_list_lock(); 2742 2749 for_each_console(con) ··· 2767 2754 struct console_flush_type ft; 2768 2755 struct console *con; 2769 2756 2770 - if (!console_suspend_enabled) 2771 - return; 2772 - 2773 - console_list_lock(); 2774 - for_each_console(con) 2775 - console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); 2776 - console_list_unlock(); 2777 - 2778 2757 /* 2779 - * Ensure that all SRCU list walks have completed. All printing 2780 - * contexts must be able to see they are no longer suspended so 2781 - * that they are guaranteed to wake up and resume printing. 2758 + * Allow queueing irq_work. After restoring console state, deferred 2759 + * printing and any klogd waiters need to be triggered in case there 2760 + * is now a console backlog. 2782 2761 */ 2783 - synchronize_srcu(&console_srcu); 2762 + console_irqwork_blocked = false; 2763 + 2764 + if (console_suspend_enabled) { 2765 + console_list_lock(); 2766 + for_each_console(con) 2767 + console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); 2768 + console_list_unlock(); 2769 + 2770 + /* 2771 + * Ensure that all SRCU list walks have completed. All printing 2772 + * contexts must be able to see they are no longer suspended so 2773 + * that they are guaranteed to wake up and resume printing. 
2774 + */ 2775 + synchronize_srcu(&console_srcu); 2776 + } 2784 2777 2785 2778 printk_get_console_flush_type(&ft); 2786 2779 if (ft.nbcon_offload) 2787 2780 nbcon_kthreads_wake(); 2788 2781 if (ft.legacy_offload) 2789 2782 defer_console_output(); 2783 + else 2784 + wake_up_klogd(); 2790 2785 2791 2786 pr_flush(1000, true); 2792 2787 } ··· 3023 3002 } 3024 3003 3025 3004 /* 3026 - * Legacy console printing from printk() caller context does not respect 3027 - * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a 3028 - * false positive. For PREEMPT_RT the false positive condition does not 3029 - * occur. 3030 - * 3031 - * This map is used to temporarily establish LD_WAIT_SLEEP context for the 3032 - * console write() callback when legacy printing to avoid false positive 3033 - * lockdep complaints, thus allowing lockdep to continue to function for 3034 - * real issues. 3005 + * The legacy console always acquires a spinlock_t from its printing 3006 + * callback. This violates lock nesting if the caller acquired an always 3007 + * spinning lock (raw_spinlock_t) while invoking printk(). This is not a 3008 + * problem on PREEMPT_RT because legacy consoles print always from a 3009 + * dedicated thread and never from within printk(). Therefore we tell 3010 + * lockdep that a sleeping spin lock (spinlock_t) is valid here. 3035 3011 */ 3036 3012 #ifdef CONFIG_PREEMPT_RT 3037 3013 static inline void printk_legacy_allow_spinlock_enter(void) { } 3038 3014 static inline void printk_legacy_allow_spinlock_exit(void) { } 3039 3015 #else 3040 - static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP); 3016 + static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_CONFIG); 3041 3017 3042 3018 static inline void printk_legacy_allow_spinlock_enter(void) 3043 3019 { ··· 3152 3134 3153 3135 #endif /* CONFIG_PRINTK */ 3154 3136 3137 + 3138 + /* 3139 + * Print out one record for each console. 3140 + * 3141 + * @do_cond_resched is set by the caller. 
It can be true only in schedulable 3142 + * context. 3143 + * 3144 + * @next_seq is set to the sequence number after the last available record. 3145 + * The value is valid only when all usable consoles were flushed. It is 3146 + * when the function returns true (can do the job) and @try_again parameter 3147 + * is set to false, see below. 3148 + * 3149 + * @handover will be set to true if a printk waiter has taken over the 3150 + * console_lock, in which case the caller is no longer holding the 3151 + * console_lock. Otherwise it is set to false. 3152 + * 3153 + * @try_again will be set to true when it still makes sense to call this 3154 + * function again. The function could do the job, see the return value. 3155 + * And some consoles still make progress. 3156 + * 3157 + * Returns true when the function could do the job. Some consoles are usable, 3158 + * and there was no takeover and no panic_on_other_cpu(). 3159 + * 3160 + * Requires the console_lock. 3161 + */ 3162 + static bool console_flush_one_record(bool do_cond_resched, u64 *next_seq, bool *handover, 3163 + bool *try_again) 3164 + { 3165 + struct console_flush_type ft; 3166 + bool any_usable = false; 3167 + struct console *con; 3168 + int cookie; 3169 + 3170 + *try_again = false; 3171 + 3172 + printk_get_console_flush_type(&ft); 3173 + 3174 + cookie = console_srcu_read_lock(); 3175 + for_each_console_srcu(con) { 3176 + short flags = console_srcu_read_flags(con); 3177 + u64 printk_seq; 3178 + bool progress; 3179 + 3180 + /* 3181 + * console_flush_one_record() is only responsible for 3182 + * nbcon consoles when the nbcon consoles cannot print via 3183 + * their atomic or threaded flushing. 
*/ 3185 + if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3186 + continue; 3187 + 3188 + if (!console_is_usable(con, flags, !do_cond_resched)) 3189 + continue; 3190 + any_usable = true; 3191 + 3192 + if (flags & CON_NBCON) { 3193 + progress = nbcon_legacy_emit_next_record(con, handover, cookie, 3194 + !do_cond_resched); 3195 + printk_seq = nbcon_seq_read(con); 3196 + } else { 3197 + progress = console_emit_next_record(con, handover, cookie); 3198 + printk_seq = con->seq; 3199 + } 3200 + 3201 + /* 3202 + * If a handover has occurred, the SRCU read lock 3203 + * is already released. 3204 + */ 3205 + if (*handover) 3206 + goto fail; 3207 + 3208 + /* Track the next of the highest seq flushed. */ 3209 + if (printk_seq > *next_seq) 3210 + *next_seq = printk_seq; 3211 + 3212 + if (!progress) 3213 + continue; 3214 + 3215 + /* 3216 + * A usable console made progress. There might still be 3217 + * pending messages. 3218 + */ 3219 + *try_again = true; 3220 + 3221 + /* Allow panic_cpu to take over the consoles safely. */ 3222 + if (panic_on_other_cpu()) 3223 + goto fail_srcu; 3224 + 3225 + if (do_cond_resched) 3226 + cond_resched(); 3227 + } 3228 + console_srcu_read_unlock(cookie); 3229 + 3230 + return any_usable; 3231 + 3232 + fail_srcu: 3233 + console_srcu_read_unlock(cookie); 3234 + fail: 3235 + *try_again = false; 3236 + return false; 3237 + } 3238 + 3155 3239 /* 3156 3240 * Print out all remaining records to all consoles. 
3157 3241 * ··· 3279 3159 */ 3280 3160 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) 3281 3161 { 3282 - struct console_flush_type ft; 3283 - bool any_usable = false; 3284 - struct console *con; 3285 - bool any_progress; 3286 - int cookie; 3162 + bool try_again; 3163 + bool ret; 3287 3164 3288 3165 *next_seq = 0; 3289 3166 *handover = false; 3290 3167 3291 3168 do { 3292 - any_progress = false; 3169 + ret = console_flush_one_record(do_cond_resched, next_seq, 3170 + handover, &try_again); 3171 + } while (try_again); 3293 3172 3294 - printk_get_console_flush_type(&ft); 3295 - 3296 - cookie = console_srcu_read_lock(); 3297 - for_each_console_srcu(con) { 3298 - short flags = console_srcu_read_flags(con); 3299 - u64 printk_seq; 3300 - bool progress; 3301 - 3302 - /* 3303 - * console_flush_all() is only responsible for nbcon 3304 - * consoles when the nbcon consoles cannot print via 3305 - * their atomic or threaded flushing. 3306 - */ 3307 - if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) 3308 - continue; 3309 - 3310 - if (!console_is_usable(con, flags, !do_cond_resched)) 3311 - continue; 3312 - any_usable = true; 3313 - 3314 - if (flags & CON_NBCON) { 3315 - progress = nbcon_legacy_emit_next_record(con, handover, cookie, 3316 - !do_cond_resched); 3317 - printk_seq = nbcon_seq_read(con); 3318 - } else { 3319 - progress = console_emit_next_record(con, handover, cookie); 3320 - printk_seq = con->seq; 3321 - } 3322 - 3323 - /* 3324 - * If a handover has occurred, the SRCU read lock 3325 - * is already released. 3326 - */ 3327 - if (*handover) 3328 - return false; 3329 - 3330 - /* Track the next of the highest seq flushed. */ 3331 - if (printk_seq > *next_seq) 3332 - *next_seq = printk_seq; 3333 - 3334 - if (!progress) 3335 - continue; 3336 - any_progress = true; 3337 - 3338 - /* Allow panic_cpu to take over the consoles safely. 
*/ 3339 - if (panic_on_other_cpu()) 3340 - goto abandon; 3341 - 3342 - if (do_cond_resched) 3343 - cond_resched(); 3344 - } 3345 - console_srcu_read_unlock(cookie); 3346 - } while (any_progress); 3347 - 3348 - return any_usable; 3349 - 3350 - abandon: 3351 - console_srcu_read_unlock(cookie); 3352 - return false; 3173 + return ret; 3353 3174 } 3354 3175 3355 3176 static void __console_flush_and_unlock(void) ··· 3392 3331 */ 3393 3332 cookie = console_srcu_read_lock(); 3394 3333 for_each_console_srcu(c) { 3395 - short flags = console_srcu_read_flags(c); 3396 - 3397 - if (flags & CON_SUSPENDED) 3334 + if (!console_is_usable(c, console_srcu_read_flags(c), true)) 3398 3335 continue; 3399 3336 3400 - if ((flags & CON_ENABLED) && c->unblank) { 3337 + if (c->unblank) { 3401 3338 found_unblank = true; 3402 3339 break; 3403 3340 } ··· 3432 3373 3433 3374 cookie = console_srcu_read_lock(); 3434 3375 for_each_console_srcu(c) { 3435 - short flags = console_srcu_read_flags(c); 3436 - 3437 - if (flags & CON_SUSPENDED) 3376 + if (!console_is_usable(c, console_srcu_read_flags(c), true)) 3438 3377 continue; 3439 3378 3440 - if ((flags & CON_ENABLED) && c->unblank) 3379 + if (c->unblank) 3441 3380 c->unblank(); 3442 3381 } 3443 3382 console_srcu_read_unlock(cookie); ··· 3658 3601 3659 3602 static int legacy_kthread_func(void *unused) 3660 3603 { 3661 - for (;;) { 3662 - wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup()); 3604 + bool try_again; 3605 + 3606 + wait_for_event: 3607 + wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup()); 3608 + 3609 + do { 3610 + bool handover = false; 3611 + u64 next_seq = 0; 3663 3612 3664 3613 if (kthread_should_stop()) 3665 - break; 3614 + return 0; 3666 3615 3667 3616 console_lock(); 3668 - __console_flush_and_unlock(); 3669 - } 3617 + console_flush_one_record(true, &next_seq, &handover, &try_again); 3618 + if (!handover) 3619 + __console_unlock(); 3670 3620 3671 - return 0; 3621 + } while (try_again); 3622 + 3623 
+ goto wait_for_event; 3672 3624 } 3673 3625 3674 3626 static bool legacy_kthread_create(void) ··· 4577 4511 if (!printk_percpu_data_ready()) 4578 4512 return; 4579 4513 4514 + /* 4515 + * It is not allowed to call this function when console irq_work 4516 + * is blocked. 4517 + */ 4518 + if (WARN_ON_ONCE(console_irqwork_blocked)) 4519 + return; 4520 + 4580 4521 preempt_disable(); 4581 4522 /* 4582 4523 * Guarantee any new records can be seen by tasks preparing to wait ··· 4640 4567 __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); 4641 4568 } 4642 4569 4570 + /** 4571 + * printk_trigger_flush - Attempt to flush printk buffer to consoles. 4572 + * 4573 + * If possible, flush the printk buffer to all consoles in the caller's 4574 + * context. If offloading is available, trigger deferred printing. 4575 + * 4576 + * This is best effort. Depending on the system state, console states, 4577 + * and caller context, no actual flushing may result from this call. 4578 + */ 4643 4579 void printk_trigger_flush(void) 4644 4580 { 4645 - defer_console_output(); 4581 + struct console_flush_type ft; 4582 + 4583 + printk_get_console_flush_type(&ft); 4584 + if (ft.nbcon_atomic) 4585 + nbcon_atomic_flush_pending(); 4586 + if (ft.nbcon_offload) 4587 + nbcon_kthreads_wake(); 4588 + if (ft.legacy_direct) { 4589 + if (console_trylock()) 4590 + console_unlock(); 4591 + } 4592 + if (ft.legacy_offload) 4593 + defer_console_output(); 4646 4594 } 4647 4595 4648 4596 int vprintk_deferred(const char *fmt, va_list args)
+53 -14
kernel/printk/printk_ringbuffer.c
··· 411 411 return to_blk_size(size) <= DATA_SIZE(data_ring) / 2; 412 412 } 413 413 414 + /* 415 + * Compare the current and requested logical position and decide 416 + * whether more space is needed. 417 + * 418 + * Return false when @lpos_current is already at or beyond @lpos_target. 419 + * 420 + * Also return false when the difference between the positions is bigger 421 + * than the size of the data buffer. It might happen only when the caller 422 + * raced with another CPU(s) which already made and used the space. 423 + */ 424 + static bool need_more_space(struct prb_data_ring *data_ring, 425 + unsigned long lpos_current, 426 + unsigned long lpos_target) 427 + { 428 + return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring); 429 + } 430 + 414 431 /* Query the state of a descriptor. */ 415 432 static enum desc_state get_desc_state(unsigned long id, 416 433 unsigned long state_val) ··· 594 577 unsigned long id; 595 578 596 579 /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ 597 - while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { 580 + while (need_more_space(data_ring, lpos_begin, lpos_end)) { 598 581 blk = to_block(data_ring, lpos_begin); 599 582 600 583 /* ··· 685 668 * sees the new tail lpos, any descriptor states that transitioned to 686 669 * the reusable state must already be visible. 687 670 */ 688 - while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { 671 + while (need_more_space(data_ring, tail_lpos, lpos)) { 689 672 /* 690 673 * Make all descriptors reusable that are associated with 691 674 * data blocks before @lpos. ··· 1016 999 return true; 1017 1000 } 1018 1001 1002 + static bool is_blk_wrapped(struct prb_data_ring *data_ring, 1003 + unsigned long begin_lpos, unsigned long next_lpos) 1004 + { 1005 + /* 1006 + * Subtract one from next_lpos since it's not actually part of this data 1007 + * block. This allows perfectly fitting records to not wrap. 
1008 + */ 1009 + return DATA_WRAPS(data_ring, begin_lpos) != 1010 + DATA_WRAPS(data_ring, next_lpos - 1); 1011 + } 1012 + 1019 1013 /* Determine the end of a data block. */ 1020 1014 static unsigned long get_next_lpos(struct prb_data_ring *data_ring, 1021 1015 unsigned long lpos, unsigned int size) ··· 1038 1010 next_lpos = lpos + size; 1039 1011 1040 1012 /* First check if the data block does not wrap. */ 1041 - if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) 1013 + if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos)) 1042 1014 return next_lpos; 1043 1015 1044 1016 /* Wrapping data blocks store their data at the beginning. */ ··· 1115 1087 blk = to_block(data_ring, begin_lpos); 1116 1088 blk->id = id; /* LMM(data_alloc:B) */ 1117 1089 1118 - if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) { 1090 + if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) { 1119 1091 /* Wrapping data blocks store their data at the beginning. */ 1120 1092 blk = to_block(data_ring, 0); 1121 1093 ··· 1159 1131 return NULL; 1160 1132 1161 1133 /* Keep track if @blk_lpos was a wrapping data block. */ 1162 - wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); 1134 + wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next); 1163 1135 1164 1136 size = to_blk_size(size); 1165 1137 1166 1138 next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size); 1167 1139 1168 - /* If the data block does not increase, there is nothing to do. */ 1169 - if (head_lpos - next_lpos < DATA_SIZE(data_ring)) { 1140 + /* 1141 + * Use the current data block when the size does not increase, i.e. 1142 + * when @head_lpos is already able to accommodate the new @next_lpos. 1143 + * 1144 + * Note that need_more_space() could never return false here because 1145 + * the difference between the positions was bigger than the data 1146 + * buffer size. The data block is reopened and can't get reused. 
1147 + */ 1148 + if (!need_more_space(data_ring, head_lpos, next_lpos)) { 1170 1149 if (wrapped) 1171 1150 blk = to_block(data_ring, 0); 1172 1151 else ··· 1202 1167 1203 1168 blk = to_block(data_ring, blk_lpos->begin); 1204 1169 1205 - if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) { 1170 + if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) { 1206 1171 struct prb_data_block *old_blk = blk; 1207 1172 1208 1173 /* Wrapping data blocks store their data at the beginning. */ ··· 1238 1203 if (BLK_DATALESS(blk_lpos)) 1239 1204 return 0; 1240 1205 1241 - if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { 1206 + if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) { 1242 1207 /* Data block does not wrap. */ 1243 1208 return (DATA_INDEX(data_ring, blk_lpos->next) - 1244 1209 DATA_INDEX(data_ring, blk_lpos->begin)); ··· 1284 1249 return NULL; 1285 1250 } 1286 1251 1287 - /* Regular data block: @begin less than @next and in same wrap. */ 1288 - if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && 1289 - blk_lpos->begin < blk_lpos->next) { 1252 + /* Regular data block: @begin and @next in the same wrap. */ 1253 + if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) { 1290 1254 db = to_block(data_ring, blk_lpos->begin); 1291 1255 *data_size = blk_lpos->next - blk_lpos->begin; 1292 1256 1293 1257 /* Wrapping data block: @begin is one wrap behind @next. */ 1294 - } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == 1295 - DATA_WRAPS(data_ring, blk_lpos->next)) { 1258 + } else if (!is_blk_wrapped(data_ring, 1259 + blk_lpos->begin + DATA_SIZE(data_ring), 1260 + blk_lpos->next)) { 1296 1261 db = to_block(data_ring, 0); 1297 1262 *data_size = DATA_INDEX(data_ring, blk_lpos->next); 1298 1263 ··· 1301 1266 WARN_ON_ONCE(1); 1302 1267 return NULL; 1303 1268 } 1269 + 1270 + /* Sanity check. Data-less blocks were handled earlier. 
*/ 1271 + if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size)) 1272 + return NULL; 1304 1273 1305 1274 /* A valid data block will always be aligned to the ID size. */ 1306 1275 if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
+3 -3
kernel/trace/trace_output.c
··· 1467 1467 1468 1468 trace_assign_type(field, entry); 1469 1469 1470 - trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d", 1470 + trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ptSp count:%d", 1471 1471 field->seqnum, 1472 1472 field->duration, 1473 1473 field->outer_duration, 1474 - (long long)field->timestamp.tv_sec, 1475 - field->timestamp.tv_nsec, field->count); 1474 + &field->timestamp, 1475 + field->count); 1476 1476 1477 1477 if (field->nmi_count) { 1478 1478 /*
+4
lib/tests/printf_kunit.c
··· 504 504 }; 505 505 /* 2019-01-04T15:32:23 */ 506 506 time64_t t = 1546615943; 507 + struct timespec64 ts = { .tv_sec = t, .tv_nsec = 11235813 }; 507 508 508 509 test("(%pt?)", "%pt", &tm); 509 510 test("2018-11-26T05:35:43", "%ptR", &tm); ··· 523 522 test("0119-00-04 15:32:23", "%ptTsr", &t); 524 523 test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t); 525 524 test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t); 525 + 526 + test("2019-01-04T15:32:23.011235813", "%ptS", &ts); 527 + test("1546615943.011235813", "%ptSp", &ts); 526 528 } 527 529 528 530 static void
+50 -30
lib/vsprintf.c
··· 582 582 return buf; 583 583 } 584 584 585 + #define special_hex_spec(size) \ 586 + (struct printf_spec) { \ 587 + .field_width = 2 + 2 * (size), /* 0x + hex */ \ 588 + .flags = SPECIAL | SMALL | ZEROPAD, \ 589 + .base = 16, \ 590 + .precision = -1, \ 591 + } 592 + 585 593 static noinline_for_stack 586 594 char *special_hex_number(char *buf, char *end, unsigned long long num, int size) 587 595 { 588 - struct printf_spec spec; 589 - 590 - spec.field_width = 2 + 2 * size; /* 0x + hex */ 591 - spec.flags = SPECIAL | SMALL | ZEROPAD; 592 - spec.base = 16; 593 - spec.precision = -1; 594 - 595 - return number(buf, end, num, spec); 596 + return number(buf, end, num, special_hex_spec(size)); 596 597 } 597 598 598 599 static void move_right(char *buf, char *end, unsigned len, unsigned spaces) ··· 1165 1164 char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")]; 1166 1165 char *p = sym, *pend = sym + sizeof(sym); 1167 1166 1168 - struct printf_spec range_spec = { 1169 - .field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */ 1170 - .flags = SPECIAL | SMALL | ZEROPAD, 1171 - .base = 16, 1172 - .precision = -1, 1173 - }; 1174 - 1175 1167 if (check_pointer(&buf, end, range, spec)) 1176 1168 return buf; 1177 1169 1178 1170 p = string_nocheck(p, pend, "[range ", default_str_spec); 1179 - p = hex_range(p, pend, range->start, range->end, range_spec); 1171 + p = hex_range(p, pend, range->start, range->end, special_hex_spec(sizeof(range->start))); 1180 1172 *p++ = ']'; 1181 1173 *p = '\0'; 1182 1174 ··· 1922 1928 bool found = true; 1923 1929 int count = 2; 1924 1930 1925 - if (check_pointer(&buf, end, tm, spec)) 1926 - return buf; 1927 - 1928 1931 switch (fmt[count]) { 1929 1932 case 'd': 1930 1933 have_t = false; ··· 1984 1993 } 1985 1994 1986 1995 static noinline_for_stack 1996 + char *timespec64_str(char *buf, char *end, const struct timespec64 *ts, 1997 + struct printf_spec spec, const char *fmt) 1998 + { 1999 + static const struct printf_spec 
default_dec09_spec = { 2000 + .base = 10, 2001 + .field_width = 9, 2002 + .precision = -1, 2003 + .flags = ZEROPAD, 2004 + }; 2005 + 2006 + if (fmt[2] == 'p') 2007 + buf = number(buf, end, ts->tv_sec, default_dec_spec); 2008 + else 2009 + buf = time64_str(buf, end, ts->tv_sec, spec, fmt); 2010 + if (buf < end) 2011 + *buf = '.'; 2012 + buf++; 2013 + 2014 + return number(buf, end, ts->tv_nsec, default_dec09_spec); 2015 + } 2016 + 2017 + static noinline_for_stack 1987 2018 char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec, 1988 2019 const char *fmt) 1989 2020 { 2021 + if (check_pointer(&buf, end, ptr, spec)) 2022 + return buf; 2023 + 1990 2024 switch (fmt[1]) { 1991 2025 case 'R': 1992 2026 return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt); 2027 + case 'S': 2028 + return timespec64_str(buf, end, (const struct timespec64 *)ptr, spec, fmt); 1993 2029 case 'T': 1994 2030 return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt); 1995 2031 default: ··· 2480 2462 * - 'd[234]' For a dentry name (optionally 2-4 last components) 2481 2463 * - 'D[234]' Same as 'd' but for a struct file 2482 2464 * - 'g' For block_device name (gendisk + partition number) 2483 - * - 't[RT][dt][r][s]' For time and date as represented by: 2465 + * - 't[RST][dt][r][s]' For time and date as represented by: 2484 2466 * R struct rtc_time 2467 + * S struct timespec64 2485 2468 * T time64_t 2469 + * - 'tSp' For time represented by struct timespec64 printed as <seconds>.<nanoseconds> 2486 2470 * - 'C' For a clock, it prints the name (Common Clock Framework) or address 2487 2471 * (legacy clock framework) of the clock 2488 2472 * - 'G' For flags to be printed as a collection of symbolic strings that would ··· 2903 2883 2904 2884 case FORMAT_STATE_NUM: { 2905 2885 unsigned long long num; 2906 - if (fmt.size <= sizeof(int)) 2907 - num = convert_num_spec(va_arg(args, int), fmt.size, spec); 2908 - else 2886 + 2887 + if (fmt.size > sizeof(int)) 2909 2888 num = 
va_arg(args, long long); 2889 + else 2890 + num = convert_num_spec(va_arg(args, int), fmt.size, spec); 2910 2891 str = number(str, end, num, spec); 2911 2892 continue; 2912 2893 } ··· 3075 3054 * @fmt: The format string to use 3076 3055 * @args: Arguments for the format string 3077 3056 * 3078 - * The function returns the number of characters written 3079 - * into @buf. Use vsnprintf() or vscnprintf() in order to avoid 3057 + * The return value is the number of characters written into @buf not including 3058 + * the trailing '\0'. Use vsnprintf() or vscnprintf() in order to avoid 3080 3059 * buffer overflows. 3081 3060 * 3082 3061 * If you're not already dealing with a va_list consider using sprintf(). ··· 3095 3074 * @fmt: The format string to use 3096 3075 * @...: Arguments for the format string 3097 3076 * 3098 - * The function returns the number of characters written 3099 - * into @buf. Use snprintf() or scnprintf() in order to avoid 3077 + * The return value is the number of characters written into @buf not including 3078 + * the trailing '\0'. Use snprintf() or scnprintf() in order to avoid 3100 3079 * buffer overflows. 3101 3080 * 3102 3081 * See the vsnprintf() documentation for format string extensions over C99. ··· 3415 3394 goto out; 3416 3395 3417 3396 case FORMAT_STATE_NUM: 3418 - if (fmt.size > sizeof(int)) { 3397 + if (fmt.size > sizeof(int)) 3419 3398 num = get_arg(long long); 3420 - } else { 3399 + else 3421 3400 num = convert_num_spec(get_arg(int), fmt.size, spec); 3422 - } 3423 3401 str = number(str, end, num, spec); 3424 3402 continue; 3425 3403 }
+2 -4
net/ceph/messenger_v2.c
··· 1538 1538 struct timespec64 now; 1539 1539 1540 1540 ktime_get_real_ts64(&now); 1541 - dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec, 1542 - now.tv_nsec); 1541 + dout("%s con %p timestamp %ptSp\n", __func__, con, &now); 1543 1542 1544 1543 ceph_encode_timespec64(ts, &now); 1545 1544 ··· 2731 2732 ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad); 2732 2733 ceph_decode_timespec64(&con->last_keepalive_ack, p); 2733 2734 2734 - dout("%s con %p timestamp %lld.%09ld\n", __func__, con, 2735 - con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec); 2735 + dout("%s con %p timestamp %ptSp\n", __func__, con, &con->last_keepalive_ack); 2736 2736 2737 2737 return 0; 2738 2738