Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'driver-core-3.5-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core and printk fixes from Greg Kroah-Hartman:
"Here are some fixes for 3.5-rc4 that resolve the kmsg problems that
people have reported showing up after the printk and kmsg changes went
into 3.5-rc1. There are also a smattering of other tiny fixes for the
extcon and hyper-v drivers that people have reported.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>"

* tag 'driver-core-3.5-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core:
extcon: max8997: Add missing kfree for info->edev in max8997_muic_remove()
extcon: Set platform drvdata in gpio_extcon_probe() and fix irq leak
extcon: Fix wrong index in max8997_extcon_cable[]
kmsg - kmsg_dump() fix CONFIG_PRINTK=n compilation
printk: return -EINVAL if the message len is bigger than the buf size
printk: use mutex lock to stop syslog_seq from going wild
kmsg - kmsg_dump() use iterator to receive log buffer content
vme: change maintainer e-mail address
Extcon: Don't try to create duplicate link names
driver core: fixup reversed deferred probe order
printk: Fix alignment of buf causing crash on ARM EABI
Tools: hv: verify origin of netlink connector message

+296 -157
+1 -1
MAINTAINERS
··· 7421 7421 7422 7422 VME SUBSYSTEM 7423 7423 M: Martyn Welch <martyn.welch@ge.com> 7424 - M: Manohar Vanga <manohar.vanga@cern.ch> 7424 + M: Manohar Vanga <manohar.vanga@gmail.com> 7425 7425 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 7426 7426 L: devel@driverdev.osuosl.org 7427 7427 S: Maintained
+7 -54
arch/powerpc/platforms/pseries/nvram.c
··· 68 68 }; 69 69 70 70 static void oops_to_nvram(struct kmsg_dumper *dumper, 71 - enum kmsg_dump_reason reason, 72 - const char *old_msgs, unsigned long old_len, 73 - const char *new_msgs, unsigned long new_len); 71 + enum kmsg_dump_reason reason); 74 72 75 73 static struct kmsg_dumper nvram_kmsg_dumper = { 76 74 .dump = oops_to_nvram ··· 502 504 } 503 505 504 506 /* 505 - * Try to capture the last capture_len bytes of the printk buffer. Return 506 - * the amount actually captured. 507 - */ 508 - static size_t capture_last_msgs(const char *old_msgs, size_t old_len, 509 - const char *new_msgs, size_t new_len, 510 - char *captured, size_t capture_len) 511 - { 512 - if (new_len >= capture_len) { 513 - memcpy(captured, new_msgs + (new_len - capture_len), 514 - capture_len); 515 - return capture_len; 516 - } else { 517 - /* Grab the end of old_msgs. */ 518 - size_t old_tail_len = min(old_len, capture_len - new_len); 519 - memcpy(captured, old_msgs + (old_len - old_tail_len), 520 - old_tail_len); 521 - memcpy(captured + old_tail_len, new_msgs, new_len); 522 - return old_tail_len + new_len; 523 - } 524 - } 525 - 526 - /* 527 507 * Are we using the ibm,rtas-log for oops/panic reports? And if so, 528 508 * would logging this oops/panic overwrite an RTAS event that rtas_errd 529 509 * hasn't had a chance to read and process? Return 1 if so, else 0. ··· 515 539 && last_unread_rtas_event 516 540 && get_seconds() - last_unread_rtas_event <= 517 541 NVRAM_RTAS_READ_TIMEOUT); 518 - } 519 - 520 - /* Squeeze out each line's <n> severity prefix. */ 521 - static size_t elide_severities(char *buf, size_t len) 522 - { 523 - char *in, *out, *buf_end = buf + len; 524 - /* Assume a <n> at the very beginning marks the start of a line. 
*/ 525 - int newline = 1; 526 - 527 - in = out = buf; 528 - while (in < buf_end) { 529 - if (newline && in+3 <= buf_end && 530 - *in == '<' && isdigit(in[1]) && in[2] == '>') { 531 - in += 3; 532 - newline = 0; 533 - } else { 534 - newline = (*in == '\n'); 535 - *out++ = *in++; 536 - } 537 - } 538 - return out - buf; 539 542 } 540 543 541 544 /* Derived from logfs_compress() */ ··· 574 619 * partition. If that's too much, go back and capture uncompressed text. 575 620 */ 576 621 static void oops_to_nvram(struct kmsg_dumper *dumper, 577 - enum kmsg_dump_reason reason, 578 - const char *old_msgs, unsigned long old_len, 579 - const char *new_msgs, unsigned long new_len) 622 + enum kmsg_dump_reason reason) 580 623 { 581 624 static unsigned int oops_count = 0; 582 625 static bool panicking = false; ··· 613 660 return; 614 661 615 662 if (big_oops_buf) { 616 - text_len = capture_last_msgs(old_msgs, old_len, 617 - new_msgs, new_len, big_oops_buf, big_oops_buf_sz); 618 - text_len = elide_severities(big_oops_buf, text_len); 663 + kmsg_dump_get_buffer(dumper, false, 664 + big_oops_buf, big_oops_buf_sz, &text_len); 619 665 rc = zip_oops(text_len); 620 666 } 621 667 if (rc != 0) { 622 - text_len = capture_last_msgs(old_msgs, old_len, 623 - new_msgs, new_len, oops_data, oops_data_sz); 668 + kmsg_dump_rewind(dumper); 669 + kmsg_dump_get_buffer(dumper, true, 670 + oops_data, oops_data_sz, &text_len); 624 671 err_type = ERR_TYPE_KERNEL_PANIC; 625 672 *oops_len = (u16) text_len; 626 673 }
+5 -8
arch/x86/platform/mrst/early_printk_mrst.c
··· 110 110 static int dumper_registered; 111 111 112 112 static void dw_kmsg_dump(struct kmsg_dumper *dumper, 113 - enum kmsg_dump_reason reason, 114 - const char *s1, unsigned long l1, 115 - const char *s2, unsigned long l2) 113 + enum kmsg_dump_reason reason) 116 114 { 117 - int i; 115 + static char line[1024]; 116 + size_t len; 118 117 119 118 /* When run to this, we'd better re-init the HW */ 120 119 mrst_early_console_init(); 121 120 122 - for (i = 0; i < l1; i++) 123 - early_mrst_console.write(&early_mrst_console, s1 + i, 1); 124 - for (i = 0; i < l2; i++) 125 - early_mrst_console.write(&early_mrst_console, s2 + i, 1); 121 + while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) 122 + early_mrst_console.write(&early_mrst_console, line, len); 126 123 } 127 124 128 125 /* Set the ratio rate to 115200, 8n1, IRQ disabled */
+1 -1
drivers/base/dd.c
··· 100 100 mutex_lock(&deferred_probe_mutex); 101 101 if (list_empty(&dev->p->deferred_probe)) { 102 102 dev_dbg(dev, "Added to deferred list\n"); 103 - list_add(&dev->p->deferred_probe, &deferred_probe_pending_list); 103 + list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list); 104 104 } 105 105 mutex_unlock(&deferred_probe_mutex); 106 106 }
+3 -2
drivers/extcon/extcon-max8997.c
··· 116 116 [5] = "Charge-downstream", 117 117 [6] = "MHL", 118 118 [7] = "Dock-desk", 119 - [7] = "Dock-card", 120 - [8] = "JIG", 119 + [8] = "Dock-card", 120 + [9] = "JIG", 121 121 122 122 NULL, 123 123 }; ··· 514 514 515 515 extcon_dev_unregister(info->edev); 516 516 517 + kfree(info->edev); 517 518 kfree(info); 518 519 519 520 return 0;
+1 -1
drivers/extcon/extcon_class.c
··· 762 762 #if defined(CONFIG_ANDROID) 763 763 if (switch_class) 764 764 ret = class_compat_create_link(switch_class, edev->dev, 765 - dev); 765 + NULL); 766 766 #endif /* CONFIG_ANDROID */ 767 767 768 768 spin_lock_init(&edev->lock);
+2
drivers/extcon/extcon_gpio.c
··· 125 125 if (ret < 0) 126 126 goto err_request_irq; 127 127 128 + platform_set_drvdata(pdev, extcon_data); 128 129 /* Perform initial detection */ 129 130 gpio_extcon_work(&extcon_data->work.work); 130 131 ··· 147 146 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); 148 147 149 148 cancel_delayed_work_sync(&extcon_data->work); 149 + free_irq(extcon_data->irq, extcon_data); 150 150 gpio_free(extcon_data->gpio); 151 151 extcon_dev_unregister(&extcon_data->edev); 152 152 devm_kfree(&pdev->dev, extcon_data);
+4 -18
drivers/mtd/mtdoops.c
··· 304 304 } 305 305 306 306 static void mtdoops_do_dump(struct kmsg_dumper *dumper, 307 - enum kmsg_dump_reason reason, const char *s1, unsigned long l1, 308 - const char *s2, unsigned long l2) 307 + enum kmsg_dump_reason reason) 309 308 { 310 309 struct mtdoops_context *cxt = container_of(dumper, 311 310 struct mtdoops_context, dump); 312 - unsigned long s1_start, s2_start; 313 - unsigned long l1_cpy, l2_cpy; 314 - char *dst; 315 - 316 - if (reason != KMSG_DUMP_OOPS && 317 - reason != KMSG_DUMP_PANIC) 318 - return; 319 311 320 312 /* Only dump oopses if dump_oops is set */ 321 313 if (reason == KMSG_DUMP_OOPS && !dump_oops) 322 314 return; 323 315 324 - dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ 325 - l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); 326 - l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy); 327 - 328 - s2_start = l2 - l2_cpy; 329 - s1_start = l1 - l1_cpy; 330 - 331 - memcpy(dst, s1 + s1_start, l1_cpy); 332 - memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); 316 + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, 317 + record_size - MTDOOPS_HEADER_SIZE, NULL); 333 318 334 319 /* Panics must be written immediately */ 335 320 if (reason != KMSG_DUMP_OOPS) ··· 360 375 return; 361 376 } 362 377 378 + cxt->dump.max_reason = KMSG_DUMP_OOPS; 363 379 cxt->dump.dump = mtdoops_do_dump; 364 380 err = kmsg_dump_register(&cxt->dump); 365 381 if (err) {
+12 -22
fs/pstore/platform.c
··· 94 94 * as we can from the end of the buffer. 95 95 */ 96 96 static void pstore_dump(struct kmsg_dumper *dumper, 97 - enum kmsg_dump_reason reason, 98 - const char *s1, unsigned long l1, 99 - const char *s2, unsigned long l2) 97 + enum kmsg_dump_reason reason) 100 98 { 101 - unsigned long s1_start, s2_start; 102 - unsigned long l1_cpy, l2_cpy; 103 - unsigned long size, total = 0; 104 - char *dst; 99 + unsigned long total = 0; 105 100 const char *why; 106 101 u64 id; 107 - int hsize, ret; 108 102 unsigned int part = 1; 109 103 unsigned long flags = 0; 110 104 int is_locked = 0; 105 + int ret; 111 106 112 107 why = get_reason_str(reason); 113 108 ··· 114 119 spin_lock_irqsave(&psinfo->buf_lock, flags); 115 120 oopscount++; 116 121 while (total < kmsg_bytes) { 122 + char *dst; 123 + unsigned long size; 124 + int hsize; 125 + size_t len; 126 + 117 127 dst = psinfo->buf; 118 128 hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); 119 129 size = psinfo->bufsize - hsize; 120 130 dst += hsize; 121 131 122 - l2_cpy = min(l2, size); 123 - l1_cpy = min(l1, size - l2_cpy); 124 - 125 - if (l1_cpy + l2_cpy == 0) 132 + if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len)) 126 133 break; 127 134 128 - s2_start = l2 - l2_cpy; 129 - s1_start = l1 - l1_cpy; 130 - 131 - memcpy(dst, s1 + s1_start, l1_cpy); 132 - memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy); 133 - 134 135 ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, 135 - hsize + l1_cpy + l2_cpy, psinfo); 136 + hsize + len, psinfo); 136 137 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) 137 138 pstore_new_entry = 1; 138 - l1 -= l1_cpy; 139 - l2 -= l2_cpy; 140 - total += l1_cpy + l2_cpy; 139 + 140 + total += hsize + len; 141 141 part++; 142 142 } 143 143 if (in_nmi()) {
+38 -7
include/linux/kmsg_dump.h
··· 21 21 * is passed to the kernel. 22 22 */ 23 23 enum kmsg_dump_reason { 24 + KMSG_DUMP_UNDEF, 24 25 KMSG_DUMP_PANIC, 25 26 KMSG_DUMP_OOPS, 26 27 KMSG_DUMP_EMERG, ··· 32 31 33 32 /** 34 33 * struct kmsg_dumper - kernel crash message dumper structure 35 - * @dump: The callback which gets called on crashes. The buffer is passed 36 - * as two sections, where s1 (length l1) contains the older 37 - * messages and s2 (length l2) contains the newer. 38 34 * @list: Entry in the dumper list (private) 35 + * @dump: Call into dumping code which will retrieve the data with 36 + * through the record iterator 37 + * @max_reason: filter for highest reason number that should be dumped 39 38 * @registered: Flag that specifies if this is already registered 40 39 */ 41 40 struct kmsg_dumper { 42 - void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, 43 - const char *s1, unsigned long l1, 44 - const char *s2, unsigned long l2); 45 41 struct list_head list; 46 - int registered; 42 + void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); 43 + enum kmsg_dump_reason max_reason; 44 + bool active; 45 + bool registered; 46 + 47 + /* private state of the kmsg iterator */ 48 + u32 cur_idx; 49 + u32 next_idx; 50 + u64 cur_seq; 51 + u64 next_seq; 47 52 }; 48 53 49 54 #ifdef CONFIG_PRINTK 50 55 void kmsg_dump(enum kmsg_dump_reason reason); 56 + 57 + bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, 58 + char *line, size_t size, size_t *len); 59 + 60 + bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, 61 + char *buf, size_t size, size_t *len); 62 + 63 + void kmsg_dump_rewind(struct kmsg_dumper *dumper); 51 64 52 65 int kmsg_dump_register(struct kmsg_dumper *dumper); 53 66 54 67 int kmsg_dump_unregister(struct kmsg_dumper *dumper); 55 68 #else 56 69 static inline void kmsg_dump(enum kmsg_dump_reason reason) 70 + { 71 + } 72 + 73 + static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, 74 + const char 
*line, size_t size, size_t *len) 75 + { 76 + return false; 77 + } 78 + 79 + static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, 80 + char *buf, size_t size, size_t *len) 81 + { 82 + return false; 83 + } 84 + 85 + static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) 57 86 { 58 87 } 59 88
+215 -40
kernel/printk.c
··· 227 227 #define LOG_LINE_MAX 1024 228 228 229 229 /* record buffer */ 230 - #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 230 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 231 231 #define LOG_ALIGN 4 232 232 #else 233 - #define LOG_ALIGN 8 233 + #define LOG_ALIGN __alignof__(struct log) 234 234 #endif 235 235 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 236 236 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); ··· 414 414 if (!user) 415 415 return -EBADF; 416 416 417 - mutex_lock(&user->lock); 417 + ret = mutex_lock_interruptible(&user->lock); 418 + if (ret) 419 + return ret; 418 420 raw_spin_lock(&logbuf_lock); 419 421 while (user->seq == log_next_seq) { 420 422 if (file->f_flags & O_NONBLOCK) { ··· 880 878 syslog_seq++; 881 879 raw_spin_unlock_irq(&logbuf_lock); 882 880 883 - if (len > 0 && copy_to_user(buf, text, len)) 881 + if (len > size) 882 + len = -EINVAL; 883 + else if (len > 0 && copy_to_user(buf, text, len)) 884 884 len = -EFAULT; 885 885 886 886 kfree(text); ··· 913 909 /* 914 910 * Find first record that fits, including all following records, 915 911 * into the user-provided buffer for this dump. 
916 - */ 912 + */ 917 913 seq = clear_seq; 918 914 idx = clear_idx; 919 915 while (seq < log_next_seq) { ··· 923 919 idx = log_next(idx); 924 920 seq++; 925 921 } 922 + 923 + /* move first record forward until length fits into the buffer */ 926 924 seq = clear_seq; 927 925 idx = clear_idx; 928 926 while (len > size && seq < log_next_seq) { ··· 935 929 seq++; 936 930 } 937 931 938 - /* last message in this dump */ 932 + /* last message fitting into this dump */ 939 933 next_seq = log_next_seq; 940 934 941 935 len = 0; ··· 980 974 { 981 975 bool clear = false; 982 976 static int saved_console_loglevel = -1; 977 + static DEFINE_MUTEX(syslog_mutex); 983 978 int error; 984 979 985 980 error = check_syslog_permissions(type, from_file); ··· 1007 1000 error = -EFAULT; 1008 1001 goto out; 1009 1002 } 1010 - error = wait_event_interruptible(log_wait, 1011 - syslog_seq != log_next_seq); 1003 + error = mutex_lock_interruptible(&syslog_mutex); 1012 1004 if (error) 1013 1005 goto out; 1006 + error = wait_event_interruptible(log_wait, 1007 + syslog_seq != log_next_seq); 1008 + if (error) { 1009 + mutex_unlock(&syslog_mutex); 1010 + goto out; 1011 + } 1014 1012 error = syslog_print(buf, len); 1013 + mutex_unlock(&syslog_mutex); 1015 1014 break; 1016 1015 /* Read/clear last kernel messages */ 1017 1016 case SYSLOG_ACTION_READ_CLEAR: ··· 2313 2300 * kmsg_dump - dump kernel log to kernel message dumpers. 2314 2301 * @reason: the reason (oops, panic etc) for dumping 2315 2302 * 2316 - * Iterate through each of the dump devices and call the oops/panic 2317 - * callbacks with the log buffer. 2303 + * Call each of the registered dumper's dump() callback, which can 2304 + * retrieve the kmsg records with kmsg_dump_get_line() or 2305 + * kmsg_dump_get_buffer(). 
2318 2306 */ 2319 2307 void kmsg_dump(enum kmsg_dump_reason reason) 2320 2308 { 2321 - u64 idx; 2322 2309 struct kmsg_dumper *dumper; 2323 - const char *s1, *s2; 2324 - unsigned long l1, l2; 2325 2310 unsigned long flags; 2326 2311 2327 2312 if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) 2328 2313 return; 2329 2314 2330 - /* Theoretically, the log could move on after we do this, but 2331 - there's not a lot we can do about that. The new messages 2332 - will overwrite the start of what we dump. */ 2333 - 2334 - raw_spin_lock_irqsave(&logbuf_lock, flags); 2335 - if (syslog_seq < log_first_seq) 2336 - idx = syslog_idx; 2337 - else 2338 - idx = log_first_idx; 2339 - 2340 - if (idx > log_next_idx) { 2341 - s1 = log_buf; 2342 - l1 = log_next_idx; 2343 - 2344 - s2 = log_buf + idx; 2345 - l2 = log_buf_len - idx; 2346 - } else { 2347 - s1 = ""; 2348 - l1 = 0; 2349 - 2350 - s2 = log_buf + idx; 2351 - l2 = log_next_idx - idx; 2352 - } 2353 - raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2354 - 2355 2315 rcu_read_lock(); 2356 - list_for_each_entry_rcu(dumper, &dump_list, list) 2357 - dumper->dump(dumper, reason, s1, l1, s2, l2); 2316 + list_for_each_entry_rcu(dumper, &dump_list, list) { 2317 + if (dumper->max_reason && reason > dumper->max_reason) 2318 + continue; 2319 + 2320 + /* initialize iterator with data about the stored records */ 2321 + dumper->active = true; 2322 + 2323 + raw_spin_lock_irqsave(&logbuf_lock, flags); 2324 + dumper->cur_seq = clear_seq; 2325 + dumper->cur_idx = clear_idx; 2326 + dumper->next_seq = log_next_seq; 2327 + dumper->next_idx = log_next_idx; 2328 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2329 + 2330 + /* invoke dumper which will iterate over records */ 2331 + dumper->dump(dumper, reason); 2332 + 2333 + /* reset iterator */ 2334 + dumper->active = false; 2335 + } 2358 2336 rcu_read_unlock(); 2359 2337 } 2338 + 2339 + /** 2340 + * kmsg_dump_get_line - retrieve one kmsg log line 2341 + * @dumper: registered kmsg dumper 2342 + * 
@syslog: include the "<4>" prefixes 2343 + * @line: buffer to copy the line to 2344 + * @size: maximum size of the buffer 2345 + * @len: length of line placed into buffer 2346 + * 2347 + * Start at the beginning of the kmsg buffer, with the oldest kmsg 2348 + * record, and copy one record into the provided buffer. 2349 + * 2350 + * Consecutive calls will return the next available record moving 2351 + * towards the end of the buffer with the youngest messages. 2352 + * 2353 + * A return value of FALSE indicates that there are no more records to 2354 + * read. 2355 + */ 2356 + bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, 2357 + char *line, size_t size, size_t *len) 2358 + { 2359 + unsigned long flags; 2360 + struct log *msg; 2361 + size_t l = 0; 2362 + bool ret = false; 2363 + 2364 + if (!dumper->active) 2365 + goto out; 2366 + 2367 + raw_spin_lock_irqsave(&logbuf_lock, flags); 2368 + if (dumper->cur_seq < log_first_seq) { 2369 + /* messages are gone, move to first available one */ 2370 + dumper->cur_seq = log_first_seq; 2371 + dumper->cur_idx = log_first_idx; 2372 + } 2373 + 2374 + /* last entry */ 2375 + if (dumper->cur_seq >= log_next_seq) { 2376 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2377 + goto out; 2378 + } 2379 + 2380 + msg = log_from_idx(dumper->cur_idx); 2381 + l = msg_print_text(msg, syslog, 2382 + line, size); 2383 + 2384 + dumper->cur_idx = log_next(dumper->cur_idx); 2385 + dumper->cur_seq++; 2386 + ret = true; 2387 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2388 + out: 2389 + if (len) 2390 + *len = l; 2391 + return ret; 2392 + } 2393 + EXPORT_SYMBOL_GPL(kmsg_dump_get_line); 2394 + 2395 + /** 2396 + * kmsg_dump_get_buffer - copy kmsg log lines 2397 + * @dumper: registered kmsg dumper 2398 + * @syslog: include the "<4>" prefixes 2399 + * @line: buffer to copy the line to 2400 + * @size: maximum size of the buffer 2401 + * @len: length of line placed into buffer 2402 + * 2403 + * Start at the end of the kmsg buffer 
and fill the provided buffer 2404 + * with as many of the *youngest* kmsg records that fit into it. 2405 + * If the buffer is large enough, all available kmsg records will be 2406 + * copied with a single call. 2407 + * 2408 + * Consecutive calls will fill the buffer with the next block of 2409 + * available older records, not including the earlier retrieved ones. 2410 + * 2411 + * A return value of FALSE indicates that there are no more records to 2412 + * read. 2413 + */ 2414 + bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, 2415 + char *buf, size_t size, size_t *len) 2416 + { 2417 + unsigned long flags; 2418 + u64 seq; 2419 + u32 idx; 2420 + u64 next_seq; 2421 + u32 next_idx; 2422 + size_t l = 0; 2423 + bool ret = false; 2424 + 2425 + if (!dumper->active) 2426 + goto out; 2427 + 2428 + raw_spin_lock_irqsave(&logbuf_lock, flags); 2429 + if (dumper->cur_seq < log_first_seq) { 2430 + /* messages are gone, move to first available one */ 2431 + dumper->cur_seq = log_first_seq; 2432 + dumper->cur_idx = log_first_idx; 2433 + } 2434 + 2435 + /* last entry */ 2436 + if (dumper->cur_seq >= dumper->next_seq) { 2437 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2438 + goto out; 2439 + } 2440 + 2441 + /* calculate length of entire buffer */ 2442 + seq = dumper->cur_seq; 2443 + idx = dumper->cur_idx; 2444 + while (seq < dumper->next_seq) { 2445 + struct log *msg = log_from_idx(idx); 2446 + 2447 + l += msg_print_text(msg, true, NULL, 0); 2448 + idx = log_next(idx); 2449 + seq++; 2450 + } 2451 + 2452 + /* move first record forward until length fits into the buffer */ 2453 + seq = dumper->cur_seq; 2454 + idx = dumper->cur_idx; 2455 + while (l > size && seq < dumper->next_seq) { 2456 + struct log *msg = log_from_idx(idx); 2457 + 2458 + l -= msg_print_text(msg, true, NULL, 0); 2459 + idx = log_next(idx); 2460 + seq++; 2461 + } 2462 + 2463 + /* last message in next iteration */ 2464 + next_seq = seq; 2465 + next_idx = idx; 2466 + 2467 + l = 0; 2468 + 
while (seq < dumper->next_seq) { 2469 + struct log *msg = log_from_idx(idx); 2470 + 2471 + l += msg_print_text(msg, syslog, 2472 + buf + l, size - l); 2473 + 2474 + idx = log_next(idx); 2475 + seq++; 2476 + } 2477 + 2478 + dumper->next_seq = next_seq; 2479 + dumper->next_idx = next_idx; 2480 + ret = true; 2481 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2482 + out: 2483 + if (len) 2484 + *len = l; 2485 + return ret; 2486 + } 2487 + EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); 2488 + 2489 + /** 2490 + * kmsg_dump_rewind - reset the iterator 2491 + * @dumper: registered kmsg dumper 2492 + * 2493 + * Reset the dumper's iterator so that kmsg_dump_get_line() and 2494 + * kmsg_dump_get_buffer() can be called again and used multiple 2495 + * times within the same dumper.dump() callback. 2496 + */ 2497 + void kmsg_dump_rewind(struct kmsg_dumper *dumper) 2498 + { 2499 + unsigned long flags; 2500 + 2501 + raw_spin_lock_irqsave(&logbuf_lock, flags); 2502 + dumper->cur_seq = clear_seq; 2503 + dumper->cur_idx = clear_idx; 2504 + dumper->next_seq = log_next_seq; 2505 + dumper->next_idx = log_next_idx; 2506 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2507 + } 2508 + EXPORT_SYMBOL_GPL(kmsg_dump_rewind); 2360 2509 #endif
+7 -3
tools/hv/hv_kvp_daemon.c
··· 701 701 pfd.fd = fd; 702 702 703 703 while (1) { 704 + struct sockaddr *addr_p = (struct sockaddr *) &addr; 705 + socklen_t addr_l = sizeof(addr); 704 706 pfd.events = POLLIN; 705 707 pfd.revents = 0; 706 708 poll(&pfd, 1, -1); 707 709 708 - len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0); 710 + len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0, 711 + addr_p, &addr_l); 709 712 710 - if (len < 0) { 711 - syslog(LOG_ERR, "recv failed; error:%d", len); 713 + if (len < 0 || addr.nl_pid) { 714 + syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", 715 + addr.nl_pid, errno, strerror(errno)); 712 716 close(fd); 713 717 return -1; 714 718 }