Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.11-rc1 837 lines 19 kB view raw
/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#include <linux/zlib.h>
#endif
#ifdef CONFIG_PSTORE_LZO_COMPRESS
#include <linux/lzo.h>
#endif
#ifdef CONFIG_PSTORE_LZ4_COMPRESS
#include <linux/lz4.h>
#endif
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore - see
 * whether the system is actually still running well enough
 * to let someone see the entry
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option is not safe, it may lead to further "
		 "corruption on Oopses)");

/* Set when a dump was written; polled by pstore_timefunc() below. */
static int pstore_new_entry;

static void pstore_timefunc(unsigned long);
static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * pstore_lock just protects "psinfo" during
 * calls to pstore_register()
 */
static DEFINE_SPINLOCK(pstore_lock);
struct pstore_info *psinfo;

static char *backend;

/* Compression parameters */
#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;
#else
static unsigned char *workspace;
#endif

/*
 * Interface implemented by each compression algorithm; exactly one
 * of the backends below is selected at build time via zbackend.
 */
struct pstore_zbackend {
	int (*compress)(const void *in, void *out, size_t inlen, size_t outlen);
	int (*decompress)(void *in, void *out, size_t inlen, size_t outlen);
	void (*allocate)(void);
	void (*free)(void);

	const char *name;
};

/* Oversized staging buffer: text is compressed from here into psinfo->buf. */
static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;

/* Update how much of the console log is captured per dump. */
void pstore_set_kmsg_bytes(int bytes)
{
	kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int oopscount;

/* Human-readable tag for the dump reason; written into each record header. */
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
	switch (reason) {
	case KMSG_DUMP_PANIC:
		return "Panic";
	case KMSG_DUMP_OOPS:
		return "Oops";
	case KMSG_DUMP_EMERG:
		return "Emergency";
	case KMSG_DUMP_RESTART:
		return "Restart";
	case KMSG_DUMP_HALT:
		return "Halt";
	case KMSG_DUMP_POWEROFF:
		return "Poweroff";
	default:
		return "Unknown";
	}
}

/*
 * Decide whether the dump path may sleep/spin on psinfo->buf_lock.
 * Returns true when blocking could deadlock (NMI, panic, emergency
 * restart), in which case callers only trylock.
 */
bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
	/*
	 * In case of NMI path, pstore shouldn't be blocked
	 * regardless of reason.
	 */
	if (in_nmi())
		return true;

	switch (reason) {
	/* In panic case, other cpus are stopped by smp_send_stop(). */
	case KMSG_DUMP_PANIC:
	/* Emergency restart shouldn't be blocked by spin lock. */
	case KMSG_DUMP_EMERG:
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);

#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
/* Derived from logfs_compress() */
static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
						MEM_LEVEL, Z_DEFAULT_STRATEGY);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	err = zlib_deflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_deflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	/* Compression that doesn't shrink the data is treated as failure. */
	if (stream.total_out >= stream.total_in)
		goto error;

	ret = stream.total_out;
error:
	return ret;
}

/* Derived from logfs_uncompress */
static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen)
{
	int err, ret;

	ret = -EIO;
	err = zlib_inflateInit2(&stream, WINDOW_BITS);
	if (err != Z_OK)
		goto error;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	err = zlib_inflate(&stream, Z_FINISH);
	if (err != Z_STREAM_END)
		goto error;

	err = zlib_inflateEnd(&stream);
	if (err != Z_OK)
		goto error;

	ret = stream.total_out;
error:
	return ret;
}

/*
 * Size big_oops_buf from an expected-compression-ratio heuristic keyed
 * off the backend's record size, then allocate it plus the zlib
 * workspace.  On any allocation failure, fall back to uncompressed
 * operation (big_oops_buf stays NULL).
 */
static void allocate_zlib(void)
{
	size_t size;
	size_t cmpr;

	switch (psinfo->bufsize) {
	/* buffer range for efivars */
	case 1000 ... 2000:
		cmpr = 56;
		break;
	case 2001 ... 3000:
		cmpr = 54;
		break;
	case 3001 ... 3999:
		cmpr = 52;
		break;
	/* buffer range for nvram, erst */
	case 4000 ... 10000:
		cmpr = 45;
		break;
	default:
		cmpr = 60;
		break;
	}

	big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
	if (big_oops_buf) {
		size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
			zlib_inflate_workspacesize());
		stream.workspace = kmalloc(size, GFP_KERNEL);
		if (!stream.workspace) {
			pr_err("No memory for compression workspace; skipping compression\n");
			kfree(big_oops_buf);
			big_oops_buf = NULL;
		}
	} else {
		pr_err("No memory for uncompressed data; skipping compression\n");
		stream.workspace = NULL;
	}

}

static void free_zlib(void)
{
	kfree(stream.workspace);
	stream.workspace = NULL;
	kfree(big_oops_buf);
	big_oops_buf = NULL;
	big_oops_buf_sz = 0;
}

static struct pstore_zbackend backend_zlib = {
	.compress	= compress_zlib,
	.decompress	= decompress_zlib,
	.allocate	= allocate_zlib,
	.free		= free_zlib,
	.name		= "zlib",
};
#endif

#ifdef CONFIG_PSTORE_LZO_COMPRESS
static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen)
{
	int ret;

	ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace);
	if (ret != LZO_E_OK) {
		pr_err("lzo_compress error, ret = %d!\n", ret);
		return -EIO;
	}

	return outlen;
}

static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret;

	ret = lzo1x_decompress_safe(in, inlen, out, &outlen);
	if (ret != LZO_E_OK) {
		pr_err("lzo_decompress error, ret = %d!\n", ret);
		return -EIO;
	}

	return outlen;
}

/* Allocate staging buffer sized for LZO worst-case expansion. */
static void allocate_lzo(void)
{
	big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize);
	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
	if (big_oops_buf) {
		workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
		if (!workspace) {
			pr_err("No memory for compression workspace; skipping compression\n");
			kfree(big_oops_buf);
			big_oops_buf = NULL;
		}
	} else {
		pr_err("No memory for uncompressed data; skipping compression\n");
		workspace = NULL;
	}
}

static void free_lzo(void)
{
	kfree(workspace);
	kfree(big_oops_buf);
	big_oops_buf = NULL;
	big_oops_buf_sz = 0;
}

static struct pstore_zbackend backend_lzo = {
	.compress	= compress_lzo,
	.decompress	= decompress_lzo,
	.allocate	= allocate_lzo,
	.free		= free_lzo,
	.name		= "lzo",
};
#endif

#ifdef CONFIG_PSTORE_LZ4_COMPRESS
static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
{
	int ret;

	ret = LZ4_compress_default(in, out, inlen, outlen, workspace);
	if (!ret) {
		pr_err("LZ4_compress_default error; compression failed!\n");
		return -EIO;
	}

	return ret;
}

static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
	int ret;

	ret = LZ4_decompress_safe(in, out, inlen, outlen);
	if (ret < 0) {
		/*
		 * LZ4_decompress_safe will return an error code
		 * (< 0) if decompression failed
		 */
		pr_err("LZ4_decompress_safe error, ret = %d!\n", ret);
		return -EIO;
	}

	return ret;
}

/* Allocate staging buffer sized for LZ4 worst-case expansion. */
static void allocate_lz4(void)
{
	big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize);
	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
	if (big_oops_buf) {
		workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!workspace) {
			pr_err("No memory for compression workspace; skipping compression\n");
			kfree(big_oops_buf);
			big_oops_buf = NULL;
		}
	} else {
		pr_err("No memory for uncompressed data; skipping compression\n");
		workspace = NULL;
	}
}

static void free_lz4(void)
{
	kfree(workspace);
	kfree(big_oops_buf);
	big_oops_buf = NULL;
	big_oops_buf_sz = 0;
}

static struct pstore_zbackend backend_lz4 = {
	.compress	= compress_lz4,
	.decompress	= decompress_lz4,
	.allocate	= allocate_lz4,
	.free		= free_lz4,
	.name		= "lz4",
};
#endif

/* Build-time selection of the single active compression backend. */
static struct pstore_zbackend *zbackend =
#if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
	&backend_zlib;
#elif defined(CONFIG_PSTORE_LZO_COMPRESS)
	&backend_lzo;
#elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
	&backend_lz4;
#else
	NULL;
#endif

static int pstore_compress(const void *in, void *out,
			   size_t inlen, size_t outlen)
{
	if (zbackend)
		return zbackend->compress(in, out, inlen, outlen);
	else
		return -EIO;
}

static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
{
	if (zbackend)
		return zbackend->decompress(in, out, inlen, outlen);
	else
		return -EIO;
}

static void allocate_buf_for_compression(void)
{
	if (zbackend) {
		pr_info("using %s compression\n", zbackend->name);
		zbackend->allocate();
	} else {
		pr_err("allocate compression buffer error!\n");
	}
}

static void free_buf_for_compression(void)
{
	if (zbackend)
		zbackend->free();
	else
		pr_err("free compression buffer error!\n");
}

/*
 * Called when compression fails, since the printk buffer
 * would be fetched for compression calling it again when
 * compression fails would have moved the iterator of
 * printk buffer which results in fetching old contents.
 * Copy the recent messages from big_oops_buf to psinfo->buf
 */
static size_t copy_kmsg_to_buffer(int hsize, size_t len)
{
	size_t total_len;
	size_t diff;

	total_len = hsize + len;

	if (total_len > psinfo->bufsize) {
		/* Keep the header plus the most recent tail of the text. */
		diff = total_len - psinfo->bufsize + hsize;
		memcpy(psinfo->buf, big_oops_buf, hsize);
		memcpy(psinfo->buf + hsize, big_oops_buf + diff,
					psinfo->bufsize - hsize);
		total_len = psinfo->bufsize;
	} else
		memcpy(psinfo->buf, big_oops_buf, total_len);

	return total_len;
}

/*
 * callback from kmsg_dump. (s2,l2) has the most recently
 * written bytes, older bytes are in (s1,l1). Save as much
 * as we can from the end of the buffer.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
			enum kmsg_dump_reason reason)
{
	unsigned long	total = 0;
	const char	*why;
	u64		id;
	unsigned int	part = 1;
	unsigned long	flags = 0;
	int		is_locked;
	int		ret;

	why = get_reason_str(reason);

	if (pstore_cannot_block_path(reason)) {
		/* Atomic context: only trylock; bail out rather than spin. */
		is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
		if (!is_locked) {
			pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
				       , in_nmi() ? "NMI" : why);
			return;
		}
	} else {
		spin_lock_irqsave(&psinfo->buf_lock, flags);
		is_locked = 1;
	}
	oopscount++;
	while (total < kmsg_bytes) {
		char *dst;
		unsigned long size;
		int hsize;
		int zipped_len = -1;
		size_t len;
		bool compressed = false;
		size_t total_len;

		/* Stage into big_oops_buf when compression is available. */
		if (big_oops_buf && is_locked) {
			dst = big_oops_buf;
			size = big_oops_buf_sz;
		} else {
			dst = psinfo->buf;
			size = psinfo->bufsize;
		}

		hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, part);
		size -= hsize;

		if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
					  size, &len))
			break;

		if (big_oops_buf && is_locked) {
			zipped_len = pstore_compress(dst, psinfo->buf,
						hsize + len, psinfo->bufsize);

			if (zipped_len > 0) {
				compressed = true;
				total_len = zipped_len;
			} else {
				/* Compression failed: salvage uncompressed tail. */
				total_len = copy_kmsg_to_buffer(hsize, len);
			}
		} else {
			total_len = hsize + len;
		}

		ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
				    oopscount, compressed, total_len, psinfo);
		if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
			pstore_new_entry = 1;

		total += total_len;
		part++;
	}
	if (is_locked)
		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}

static struct kmsg_dumper pstore_dumper = {
	.dump = pstore_dump,
};

/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
	kmsg_dump_register(&pstore_dumper);
}

static void pstore_unregister_kmsg(void)
{
	kmsg_dump_unregister(&pstore_dumper);
}

#ifdef CONFIG_PSTORE_CONSOLE
/*
 * Console front end: mirror console output into the backend in
 * bufsize-limited chunks.  During an oops, only trylock the buffer
 * lock and give up on contention rather than risk deadlock.
 */
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	const char *e = s + c;

	while (s < e) {
		unsigned long flags;
		u64 id;

		if (c > psinfo->bufsize)
			c = psinfo->bufsize;

		if (oops_in_progress) {
			if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
				break;
		} else {
			spin_lock_irqsave(&psinfo->buf_lock, flags);
		}
		psinfo->write_buf(PSTORE_TYPE_CONSOLE, 0, &id, 0,
				  s, 0, c, psinfo);
		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
		s += c;
		c = e - s;
	}
}

static struct console pstore_console = {
	.name	= "pstore",
	.write	= pstore_console_write,
	.flags	= CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
	.index	= -1,
};

static void pstore_register_console(void)
{
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif

/*
 * Compat shim for backends that only implement write_buf(): the data
 * is already staged in psinfo->buf by the dump path.
 */
static int pstore_write_compat(enum pstore_type_id type,
			       enum kmsg_dump_reason reason,
			       u64 *id, unsigned int part, int count,
			       bool compressed, size_t size,
			       struct pstore_info *psi)
{
	return psi->write_buf(type, reason, id, part, psinfo->buf, compressed,
			     size, psi);
}

/*
 * Compat shim for backends without write_buf_user(): copy the
 * userspace data into psinfo->buf in bufsize-limited chunks, writing
 * each chunk under buf_lock.  Returns size on success, negative errno
 * on copy or write failure.
 */
static int pstore_write_buf_user_compat(enum pstore_type_id type,
			       enum kmsg_dump_reason reason,
			       u64 *id, unsigned int part,
			       const char __user *buf,
			       bool compressed, size_t size,
			       struct pstore_info *psi)
{
	unsigned long flags = 0;
	size_t i, bufsize = size;
	long ret = 0;

	if (unlikely(!access_ok(VERIFY_READ, buf, size)))
		return -EFAULT;
	if (bufsize > psinfo->bufsize)
		bufsize = psinfo->bufsize;
	spin_lock_irqsave(&psinfo->buf_lock, flags);
	for (i = 0; i < size; ) {
		size_t c = min(size - i, bufsize);

		ret = __copy_from_user(psinfo->buf, buf + i, c);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			break;
		}
		ret = psi->write_buf(type, reason, id, part, psinfo->buf,
				     compressed, c, psi);
		if (unlikely(ret < 0))
			break;
		i += c;
	}
	spin_unlock_irqrestore(&psinfo->buf_lock, flags);
	return unlikely(ret < 0) ? ret : size;
}

/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
int pstore_register(struct pstore_info *psi)
{
	struct module *owner = psi->owner;

	if (backend && strcmp(backend, psi->name))
		return -EPERM;

	spin_lock(&pstore_lock);
	if (psinfo) {
		/* Only one backend may be registered at a time. */
		spin_unlock(&pstore_lock);
		return -EBUSY;
	}

	if (!psi->write)
		psi->write = pstore_write_compat;
	if (!psi->write_buf_user)
		psi->write_buf_user = pstore_write_buf_user_compat;
	psinfo = psi;
	mutex_init(&psinfo->read_mutex);
	spin_unlock(&pstore_lock);

	if (owner && !try_module_get(owner)) {
		psinfo = NULL;
		return -EINVAL;
	}

	allocate_buf_for_compression();

	if (pstore_is_mounted())
		pstore_get_records(0);

	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_register_kmsg();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_register_console();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_register_ftrace();
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_register_pmsg();

	if (pstore_update_ms >= 0) {
		pstore_timer.expires = jiffies +
			msecs_to_jiffies(pstore_update_ms);
		add_timer(&pstore_timer);
	}

	/*
	 * Update the module parameter backend, so it is visible
	 * through /sys/module/pstore/parameters/backend
	 */
	backend = psi->name;

	module_put(owner);

	pr_info("Registered %s as persistent store backend\n", psi->name);

	return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);

/* Tear down every front end registered for this backend, in reverse order. */
void pstore_unregister(struct pstore_info *psi)
{
	if (psi->flags & PSTORE_FLAGS_PMSG)
		pstore_unregister_pmsg();
	if (psi->flags & PSTORE_FLAGS_FTRACE)
		pstore_unregister_ftrace();
	if (psi->flags & PSTORE_FLAGS_CONSOLE)
		pstore_unregister_console();
	if (psi->flags & PSTORE_FLAGS_DMESG)
		pstore_unregister_kmsg();

	free_buf_for_compression();

	psinfo = NULL;
	backend = NULL;
}
EXPORT_SYMBOL_GPL(pstore_unregister);

/*
 * Read all the records from the persistent store. Create
 * files in our filesystem. Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_records(int quiet)
{
	struct pstore_info *psi = psinfo;
	char			*buf = NULL;
	ssize_t			size;
	u64			id;
	int			count;
	enum pstore_type_id	type;
	struct timespec		time;
	int			failed = 0, rc;
	bool			compressed;
	int			unzipped_len = -1;
	ssize_t			ecc_notice_size = 0;

	if (!psi)
		return;

	mutex_lock(&psi->read_mutex);
	if (psi->open && psi->open(psi))
		goto out;

	while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
				 &ecc_notice_size, psi)) > 0) {
		if (compressed && (type == PSTORE_TYPE_DMESG)) {
			if (big_oops_buf)
				unzipped_len = pstore_decompress(buf,
							big_oops_buf, size,
							big_oops_buf_sz);

			if (unzipped_len > 0) {
				/* Append the ECC notice after the payload. */
				if (ecc_notice_size)
					memcpy(big_oops_buf + unzipped_len,
					       buf + size, ecc_notice_size);
				kfree(buf);
				buf = big_oops_buf;
				size = unzipped_len;
				compressed = false;
			} else {
				pr_err("decompression failed;returned %d\n",
				       unzipped_len);
				compressed = true;
			}
		}
		rc = pstore_mkfile(type, psi->name, id, count, buf,
				   compressed, size + ecc_notice_size,
				   time, psi);
		if (unzipped_len < 0) {
			/* Free buffer other than big oops */
			kfree(buf);
			buf = NULL;
		} else
			unzipped_len = -1;
		if (rc && (rc != -EEXIST || !quiet))
			failed++;
	}
	if (psi->close)
		psi->close(psi);
out:
	mutex_unlock(&psi->read_mutex);

	if (failed)
		pr_warn("failed to load %d record(s) from '%s'\n",
			failed, psi->name);
}

static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}

/*
 * Periodic timer: when a new dump has been written, rescan the
 * backend from process context (via pstore_work), then re-arm.
 */
static void pstore_timefunc(unsigned long dummy)
{
	if (pstore_new_entry) {
		pstore_new_entry = 0;
		schedule_work(&pstore_work);
	}

	mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
}

module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");