Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pstore-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore subsystem updates from Kees Cook:
"This expands the supported compressors, fixes some bugs, and finally
adds DT bindings"

* tag 'pstore-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
pstore/ram: add Device Tree bindings
efi-pstore: implement efivars_pstore_exit()
pstore: drop file opened reference count
pstore: add lzo/lz4 compression support
pstore: Cleanup pstore_dump()
pstore: Enable compression on normal path (again)
ramoops: Only unregister when registered

+436 -51
+48
Documentation/devicetree/bindings/misc/ramoops.txt
··· 1 + Ramoops oops/panic logger 2 + ========================= 3 + 4 + ramoops provides persistent RAM storage for oops and panics, so they can be 5 + recovered after a reboot. It is a backend to pstore, so this node is named 6 + "ramoops" after the backend, rather than "pstore" which is the subsystem. 7 + 8 + Parts of this storage may be set aside for other persistent log buffers, such 9 + as kernel log messages, or for optional ECC error-correction data. The total 10 + size of these optional buffers must fit in the reserved region. 11 + 12 + Any remaining space will be used for a circular buffer of oops and panic 13 + records. These records have a configurable size, with a size of 0 indicating 14 + that they should be disabled. 15 + 16 + At least one of "record-size", "console-size", "ftrace-size", or "pmsg-size" 17 + must be set non-zero, but are otherwise optional as listed below. 18 + 19 + 20 + Required properties: 21 + 22 + - compatible: must be "ramoops" 23 + 24 + - memory-region: phandle to a region of memory that is preserved between 25 + reboots 26 + 27 + 28 + Optional properties: 29 + 30 + - ecc-size: enables ECC support and specifies ECC buffer size in bytes 31 + (defaults to 0: no ECC) 32 + 33 + - record-size: maximum size in bytes of each dump done on oops/panic 34 + (defaults to 0: disabled) 35 + 36 + - console-size: size in bytes of log buffer reserved for kernel messages 37 + (defaults to 0: disabled) 38 + 39 + - ftrace-size: size in bytes of log buffer reserved for function tracing and 40 + profiling (defaults to 0: disabled) 41 + 42 + - pmsg-size: size in bytes of log buffer reserved for userspace messages 43 + (defaults to 0: disabled) 44 + 45 + - unbuffered: if present, use unbuffered mappings to map the reserved region 46 + (defaults to buffered mappings) 47 + 48 + - no-dump-oops: if present, only dump panics (defaults to panics and oops)
+4 -2
Documentation/ramoops.txt
··· 45 45 46 46 2. Setting the parameters 47 47 48 - Setting the ramoops parameters can be done in 2 different manners: 48 + Setting the ramoops parameters can be done in 3 different manners: 49 49 1. Use the module parameters (which have the names of the variables described 50 50 as before). 51 51 For quick debugging, you can also reserve parts of memory during boot ··· 54 54 kernel to use only the first 128 MB of memory, and place ECC-protected ramoops 55 55 region at 128 MB boundary: 56 56 "mem=128M ramoops.mem_address=0x8000000 ramoops.ecc=1" 57 - 2. Use a platform device and set the platform data. The parameters can then 57 + 2. Use Device Tree bindings, as described in 58 + Documentation/devicetree/bindings/misc/ramoops.txt. 59 + 3. Use a platform device and set the platform data. The parameters can then 58 60 be set through that platform data. An example of doing that is: 59 61 60 62 #include <linux/pstore_ram.h>
+3 -1
arch/powerpc/kernel/nvram_64.c
··· 444 444 */ 445 445 static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, 446 446 int *count, struct timespec *time, char **buf, 447 - bool *compressed, struct pstore_info *psi) 447 + bool *compressed, ssize_t *ecc_notice_size, 448 + struct pstore_info *psi) 448 449 { 449 450 struct oops_log_info *oops_hdr; 450 451 unsigned int err_type, id_no, size = 0; ··· 546 545 return -ENOMEM; 547 546 kfree(buff); 548 547 548 + *ecc_notice_size = 0; 549 549 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) 550 550 *compressed = true; 551 551 else
+5 -2
drivers/acpi/apei/erst.c
··· 927 927 static int erst_close_pstore(struct pstore_info *psi); 928 928 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count, 929 929 struct timespec *time, char **buf, 930 - bool *compressed, struct pstore_info *psi); 930 + bool *compressed, ssize_t *ecc_notice_size, 931 + struct pstore_info *psi); 931 932 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason, 932 933 u64 *id, unsigned int part, int count, bool compressed, 933 934 size_t size, struct pstore_info *psi); ··· 988 987 989 988 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count, 990 989 struct timespec *time, char **buf, 991 - bool *compressed, struct pstore_info *psi) 990 + bool *compressed, ssize_t *ecc_notice_size, 991 + struct pstore_info *psi) 992 992 { 993 993 int rc; 994 994 ssize_t len = 0; ··· 1035 1033 memcpy(*buf, rcd->data, len - sizeof(*rcd)); 1036 1034 *id = record_id; 1037 1035 *compressed = false; 1036 + *ecc_notice_size = 0; 1038 1037 if (uuid_le_cmp(rcd->sec_hdr.section_type, 1039 1038 CPER_SECTION_TYPE_DMESG_Z) == 0) { 1040 1039 *type = PSTORE_TYPE_DMESG;
+13
drivers/firmware/efi/efi-pstore.c
··· 34 34 int *count; 35 35 struct timespec *timespec; 36 36 bool *compressed; 37 + ssize_t *ecc_notice_size; 37 38 char **buf; 38 39 }; 39 40 ··· 70 69 *cb_data->compressed = true; 71 70 else 72 71 *cb_data->compressed = false; 72 + *cb_data->ecc_notice_size = 0; 73 73 } else if (sscanf(name, "dump-type%u-%u-%d-%lu", 74 74 cb_data->type, &part, &cnt, &time) == 4) { 75 75 *cb_data->id = generic_id(time, part, cnt); ··· 78 76 cb_data->timespec->tv_sec = time; 79 77 cb_data->timespec->tv_nsec = 0; 80 78 *cb_data->compressed = false; 79 + *cb_data->ecc_notice_size = 0; 81 80 } else if (sscanf(name, "dump-type%u-%u-%lu", 82 81 cb_data->type, &part, &time) == 3) { 83 82 /* ··· 91 88 cb_data->timespec->tv_sec = time; 92 89 cb_data->timespec->tv_nsec = 0; 93 90 *cb_data->compressed = false; 91 + *cb_data->ecc_notice_size = 0; 94 92 } else 95 93 return 0; 96 94 ··· 214 210 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, 215 211 int *count, struct timespec *timespec, 216 212 char **buf, bool *compressed, 213 + ssize_t *ecc_notice_size, 217 214 struct pstore_info *psi) 218 215 { 219 216 struct pstore_read_data data; ··· 225 220 data.count = count; 226 221 data.timespec = timespec; 227 222 data.compressed = compressed; 223 + data.ecc_notice_size = ecc_notice_size; 228 224 data.buf = buf; 229 225 230 226 *data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL); ··· 399 393 400 394 static __exit void efivars_pstore_exit(void) 401 395 { 396 + if (!efi_pstore_info.bufsize) 397 + return; 398 + 399 + pstore_unregister(&efi_pstore_info); 400 + kfree(efi_pstore_info.buf); 401 + efi_pstore_info.buf = NULL; 402 + efi_pstore_info.bufsize = 0; 402 403 } 403 404 404 405 module_init(efivars_pstore_init);
+29 -2
fs/pstore/Kconfig
··· 1 1 config PSTORE 2 2 tristate "Persistent store support" 3 3 default n 4 - select ZLIB_DEFLATE 5 - select ZLIB_INFLATE 6 4 help 7 5 This option enables generic access to platform level 8 6 persistent storage via "pstore" filesystem that can ··· 11 13 (e.g. ACPI_APEI on X86) which will select this for you. 12 14 If you don't have a platform persistent store driver, 13 15 say N. 16 + 17 + choice 18 + prompt "Choose compression algorithm" 19 + depends on PSTORE 20 + default PSTORE_ZLIB_COMPRESS 21 + help 22 + This option chooses compression algorithm. 23 + 24 + config PSTORE_ZLIB_COMPRESS 25 + bool "ZLIB" 26 + select ZLIB_DEFLATE 27 + select ZLIB_INFLATE 28 + help 29 + This option enables ZLIB compression algorithm support. 30 + 31 + config PSTORE_LZO_COMPRESS 32 + bool "LZO" 33 + select LZO_COMPRESS 34 + select LZO_DECOMPRESS 35 + help 36 + This option enables LZO compression algorithm support. 37 + 38 + config PSTORE_LZ4_COMPRESS 39 + bool "LZ4" 40 + select LZ4_COMPRESS 41 + select LZ4_DECOMPRESS 42 + help 43 + This option enables LZ4 compression algorithm support. 44 + endchoice 14 45 15 46 config PSTORE_CONSOLE 16 47 bool "Log kernel console messages"
-1
fs/pstore/inode.c
··· 178 178 } 179 179 180 180 static const struct file_operations pstore_file_operations = { 181 - .owner = THIS_MODULE, 182 181 .open = pstore_file_open, 183 182 .read = pstore_file_read, 184 183 .llseek = pstore_file_llseek,
+234 -35
fs/pstore/platform.c
··· 28 28 #include <linux/console.h> 29 29 #include <linux/module.h> 30 30 #include <linux/pstore.h> 31 + #ifdef CONFIG_PSTORE_ZLIB_COMPRESS 31 32 #include <linux/zlib.h> 33 + #endif 34 + #ifdef CONFIG_PSTORE_LZO_COMPRESS 35 + #include <linux/lzo.h> 36 + #endif 37 + #ifdef CONFIG_PSTORE_LZ4_COMPRESS 38 + #include <linux/lz4.h> 39 + #endif 32 40 #include <linux/string.h> 33 41 #include <linux/timer.h> 34 42 #include <linux/slab.h> ··· 77 69 static char *backend; 78 70 79 71 /* Compression parameters */ 72 + #ifdef CONFIG_PSTORE_ZLIB_COMPRESS 80 73 #define COMPR_LEVEL 6 81 74 #define WINDOW_BITS 12 82 75 #define MEM_LEVEL 4 83 76 static struct z_stream_s stream; 77 + #else 78 + static unsigned char *workspace; 79 + #endif 80 + 81 + struct pstore_zbackend { 82 + int (*compress)(const void *in, void *out, size_t inlen, size_t outlen); 83 + int (*decompress)(void *in, void *out, size_t inlen, size_t outlen); 84 + void (*allocate)(void); 85 + void (*free)(void); 86 + 87 + const char *name; 88 + }; 84 89 85 90 static char *big_oops_buf; 86 91 static size_t big_oops_buf_sz; ··· 150 129 } 151 130 EXPORT_SYMBOL_GPL(pstore_cannot_block_path); 152 131 132 + #ifdef CONFIG_PSTORE_ZLIB_COMPRESS 153 133 /* Derived from logfs_compress() */ 154 - static int pstore_compress(const void *in, void *out, size_t inlen, 155 - size_t outlen) 134 + static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen) 156 135 { 157 136 int err, ret; 158 137 ··· 186 165 } 187 166 188 167 /* Derived from logfs_uncompress */ 189 - static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen) 168 + static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen) 190 169 { 191 170 int err, ret; 192 171 ··· 215 194 return ret; 216 195 } 217 196 218 - static void allocate_buf_for_compression(void) 197 + static void allocate_zlib(void) 219 198 { 220 199 size_t size; 221 200 size_t cmpr; ··· 258 237 259 238 } 260 239 261 - static void 
free_buf_for_compression(void) 240 + static void free_zlib(void) 262 241 { 263 242 kfree(stream.workspace); 264 243 stream.workspace = NULL; 265 244 kfree(big_oops_buf); 266 245 big_oops_buf = NULL; 246 + big_oops_buf_sz = 0; 247 + } 248 + 249 + static struct pstore_zbackend backend_zlib = { 250 + .compress = compress_zlib, 251 + .decompress = decompress_zlib, 252 + .allocate = allocate_zlib, 253 + .free = free_zlib, 254 + .name = "zlib", 255 + }; 256 + #endif 257 + 258 + #ifdef CONFIG_PSTORE_LZO_COMPRESS 259 + static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen) 260 + { 261 + int ret; 262 + 263 + ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace); 264 + if (ret != LZO_E_OK) { 265 + pr_err("lzo_compress error, ret = %d!\n", ret); 266 + return -EIO; 267 + } 268 + 269 + return outlen; 270 + } 271 + 272 + static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen) 273 + { 274 + int ret; 275 + 276 + ret = lzo1x_decompress_safe(in, inlen, out, &outlen); 277 + if (ret != LZO_E_OK) { 278 + pr_err("lzo_decompress error, ret = %d!\n", ret); 279 + return -EIO; 280 + } 281 + 282 + return outlen; 283 + } 284 + 285 + static void allocate_lzo(void) 286 + { 287 + big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize); 288 + big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 289 + if (big_oops_buf) { 290 + workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); 291 + if (!workspace) { 292 + pr_err("No memory for compression workspace; skipping compression\n"); 293 + kfree(big_oops_buf); 294 + big_oops_buf = NULL; 295 + } 296 + } else { 297 + pr_err("No memory for uncompressed data; skipping compression\n"); 298 + workspace = NULL; 299 + } 300 + } 301 + 302 + static void free_lzo(void) 303 + { 304 + kfree(workspace); 305 + kfree(big_oops_buf); 306 + big_oops_buf = NULL; 307 + big_oops_buf_sz = 0; 308 + } 309 + 310 + static struct pstore_zbackend backend_lzo = { 311 + .compress = compress_lzo, 312 + .decompress = decompress_lzo, 313 
+ .allocate = allocate_lzo, 314 + .free = free_lzo, 315 + .name = "lzo", 316 + }; 317 + #endif 318 + 319 + #ifdef CONFIG_PSTORE_LZ4_COMPRESS 320 + static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen) 321 + { 322 + int ret; 323 + 324 + ret = lz4_compress(in, inlen, out, &outlen, workspace); 325 + if (ret) { 326 + pr_err("lz4_compress error, ret = %d!\n", ret); 327 + return -EIO; 328 + } 329 + 330 + return outlen; 331 + } 332 + 333 + static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen) 334 + { 335 + int ret; 336 + 337 + ret = lz4_decompress_unknownoutputsize(in, inlen, out, &outlen); 338 + if (ret) { 339 + pr_err("lz4_decompress error, ret = %d!\n", ret); 340 + return -EIO; 341 + } 342 + 343 + return outlen; 344 + } 345 + 346 + static void allocate_lz4(void) 347 + { 348 + big_oops_buf_sz = lz4_compressbound(psinfo->bufsize); 349 + big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 350 + if (big_oops_buf) { 351 + workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL); 352 + if (!workspace) { 353 + pr_err("No memory for compression workspace; skipping compression\n"); 354 + kfree(big_oops_buf); 355 + big_oops_buf = NULL; 356 + } 357 + } else { 358 + pr_err("No memory for uncompressed data; skipping compression\n"); 359 + workspace = NULL; 360 + } 361 + } 362 + 363 + static void free_lz4(void) 364 + { 365 + kfree(workspace); 366 + kfree(big_oops_buf); 367 + big_oops_buf = NULL; 368 + big_oops_buf_sz = 0; 369 + } 370 + 371 + static struct pstore_zbackend backend_lz4 = { 372 + .compress = compress_lz4, 373 + .decompress = decompress_lz4, 374 + .allocate = allocate_lz4, 375 + .free = free_lz4, 376 + .name = "lz4", 377 + }; 378 + #endif 379 + 380 + static struct pstore_zbackend *zbackend = 381 + #if defined(CONFIG_PSTORE_ZLIB_COMPRESS) 382 + &backend_zlib; 383 + #elif defined(CONFIG_PSTORE_LZO_COMPRESS) 384 + &backend_lzo; 385 + #elif defined(CONFIG_PSTORE_LZ4_COMPRESS) 386 + &backend_lz4; 387 + #else 388 + NULL; 389 + #endif 
390 + 391 + static int pstore_compress(const void *in, void *out, 392 + size_t inlen, size_t outlen) 393 + { 394 + if (zbackend) 395 + return zbackend->compress(in, out, inlen, outlen); 396 + else 397 + return -EIO; 398 + } 399 + 400 + static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen) 401 + { 402 + if (zbackend) 403 + return zbackend->decompress(in, out, inlen, outlen); 404 + else 405 + return -EIO; 406 + } 407 + 408 + static void allocate_buf_for_compression(void) 409 + { 410 + if (zbackend) { 411 + pr_info("using %s compression\n", zbackend->name); 412 + zbackend->allocate(); 413 + } else { 414 + pr_err("allocate compression buffer error!\n"); 415 + } 416 + } 417 + 418 + static void free_buf_for_compression(void) 419 + { 420 + if (zbackend) 421 + zbackend->free(); 422 + else 423 + pr_err("free compression buffer error!\n"); 267 424 } 268 425 269 426 /* ··· 483 284 u64 id; 484 285 unsigned int part = 1; 485 286 unsigned long flags = 0; 486 - int is_locked = 0; 287 + int is_locked; 487 288 int ret; 488 289 489 290 why = get_reason_str(reason); ··· 494 295 pr_err("pstore dump routine blocked in %s path, may corrupt error record\n" 495 296 , in_nmi() ? 
"NMI" : why); 496 297 } 497 - } else 298 + } else { 498 299 spin_lock_irqsave(&psinfo->buf_lock, flags); 300 + is_locked = 1; 301 + } 499 302 oopscount++; 500 303 while (total < kmsg_bytes) { 501 304 char *dst; ··· 505 304 int hsize; 506 305 int zipped_len = -1; 507 306 size_t len; 508 - bool compressed; 307 + bool compressed = false; 509 308 size_t total_len; 510 309 511 310 if (big_oops_buf && is_locked) { 512 311 dst = big_oops_buf; 513 - hsize = sprintf(dst, "%s#%d Part%u\n", why, 514 - oopscount, part); 515 - size = big_oops_buf_sz - hsize; 312 + size = big_oops_buf_sz; 313 + } else { 314 + dst = psinfo->buf; 315 + size = psinfo->bufsize; 316 + } 516 317 517 - if (!kmsg_dump_get_buffer(dumper, true, dst + hsize, 518 - size, &len)) 519 - break; 318 + hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, part); 319 + size -= hsize; 520 320 321 + if (!kmsg_dump_get_buffer(dumper, true, dst + hsize, 322 + size, &len)) 323 + break; 324 + 325 + if (big_oops_buf && is_locked) { 521 326 zipped_len = pstore_compress(dst, psinfo->buf, 522 327 hsize + len, psinfo->bufsize); 523 328 ··· 531 324 compressed = true; 532 325 total_len = zipped_len; 533 326 } else { 534 - compressed = false; 535 327 total_len = copy_kmsg_to_buffer(hsize, len); 536 328 } 537 329 } else { 538 - dst = psinfo->buf; 539 - hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, 540 - part); 541 - size = psinfo->bufsize - hsize; 542 - dst += hsize; 543 - 544 - if (!kmsg_dump_get_buffer(dumper, true, dst, 545 - size, &len)) 546 - break; 547 - 548 - compressed = false; 549 330 total_len = hsize + len; 550 331 } 551 332 ··· 545 350 total += total_len; 546 351 part++; 547 352 } 548 - if (pstore_cannot_block_path(reason)) { 549 - if (is_locked) 550 - spin_unlock_irqrestore(&psinfo->buf_lock, flags); 551 - } else 353 + if (is_locked) 552 354 spin_unlock_irqrestore(&psinfo->buf_lock, flags); 553 355 } 554 356 ··· 689 497 690 498 void pstore_unregister(struct pstore_info *psi) 691 499 { 692 - 
pstore_unregister_pmsg(); 693 - pstore_unregister_ftrace(); 694 - pstore_unregister_console(); 500 + if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { 501 + pstore_unregister_pmsg(); 502 + pstore_unregister_ftrace(); 503 + pstore_unregister_console(); 504 + } 695 505 pstore_unregister_kmsg(); 696 506 697 507 free_buf_for_compression(); ··· 721 527 int failed = 0, rc; 722 528 bool compressed; 723 529 int unzipped_len = -1; 530 + ssize_t ecc_notice_size = 0; 724 531 725 532 if (!psi) 726 533 return; ··· 731 536 goto out; 732 537 733 538 while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed, 734 - psi)) > 0) { 539 + &ecc_notice_size, psi)) > 0) { 735 540 if (compressed && (type == PSTORE_TYPE_DMESG)) { 736 541 if (big_oops_buf) 737 542 unzipped_len = pstore_decompress(buf, ··· 739 544 big_oops_buf_sz); 740 545 741 546 if (unzipped_len > 0) { 547 + if (ecc_notice_size) 548 + memcpy(big_oops_buf + unzipped_len, 549 + buf + size, ecc_notice_size); 742 550 kfree(buf); 743 551 buf = big_oops_buf; 744 552 size = unzipped_len; ··· 753 555 } 754 556 } 755 557 rc = pstore_mkfile(type, psi->name, id, count, buf, 756 - compressed, (size_t)size, time, psi); 558 + compressed, size + ecc_notice_size, 559 + time, psi); 757 560 if (unzipped_len < 0) { 758 561 /* Free buffer other than big oops */ 759 562 kfree(buf);
+98 -7
fs/pstore/ram.c
··· 34 34 #include <linux/slab.h> 35 35 #include <linux/compiler.h> 36 36 #include <linux/pstore_ram.h> 37 + #include <linux/of.h> 38 + #include <linux/of_address.h> 37 39 38 40 #define RAMOOPS_KERNMSG_HDR "====" 39 41 #define MIN_MEM_SIZE 4096UL ··· 183 181 static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, 184 182 int *count, struct timespec *time, 185 183 char **buf, bool *compressed, 184 + ssize_t *ecc_notice_size, 186 185 struct pstore_info *psi) 187 186 { 188 187 ssize_t size; 189 - ssize_t ecc_notice_size; 190 188 struct ramoops_context *cxt = psi->data; 191 189 struct persistent_ram_zone *prz = NULL; 192 190 int header_length = 0; ··· 231 229 size = persistent_ram_old_size(prz) - header_length; 232 230 233 231 /* ECC correction notice */ 234 - ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0); 232 + *ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0); 235 233 236 - *buf = kmalloc(size + ecc_notice_size + 1, GFP_KERNEL); 234 + *buf = kmalloc(size + *ecc_notice_size + 1, GFP_KERNEL); 237 235 if (*buf == NULL) 238 236 return -ENOMEM; 239 237 240 238 memcpy(*buf, (char *)persistent_ram_old(prz) + header_length, size); 241 - persistent_ram_ecc_string(prz, *buf + size, ecc_notice_size + 1); 239 + persistent_ram_ecc_string(prz, *buf + size, *ecc_notice_size + 1); 242 240 243 - return size + ecc_notice_size; 241 + return size; 244 242 } 245 243 246 244 static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz, ··· 460 458 return 0; 461 459 } 462 460 461 + static int ramoops_parse_dt_size(struct platform_device *pdev, 462 + const char *propname, u32 *value) 463 + { 464 + u32 val32 = 0; 465 + int ret; 466 + 467 + ret = of_property_read_u32(pdev->dev.of_node, propname, &val32); 468 + if (ret < 0 && ret != -EINVAL) { 469 + dev_err(&pdev->dev, "failed to parse property %s: %d\n", 470 + propname, ret); 471 + return ret; 472 + } 473 + 474 + if (val32 > INT_MAX) { 475 + dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32); 
476 + return -EOVERFLOW; 477 + } 478 + 479 + *value = val32; 480 + return 0; 481 + } 482 + 483 + static int ramoops_parse_dt(struct platform_device *pdev, 484 + struct ramoops_platform_data *pdata) 485 + { 486 + struct device_node *of_node = pdev->dev.of_node; 487 + struct device_node *mem_region; 488 + struct resource res; 489 + u32 value; 490 + int ret; 491 + 492 + dev_dbg(&pdev->dev, "using Device Tree\n"); 493 + 494 + mem_region = of_parse_phandle(of_node, "memory-region", 0); 495 + if (!mem_region) { 496 + dev_err(&pdev->dev, "no memory-region phandle\n"); 497 + return -ENODEV; 498 + } 499 + 500 + ret = of_address_to_resource(mem_region, 0, &res); 501 + of_node_put(mem_region); 502 + if (ret) { 503 + dev_err(&pdev->dev, 504 + "failed to translate memory-region to resource: %d\n", 505 + ret); 506 + return ret; 507 + } 508 + 509 + pdata->mem_size = resource_size(&res); 510 + pdata->mem_address = res.start; 511 + pdata->mem_type = of_property_read_bool(of_node, "unbuffered"); 512 + pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops"); 513 + 514 + #define parse_size(name, field) { \ 515 + ret = ramoops_parse_dt_size(pdev, name, &value); \ 516 + if (ret < 0) \ 517 + return ret; \ 518 + field = value; \ 519 + } 520 + 521 + parse_size("record-size", pdata->record_size); 522 + parse_size("console-size", pdata->console_size); 523 + parse_size("ftrace-size", pdata->ftrace_size); 524 + parse_size("pmsg-size", pdata->pmsg_size); 525 + parse_size("ecc-size", pdata->ecc_info.ecc_size); 526 + 527 + #undef parse_size 528 + 529 + return 0; 530 + } 531 + 463 532 static int ramoops_probe(struct platform_device *pdev) 464 533 { 465 534 struct device *dev = &pdev->dev; 466 - struct ramoops_platform_data *pdata = pdev->dev.platform_data; 535 + struct ramoops_platform_data *pdata = dev->platform_data; 467 536 struct ramoops_context *cxt = &oops_cxt; 468 537 size_t dump_mem_sz; 469 538 phys_addr_t paddr; 470 539 int err = -EINVAL; 540 + 541 + if (dev_of_node(dev) && 
!pdata) { 542 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 543 + if (!pdata) { 544 + err = -ENOMEM; 545 + goto fail_out; 546 + } 547 + 548 + err = ramoops_parse_dt(pdev, pdata); 549 + if (err < 0) 550 + goto fail_out; 551 + } 471 552 472 553 /* Only a single ramoops area allowed at a time, so fail extra 473 554 * probes. ··· 681 596 return 0; 682 597 } 683 598 599 + static const struct of_device_id dt_match[] = { 600 + { .compatible = "ramoops" }, 601 + {} 602 + }; 603 + 684 604 static struct platform_driver ramoops_driver = { 685 605 .probe = ramoops_probe, 686 606 .remove = ramoops_remove, 687 607 .driver = { 688 - .name = "ramoops", 608 + .name = "ramoops", 609 + .of_match_table = dt_match, 689 610 }, 690 611 }; 691 612
+2 -1
include/linux/pstore.h
··· 58 58 int (*close)(struct pstore_info *psi); 59 59 ssize_t (*read)(u64 *id, enum pstore_type_id *type, 60 60 int *count, struct timespec *time, char **buf, 61 - bool *compressed, struct pstore_info *psi); 61 + bool *compressed, ssize_t *ecc_notice_size, 62 + struct pstore_info *psi); 62 63 int (*write)(enum pstore_type_id type, 63 64 enum kmsg_dump_reason reason, u64 *id, 64 65 unsigned int part, int count, bool compressed,