libnvdimm, btt: ensure that initializing metadata clears poison

If we had badblocks/poison in the metadata area of a BTT, recreating the
BTT would not clear the poison in all cases, notably the flog area. This
is because rw_bytes will only clear errors if the request being sent
down is 512B aligned and sized.
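
For context, the check that enforces this lives in nsio_rw_bytes() in
drivers/nvdimm/claim.c. The sketch below is simplified (the read path,
the atomic-context flag, and cache invalidation are elided; it is not
verbatim from the tree):

    /*
     * Poison clearing is only attempted when the write covers whole
     * 512B sectors; an unaligned write through a badblock fails with
     * -EIO and leaves the poison in place.
     */
    if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
            if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
                    long cleared = nvdimm_clear_poison(&ndns->dev,
                                    nsio->res.start + offset, size);

                    if (cleared > 0 && cleared / 512)
                            badblocks_clear(&nsio->bb, sector,
                                            cleared / 512);
            } else
                    return -EIO;
    }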

Make sure that when writing the map and info blocks, the rw_bytes being
sent are of the correct size/alignment. For the flog, instead of doing
the smaller log_entry writes only, first do a 'wipe' of the entire area
by writing zeroes in large enough chunks so that errors get cleared.
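
To see why the chunked wipe always satisfies that constraint: logoff and
info2off are both at least 512B aligned, so the flog size is a multiple
of 512, and every min(logsize, SZ_4K) chunk, including the final short
one, is 512B aligned and sized. A throwaway userspace demo (not kernel
code) of that arithmetic:

    #include <stdio.h>
    #include <stddef.h>

    #define SZ_4K 4096

    int main(void)
    {
            /* example flog size: 19 sectors, not a multiple of 4K */
            size_t logsize = 9728;
            size_t offset = 0;

            while (logsize) {
                    size_t size = logsize < SZ_4K ? logsize : SZ_4K;

                    printf("write %zu bytes at offset %zu, 512B-aligned: %s\n",
                            size, offset,
                            (offset % 512 == 0 && size % 512 == 0) ?
                            "yes" : "no");
                    offset += size;
                    logsize -= size;
            }
            return 0;
    }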

Cc: Andy Rudoff <andy.rudoff@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Authored by Vishal Verma and committed by Dan Williams (b177fe85, 3ae3d67b)

+47 -7
drivers/nvdimm/btt.c
@@ -57,6 +57,14 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
 {
 	int ret;
 
+	/*
+	 * infooff and info2off should always be at least 512B aligned.
+	 * We rely on that to make sure rw_bytes does error clearing
+	 * correctly, so make sure that is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512));
+	WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512));
+
 	ret = arena_write_bytes(arena, arena->info2off, super,
 			sizeof(struct btt_sb), 0);
 	if (ret)
@@ -394,9 +402,17 @@ static int btt_map_init(struct arena_info *arena)
 	if (!zerobuf)
 		return -ENOMEM;
 
+	/*
+	 * mapoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512));
+
 	while (mapsize) {
 		size_t size = min(mapsize, chunk_size);
 
+		WARN_ON_ONCE(size < 512);
 		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
 				size, 0);
 		if (ret)
@@ -418,11 +434,36 @@ static int btt_map_init(struct arena_info *arena)
  */
 static int btt_log_init(struct arena_info *arena)
 {
+	size_t logsize = arena->info2off - arena->logoff;
+	size_t chunk_size = SZ_4K, offset = 0;
+	struct log_entry log;
+	void *zerobuf;
 	int ret;
 	u32 i;
-	struct log_entry log, zerolog;
 
-	memset(&zerolog, 0, sizeof(zerolog));
+	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
+	if (!zerobuf)
+		return -ENOMEM;
+	/*
+	 * logoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512));
+
+	while (logsize) {
+		size_t size = min(logsize, chunk_size);
+
+		WARN_ON_ONCE(size < 512);
+		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
+				size, 0);
+		if (ret)
+			goto free;
+
+		offset += size;
+		logsize -= size;
+		cond_resched();
+	}
 
 	for (i = 0; i < arena->nfree; i++) {
 		log.lba = cpu_to_le32(i);
@@ -431,13 +472,12 @@ static int btt_log_init(struct arena_info *arena)
 		log.seq = cpu_to_le32(LOG_SEQ_INIT);
 		ret = __btt_log_write(arena, i, 0, &log, 0);
 		if (ret)
-			return ret;
-		ret = __btt_log_write(arena, i, 1, &zerolog, 0);
-		if (ret)
-			return ret;
+			goto free;
 	}
 
-	return 0;
+ free:
+	kfree(zerobuf);
+	return ret;
 }
 
 static int btt_freelist_init(struct arena_info *arena)