// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/kstrtox.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

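/*
 * Translate between a pmem device byte offset, the corresponding physical
 * address, and the block-layer sector number.  to_sect() and to_offset()
 * account for any metadata preceding the data area (pmem->data_offset).
 */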
static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
	return pmem->phys_addr + offset;
}

static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
{
	return (offset - pmem->data_offset) >> SECTOR_SHIFT;
}

static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
{
	return (sector << SECTOR_SHIFT) + pmem->data_offset;
}

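/*
 * After poison has been cleared, flip the affected pages back to a usable
 * state: drop the per-page pmem poison flag and restore the pages in the
 * kernel linear map via clear_mce_nospec().
 */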
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

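/*
 * Drop @blks sectors starting at @sector from the badblocks list and
 * notify any 'badblocks' sysfs watchers.
 */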
static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
	if (blks == 0)
		return;
	badblocks_clear(&pmem->bb, sector, blks);
	if (pmem->bb_state)
		sysfs_notify_dirent(pmem->bb_state);
}

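/*
 * Ask the nvdimm bus to clear poison for @len bytes at @offset.  On success,
 * mark the affected pages usable again and invalidate any stale CPU cache
 * lines covering the range.  Returns the number of bytes cleared or a
 * negative error code.
 */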
static long __pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);

	if (cleared > 0) {
		pmem_mkpage_present(pmem, offset, cleared);
		arch_invalidate_pmem(pmem->virt_addr + offset, len);
	}
	return cleared;
}

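/*
 * Block-layer flavor of poison clearing: also drops the corresponding
 * badblocks entries, and returns BLK_STS_IOERR if the full range could not
 * be cleared.
 */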
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	long cleared = __pmem_clear_poison(pmem, offset, len);

	if (cleared < 0)
		return BLK_STS_IOERR;

	pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
	if (cleared < len)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}

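/*
 * Copy from a (possibly highmem) page to pmem using cache-flushing stores so
 * that the data is pushed out of the CPU cache on return.
 */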
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

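/*
 * Copy from pmem into a (possibly highmem) page using a machine-check safe
 * copy so that consuming poison surfaces as BLK_STS_IOERR rather than a
 * fatal exception.
 */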
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

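/* Reads that intersect known-bad sectors fail rather than touching poison */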
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

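/*
 * Writes that target known-bad sectors first attempt to clear the poison so
 * the new data can be written; the write fails if clearing fails.
 */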
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
		blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);

		if (rc != BLK_STS_OK)
			return rc;
	}

	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);

	return BLK_STS_OK;
}

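/*
 * bio submission path: handle REQ_PREFLUSH/REQ_FUA via nvdimm_flush(), do
 * I/O accounting, and copy each segment synchronously to or from pmem.
 */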
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
	sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
	unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
	struct badblocks *bb = &pmem->bb;
	sector_t first_bad;
	sector_t num_bad;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = PHYS_PFN(pmem->phys_addr + offset);

	if (bb->count &&
	    badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
		long actual_nr;

		if (mode != DAX_RECOVERY_WRITE)
			return -EHWPOISON;

		/*
		 * The recovery stride is set to the kernel page size because
		 * the underlying driver and firmware clear-poison functions
		 * don't appear to handle large chunks (such as 2MiB) reliably.
		 */
		actual_nr = PHYS_PFN(
			PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
		dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
				sector, nr_pages, first_bad, actual_nr);
		if (actual_nr)
			return actual_nr;
		return 1;
	}

	/*
	 * If badblocks are present but not in the range, limit known good range
	 * to the requested range.
	 */
	if (bb->count)
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.submit_bio = pmem_submit_bio,
};

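/*
 * DAX zero_page_range(): route the zeroing through pmem_do_write() with the
 * shared zero page so that known poison is cleared as part of the write.
 */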
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
		void **kaddr, unsigned long *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}

/*
 * The recovery write thread starts out as a normal pwrite thread.  When the
 * filesystem is told about a potential media error in the range, it turns
 * the normal pwrite into a dax_recovery_write.
 *
 * The recovery write consists of clearing the media poison, clearing the
 * page HWPoison bit, re-enabling page-wide read-write permission, flushing
 * the caches, and finally performing the write.  A competing pread thread
 * is held off during the recovery process since the data read back might
 * not be valid; this is achieved by clearing the badblock records only
 * after the recovery write completes.  Competing recovery write threads
 * are already serialized by the writer lock held by dax_iomap_rw().
 */
static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);
	size_t olen, len, off;
	phys_addr_t pmem_off;
	struct device *dev = pmem->bb.dev;
	long cleared;

	off = offset_in_page(addr);
	len = PFN_PHYS(PFN_UP(off + bytes));
	if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
		return _copy_from_iter_flushcache(addr, bytes, i);

	/*
	 * A range that is not page-aligned cannot be recovered.  This should
	 * not happen unless something else went wrong.
	 */
	if (off || !PAGE_ALIGNED(bytes)) {
		dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
				addr, bytes);
		return 0;
	}

	pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
	cleared = __pmem_clear_poison(pmem, pmem_off, len);
	if (cleared > 0 && cleared < len) {
		dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
				cleared, len);
		return 0;
	}
	if (cleared < 0) {
		dev_dbg(dev, "poison clear failed: %ld\n", cleared);
		return 0;
	}

	olen = _copy_from_iter_flushcache(addr, bytes, i);
	pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);

	return olen;
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.zero_page_range = pmem_dax_zero_page_range,
	.recovery_write = pmem_recovery_write,
};

static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = kstrtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);

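/*
 * Only expose the write_cache attribute when the architecture provides the
 * pmem API needed to flush caches explicitly.
 */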
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

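/* devm action: unwind the dax device and gendisk set up by pmem_attach_disk() */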
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	dax_remove_host(pmem->disk);
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	put_disk(pmem->disk);
}

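/*
 * Relay memory_failure() on a pmem pfn to the dax holder (typically a
 * filesystem) as a byte range relative to the start of the data area.
 */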
static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	struct pmem_device *pmem =
			container_of(pgmap, struct pmem_device, pgmap);
	u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
	u64 len = nr_pages << PAGE_SHIFT;

	return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.memory_failure = pmem_pagemap_memory_failure,
};

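/*
 * Main setup path: map the namespace (with struct pages when DAX is
 * possible), populate badblocks, and register the gendisk and dax device
 * for the namespace.
 */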
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct queue_limits lim = {
		.logical_block_size = pmem_sector_size(ndns),
		.physical_block_size = PAGE_SIZE,
		.max_hw_sectors = UINT_MAX,
		.features = BLK_FEAT_WRITE_CACHE |
			    BLK_FEAT_SYNCHRONOUS,
	};
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	if (fua)
		lim.features |= BLK_FEAT_FUA;
	if (is_nd_pfn(dev) || pmem_should_map_pages(dev))
		lim.features |= BLK_FEAT_DAX;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(&lim, nid);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, &pmem_dax_ops);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		if (rc != -EOPNOTSUPP)
			goto out;
	} else {
		set_dax_nocache(dax_dev);
		set_dax_nomc(dax_dev);
		if (is_nvdimm_sync(nd_region))
			set_dax_synchronous(dax_dev);
		pmem->dax_dev = dax_dev;
		rc = dax_add_host(dax_dev, disk);
		if (rc)
			goto out_cleanup_dax;
		dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	}
	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_remove_host;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;

out_remove_host:
	dax_remove_host(pmem->disk);
out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	put_disk(pmem->disk);
	return rc;
}

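/*
 * Probe: BTT and PFN personalities are handed to their helpers, otherwise
 * look for a BTT/PFN/DAX info block before treating the namespace as raw
 * pmem.
 */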
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: there is no info reserve
	 * block, or we found a valid info reserve block but failed to
	 * initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

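/*
 * Re-populate the badblocks list from the region's current poison
 * information and notify the sysfs 'badblocks' file when one is registered.
 */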
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

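/* Propagate region read-only changes to the pmem (or BTT) gendisk */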
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Persistent Memory Driver");
MODULE_LICENSE("GPL v2");