/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	void __pmem		*virt_addr;
	size_t			size;
	struct badblocks	bb;
};

static int pmem_major;

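/* Check the badblocks list for overlap with this range of 512-byte sectors. */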
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

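/*
 * Copy one bio_vec worth of data between a page and pmem, failing reads that
 * overlap known-poisoned media and flushing the dcache on both paths.
 */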
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
			rc = -EIO;
		else {
			memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
	return rc;
}

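/*
 * Service an entire bio by iterating its segments; writes are made durable
 * with a single wmb_pmem() barrier after the loop.
 */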
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

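/* ->rw_page entry point: transfer a single page synchronously. */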
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

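/*
 * DAX entry point: translate a sector to a kernel virtual address and pfn so
 * callers can access pmem directly, bypassing the page cache.
 */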
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, unsigned long *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

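/*
 * Allocate the pmem_device and map the namespace: page-backed namespaces get
 * a struct-page-capable mapping via devm_memremap_pages(), others a plain
 * devm_memremap().
 */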
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	if (pmem_should_map_pages(dev))
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
	else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr))
		return (void __force *) pmem->virt_addr;

	return pmem;
}

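/* Tear down the gendisk and request queue created by pmem_attach_disk(). */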
static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

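/*
 * Allocate a bio-based request queue and gendisk for the namespace, size the
 * capacity past any metadata at data_offset, and publish the badblocks list.
 */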
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

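/*
 * ->rw_bytes callback used by claiming personalities (e.g. BTT) for metadata
 * I/O; reads are checked against the badblocks list at sector granularity.
 */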
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

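/*
 * Write an initial pfn info block (superblock) at a 4K offset into the
 * namespace, recording mode, data offset, and the number of pfns covered.
 */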
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(SZ_8K, nd_pfn->align);
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

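/* Undo nvdimm_namespace_attach_pfn(): detach the disk, free the info block. */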
static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

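/*
 * Validate or create the pfn info block, then remap the namespace with
 * struct page backing and attach the disk with its data offset applied.
 */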
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct vmem_altmap *altmap;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	phys_addr_t offset;
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

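/*
 * Driver probe: allocate and map the namespace, seed the badblocks list, and
 * hand off to the BTT or PFN personalities before falling back to a raw disk.
 */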
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0) {
		/* we'll come back as btt-pmem */
		return -ENXIO;
	}

	if (nd_pfn_probe(ndns, pmem) == 0) {
		/* we'll come back as pfn-pmem */
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

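/* Register the dynamic 'pmem' block major, then the libnvdimm bus driver. */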
static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");