/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
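
/*
 * Usage sketch, for orientation only: the disksize attribute is exported
 * by the zram sysfs code (zram_disk_attr_group, registered below), not by
 * this file, and the value written is a size in bytes. For example:
 *
 *	modprobe zram num_devices=4
 *	echo $((512*1024*1024)) > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */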

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}
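
/*
 * The stat64 helpers above take stat64_lock because plain loads and
 * stores of a u64 are not atomic on 32-bit SMP machines, so unlocked
 * updates could tear. The 32-bit counters in zram->stats are only
 * modified under zram->lock and need no such protection.
 */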

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
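
/*
 * Zero-filled pages are common in swap workloads. Detecting them up
 * front (scanning a word at a time) means such pages cost only the
 * ZRAM_ZERO flag bit: no zsmalloc allocation and no compression at all.
 */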

static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	zram_stat64_sub(zram, &zram->stats.compr_size,
			meta->table[index].size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
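
/*
 * Compression always operates on whole pages, so a bio_vec smaller than
 * PAGE_SIZE cannot be served in place: reads decompress into a bounce
 * buffer and copy out the requested slice, and writes must first read
 * back the full page (see zram_bvec_write()).
 */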

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		memcpy(mem, cmem, PAGE_SIZE);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		goto out_cleanup;
	}

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (meta->table[index].handle ||
	    zram_test_flag(meta, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		/* user_mem is already unmapped (and NULL) for partial IO */
		if (user_mem)
			kunmap_atomic(user_mem);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		/* Incompressible page: store it uncompressed */
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		src = kmap_atomic(page);
	memcpy(cmem, src, clen);
	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		kunmap_atomic(src);

	zs_unmap_object(meta->mem_pool, handle);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}
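
/*
 * zram->lock is taken shared for reads, which may therefore run
 * concurrently, but exclusive for writes: every writer stages data
 * through the single per-device meta->compress_buffer.
 */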

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
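
/*
 * Example with 4 KiB pages: a 1 KiB bvec at offset 3 KiB ends exactly on
 * a page boundary, so index advances and offset wraps to 0; a 1 KiB bvec
 * at offset 1 KiB leaves index unchanged and moves offset to 2 KiB.
 */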

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i;
	u32 index;
	int offset;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
		return 0;
	}

	/* I/O request is valid */
	return 1;
}
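
/*
 * Example, assuming the 4 KiB logical block size defined in zram_drv.h
 * (so ZRAM_SECTOR_PER_LOGICAL_BLOCK is 8): a bio starting at sector 8
 * with bi_size 4096 is accepted; one starting at sector 9, or sized only
 * 512 bytes, is rejected as unaligned.
 */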

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

static void __zram_reset_device(struct zram *zram)
{
	size_t index;
	struct zram_meta *meta;

	if (!zram->init_done)
		return;

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	/* zram_free_page() modifies the table, so exclude concurrent R/W */
	down_write(&zram->lock);
	zram_free_page(zram, index);
	up_write(&zram->lock);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
	return num_devices;
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");