/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
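
/*
 * Note on the stat64 helpers below: a plain "*v += inc" on a u64 is not
 * atomic on 32-bit architectures (the load and store can be torn), so
 * updates to the 64-bit counters are serialized with stat64_lock.
 */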
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	zram_stat64_sub(zram, &zram->stats.compr_size,
			meta->table[index].size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}
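
/*
 * Compression always works on whole pages, so a bio vector shorter than
 * PAGE_SIZE ("partial" I/O, as tested below) forces a read-modify-write
 * cycle in zram_bvec_write() further down.
 */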
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;

	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
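
/*
 * Write path overview: for a partial I/O the old page is decompressed
 * first and the new bytes are merged in; zero-filled pages are detected
 * up front so they consume no zsmalloc memory at all; everything else is
 * LZO-compressed, with incompressible pages stored raw at PAGE_SIZE.
 */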
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial I/O. Read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (meta->table[index].handle ||
	    zram_test_flag(meta, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)	/* already unmapped for partial I/O */
			kunmap_atomic(user_mem);
		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}
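
/*
 * zram->lock is taken shared for reads and exclusive for writes: many
 * readers may decompress pages concurrently, while a writer needs sole
 * access to the table entry and its zsmalloc handle.
 */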
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
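
/*
 * Worked example for the sector arithmetic below, assuming 4 KiB pages
 * (SECTORS_PER_PAGE = 8, SECTOR_SHIFT = 9): a bio starting at sector 11
 * lands in page index 11 >> 3 = 1, at byte offset (11 & 7) << 9 = 1536
 * within that page.
 */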
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else {
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;
		}

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_sector;
	end = start + (bio->bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/*
	 * out of range: 'end' may equal 'bound' (a request ending exactly
	 * at the last sector is valid), so only reject when it goes past
	 */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
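
/*
 * Alignment example, assuming ZRAM_LOGICAL_BLOCK_SIZE is 4096 (i.e.
 * ZRAM_SECTOR_PER_LOGICAL_BLOCK is 8): a bio at sector 16 of size 8192
 * passes valid_io_request(), while one at sector 9 fails the sector
 * alignment test because 9 & 7 != 0.
 */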
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

static void __zram_reset_device(struct zram *zram)
{
	size_t index;
	struct zram_meta *meta;

	if (!zram->init_done)
		return;

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}
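
/*
 * Sizing note for zram_meta_alloc() above: compress_workmem is LZO's
 * scratch area (LZO1X_MEM_COMPRESS bytes), and compress_buffer is an
 * order-1 (two page) allocation because worst-case LZO output for an
 * incompressible page is slightly larger than PAGE_SIZE.
 */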

void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	down_write(&zram->lock);
	zram_free_page(zram, index);
	up_write(&zram->lock);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}
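
/*
 * Typical userspace setup once create_device() has run (a sketch; the
 * actual capacity comes from the sysfs disksize attribute noted above,
 * and the size values here are only examples):
 *
 *   modprobe zram num_devices=1
 *   echo 268435456 > /sys/block/zram0/disksize
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 */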

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		get_disk(zram->disk);
		destroy_device(zram);
		zram_reset_device(zram);
		put_disk(zram->disk);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");