/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
			struct device_attribute *attr, char *b)		\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
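/*
 * For illustration: ZRAM_ATTR_RO(num_reads) expands to roughly the
 * hand-written show helper below, plus the matching
 * DEVICE_ATTR_RO(num_reads) declaration:
 *
 *	static ssize_t num_reads_show(struct device *d,
 *			struct device_attribute *attr, char *b)
 *	{
 *		struct zram *zram = dev_to_zram(d);
 *		return scnprintf(b, PAGE_SIZE, "%llu\n",
 *			(u64)atomic64_read(&zram->stats.num_reads));
 *	}
 */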
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
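/*
 * Example usage (illustrative): memparse() accepts the usual size
 * suffixes, so the limit can be set from user space with e.g.
 *
 *	echo 256M > /sys/block/zram0/mem_limit
 *
 * Writing 0 clears limit_pages and thereby disables the limit check.
 */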
static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
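/*
 * Example usage (illustrative): only "0" is accepted here, which
 * resets the high-water mark to the pool's current size:
 *
 *	echo 0 > /sys/block/zram0/mem_used_max
 */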
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}
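/*
 * Example usage (illustrative): allow up to four concurrent
 * compression streams, typically done before setting the disk size:
 *
 *	echo 4 > /sys/block/zram0/max_comp_streams
 */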
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
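/*
 * Example usage (illustrative): reading the attribute lists the
 * available backends with the active one marked; writing selects a
 * backend, which only succeeds before disksize is set:
 *
 *	cat /sys/block/zram0/comp_algorithm
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 *
 * (assuming the lz4 backend is built into this kernel)
 */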
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}
static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}
static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
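/*
 * Worked example (illustrative, assuming ZRAM_FLAG_SHIFT were 24): the
 * low 24 bits of table[index].value would hold the compressed object
 * size and the bits above it the page flags. Storing size 1000 for an
 * entry whose flags are 0x3 yields (0x3 << 24) | 1000, and
 * zram_get_obj_size() masks with BIT(24) - 1 to recover 1000.
 */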
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
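/*
 * Worked example (illustrative): with 512-byte sectors and a 4096-byte
 * ZRAM_LOGICAL_BLOCK_SIZE, ZRAM_SECTOR_PER_LOGICAL_BLOCK is 8, so a
 * request starting at sector 13 fails the alignment check
 * (13 & 7 != 0), while one starting at sector 16 with a 4096-byte size
 * passes both masks and is then only checked against the disk bound.
 */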
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}
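/*
 * The refcount acts as an I/O gate: disksize_store() sets it to 1,
 * zram_reset_device() drops that initial reference, and once it
 * reaches zero atomic_inc_not_zero() fails, so new I/O bails out while
 * the reset path waits on io_done for in-flight requests to drain.
 */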
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
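/*
 * Worked example (illustrative, 4K pages): a 1024-byte bvec at offset
 * 3584 reaches the page boundary (3584 + 1024 == PAGE_SIZE), so the
 * index advances and the new offset is (3584 + 1024) % 4096 == 0; a
 * 512-byte bvec at offset 0 leaves the index alone and moves the
 * offset to 512.
 */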
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
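/*
 * Illustrative: on a 64-bit kernel with 4K pages this scans
 * PAGE_SIZE / sizeof(unsigned long) == 512 words and bails out at the
 * first non-zero word.
 */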
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
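/*
 * This is the usual lock-free "monotonic maximum" pattern: retry until
 * either the stored maximum is already >= pages or our value was
 * successfully installed. On a race, atomic_long_cmpxchg() returns the
 * newer stored value, which becomes cur_max for the next attempt.
 */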
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	int ret;

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
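/*
 * Worked example (illustrative, 4K pages): a discard of 10240 bytes
 * starting 512 bytes into page 5 first drops the unaligned head
 * (4096 - 512 = 3584 bytes), leaving n = 6656 and index = 6; page 6 is
 * then freed (n becomes 2560), and the 2560-byte tail is skipped
 * because it no longer covers a whole page.
 */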
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * Refcount will go down to 0 eventually and the r/w handler
	 * cannot handle further I/O, so it will bail out via the
	 * zram_meta_get() check.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;
	set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}
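/*
 * Example setup sequence (illustrative), matching the sysfs attributes
 * defined in this file:
 *
 *	modprobe zram num_devices=1
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 */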
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
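/*
 * Example usage (illustrative): the device must be idle (no openers),
 * so a swap device is taken out of service first:
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */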
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
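/*
 * Worked example (illustrative, 4K pages): a 4096-byte bvec arriving
 * at offset 2048 within zram page N is split: the first 2048 bytes go
 * to (N, 2048) and the remaining 2048 bytes to (N + 1, 0), since
 * zram_bvec_rw() never crosses a zram page boundary.
 */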
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If the I/O fails, just return the error (i.e., non-zero)
	 * without calling page_endio. The upper layers of rw_page
	 * (e.g., swap_readpage, __swap_writepage) will then resubmit
	 * the I/O as a bio request, and bio->bi_end_io handles the
	 * error (e.g., SetPageError, set_page_dirty and extra work).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
static int create_device(struct zram *zram, int device_id)
{
	struct request_queue *queue;
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size (PAGE_SIZE). But if it
	 * is different, we will skip discarding some parts of logical blocks
	 * in the part of the request range which isn't aligned to physical
	 * block size. So we can't ensure that all discarded logical blocks
	 * are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out:
	return ret;
}
static void destroy_devices(unsigned int nr)
{
	struct zram *zram;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		zram = &zram_devices[i];
		/*
		 * Remove sysfs first, so no one will perform a disksize
		 * store while we destroy the devices
		 */
		sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);

		zram_reset_device(zram);

		blk_cleanup_queue(zram->disk->queue);
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	kfree(zram_devices);
	unregister_blkdev(zram_major, "zram");
	pr_info("Destroyed %u device(s)\n", nr);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		return -EINVAL;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		return -EBUSY;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		unregister_blkdev(zram_major, "zram");
		return -ENOMEM;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto out_error;
	}

	pr_info("Created %u device(s)\n", num_devices);
	return 0;

out_error:
	destroy_devices(dev_id);
	return ret;
}
static void __exit zram_exit(void)
{
	destroy_devices(num_devices);
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");