/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

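/*
 * Simple statistics helpers. The u64 counters are updated under
 * zram->stat64_lock so that reads and updates stay consistent on
 * 32-bit CPUs, where 64-bit loads and stores are not atomic.
 */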
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

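/* Return 1 if the page contains only zero bytes, 0 otherwise. */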
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

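/*
 * Pick a default disksize (a percentage of total RAM) when none was
 * given, warn when the requested size exceeds twice the available RAM,
 * and round the result down to a page boundary.
 */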
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

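/*
 * Free whatever backs table slot @index: zero-filled pages have no
 * allocation (only a flag), incompressible pages own a whole struct
 * page, and everything else is an object in the xvmalloc pool.
 */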
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

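/* Serve a read of a zero-filled (or never written) sector by clearing the page. */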
static void handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram,
				struct page *page, u32 index)
{
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

	memcpy(user_mem, cmem, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);
	kunmap_atomic(cmem, KM_USER1);

	flush_dcache_page(page);
}

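/*
 * Read handler: for each bio segment, zero pages and unwritten sectors
 * are served by handle_zero_page(), incompressible pages are copied out
 * verbatim, and everything else is LZO-decompressed into the caller's
 * page.
 */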
static void zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		bio_endio(bio, -ENXIO);
		return;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u\n",
				(ulong)(bio->bi_sector), bio->bi_size);
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

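/*
 * Write handler: for each bio segment, zero-filled pages are recorded
 * with just the ZRAM_ZERO flag, pages that compress poorly (clen >
 * max_zpage_size) are stored as whole pages, and the rest are stored as
 * LZO-compressed objects in the xvmalloc pool. A slot that is being
 * overwritten is freed first.
 */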
static void zram_write(struct zram *zram, struct bio *bio)
{
	int i, ret;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		ret = zram_init_device(zram);
		if (ret)
			goto out;
	}

	zram_stat64_inc(zram, &zram->stats.num_writes);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		u32 offset;
		size_t clen;
		struct zobj_header *zheader;
		struct page *page, *page_store;
		unsigned char *user_mem, *cmem, *src;

		page = bvec->bv_page;
		src = zram->compress_buffer;

		/*
		 * System overwrites unused sectors. Free memory associated
		 * with this sector now.
		 */
		if (zram->table[index].page ||
				zram_test_flag(zram, index, ZRAM_ZERO))
			zram_free_page(zram, index);

		mutex_lock(&zram->lock);

		user_mem = kmap_atomic(page, KM_USER0);
		if (page_zero_filled(user_mem)) {
			kunmap_atomic(user_mem, KM_USER0);
			mutex_unlock(&zram->lock);
			zram_stat_inc(&zram->stats.pages_zero);
			zram_set_flag(zram, index, ZRAM_ZERO);
			index++;
			continue;
		}

		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
					zram->compress_workmem);

		kunmap_atomic(user_mem, KM_USER0);

		if (unlikely(ret != LZO_E_OK)) {
			mutex_unlock(&zram->lock);
			pr_err("Compression failed! err=%d\n", ret);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

		/*
		 * Page is incompressible. Store it as-is (uncompressed)
		 * since we do not want to return too many disk write
		 * errors, which has the side effect of hanging the system.
		 */
		if (unlikely(clen > max_zpage_size)) {
			clen = PAGE_SIZE;
			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (unlikely(!page_store)) {
				mutex_unlock(&zram->lock);
				pr_info("Error allocating memory for "
					"incompressible page: %u\n", index);
				zram_stat64_inc(zram,
					&zram->stats.failed_writes);
				goto out;
			}

			offset = 0;
			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
			zram_stat_inc(&zram->stats.pages_expand);
			zram->table[index].page = page_store;
			src = kmap_atomic(page, KM_USER0);
			goto memstore;
		}

		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
				&zram->table[index].page, &offset,
				GFP_NOIO | __GFP_HIGHMEM)) {
			mutex_unlock(&zram->lock);
			pr_info("Error allocating memory for compressed "
				"page: %u, size=%zu\n", index, clen);
			zram_stat64_inc(zram, &zram->stats.failed_writes);
			goto out;
		}

memstore:
		zram->table[index].offset = offset;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
				zram->table[index].offset;

#if 0
		/* Back-reference needed for memory defragmentation */
		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
			zheader = (struct zobj_header *)cmem;
			zheader->table_idx = index;
			cmem += sizeof(*zheader);
		}
#endif

		memcpy(cmem, src, clen);

		kunmap_atomic(cmem, KM_USER1);
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			kunmap_atomic(src, KM_USER0);

		/* Update stats */
		zram_stat64_add(zram, &zram->stats.compr_size, clen);
		zram_stat_inc(&zram->stats.pages_stored);
		if (clen <= PAGE_SIZE / 2)
			zram_stat_inc(&zram->stats.good_compress);

		mutex_unlock(&zram->lock);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and page aligned.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
		(bio->bi_size & (PAGE_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	switch (bio_data_dir(bio)) {
	case READ:
		zram_read(zram, bio);
		break;

	case WRITE:
		zram_write(zram, bio);
		break;
	}

	return 0;
}

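/*
 * Return the device to its uninitialized state: free the compression
 * buffers, every stored page, the table and the memory pool, and clear
 * all statistics. Serialized against zram_init_device() by init_lock.
 */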
void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}

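/*
 * Allocate the per-device resources (compression workmem and buffer,
 * page table, xvmalloc pool) and set the disk capacity. This runs
 * lazily on the first write, not at device creation time.
 */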
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

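/*
 * Called by the swap layer when a swap slot is freed, allowing the
 * backing memory to be released immediately instead of waiting for the
 * sector to be overwritten.
 */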
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	mutex_init(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity is set via sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

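	/*
	 * Example userspace setup (a sketch; assumes the sysfs attributes
	 * exported by zram_sysfs.c and a first device named zram0):
	 *
	 *   echo $((256*1024*1024)) > /sys/block/zram0/disksize
	 *   mkswap /dev/zram0
	 *   swapon /dev/zram0
	 */
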
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");