#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
+#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
struct work_struct work;
atomic_t pending;
int error;
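+ /* set when a finished READ still needs decryption in kcryptd */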
+ int post_process;
};
/*
 * pool for per bio private data
 */
mempool_t *io_pool;
mempool_t *page_pool;
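+ /* private bioset for clone bios, separate from the global bio pool */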
+ struct bio_set *bs;
/*
 * crypto related data
 */
u8 key[0];
};
-#define MIN_IOS 256
+#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
return r;
}
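+
+ /* called by bio_put() when the last reference to a clone is dropped */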
+ static void dm_crypt_bio_destructor(struct bio *bio)
+ {
+ struct crypt_io *io = bio->bi_private;
+ struct crypt_config *cc = io->target->private;
+
+ bio_free(bio, cc->bs);
+ }
+
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
- /*
- * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
- * to fail earlier. This is not necessary but increases throughput.
- * FIXME: Is this really intelligent?
- */
- if (base_bio)
- clone = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
- else
- clone = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+ if (base_bio) {
+ clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
+ /* check before __bio_clone(): a NULL clone must not be dereferenced */
+ if (unlikely(!clone))
+ return NULL;
+ __bio_clone(clone, base_bio);
+ } else
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+
if (!clone)
return NULL;
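+ /* bios from cc->bs must be freed back into it, not the global pool */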
+ clone->bi_destructor = dm_crypt_bio_destructor;
+
/* if the last bio was not complete, continue where that one ended */
clone->bi_idx = *bio_vec_idx;
clone->bi_vcnt = *bio_vec_idx;
* kcryptd:
*
* Needed because it would be very unwise to do decryption in an
- * interrupt context, so bios returning from read requests get
- * queued here.
+ * interrupt context.
*/
static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io)
{
- INIT_WORK(&io->work, kcryptd_do_work, io);
+ INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
}
if (!read_io)
crypt_free_buffer_pages(cc, clone, done);
+ /* keep going - not finished yet */
if (unlikely(clone->bi_size))
return 1;
- /*
- * successful reads are decrypted by the worker thread
- */
if (!read_io)
goto out;
}
bio_put(clone);
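+ /* read is complete: hand the bio to kcryptd for decryption */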
+ io->post_process = 1;
kcryptd_queue_io(io);
return 0;
clone->bi_rw = io->base_bio->bi_rw;
}
-static struct bio *clone_read(struct crypt_io *io,
- sector_t sector)
+static void process_read(struct crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
struct bio *clone;
+ sector_t sector = base_bio->bi_sector - io->target->begin;
+
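+ /* reference dropped in dec_pending() when the clone completes or on error */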
+ atomic_inc(&io->pending);
/*
* The block layer might modify the bvec array, so always
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
- clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
- if (unlikely(!clone))
- return NULL;
+ clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
+ if (unlikely(!clone)) {
+ dec_pending(io, -ENOMEM);
+ return;
+ }
clone_init(io, clone);
+ clone->bi_destructor = dm_crypt_bio_destructor;
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
clone->bi_size = base_bio->bi_size;
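+ /* remap the clone onto the underlying device */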
+ clone->bi_sector = cc->start + sector;
memcpy(clone->bi_io_vec, bio_iovec(base_bio),
sizeof(struct bio_vec) * clone->bi_vcnt);
- clone->bi_sector = cc->start + sector;
- return clone;
+ generic_make_request(clone);
}
-static struct bio *clone_write(struct crypt_io *io,
- sector_t sector,
- unsigned *bvec_idx,
- struct convert_context *ctx)
+static void process_write(struct crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
struct bio *clone;
+ struct convert_context ctx;
+ unsigned remaining = base_bio->bi_size;
+ sector_t sector = base_bio->bi_sector - io->target->begin;
+ unsigned bvec_idx = 0;
- clone = crypt_alloc_buffer(cc, base_bio->bi_size,
- io->first_clone, bvec_idx);
- if (!clone)
- return NULL;
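+ /* reference for the first clone; each further clone takes its own below */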
+ atomic_inc(&io->pending);
- ctx->bio_out = clone;
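+ /* a single conversion context covers every clone of this write */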
+ crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
- if (unlikely(crypt_convert(cc, ctx) < 0)) {
- crypt_free_buffer_pages(cc, clone,
- clone->bi_size);
- bio_put(clone);
- return NULL;
- }
+ /*
+ * The allocated buffers can be smaller than the whole bio,
+ * so repeat the whole process until all the data can be handled.
+ */
+ while (remaining) {
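+ /* crypt_alloc_buffer() resumes at bvec_idx, so the full bio size is passed each round */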
+ clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+ io->first_clone, &bvec_idx);
+ if (unlikely(!clone)) {
+ dec_pending(io, -ENOMEM);
+ return;
+ }
- clone_init(io, clone);
- clone->bi_sector = cc->start + sector;
+ ctx.bio_out = clone;
- return clone;
+ if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+ crypt_free_buffer_pages(cc, clone, clone->bi_size);
+ bio_put(clone);
+ dec_pending(io, -EIO);
+ return;
+ }
+
+ clone_init(io, clone);
+ clone->bi_sector = cc->start + sector;
+
+ if (!io->first_clone) {
+ /*
+ * hold a reference to the first clone, because it
+ * holds the bio_vec array and that can't be freed
+ * before all other clones are released
+ */
+ bio_get(clone);
+ io->first_clone = clone;
+ }
+
+ remaining -= clone->bi_size;
+ sector += bio_sectors(clone);
+
+ /* more clones follow: hold an extra reference so io and first_clone survive */
+ if (remaining)
+ atomic_inc(&io->pending);
+
+ generic_make_request(clone);
+
+ /* out of pages: wait for in-flight I/O to complete before allocating more */
+ if (remaining)
+ congestion_wait(bio_data_dir(clone), HZ/100);
+ }
}
static void process_read_endio(struct crypt_io *io)
dec_pending(io, crypt_convert(cc, &ctx));
}
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = data;
+ struct crypt_io *io = container_of(work, struct crypt_io, work);
- process_read_endio(io);
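+ /* completed reads are decrypted; fresh bios are submitted as reads or writes */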
+ if (io->post_process)
+ process_read_endio(io);
+ else if (bio_data_dir(io->base_bio) == READ)
+ process_read(io);
+ else
+ process_write(io);
}
/*
goto bad4;
}
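+ /* pool sized for MIN_IOS in-flight clone bios */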
+ cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+ if (!cc->bs) {
+ ti->error = "Cannot allocate crypt bioset";
+ goto bad_bs;
+ }
+
if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
ti->error = "Error setting key";
goto bad5;
return 0;
bad5:
+ bioset_free(cc->bs);
+bad_bs:
mempool_destroy(cc->page_pool);
bad4:
mempool_destroy(cc->io_pool);
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
+ bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->io_pool);
{
struct crypt_config *cc = ti->private;
struct crypt_io *io;
- struct convert_context ctx;
- struct bio *clone;
- unsigned int remaining = bio->bi_size;
- sector_t sector = bio->bi_sector - ti->begin;
- unsigned int bvec_idx = 0;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
io->base_bio = bio;
io->first_clone = NULL;
- io->error = 0;
- atomic_set(&io->pending, 1); /* hold a reference */
-
- if (bio_data_dir(bio) == WRITE)
- crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
-
- /*
- * The allocated buffers can be smaller than the whole bio,
- * so repeat the whole process until all the data can be handled.
- */
- while (remaining) {
- if (bio_data_dir(bio) == WRITE)
- clone = clone_write(io, sector, &bvec_idx, &ctx);
- else
- clone = clone_read(io, sector);
- if (!clone)
- goto cleanup;
-
- if (!io->first_clone) {
- /*
- * hold a reference to the first clone, because it
- * holds the bio_vec array and that can't be freed
- * before all other clones are released
- */
- bio_get(clone);
- io->first_clone = clone;
- }
- atomic_inc(&io->pending);
-
- remaining -= clone->bi_size;
- sector += bio_sectors(clone);
-
- generic_make_request(clone);
-
- /* out of memory -> run queues */
- if (remaining)
- blk_congestion_wait(bio_data_dir(clone), HZ/100);
- }
+ io->error = io->post_process = 0;
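+ /* no initial reference: process_read/process_write take it in kcryptd */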
+ atomic_set(&io->pending, 0);
+ kcryptd_queue_io(io);
- /* drop reference, clones could have returned before we reach this */
- dec_pending(io, 0);
return 0;
-
-cleanup:
- if (io->first_clone) {
- dec_pending(io, -ENOMEM);
- return 0;
- }
-
- /* if no bio has been dispatched yet, we can directly return the error */
- mempool_free(io, cc->io_pool);
- return -ENOMEM;
}
static int crypt_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
- const char *cipher;
- const char *chainmode = NULL;
unsigned int sz = 0;
switch (type) {
break;
case STATUSTYPE_TABLE:
- cipher = crypto_blkcipher_name(cc->tfm);
-
- chainmode = cc->chainmode;
-
if (cc->iv_mode)
- DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
+ DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
+ cc->iv_mode);
else
- DMEMIT("%s-%s ", cipher, chainmode);
+ DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
if (cc->key_size > 0) {
if ((maxlen - sz) < ((cc->key_size << 1) + 1))
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 2, 0},
+ .version= {1, 3, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,