fs crypto: move per-file encryption from f2fs tree to fs/crypto
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 657ab8707b58e6a0233e7361874449a354419bd6..e5c762b372390b59fd0ceb011fea75ab19a41fe7 100644 (file)
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -34,9 +34,9 @@ static void f2fs_read_end_io(struct bio *bio)
 
        if (f2fs_bio_encrypted(bio)) {
                if (bio->bi_error) {
-                       f2fs_release_crypto_ctx(bio->bi_private);
+                       fscrypt_release_ctx(bio->bi_private);
                } else {
-                       f2fs_end_io_crypto_work(bio->bi_private, bio);
+                       fscrypt_decrypt_bio_pages(bio->bi_private, bio);
                        return;
                }
        }
@@ -64,7 +64,7 @@ static void f2fs_write_end_io(struct bio *bio)
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               f2fs_restore_and_release_control_page(&page);
+               fscrypt_pullback_bio_page(&page, true);
 
                if (unlikely(bio->bi_error)) {
                        set_bit(AS_EIO, &page->mapping->flags);
@@ -129,16 +129,10 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 
        bio_for_each_segment_all(bvec, io->bio, i) {
 
-               if (bvec->bv_page->mapping) {
+               if (bvec->bv_page->mapping)
                        target = bvec->bv_page;
-               } else {
-                       struct f2fs_crypto_ctx *ctx;
-
-                       /* encrypted page */
-                       ctx = (struct f2fs_crypto_ctx *)page_private(
-                                                               bvec->bv_page);
-                       target = ctx->w.control_page;
-               }
+               else
+                       target = fscrypt_control_page(bvec->bv_page);
 
                if (inode && inode == target->mapping->host)
                        return true;
@@ -206,6 +200,13 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
                __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
 }
 
+void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+{
+       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+       f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       f2fs_submit_merged_bio(sbi, META, WRITE);
+}
+
 /*
  * Fill the locked page with data located in the block address.
  * Return unlocked page.
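
The f2fs_flush_merged_bios() helper added above simply submits any partially filled merged bios for all three log types in one call. A minimal usage sketch (the call site shown here is hypothetical, not part of this commit):

	/* e.g. when the whole filesystem must be quiesced: push out every
	 * bio still sitting in the DATA/NODE/META merge buffers, then let
	 * the caller wait for writeback to drain as it sees fit.
	 */
	f2fs_flush_merged_bios(sbi);
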
@@ -213,13 +214,14 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
 {
        struct bio *bio;
-       struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+       struct page *page = fio->encrypted_page ?
+                       fio->encrypted_page : fio->page;
 
        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);
 
        /* Allocate a new bio */
-       bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+       bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio_put(bio);
@@ -240,21 +242,24 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
 
        io = is_read ? &sbi->read_io : &sbi->write_io[btype];
 
-       verify_block_addr(sbi, fio->blk_addr);
+       if (fio->old_blkaddr != NEW_ADDR)
+               verify_block_addr(sbi, fio->old_blkaddr);
+       verify_block_addr(sbi, fio->new_blkaddr);
 
        down_write(&io->io_rwsem);
 
        if (!is_read)
                inc_page_count(sbi, F2FS_WRITEBACK);
 
-       if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
+       if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
                                                io->fio.rw != fio->rw))
                __submit_merged_bio(io);
 alloc_new:
        if (io->bio == NULL) {
                int bio_blocks = MAX_BIO_BLOCKS(sbi);
 
-               io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
+               io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+                                               bio_blocks, is_read);
                io->fio = *fio;
        }
 
@@ -266,7 +271,7 @@ alloc_new:
                goto alloc_new;
        }
 
-       io->last_block_in_bio = fio->blk_addr;
+       io->last_block_in_bio = fio->new_blkaddr;
        f2fs_trace_ios(fio, 0);
 
        up_write(&io->io_rwsem);
@@ -297,6 +302,13 @@ void set_data_blkaddr(struct dnode_of_data *dn)
                dn->node_changed = true;
 }
 
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+       dn->data_blkaddr = blkaddr;
+       set_data_blkaddr(dn);
+       f2fs_update_extent_cache(dn);
+}
+
 int reserve_new_block(struct dnode_of_data *dn)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
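
The new f2fs_update_data_blkaddr() helper bundles the dnode update and the extent-cache update that call sites previously open-coded; the do_write_data_page() hunk further down removes exactly that pair of calls after write_data_page(). A before/after sketch, using only names that appear in this diff (my reading of the change, not text from the commit):

	/* before: each call site kept the dnode and extent cache in sync by hand */
	dn.data_blkaddr = blkaddr;
	set_data_blkaddr(&dn);
	f2fs_update_extent_cache(&dn);

	/* after: one helper performs all three steps */
	f2fs_update_data_blkaddr(&dn, blkaddr);
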
@@ -400,7 +412,7 @@ got_it:
                return page;
        }
 
-       fio.blk_addr = dn.data_blkaddr;
+       fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
        fio.page = page;
        err = f2fs_submit_page_bio(&fio);
        if (err)
@@ -564,16 +576,33 @@ alloc:
        return 0;
 }
 
-static int __allocate_data_blocks(struct inode *inode, loff_t offset,
-                                                       size_t count)
+ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct inode *inode = file_inode(iocb->ki_filp);
        struct f2fs_map_blocks map;
+       ssize_t ret = 0;
 
-       map.m_lblk = F2FS_BYTES_TO_BLK(offset);
-       map.m_len = F2FS_BYTES_TO_BLK(count);
+       map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
+       map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
        map.m_next_pgofs = NULL;
 
-       return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_DIO);
+       if (f2fs_encrypted_inode(inode))
+               return 0;
+
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+       }
+       if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+       }
+       if (!f2fs_has_inline_data(inode))
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       return ret;
 }
 
 /*
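
f2fs_preallocate_blocks() replaces __allocate_data_blocks() and moves block reservation out of f2fs_direct_IO() (see the last hunk of this diff) into the write path, so it now covers buffered AIO (F2FS_GET_BLOCK_PRE_AIO) as well as direct I/O (F2FS_GET_BLOCK_PRE_DIO). A sketch of the intended caller, modeled on the f2fs_file_write_iter() that this series adds in fs/f2fs/file.c (reconstructed from memory, so treat the details around the preallocation call as approximate):

	static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		inode_lock(inode);
		ret = generic_write_checks(iocb, from);
		if (ret > 0) {
			/* reserve all blocks up front; a failure aborts the write early */
			ret = f2fs_preallocate_blocks(iocb, from);
			if (!ret)
				ret = __generic_file_write_iter(iocb, from);
		}
		inode_unlock(inode);
		return ret;
	}
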
@@ -604,7 +633,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
 
-       if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+       if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
@@ -639,7 +668,12 @@ next_block:
                                err = -EIO;
                                goto sync_out;
                        }
-                       err = __allocate_data_block(&dn);
+                       if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+                               if (blkaddr == NULL_ADDR)
+                                       err = reserve_new_block(&dn);
+                       } else {
+                               err = __allocate_data_block(&dn);
+                       }
                        if (err)
                                goto sync_out;
                        allocated = true;
@@ -670,7 +704,9 @@ next_block:
                map->m_len = 1;
        } else if ((map->m_pblk != NEW_ADDR &&
                        blkaddr == (map->m_pblk + ofs)) ||
-                       (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)) {
+                       (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+                       flag == F2FS_GET_BLOCK_PRE_DIO ||
+                       flag == F2FS_GET_BLOCK_PRE_AIO) {
                ofs++;
                map->m_len++;
        } else {
@@ -951,12 +987,12 @@ submit_and_realloc:
                        bio = NULL;
                }
                if (bio == NULL) {
-                       struct f2fs_crypto_ctx *ctx = NULL;
+                       struct fscrypt_ctx *ctx = NULL;
 
                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
 
-                               ctx = f2fs_get_crypto_ctx(inode);
+                               ctx = fscrypt_get_ctx(inode);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
 
@@ -969,7 +1005,7 @@ submit_and_realloc:
                                min_t(int, nr_pages, BIO_MAX_PAGES));
                        if (!bio) {
                                if (ctx)
-                                       f2fs_release_crypto_ctx(ctx);
+                                       fscrypt_release_ctx(ctx);
                                goto set_error_page;
                        }
                        bio->bi_bdev = bdev;
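
Taken together, the two hunks above show the read-side lifetime rule for the renamed fscrypt context API: a context obtained with fscrypt_get_ctx() must be released with fscrypt_release_ctx() on every failure path, while on a successful read it is consumed by fscrypt_decrypt_bio_pages() at I/O completion (see f2fs_read_end_io at the top of this diff). A condensed sketch of that pattern, with labels and error handling abbreviated:

	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode);
	if (IS_ERR(ctx))
		goto set_error_page;

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		fscrypt_release_ctx(ctx);	/* not yet attached to a bio */
		goto set_error_page;
	}
	bio->bi_private = ctx;			/* f2fs_read_end_io() takes over:
						 * fscrypt_release_ctx() on error,
						 * fscrypt_decrypt_bio_pages() on success */
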
@@ -1047,10 +1083,10 @@ int do_write_data_page(struct f2fs_io_info *fio)
        if (err)
                return err;
 
-       fio->blk_addr = dn.data_blkaddr;
+       fio->old_blkaddr = dn.data_blkaddr;
 
        /* This page is already truncated */
-       if (fio->blk_addr == NULL_ADDR) {
+       if (fio->old_blkaddr == NULL_ADDR) {
                ClearPageUptodate(page);
                goto out_writepage;
        }
@@ -1059,9 +1095,9 @@ int do_write_data_page(struct f2fs_io_info *fio)
 
                /* wait for GCed encrypted page writeback */
                f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
-                                                       fio->blk_addr);
+                                                       fio->old_blkaddr);
 
-               fio->encrypted_page = f2fs_encrypt(inode, fio->page);
+               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
                        goto out_writepage;
@@ -1074,7 +1110,7 @@ int do_write_data_page(struct f2fs_io_info *fio)
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
-       if (unlikely(fio->blk_addr != NEW_ADDR &&
+       if (unlikely(fio->old_blkaddr != NEW_ADDR &&
                        !is_cold_data(page) &&
                        !IS_ATOMIC_WRITTEN_PAGE(page) &&
                        need_inplace_update(inode))) {
@@ -1083,8 +1119,6 @@ int do_write_data_page(struct f2fs_io_info *fio)
                trace_f2fs_do_write_data_page(page, IPU);
        } else {
                write_data_page(&dn, fio);
-               set_data_blkaddr(&dn);
-               f2fs_update_extent_cache(&dn);
                trace_f2fs_do_write_data_page(page, OPU);
                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
                if (page->index == 0)
@@ -1409,6 +1443,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
        struct extent_info ei;
        int err = 0;
 
+       /*
+        * we already allocated all the blocks, so we don't need to get
+        * the block addresses when there is no need to fill the page.
+        */
+       if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+                                       len == PAGE_CACHE_SIZE)
+               return 0;
+
        if (f2fs_has_inline_data(inode) ||
                        (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                f2fs_lock_op(sbi);
@@ -1540,7 +1582,8 @@ repeat:
                        .sbi = sbi,
                        .type = DATA,
                        .rw = READ_SYNC,
-                       .blk_addr = blkaddr,
+                       .old_blkaddr = blkaddr,
+                       .new_blkaddr = blkaddr,
                        .page = page,
                        .encrypted_page = NULL,
                };
@@ -1560,7 +1603,7 @@ repeat:
 
                /* avoid symlink page */
                if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
-                       err = f2fs_decrypt_one(inode, page);
+                       err = fscrypt_decrypt_page(page);
                        if (err)
                                goto fail;
                }
@@ -1615,34 +1658,21 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t offset)
 {
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
+       struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        int err;
 
-       /* we don't need to use inline_data strictly */
-       err = f2fs_convert_inline_inode(inode);
+       err = check_direct_IO(inode, iter, offset);
        if (err)
                return err;
 
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return 0;
 
-       err = check_direct_IO(inode, iter, offset);
-       if (err)
-               return err;
-
        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (iov_iter_rw(iter) == WRITE) {
-               err = __allocate_data_blocks(inode, offset, count);
-               if (err)
-                       goto out;
-       }
-
        err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
 