| 1 | /* |
| 2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
| 3 | * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. |
| 4 | * |
| 5 | * This copyrighted material is made available to anyone wishing to use, |
| 6 | * modify, copy, or redistribute it subject to the terms and conditions |
| 7 | * of the GNU General Public License version 2. |
| 8 | */ |
| 9 | |
| 10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 11 | |
| 12 | #include <linux/sched.h> |
| 13 | #include <linux/slab.h> |
| 14 | #include <linux/spinlock.h> |
| 15 | #include <linux/completion.h> |
| 16 | #include <linux/buffer_head.h> |
| 17 | #include <linux/kallsyms.h> |
| 18 | #include <linux/gfs2_ondisk.h> |
| 19 | |
| 20 | #include "gfs2.h" |
| 21 | #include "incore.h" |
| 22 | #include "glock.h" |
| 23 | #include "inode.h" |
| 24 | #include "log.h" |
| 25 | #include "lops.h" |
| 26 | #include "meta_io.h" |
| 27 | #include "trans.h" |
| 28 | #include "util.h" |
| 29 | #include "trace_gfs2.h" |
| 30 | |
| 31 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, |
| 32 | unsigned int revokes) |
| 33 | { |
| 34 | struct gfs2_trans *tr; |
| 35 | int error; |
| 36 | |
| 37 | BUG_ON(current->journal_info); |
| 38 | BUG_ON(blocks == 0 && revokes == 0); |
| 39 | |
| 40 | if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) |
| 41 | return -EROFS; |
| 42 | |
| 43 | tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS); |
| 44 | if (!tr) |
| 45 | return -ENOMEM; |
| 46 | |
| 47 | tr->tr_ip = _RET_IP_; |
| 48 | tr->tr_blocks = blocks; |
| 49 | tr->tr_revokes = revokes; |
| 50 | tr->tr_reserved = 1; |
| 51 | tr->tr_alloced = 1; |
| 52 | if (blocks) |
| 53 | tr->tr_reserved += 6 + blocks; |
| 54 | if (revokes) |
| 55 | tr->tr_reserved += gfs2_struct2blk(sdp, revokes, |
| 56 | sizeof(u64)); |
| 57 | INIT_LIST_HEAD(&tr->tr_databuf); |
| 58 | INIT_LIST_HEAD(&tr->tr_buf); |
| 59 | |
| 60 | sb_start_intwrite(sdp->sd_vfs); |
| 61 | |
| 62 | error = gfs2_log_reserve(sdp, tr->tr_reserved); |
| 63 | if (error) |
| 64 | goto fail; |
| 65 | |
| 66 | current->journal_info = tr; |
| 67 | |
| 68 | return 0; |
| 69 | |
| 70 | fail: |
| 71 | sb_end_intwrite(sdp->sd_vfs); |
| 72 | kfree(tr); |
| 73 | |
| 74 | return error; |
| 75 | } |
| 76 | |
/*
 * gfs2_print_trans - Dump a transaction's accounting for debugging
 * @tr: the transaction to print
 *
 * Called when gfs2_trans_end() detects that a transaction touched more
 * blocks or revokes than it reserved.  tr_ip is the caller's return
 * address captured in gfs2_trans_begin(), printed symbolically (%pSR).
 */
static void gfs2_print_trans(const struct gfs2_trans *tr)
{
	pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
	pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched);
	pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke, tr->tr_num_revoke_rm);
}
| 87 | |
| 88 | void gfs2_trans_end(struct gfs2_sbd *sdp) |
| 89 | { |
| 90 | struct gfs2_trans *tr = current->journal_info; |
| 91 | s64 nbuf; |
| 92 | int alloced = tr->tr_alloced; |
| 93 | |
| 94 | BUG_ON(!tr); |
| 95 | current->journal_info = NULL; |
| 96 | |
| 97 | if (!tr->tr_touched) { |
| 98 | gfs2_log_release(sdp, tr->tr_reserved); |
| 99 | if (alloced) { |
| 100 | kfree(tr); |
| 101 | sb_end_intwrite(sdp->sd_vfs); |
| 102 | } |
| 103 | return; |
| 104 | } |
| 105 | |
| 106 | nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new; |
| 107 | nbuf -= tr->tr_num_buf_rm; |
| 108 | nbuf -= tr->tr_num_databuf_rm; |
| 109 | |
| 110 | if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) && |
| 111 | (tr->tr_num_revoke <= tr->tr_revokes))) |
| 112 | gfs2_print_trans(tr); |
| 113 | |
| 114 | gfs2_log_commit(sdp, tr); |
| 115 | if (alloced && !tr->tr_attached) |
| 116 | kfree(tr); |
| 117 | up_read(&sdp->sd_log_flush_lock); |
| 118 | |
| 119 | if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS) |
| 120 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); |
| 121 | if (alloced) |
| 122 | sb_end_intwrite(sdp->sd_vfs); |
| 123 | } |
| 124 | |
| 125 | static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, |
| 126 | struct buffer_head *bh, |
| 127 | const struct gfs2_log_operations *lops) |
| 128 | { |
| 129 | struct gfs2_bufdata *bd; |
| 130 | |
| 131 | bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL); |
| 132 | bd->bd_bh = bh; |
| 133 | bd->bd_gl = gl; |
| 134 | bd->bd_ops = lops; |
| 135 | INIT_LIST_HEAD(&bd->bd_list); |
| 136 | bh->b_private = bd; |
| 137 | return bd; |
| 138 | } |
| 139 | |
| 140 | /** |
| 141 | * gfs2_trans_add_data - Add a databuf to the transaction. |
| 142 | * @gl: The inode glock associated with the buffer |
| 143 | * @bh: The buffer to add |
| 144 | * |
| 145 | * This is used in two distinct cases: |
| 146 | * i) In ordered write mode |
| 147 | * We put the data buffer on a list so that we can ensure that its |
| 148 | * synced to disk at the right time |
| 149 | * ii) In journaled data mode |
| 150 | * We need to journal the data block in the same way as metadata in |
| 151 | * the functions above. The difference is that here we have a tag |
| 152 | * which is two __be64's being the block number (as per meta data) |
| 153 | * and a flag which says whether the data block needs escaping or |
| 154 | * not. This means we need a new log entry for each 251 or so data |
| 155 | * blocks, which isn't an enormous overhead but twice as much as |
| 156 | * for normal metadata blocks. |
| 157 | */ |
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_bufdata *bd;

	/* Ordered-write mode (case i above): just track the inode */
	if (!gfs2_is_jdata(ip)) {
		gfs2_ordered_add_inode(ip);
		return;
	}

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * No bufdata yet.  Drop both locks before allocating
		 * (gfs2_alloc_bufdata may sleep), then re-check
		 * b_private since another task may have attached a
		 * bufdata while the locks were dropped.
		 */
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
		else
			bd = bh->b_private;
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	tr->tr_touched = 1;
	/* Only pin and account the buffer the first time it is added */
	if (list_empty(&bd->bd_list)) {
		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		list_add_tail(&bd->bd_list, &tr->tr_databuf);
	}
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
| 196 | |
| 197 | static void meta_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) |
| 198 | { |
| 199 | struct gfs2_meta_header *mh; |
| 200 | struct gfs2_trans *tr; |
| 201 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
| 202 | |
| 203 | tr = current->journal_info; |
| 204 | tr->tr_touched = 1; |
| 205 | if (!list_empty(&bd->bd_list)) |
| 206 | return; |
| 207 | set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); |
| 208 | set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); |
| 209 | mh = (struct gfs2_meta_header *)bd->bd_bh->b_data; |
| 210 | if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) { |
| 211 | pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n", |
| 212 | (unsigned long long)bd->bd_bh->b_blocknr); |
| 213 | BUG(); |
| 214 | } |
| 215 | if (unlikely(state == SFS_FROZEN)) { |
| 216 | printk(KERN_INFO "GFS2:adding buf while frozen\n"); |
| 217 | gfs2_assert_withdraw(sdp, 0); |
| 218 | } |
| 219 | gfs2_pin(sdp, bd->bd_bh); |
| 220 | mh->__pad0 = cpu_to_be64(0); |
| 221 | mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); |
| 222 | list_add(&bd->bd_list, &tr->tr_buf); |
| 223 | tr->tr_num_buf_new++; |
| 224 | } |
| 225 | |
/**
 * gfs2_trans_add_meta - Add a metadata buffer to the current transaction
 * @gl: the glock the buffer is associated with
 * @bh: the buffer to add
 *
 * Ensures the buffer has a gfs2_bufdata attached (allocating one if
 * necessary, with the locks dropped), then journals it via meta_lo_add()
 * under the buffer and log locks.
 */
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{

	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		/*
		 * No bufdata yet.  Drop the locks before allocating
		 * (the allocation may sleep); the page lock serializes
		 * against concurrent b_private attachment, so re-check
		 * it before allocating a second bufdata.
		 */
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		lock_page(bh->b_page);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
		else
			bd = bh->b_private;
		unlock_page(bh->b_page);
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	meta_lo_add(sdp, bd);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
| 252 | |
/**
 * gfs2_trans_add_revoke - Add a revoke to the current transaction
 * @sdp: the filesystem
 * @bd: the bufdata being revoked; must not already be on any list
 *
 * Hands @bd to the log's revoke list via gfs2_add_revoke() and
 * accounts for it in the current transaction.  Caller must be inside
 * a gfs2_trans_begin/end pair with revokes reserved.
 */
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!list_empty(&bd->bd_list));
	gfs2_add_revoke(sdp, bd);
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
}
| 262 | |
| 263 | void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) |
| 264 | { |
| 265 | struct gfs2_bufdata *bd, *tmp; |
| 266 | struct gfs2_trans *tr = current->journal_info; |
| 267 | unsigned int n = len; |
| 268 | |
| 269 | gfs2_log_lock(sdp); |
| 270 | list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_list) { |
| 271 | if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) { |
| 272 | list_del_init(&bd->bd_list); |
| 273 | gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke); |
| 274 | sdp->sd_log_num_revoke--; |
| 275 | kmem_cache_free(gfs2_bufdata_cachep, bd); |
| 276 | tr->tr_num_revoke_rm++; |
| 277 | if (--n == 0) |
| 278 | break; |
| 279 | } |
| 280 | } |
| 281 | gfs2_log_unlock(sdp); |
| 282 | } |
| 283 | |