/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
static struct workqueue_struct *dirty_wq;

static void read_dirty(struct closure *);

struct dirty_io {
        struct closure          cl;
        struct cached_dev       *dc;
        struct bio              bio;
};
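/*
 * A dirty_io (above) tracks one key being written back: the closure sequences
 * the cache read, the backing device write and the btree update, and the
 * embedded bio (with its bio_vecs allocated inline after the struct) is
 * reused for both the read and the write.
 */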
/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
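        /*
         * Writeback rate is driven by a proportional-derivative controller:
         * "target" is this device's share of the dirty-data budget
         * (writeback_percent of the cache), "error" is how far the current
         * dirty count is from that target, and "change" nudges
         * writeback_rate.rate towards it, damped by the (smoothed) derivative
         * of the dirty count so the rate does not oscillate.
         *
         * Rough worked example (assumed numbers, not taken from this file):
         * with a single backing device, writeback_percent = 10 and a 100 GiB
         * cache, target is ~10 GiB of dirty data; sitting 1 GiB over target
         * (10% relative error) and ignoring the derivative term, each update
         * raises the rate by about rate * 0.1 / writeback_rate_p_term_inverse,
         * i.e. roughly rate / 640 with the default p_term_inverse of 64.
         */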
        struct cache_set *c = dc->disk.c;
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
                                   c->cached_dev_sectors);

        /* PD controller */

        int change = 0;
        int64_t error;
        int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty);
        int64_t derivative = dirty - dc->disk.sectors_dirty_last;

        dc->disk.sectors_dirty_last = dirty;

        derivative *= dc->writeback_rate_d_term;
        derivative = clamp(derivative, -dirty, dirty);

        derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
                              dc->writeback_rate_d_smooth, 0);
        /* Avoid divide by zero */
        if (!target)
                goto out;

        error = div64_s64((dirty + derivative - target) << 8, target);
        change = div_s64((dc->writeback_rate.rate * error) >> 8,
                         dc->writeback_rate_p_term_inverse);
        /* Don't increase writeback rate if the device isn't keeping up */
        if (change > 0 &&
            time_after64(local_clock(),
                         dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
                change = 0;

        dc->writeback_rate.rate =
                clamp_t(int64_t, dc->writeback_rate.rate + change,
                        1, NSEC_PER_MSEC);
out:
        dc->writeback_rate_derivative = derivative;
        dc->writeback_rate_change = change;
        dc->writeback_rate_target = target;

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}
static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);
}
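/*
 * writeback_delay() converts the number of sectors just issued into how long
 * read_dirty() should wait before issuing more, presumably so that sustained
 * throughput tracks writeback_rate (bch_next_delay() is defined elsewhere in
 * bcache). A detaching device or writeback_percent == 0 means no throttling.
 */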
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (atomic_read(&dc->disk.detaching) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
}
/* Background writeback */

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        return KEY_DIRTY(k);
}
static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio);
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_size            = KEY_SIZE(&w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
        bio->bi_private         = w;
        bio->bi_io_vec          = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}
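/*
 * Writeback pipeline below: refill_dirty() scans the btree for this device's
 * dirty keys and fills dc->writeback_keys; read_dirty() walks that buffer and
 * reads each extent from the cache, throttled by writeback_delay();
 * write_dirty() writes the data to the backing device; write_dirty_finish()
 * updates the btree to mark the key clean. The stages hand off to each other
 * with continue_at() on dirty_wq.
 */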
static void refill_dirty(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev,
                                             writeback.cl);
        struct keybuf *buf = &dc->writeback_keys;
        bool searched_from_start = false;
        struct bkey end = MAX_KEY;
        SET_KEY_INODE(&end, dc->disk.id);

        if (!atomic_read(&dc->disk.detaching) &&
            !dc->writeback_running)
                closure_return(cl);

        down_write(&dc->writeback_lock);

        if (!atomic_read(&dc->has_dirty)) {
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                bch_write_bdev_super(dc, NULL);

                up_write(&dc->writeback_lock);
                closure_return(cl);
        }
        if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
                buf->last_scanned = KEY(dc->disk.id, 0, 0);
                searched_from_start = true;
        }

        bch_refill_keybuf(dc->disk.c, buf, &end);

        if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
                /* Searched the entire btree - delay awhile */

                if (RB_EMPTY_ROOT(&buf->keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        cached_dev_put(dc);
                }

                if (!atomic_read(&dc->disk.detaching))
                        closure_delay(&dc->writeback, dc->writeback_delay * HZ);
        }

        up_write(&dc->writeback_lock);

        ratelimit_reset(&dc->writeback_rate);

        /* Punt to workqueue only so we don't recurse and blow the stack */
        continue_at(cl, read_dirty, dirty_wq);
}
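/*
 * bch_writeback_queue() and bch_writeback_add() are the entry points from the
 * foreground write path: bch_writeback_add() accounts newly dirtied sectors,
 * marks the backing device dirty in its superblock on the first dirty write,
 * and kicks off the refill_dirty() -> read_dirty() loop (after a delay of
 * writeback_delay seconds unless the device is detaching).
 */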
void bch_writeback_queue(struct cached_dev *dc)
{
        if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
                if (!atomic_read(&dc->disk.detaching))
                        closure_delay(&dc->writeback, dc->writeback_delay * HZ);

                continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
        }
}
void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
{
        atomic_long_add(sectors, &dc->disk.sectors_dirty);

        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
                atomic_inc(&dc->count);

                if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
                        /* XXX: should do this synchronously */
                        bch_write_bdev_super(dc, NULL);
                }

                bch_writeback_queue(dc);

                if (dc->writeback_percent)
                        schedule_delayed_work(&dc->writeback_rate_update,
                                dc->writeback_rate_update_seconds * HZ);
        }
}
/* Background writeback - IO loop */

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}
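/*
 * write_dirty_finish() runs once the backing device write has completed: it
 * frees the data pages and, if the IO did not error (signalled, per the
 * comment below, by the key still being marked dirty), reinserts the key with
 * the dirty bit cleared using a BTREE_REPLACE operation, so that if the
 * extent was rewritten while the writeback was in flight the stale insert
 * fails and is counted in writeback_keys_failed instead of clobbering the
 * new data.
 */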
static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;
        struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt);

        while (bv-- != io->bio.bi_io_vec)
                __free_page(bv->bv_page);
        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                unsigned i;
                struct btree_op op;
                bch_btree_op_init_stack(&op);

                op.type = BTREE_REPLACE;
                bkey_copy(&op.replace, &w->key);

                SET_KEY_DIRTY(&w->key, false);
                bch_keylist_add(&op.keys, &w->key);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                pr_debug("clearing %s", pkey(&w->key));
                bch_btree_insert(&op, dc->disk.c);
                closure_sync(&op.cl);

                atomic_long_inc(op.insert_collision
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }
        bch_keybuf_del(&dc->writeback_keys, w);
        atomic_dec_bug(&dc->in_flight);

        closure_wake_up(&dc->writeback_wait);

        closure_return_with_destructor(cl, dirty_io_destructor);
}
static void dirty_endio(struct bio *bio, int error)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (error)
                SET_KEY_DIRTY(&w->key, false);

        closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;

        dirty_init(w);
        io->bio.bi_rw           = WRITE;
        io->bio.bi_sector       = KEY_START(&w->key);
        io->bio.bi_bdev         = io->dc->bdev;
        io->bio.bi_end_io       = dirty_endio;

        trace_bcache_write_dirty(&io->bio);
        closure_bio_submit(&io->bio, cl, &io->dc->disk);

        continue_at(cl, write_dirty_finish, dirty_wq);
}
static void read_dirty_endio(struct bio *bio, int error)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            error, "reading dirty data from cache");

        dirty_endio(bio, error);
}
static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        trace_bcache_read_dirty(&io->bio);
        closure_bio_submit(&io->bio, cl, &io->dc->disk);

        continue_at(cl, write_dirty, dirty_wq);
}
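/*
 * read_dirty() below is the main loop: it pulls keys from the keybuf, sleeps
 * for the delay computed by writeback_delay() (the sleep is skipped for small
 * delays on sequential extents so contiguous dirty data goes out in one go),
 * allocates a dirty_io plus pages, and starts the read from the cache device
 * via read_dirty_submit(); at most 64 of these are kept in flight at once.
 */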
static void read_dirty(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev,
                                             writeback.cl);
        unsigned delay = writeback_delay(dc, 0);
        struct keybuf_key *w;
        struct dirty_io *io;

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        while (1) {
                w = bch_keybuf_next(&dc->writeback_keys);
                if (!w)
                        break;

                BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

                if (delay > 0 &&
                    (KEY_START(&w->key) != dc->last_read ||
                     jiffies_to_msecs(delay) > 50)) {
                        w->private = NULL;

                        closure_delay(&dc->writeback, delay);
                        continue_at(cl, read_dirty, dirty_wq);
                }
                dc->last_read = KEY_OFFSET(&w->key);

                io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private      = io;
                io->dc          = dc;

                dirty_init(w);
                io->bio.bi_sector       = PTR_OFFSET(&w->key, 0);
                io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                    &w->key, 0)->bdev;
                io->bio.bi_rw           = READ;
                io->bio.bi_end_io       = read_dirty_endio;

                if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                        goto err_free;

                pr_debug("%s", pkey(&w->key));

                closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);

                delay = writeback_delay(dc, KEY_SIZE(&w->key));

                atomic_inc(&dc->in_flight);

                if (!closure_wait_event(&dc->writeback_wait, cl,
                                        atomic_read(&dc->in_flight) < 64))
                        continue_at(cl, read_dirty, dirty_wq);
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        refill_dirty(cl);
}
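/*
 * Defaults used below: target at most 10% of the cache dirty for this device,
 * wait 30 seconds before re-scanning the btree once writeback goes idle, and
 * re-run the PD controller every 30 seconds; writeback_rate_d_term,
 * writeback_rate_p_term_inverse and writeback_rate_d_smooth are the
 * controller constants used by __update_writeback_rate().
 */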
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        closure_init_unlocked(&dc->writeback);
        init_rwsem(&dc->writeback_lock);

        bch_keybuf_init(&dc->writeback_keys, dirty_pred);

        dc->writeback_metadata          = true;
        dc->writeback_running           = true;
        dc->writeback_percent           = 10;
        dc->writeback_delay             = 30;
        dc->writeback_rate.rate         = 1024;

        dc->writeback_rate_update_seconds = 30;
        dc->writeback_rate_d_term       = 16;
        dc->writeback_rate_p_term_inverse = 64;
        dc->writeback_rate_d_smooth     = 8;

        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}
void bch_writeback_exit(void)
{
        if (dirty_wq)
                destroy_workqueue(dirty_wq);
}

int __init bch_writeback_init(void)
{
        dirty_wq = create_singlethread_workqueue("bcache_writeback");
        if (!dirty_wq)
                return -ENOMEM;

        return 0;
}