/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * Metadata/data are stored on disk in 4k units (blocks), regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * Reclaim runs once reclaimable space reaches 1/4 of the device size or
 * 10GB, whichever is smaller. This keeps recovery from having to scan a
 * very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

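/*
 * Example: on a 100GB log device, device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT
 * is 25GB, which exceeds the 10GB cap above, so max_free_space is clamped to
 * RECLAIM_MAX_FREE_SPACE (see r5l_load_log() below).
 */
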
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space is below
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;
	u64 next_cp_seq;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet
					 * written to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which have settled down in
					 * the log disk */
	struct bio flush_bio;

	struct kmem_cache *io_kc;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.,
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;
};

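/*
 * Life cycle of an io_unit through the lists above, strictly in order:
 *   running_ios -> io_end_ios -> flushing_ios -> finished_ios
 * When the log device needs no cache flush, the two middle lists are skipped
 * and io_units go straight from running_ios to finished_ios (see
 * r5l_log_endio()). Keeping list order equal to log order is what lets
 * reclaim free space from the tail without leaving gaps.
 */
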
/*
 * An IO range starts from a metadata block and ends at the next metadata
 * block. The io_unit's metadata block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with normal writes: as we always
 * flush the log disk first and only then start moving data to the raid
 * disks, there is no need to write an io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio_list bios;
	atomic_t pending_io;	/* pending bios not written to log yet */
	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepts new bios */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};

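/*
 * The log device is used as a circular buffer over [0, device_size) sectors;
 * the two helpers below implement the wrap-around arithmetic. E.g., with
 * device_size == 1024, r5l_ring_add(log, 1020, 8) == 4 and
 * r5l_ring_distance(log, 1020, 4) == 8.
 */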
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	/* We can't handle memory allocation failure so far */
	gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

	io = kmem_cache_zalloc(log->io_kc, gfp);
	io->log = log;
	io->meta_page = alloc_page(gfp | __GFP_ZERO);

	bio_list_init(&io->bios);
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;
	return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
	__free_page(io->meta_page);
	kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
				  enum r5l_io_unit_state state)
{
	struct r5l_io_unit *io;

	while (!list_empty(from)) {
		io = list_first_entry(from, struct r5l_io_unit, log_sibling);
		/* don't change list order */
		if (io->state >= state)
			list_move_tail(&io->log_sibling, to);
		else
			break;
	}
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

/* XXX: totally ignores I/O errors */
static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	bio_put(bio);

	if (!atomic_dec_and_test(&io->pending_io))
		return;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
				      IO_UNIT_IO_END);
	else
		r5l_log_run_stripes(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);
}

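/*
 * With a volatile write cache, completed io_units park on io_end_ios until
 * raid5d issues an explicit cache flush (see r5l_flush_stripe_to_raid());
 * without one, their stripes can be handed back to the raid immediately in
 * the endio path above.
 */
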
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	struct bio *bio;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	while ((bio = bio_list_pop(&io->bios))) {
		/* all IO must start from rdev->data_offset */
		bio->bi_iter.bi_sector += log->rdev->data_offset;
		submit_bio(WRITE, bio);
	}
}

static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io)
{
	struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);

	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->log_start;
	bio->bi_end_io = r5l_log_endio;
	bio->bi_private = io;

	bio_list_add(&io->bios, bio);
	atomic_inc(&io->pending_io);
	return bio;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = r5l_alloc_io_unit(log);

	block = page_address(io->meta_page);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq;

	io->current_bio = r5l_bio_alloc(log, io);
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	log->seq++;
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
	io->log_end = log->log_start;
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

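/*
 * Resulting on-disk layout of one io_unit, in BLOCK_SECTORS (4k) units:
 *
 *   | meta block | data block | ... | parity block(s) |
 *
 * The meta block holds one r5l_payload_data_parity descriptor (location plus
 * per-page checksums) for each data/parity run appended after it.
 */
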
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io)
		log->current_io = r5l_new_meta(log);
	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

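/*
 * payload->size is recorded in sectors: one 4k page is PAGE_SIZE >> 9 == 8
 * sectors, so a data payload stores 8 and a P+Q parity payload stores 16.
 */
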
static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

alloc_bio:
	if (!io->current_bio)
		io->current_bio = r5l_bio_alloc(log, io);

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
		io->current_bio = NULL;
		goto alloc_bio;
	}
	log->log_start = r5l_ring_add(log, log->log_start,
				      BLOCK_SECTORS);
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	io->log_end = log->log_start;
}

static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	r5l_get_meta(log, meta_size);
	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;
}

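/*
 * meta_size example, assuming the packed 16-byte r5l_payload_data_parity
 * descriptor from md_p.h: a full-stripe write on a 6-disk RAID6 logs 4 data
 * pages and 2 parity pages, so meta_size = 4 * (16 + 4) + 16 + 2 * 4 = 104
 * bytes, which easily fits in the one-page meta block.
 */
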
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * This runs in raid5d, where reclaim could wait for raid5d too (when reclaim
 * flushes data from the log to the raid disks), so we shouldn't wait for
 * reclaim here.
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid arrays */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (r5l_has_free_space(log, reserve))
		r5l_log_stripe(log, sh, data_pages, parity_pages);
	else {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	}
	mutex_unlock(&log->io_mutex);

	return 0;
}

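/*
 * reserve example: a full-stripe write on a 6-disk RAID5 sets R5_Wantwrite
 * on five data disks plus the parity disk, so write_disks == 6 and
 * reserve == (1 + 6) << (PAGE_SHIFT - 9) == 56 sectors: one meta block plus
 * six 4k data/parity blocks.
 */
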
void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * We flush the log disk cache first, then write stripe data to the
	 * raid disks. So if bio is finished, the log disk cache is flushed
	 * already. Recovery guarantees we can recover the bio from the log
	 * disk, so we don't need to flush again.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	return r5l_ring_distance(log, log->last_checkpoint,
				 log->next_checkpoint);
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;
		log->next_cp_seq = io->seq;

		list_del(&io->log_sibling);
		r5l_free_io_unit(log, io);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space)
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Start dispatching IO to the raid.
 * The io_units (each led by a meta block) form the log. There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * prevents recovery from finding the meta blocks after it. So if an
 * operation requires a meta block to be persistent in the log, we must make
 * sure every meta block before it is persistent too. A case in point:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe's data/parity must be persistent in the log before
 * we do the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. We only
 * write stripes of an io_unit to the raid disks once that io_unit and every
 * io_unit before it have their data/parity in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_do_reclaim(struct r5l_log *log)
{
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	u64 next_cp_seq;

	spin_lock_irq(&log->io_list_lock);
	/*
	 * We should not change io_unit order: reclaimable and unreclaimable
	 * io_units can be mixed in the lists, and we must not reuse the space
	 * of an unreclaimable io_unit. So wait until enough io_units at the
	 * log tail have reached IO_UNIT_STRIPE_END.
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = log->next_checkpoint;
	next_cp_seq = log->next_cp_seq;
	spin_unlock_irq(&log->io_list_lock);

	BUG_ON(reclaimable < 0);
	if (reclaimable == 0)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery.
	 */
	r5l_write_super(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	log->last_cp_seq = next_cp_seq;
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

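/*
 * The cmpxchg loop above makes reclaim_target a monotonically raised
 * maximum: concurrent callers can only increase it, and r5l_do_reclaim()
 * consumes it atomically with xchg(), so no request is lost.
 */
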
void r5l_quiesce(struct r5l_log *log, int state)
{
	if (!log || state == 2)
		return;
	if (state == 0) {
		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
			log->rdev->mddev, "reclaim");
	} else if (state == 1) {
		/*
		 * at this point all stripes are finished, so the io_units
		 * are at least in STRIPE_END state
		 */
		r5l_wake_reclaim(log, -1L);
		md_unregister_thread(&log->reclaim_thread);
		r5l_do_reclaim(log);
	}
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};

static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
				le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}

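/*
 * Recovery walks forward from last_checkpoint and stops at the first meta
 * block that fails validation (bad magic, version, position, seq or
 * checksum); that block marks the head of the log.
 */
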
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * We did a recovery. Now ctx.pos points to an invalid meta block. The
	 * new log will start here, but we can't let the superblock point to
	 * the last valid meta block. The log might look like:
	 * | meta 1 | meta 2 | meta 3 |
	 * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If
	 * the superblock points to meta 1 and we write a new valid meta 2n at
	 * meta 2's position, then after another crash, recovery will start
	 * from meta 1 and, since meta 2n is valid now, wrongly conclude that
	 * meta 3 is valid too.
	 * The solution is to create the new meta at meta 2's position with
	 * seq == meta 1's seq + 10 and let the superblock point to it. That
	 * way recovery will not take meta 3 for a valid meta block, because
	 * its seq doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure super points to the correct address. The log
		 * might receive data very soon; if super doesn't have the
		 * correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	conf->log = log;
	return 0;
error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

1122 | ||
1123 | void r5l_exit_log(struct r5l_log *log) | |
1124 | { | |
0576b1c6 | 1125 | md_unregister_thread(&log->reclaim_thread); |
f6bed0ef SL |
1126 | kmem_cache_destroy(log->io_kc); |
1127 | kfree(log); | |
1128 | } |