/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

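/*
 * Read the journal entries in a single journal bucket and splice them into
 * @list in sequence number order. Returns 1 if any new entries were added
 * to @list, 0 otherwise, or a negative error code.
 */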
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       struct btree_op *op, unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	pr_debug("reading %llu", (uint64_t) bucket);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS * 8);

		bio_reset(bio);
		bio->bi_sector	= bucket + offset;
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= READ;
		bio->bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &op->cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &op->cl, ca);
		closure_sync(&op->cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(ca->set))
				return ret;

			if (bytes > left << 9)
				return ret;

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j))
				return ret;

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

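/*
 * Locate everything currently in the journal: probe each cache's journal
 * buckets by golden ratio hash, fall back to a linear scan, then binary
 * search for the end of the journal, collecting entries into @list.
 */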
int bch_journal_read(struct cache_set *c, struct list_head *list,
		     struct btree_op *op)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, op, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/* Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 * (2654435769 is 2^32 divided by the golden ratio, i.e.
		 * Fibonacci hashing)
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/* If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = 0; l < ca->sb.njournal_buckets; l++) {
			if (test_bit(l, bitmap))
				continue;

			if (read_bucket(l))
				goto bsearch;
		}
bsearch:
		/* Binary search */
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev,
					      struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/* Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up");
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}

	c->journal.seq = list_entry(list->prev,
				    struct journal_replay,
				    list)->j.seq;

	return 0;
#undef read_bucket
}

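/*
 * Mark everything referenced by the collected journal entries, as garbage
 * collection would: pin the buckets the keys point into, and set up the
 * journal pin refcount for each entry to be replayed.
 */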
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}

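/*
 * Replay the collected journal entries: reinsert every journalled key into
 * the btree, in exactly the order the entries appear in the journal.
 */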
int bch_journal_replay(struct cache_set *s, struct list_head *list,
		       struct btree_op *op)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq)
			pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
			       n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(op->keys.top, k);
			bch_keylist_push(&op->keys);

			op->journal = i->pin;
			atomic_inc(op->journal);

			ret = bch_btree_insert(op, s);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&op->keys));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);

	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}
err:
	closure_sync(&op->cl);
	return ret;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate and is locked if non-NULL:
	 */
	struct btree *b, *best = NULL;
	unsigned iter;

	for_each_cached_btree(b, c, iter) {
		if (!down_write_trylock(&b->lock))
			continue;

		if (!btree_node_dirty(b) ||
		    !btree_current_write(b)->journal) {
			rw_unlock(true, b);
			continue;
		}

		if (!best)
			best = b;
		else if (journal_pin_cmp(c,
					 btree_current_write(best),
					 btree_current_write(b))) {
			rw_unlock(true, best);
			best = b;
		} else
			rw_unlock(true, b);
	}

	if (best)
		goto out;

	/* We can't find the best btree node, just pick the first */
	list_for_each_entry(b, &c->btree_cache, list)
		if (!b->level && btree_node_dirty(b)) {
			best = b;
			rw_lock(true, best, best->level);
			goto found;
		}

out:
	if (!best)
		return;
found:
	if (btree_node_dirty(best))
		bch_btree_node_write(best, NULL);
	rw_unlock(true, best);
}

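/*
 * Sequence number of the oldest journal entry still pinned; there is one
 * entry in the pin fifo per journal entry, the newest matching j->seq.
 */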
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

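/*
 * Issue discards for journal buckets that have been freed (everything
 * between discard_idx and last_idx); ja->discard_in_flight is a small
 * state machine ensuring only one discard is in flight at a time.
 */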
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_sector		= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_size		= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

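/*
 * Free up journal buckets whose entries have all made it to the btree,
 * issue discards for them, and if the journal is out of space allocate
 * the next bucket to write to on each cache device.
 */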
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		return;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	/* bkey_init() only resets the key's header; struct bkey's ptr[] is a
	 * flexible array member, so the pointers filled in above survive. */
	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free =
			c->sb.bucket_size >> c->block_bits;

	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

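/*
 * Open the next journal entry: swap the active journal_write buffer, push a
 * new pin for the new sequence number, and reset the key count.
 */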
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io.cl);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io.cl);
	struct cache_set *c = container_of(j, struct cache_set, journal);

	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);

	if (c->journal_delay_ms)
		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));

	continue_at(cl, journal_write, system_wq);
}

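/*
 * Build and submit the current journal entry: fill in the jset header,
 * checksum it, and issue one write per journal bucket pointer. Called with
 * the journal lock held; drops it before returning.
 */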
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(c);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

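/*
 * Start a journal write if one is not already in flight; with @noflush set,
 * a full journal defers the write instead of issuing it immediately.
 */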
static void __journal_try_write(struct cache_set *c, bool noflush)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io.cl;

	if (!closure_trylock(cl, &c->cl))
		spin_unlock(&c->journal.lock);
	else if (noflush && journal_full(&c->journal)) {
		spin_unlock(&c->journal.lock);
		continue_at(cl, journal_write, system_wq);
	} else
		journal_write_unlocked(cl);
}

#define journal_try_write(c)	__journal_try_write(c, false)

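/*
 * Write out the current journal entry even if it contains no keys, e.g. to
 * persist a new btree root; @cl, if non-NULL, is woken when the write
 * completes.
 */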
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct journal_write *w;

	if (CACHE_SYNC(&c->sb)) {
		spin_lock(&c->journal.lock);

		w = c->journal.cur;
		w->need_write = true;

		if (cl)
			BUG_ON(!closure_wait(&w->wait, cl));

		__journal_try_write(c, true);
	}
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

void bch_journal(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct cache_set *c = op->c;
	struct journal_write *w;
	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;

	if (op->type != BTREE_INSERT ||
	    !CACHE_SYNC(&c->sb))
		goto out;

	/*
	 * If we're looping because we errored, might already be waiting on
	 * another journal write:
	 */
	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
		closure_sync(cl->parent);

	spin_lock(&c->journal.lock);

	if (journal_full(&c->journal)) {
		trace_bcache_journal_full(c);

		closure_wait(&c->journal.wait, cl);

		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}

	w = c->journal.cur;
	w->need_write = true;
	b = __set_blocks(w->data, w->data->keys + n, c);

	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
	    b > c->journal.blocks_free) {
		trace_bcache_journal_entry_full(c);

		/*
		 * XXX: If we were inserting so many keys that they won't fit in
		 * an _empty_ journal write, we'll deadlock. For now, handle
		 * this in bch_keylist_realloc() - but something to think about.
		 */
		BUG_ON(!w->data->keys);

		BUG_ON(!closure_wait(&w->wait, cl));

		closure_flush(&c->journal.io);

		journal_try_write(c);
		continue_at(cl, bch_journal, bcache_wq);
	}

	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
	w->data->keys += n;

	op->journal = &fifo_back(&c->journal.pin);
	atomic_inc(op->journal);

	if (op->flush_journal) {
		closure_flush(&c->journal.io);
		closure_wait(&w->wait, cl->parent);
	}

	journal_try_write(c);
out:
	bch_btree_insert_async(cl);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}