/* drivers/mtd/ubi/fastmap-wl.c */

/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

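	/* Despite its name, max_ec tracks the lowest erase count seen so
	 * far: pick the least-worn PEB that can serve as anchor. */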
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

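/**
 * anchor_pebs_avalible - check whether the free tree contains a PEB that
 * could serve as a fastmap anchor (pnum < UBI_FM_MAX_START).
 * @root: the RB-tree of free wear-leveling entries
 */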
static int anchor_pebs_avalible(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical erase block for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical erase block and removes it from the
 * wl subsystem; in the anchor case the PEB number is guaranteed to be below
 * UBI_FM_MAX_START.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

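	/* Never dip into the PEBs reserved for bad block handling:
	 * refuse to hand one out unless at least one free PEB remains
	 * beyond the beb_rsvd_pebs reserve. */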
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl subsystem no longer
	 * knows this erase block. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

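	/* Fill the user pool (fm_pool) and the WL pool (fm_wl_pool) in
	 * lockstep; leave the loop once both are full (enough == 2) or
	 * the free tree cannot supply any more PEBs. */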
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and
 * a negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We also check the WL pool here because at this point we can
	 * still refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

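	/* If the user pool is still exhausted after the fastmap update,
	 * retry the whole procedure once before giving up with -ENOSPC. */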
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

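	/* The WL pool still has PEBs; hand out the next one. */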
	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

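	/* Allocate outside of wl_lock since kmalloc() may sleep; on
	 * failure roll wl_scheduled back under the lock. */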
	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very first
	 * time and are now writing a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

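	/* lnum == 0 marks the fastmap super block; everything else is
	 * fastmap data. Account the erasure to the matching volume ID. */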
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

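/**
 * ubi_fastmap_close - flush pending fastmap work, return pool PEBs and
 * release the in-memory fastmap.
 * @ubi: UBI device description object
 */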
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	flush_work(&ubi->fm_work);
	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
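	/* If no fastmap has been written yet and @e could still become
	 * an anchor (pnum < UBI_FM_MAX_START), keep it in reserve and
	 * hand out the next entry in the tree instead. */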
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}