// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

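/*
 * Note: it is assumed (as in the mainline tree) that this file is not
 * compiled standalone but #included from wl.c when CONFIG_MTD_UBI_FASTMAP
 * is set, which is why it can call wl.c's static helpers such as
 * wl_tree_add(), find_wl_entry(), find_mean_wl_entry() and wl_get_wle()
 * directly.
 */
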
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

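/*
 * Sketch of the assumed setup (the hookup itself lives in the WL init
 * path outside this file):
 *
 *	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 *
 * Contexts that must not write a fastmap directly, such as
 * get_peb_for_wl() below, defer it via schedule_work(&ubi->fm_work).
 */
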
/**
 * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree where to look for
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	/* Pick the least-worn entry among the anchor-capable PEBs */
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

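/*
 * Rationale: the attach code scans only the first UBI_FM_MAX_START PEBs
 * for a fastmap superblock, so only PEBs below that limit qualify as
 * anchor candidates; among them the lowest erase counter wins, which
 * spreads wear across the small anchor area.
 */
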
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

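/*
 * Usage within this file: both fastmap pools are drained back into the
 * free tree under wl_lock, e.g. in ubi_refill_pools() below:
 *
 *	spin_lock(&ubi->wl_lock);
 *	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
 *	return_unused_pool_pebs(ubi, &ubi->fm_pool);
 *	...
 *	spin_unlock(&ubi->wl_lock);
 */
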
/**
 * anchor_pebs_available - check whether a free PEB suitable as fastmap
 * anchor is still available.
 * @root: RB-tree of free wear-leveling entries to search
 */
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl subsystem no longer knows
	 * this erase block. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

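/*
 * Call-pattern sketch (assumption: the fastmap writer in fastmap.c is
 * the caller), illustrating the "wl_lock held" rule from the kernel-doc
 * above:
 *
 *	spin_lock(&ubi->wl_lock);
 *	e = ubi_wl_get_fm_peb(ubi, 1);	// 1: request an anchor PEB
 *	spin_unlock(&ubi->wl_lock);
 */
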
/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

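/*
 * Design note: the two pools are refilled in lock step. ubi->fm_pool
 * feeds ubi_wl_get_peb() (user allocations) while ubi->fm_wl_pool feeds
 * get_peb_for_wl() (internal wear-leveling moves), so WL can still make
 * progress when user I/O has drained its own pool. The "< 5" check
 * above stops the WL refill once fewer than five free PEBs remain
 * beyond the beb_rsvd_pebs bad-block reserve.
 */
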
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

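/*
 * Usage sketch (see ubi_wl_get_peb() below): when the user pool is
 * exhausted and a fastmap update did not help, the caller drops its
 * locks and lets pending work (typically erasures) run synchronously:
 *
 *	ret = produce_free_peb(ubi);
 *	if (ret < 0)
 *		return ret;	// nothing left that could free a PEB
 *
 * do_work() is assumed to come from wl.c, which #includes this file.
 */
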
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

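/*
 * Caller sketch (assumption: the EBA layer in eba.c), showing the
 * asymmetric locking: fm_eba_sem is taken in here but released by the
 * caller once the new PEB is referenced on flash:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	// ... write VID header and data to pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */
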
/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

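/*
 * Consumer sketch (assumption: wear_leveling_worker() in wl.c):
 *
 *	e2 = get_peb_for_wl(ubi);
 *	if (!e2)
 *		goto out_cancel;	// pool empty; fm_work refills it
 *
 * Returning NULL instead of refilling inline is what keeps this helper
 * safe to call in atomic context.
 */
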
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

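/*
 * The wrk->anchor flag set above is, by assumption, consumed by
 * wear_leveling_worker() in wl.c: it moves data off a used low-pnum PEB
 * so that anchor_pebs_available() holds again before the next fastmap
 * write.
 */
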
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

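/*
 * Caller sketch (assumption: returning an old fastmap from fastmap.c,
 * where logical block i of the fastmap lives in fm->e[i]):
 *
 *	for (i = 1; i < fm->used_blocks; i++)
 *		ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
 *
 * lnum == 0 thus maps to UBI_FM_SB_VOLUME_ID (the superblock PEB) and
 * everything else to UBI_FM_DATA_VOLUME_ID.
 */
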
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}

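/*
 * Worked example: with fastmap enabled but no fastmap attached yet
 * (!ubi->fm), a mean-EC candidate with e->pnum < UBI_FM_MAX_START is
 * left in the tree as a potential anchor and the entry following the
 * root is handed out instead; once a fastmap exists, or if fastmap is
 * disabled, the candidate is returned unchanged.
 */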