Commit 06b68ba1 authored by Artem Bityutskiy

UBI: create ubi_wl_entry slab on initialization

As with ltree_entry_slab, it makes more sense to create and
destroy the ubi_wl_entry slab at module initialization/exit.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
parent 3a8d4642
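The change follows the standard kernel pattern of tying a kmem_cache to the module's lifetime instead of creating it on the first device attach and destroying it on the last detach. A minimal sketch of that pattern, with illustrative names (example_slab and struct example_entry are not part of the UBI code):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_entry {
	int key;
};

static struct kmem_cache *example_slab;

static int __init example_init(void)
{
	/* Create the cache once, when the module loads. */
	example_slab = kmem_cache_create("example_slab",
					 sizeof(struct example_entry),
					 0, 0, NULL);
	if (!example_slab)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Destroy it once, when the module unloads. */
	kmem_cache_destroy(example_slab);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Owning the cache at module scope also removes any need to count attached devices to decide when the cache should exist, which is exactly the ubi_devices_cnt logic this patch deletes from wl.c below.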
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -70,6 +70,10 @@ struct class *ubi_class;
 /* Slab cache for lock-tree entries */
 struct kmem_cache *ubi_ltree_slab;
 
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
 {
@@ -732,6 +736,12 @@ static int __init ubi_init(void)
 	if (!ubi_ltree_slab)
 		goto out_version;
 
+	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+					      sizeof(struct ubi_wl_entry),
+					      0, 0, NULL);
+	if (!ubi_wl_entry_slab)
+		goto out_ltree;
+
 	/* Attach MTD devices */
 	for (i = 0; i < mtd_devs; i++) {
 		struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -747,6 +757,8 @@ static int __init ubi_init(void)
 out_detach:
 	for (k = 0; k < i; k++)
 		detach_mtd_dev(ubi_devices[k]);
+	kmem_cache_destroy(ubi_wl_entry_slab);
+out_ltree:
 	kmem_cache_destroy(ubi_ltree_slab);
 out_version:
 	class_remove_file(ubi_class, &ubi_version);
@@ -762,6 +774,7 @@ static void __exit ubi_exit(void)
 	for (i = 0; i < n; i++)
 		detach_mtd_dev(ubi_devices[i]);
 
+	kmem_cache_destroy(ubi_wl_entry_slab);
 	kmem_cache_destroy(ubi_ltree_slab);
 	class_remove_file(ubi_class, &ubi_version);
 	class_destroy(ubi_class);
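The new out_ltree label slots into ubi_init()'s stacked-goto unwinding: each acquired resource gets a label that releases it, and a failure at any step jumps to the deepest label whose resource already exists, falling through the remaining labels in reverse acquisition order. A userspace sketch of the same shape, using hypothetical names (init_sketch, attach_devices) with malloc/free standing in for cache create/destroy:

#include <stdlib.h>

static int attach_devices(void)
{
	return -1;	/* pretend device attach failed */
}

static int init_sketch(void)
{
	void *ltree, *wl_entry;

	ltree = malloc(32);		/* like creating ubi_ltree_slab */
	if (!ltree)
		return -1;

	wl_entry = malloc(32);		/* like creating ubi_wl_entry_slab */
	if (!wl_entry)
		goto out_ltree;		/* only ltree exists at this point */

	if (attach_devices())
		goto out_wl;		/* undo wl_entry, then fall through */

	return 0;

out_wl:
	free(wl_entry);
out_ltree:
	free(ltree);
	return -1;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}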
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -97,6 +97,22 @@ enum {
 extern int ubi_devices_cnt;
 extern struct ubi_device *ubi_devices[];
 
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+	struct rb_node rb;
+	int ec;
+	int pnum;
+};
+
 /**
  * struct ubi_ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
@@ -382,6 +398,7 @@ struct ubi_device {
 };
 
 extern struct kmem_cache *ubi_ltree_slab;
+extern struct kmem_cache *ubi_wl_entry_slab;
 extern struct file_operations ubi_cdev_operations;
 extern struct file_operations ubi_vol_cdev_operations;
 extern struct class *ubi_class;
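Moving struct ubi_wl_entry from wl.c into the shared ubi.h is a prerequisite, not a cosmetic change: build.c now evaluates sizeof(struct ubi_wl_entry), and sizeof requires the complete type, so a forward declaration would not be enough. A small illustration of the constraint, with hypothetical names (shared.h, struct entry):

/* shared.h (hypothetical): the complete definition both .c files need */
struct entry {
	struct rb_node_like { void *p; } rb;
	int ec;
	int pnum;
};

/* creator.c (hypothetical) */
#include <stddef.h>

/* Legal only because the full definition above is in scope; with a bare
 * "struct entry;" forward declaration this line would fail to compile. */
const size_t entry_size = sizeof(struct entry);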
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -116,21 +116,6 @@
  */
 #define WL_MAX_FAILURES 32
 
-/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
 /**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -878,14 +860,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
+			kmem_cache_free(ubi_wl_entry_slab, e2);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e1, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
+		kmem_cache_free(ubi_wl_entry_slab, e1);
 		ubi_ro_mode(ubi);
 	}
 
@@ -920,14 +902,14 @@ error:
 		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
 		err = schedule_erase(ubi, e1, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
+			kmem_cache_free(ubi_wl_entry_slab, e1);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e2, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
+		kmem_cache_free(ubi_wl_entry_slab, e2);
 		ubi_ro_mode(ubi);
 	}
 
@@ -1020,7 +1002,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1031,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1294,7 +1276,7 @@ static void tree_destroy(struct rb_root *root)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1407,14 +1389,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		return err;
 	}
 
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
@@ -1423,7 +1397,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
@@ -1431,7 +1405,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1413,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
@@ -1453,7 +1427,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
@@ -1461,7 +1435,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1470,7 +1444,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
@@ -1510,8 +1484,6 @@ out_free:
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
@@ -1541,7 +1513,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1565,8 +1537,6 @@ void ubi_wl_close(struct ubi_device *ubi)
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
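With the cache owned by the module, wl.c drops the ubi_devices_cnt bookkeeping that used to create the slab on the first attach and destroy it on the last detach; call sites now simply allocate and free entries. A hedged sketch of the per-entry usage, assuming the module-level cache already exists (wl_entry_example is illustrative, not a function in this patch):

/* Kernel-style sketch; error paths trimmed for brevity. */
static int wl_entry_example(int pnum, int ec)
{
	struct ubi_wl_entry *e;

	e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->pnum = pnum;	/* physical eraseblock number */
	e->ec = ec;	/* erase counter */

	/* In the real code, e would be inserted into one of the WL
	 * RB-trees or handed to schedule_erase(); here we just free it. */
	kmem_cache_free(ubi_wl_entry_slab, e);
	return 0;
}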