author	Zhihao Cheng <chengzhihao1@huawei.com>	2022-07-30 19:28:37 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-03-11 13:57:25 +0100
commit	79548ccdd992707879b4b683b7251c58ddf26f12 (patch)
tree	ecb3fd2acb5a6f27436b700a8a12069c1eef9471 /drivers/mtd/ubi
parent	003bb9868a5188ea6a7f34e8495e8d861d2e3b21 (diff)
ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
[ Upstream commit a240bc5c43130c6aa50831d7caaa02a1d84e1bce ]

A wear-leveling entry could be freed in the error path and may then be
accessed again in eraseblk_count_seq_show(), for example:

__erase_worker                          eraseblk_count_seq_show
                                          wl = ubi->lookuptbl[*block_number]
                                          if (wl)
  wl_entry_destroy
    ubi->lookuptbl[e->pnum] = NULL
    kmem_cache_free(ubi_wl_entry_slab, e)
                                          erase_count = wl->ec  // UAF!

Updating and accessing wear-leveling entries in ubi->lookuptbl should be
protected by ubi->wl_lock. Fix it by taking ubi->wl_lock to serialize
wl entry access between wl_entry_destroy() and
eraseblk_count_seq_show().

A reproducer is available at [Link].

Link: https://bugzilla.kernel.org/show_bug.cgi?id=216305
Fixes: 7bccd12d27b7e3 ("ubi: Add debugfs file for tracking PEB state")
Fixes: 801c135ce73d5d ("UBI: Unsorted Block Images")
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>
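To make the serialization pattern concrete, here is a minimal userspace
analogue (illustrative only, not from the patch): a pthread mutex stands
in for ubi->wl_lock and a plain pointer array for ubi->lookuptbl. The
destroy side clears the table slot and frees the entry under the lock,
so a reader holding the same lock sees either a live entry or NULL,
never a dangling pointer.

/*
 * Illustrative userspace analogue of the locking pattern in this fix;
 * NOT kernel code. A pthread mutex stands in for ubi->wl_lock and a
 * plain pointer array for ubi->lookuptbl.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct wl_entry {
	int pnum;	/* physical eraseblock number (table index) */
	int ec;		/* erase count */
};

#define TBL_SIZE 8
static struct wl_entry *entry_tbl[TBL_SIZE];
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Destroy side: clear the slot and free the entry under the lock,
 * mirroring wl_entry_destroy() now being called under ubi->wl_lock. */
static void entry_destroy(struct wl_entry *e)
{
	pthread_mutex_lock(&tbl_lock);
	entry_tbl[e->pnum] = NULL;
	free(e);
	pthread_mutex_unlock(&tbl_lock);
}

/* Reader side: look up and copy the field under the same lock, like
 * eraseblk_count_seq_show(); the entry cannot be freed mid-read. */
static int read_erase_count(int pnum)
{
	int ec = -1;

	pthread_mutex_lock(&tbl_lock);
	if (entry_tbl[pnum])
		ec = entry_tbl[pnum]->ec;
	pthread_mutex_unlock(&tbl_lock);
	return ec;
}

int main(void)
{
	struct wl_entry *e = malloc(sizeof(*e));

	e->pnum = 3;
	e->ec = 42;
	entry_tbl[e->pnum] = e;
	printf("ec=%d\n", read_erase_count(3));	/* prints ec=42 */
	entry_destroy(e);
	printf("ec=%d\n", read_erase_count(3));	/* prints ec=-1, no UAF */
	return 0;
}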
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--	drivers/mtd/ubi/wl.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index afcdacb9d0e9..4a672e925d86 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -886,8 +886,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
 	if (err) {
-		if (e2)
+		if (e2) {
+			spin_lock(&ubi->wl_lock);
 			wl_entry_destroy(ubi, e2);
+			spin_unlock(&ubi->wl_lock);
+		}
 		goto out_ro;
 	}
 
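The hunk above closes the writer side of the race: per the race diagram
in the commit message, wl_entry_destroy() clears the lookuptbl slot and
frees the slab object, so both steps must happen under ubi->wl_lock.
Condensed to the two operations named in the diagram (the real helper
in wl.c performs additional bookkeeping), it amounts to:

/* Condensed from the commit message's race diagram; the real
 * wl_entry_destroy() in wl.c does further accounting. After this
 * patch, callers hold ubi->wl_lock around it. */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;		/* reader now sees NULL ... */
	kmem_cache_free(ubi_wl_entry_slab, e);	/* ... before the free */
}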
@@ -1120,14 +1123,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 		/* Re-schedule the LEB for erasure */
 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
 		if (err1) {
+			spin_lock(&ubi->wl_lock);
 			wl_entry_destroy(ubi, e);
+			spin_unlock(&ubi->wl_lock);
 			err = err1;
 			goto out_ro;
 		}
 		return err;
 	}
 
+	spin_lock(&ubi->wl_lock);
 	wl_entry_destroy(ubi, e);
+	spin_unlock(&ubi->wl_lock);
 	if (err != -EIO)
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
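For reference, the reader side named in the subject line: a condensed
sketch of eraseblk_count_seq_show() in drivers/mtd/ubi/debug.c (not
modified by this patch), assuming it performs the lookup and the
dereference under ubi->wl_lock, as the commit message's serialization
argument requires:

/* Condensed sketch of the debugfs reader (drivers/mtd/ubi/debug.c,
 * unchanged here). Because the lookup and the wl->ec read happen under
 * ubi->wl_lock, the locked wl_entry_destroy() callers above can no
 * longer free the entry between the check and the dereference. */
spin_lock(&ubi->wl_lock);
wl = ubi->lookuptbl[*block_number];
if (wl)
	erase_count = wl->ec;
spin_unlock(&ubi->wl_lock);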