diff options
| -rw-r--r-- | fs/iomap/ioend.c | 46 |
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c index e4d57cb969f1..4d1ef8a2cee9 100644 --- a/fs/iomap/ioend.c +++ b/fs/iomap/ioend.c @@ -69,11 +69,57 @@ static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend) return folio_count; } +static DEFINE_SPINLOCK(failed_ioend_lock); +static LIST_HEAD(failed_ioend_list); + +static void +iomap_fail_ioends( + struct work_struct *work) +{ + struct iomap_ioend *ioend; + struct list_head tmp; + unsigned long flags; + + spin_lock_irqsave(&failed_ioend_lock, flags); + list_replace_init(&failed_ioend_list, &tmp); + spin_unlock_irqrestore(&failed_ioend_lock, flags); + + while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend, + io_list))) { + list_del_init(&ioend->io_list); + iomap_finish_ioend_buffered(ioend); + cond_resched(); + } +} + +static DECLARE_WORK(failed_ioend_work, iomap_fail_ioends); + +static void iomap_fail_ioend_buffered(struct iomap_ioend *ioend) +{ + unsigned long flags; + + /* + * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions + * in the fserror code. The caller no longer owns the ioend reference + * after the spinlock drops. + */ + spin_lock_irqsave(&failed_ioend_lock, flags); + if (list_empty(&failed_ioend_list)) + WARN_ON_ONCE(!schedule_work(&failed_ioend_work)); + list_add_tail(&ioend->io_list, &failed_ioend_list); + spin_unlock_irqrestore(&failed_ioend_lock, flags); +} + static void ioend_writeback_end_bio(struct bio *bio) { struct iomap_ioend *ioend = iomap_ioend_from_bio(bio); ioend->io_error = blk_status_to_errno(bio->bi_status); + if (ioend->io_error) { + iomap_fail_ioend_buffered(ioend); + return; + } + iomap_finish_ioend_buffered(ioend); } |
