| author | David Howells <dhowells@redhat.com> | 2024-05-29 21:47:07 +0100 |
|---|---|---|
| committer | Christian Brauner <brauner@kernel.org> | 2024-09-12 12:20:40 +0200 |
| commit | cd0277ed0c188dd40e7744e89299af7b78831ca4 (patch) | |
| tree | 7ebbd871e1bfb367a520d139eccca5dd9bdf2ada /fs/netfs/misc.c | |
| parent | c45ebd636c32d33c75e51ce977520ff146bd41a1 (diff) | |
netfs: Use new folio_queue data type and iterator instead of xarray iter
Make the netfs write-side routines use the new folio_queue struct to hold a
rolling buffer of folios instead of an xarray, with the issuer adding folios
at the tail and the collector removing them from the head as they're
processed.
This will allow a subsequent patch to simplify the write collector.
The primary mark (as tested by folioq_is_marked()) is used to note if the
corresponding folio needs putting.
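
As a rough illustration of the rolling-buffer flow described above, the sketch
below uses the folio_queue helpers from this series (folioq_init(),
folioq_full(), folioq_append(), folioq_mark(), folioq_folio(),
folioq_is_marked()); the example_issue_folio() and example_collect() wrappers
are purely illustrative and are not part of this patch.

```c
#include <linux/folio_queue.h>
#include <linux/slab.h>
#include <linux/mm.h>

/*
 * Illustrative issuer side: append a folio at the tail of a rolling buffer,
 * growing the buffer by one folio_queue segment whenever the tail is full.
 * The slot's primary mark records that the folio will need a folio_put()
 * once the collector has finished with it.
 */
static int example_issue_folio(struct folio_queue **tailp, struct folio *folio,
                               bool needs_put)
{
        struct folio_queue *tail = *tailp;
        unsigned int slot;

        if (!tail || folioq_full(tail)) {
                struct folio_queue *fq = kmalloc(sizeof(*fq), GFP_NOFS);

                if (!fq)
                        return -ENOMEM;
                folioq_init(fq);
                fq->prev = tail;
                if (tail)
                        tail->next = fq;
                *tailp = tail = fq;
        }

        slot = folioq_append(tail, folio);
        if (needs_put)
                folioq_mark(tail, slot);        /* primary mark: needs putting */
        return 0;
}

/*
 * Illustrative collector side: consume one segment from the head of the
 * buffer, putting any folio whose slot carries the primary mark, then free
 * the segment and return the new head.
 */
static struct folio_queue *example_collect(struct folio_queue *head)
{
        struct folio_queue *next = head->next;

        for (int slot = 0; slot < folioq_nr_slots(head); slot++) {
                struct folio *folio = folioq_folio(head, slot);

                if (folio && folioq_is_marked(head, slot))
                        folio_put(folio);
        }
        if (next)
                next->prev = NULL;
        kfree(head);
        return next;
}
```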
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-16-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
Diffstat (limited to 'fs/netfs/misc.c')
| -rw-r--r-- | fs/netfs/misc.c | 76 |
1 file changed, 76 insertions, 0 deletions
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index c1f321cf5999..4cd2f84415c1 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,6 +8,82 @@
 #include <linux/swap.h>
 #include "internal.h"
 
+/*
+ * Append a folio to the rolling queue.
+ */
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+                              bool needs_put)
+{
+        struct folio_queue *tail = rreq->buffer_tail;
+        unsigned int slot, order = folio_order(folio);
+
+        if (WARN_ON_ONCE(!rreq->buffer && tail) ||
+            WARN_ON_ONCE(rreq->buffer && !tail))
+                return -EIO;
+
+        if (!tail || folioq_full(tail)) {
+                tail = kmalloc(sizeof(*tail), GFP_NOFS);
+                if (!tail)
+                        return -ENOMEM;
+                netfs_stat(&netfs_n_folioq);
+                folioq_init(tail);
+                tail->prev = rreq->buffer_tail;
+                if (tail->prev)
+                        tail->prev->next = tail;
+                rreq->buffer_tail = tail;
+                if (!rreq->buffer) {
+                        rreq->buffer = tail;
+                        iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+                }
+                rreq->buffer_tail_slot = 0;
+        }
+
+        rreq->io_iter.count += PAGE_SIZE << order;
+
+        slot = folioq_append(tail, folio);
+        /* Store the counter after setting the slot. */
+        smp_store_release(&rreq->buffer_tail_slot, slot);
+        return 0;
+}
+
+/*
+ * Delete the head of a rolling queue.
+ */
+struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
+{
+        struct folio_queue *head = wreq->buffer, *next = head->next;
+
+        if (next)
+                next->prev = NULL;
+        netfs_stat_d(&netfs_n_folioq);
+        kfree(head);
+        wreq->buffer = next;
+        return next;
+}
+
+/*
+ * Clear out a rolling queue.
+ */
+void netfs_clear_buffer(struct netfs_io_request *rreq)
+{
+        struct folio_queue *p;
+
+        while ((p = rreq->buffer)) {
+                rreq->buffer = p->next;
+                for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+                        struct folio *folio = folioq_folio(p, slot);
+                        if (!folio)
+                                continue;
+                        if (folioq_is_marked(p, slot)) {
+                                trace_netfs_folio(folio, netfs_folio_trace_put);
+                                folio_put(folio);
+                        }
+                }
+                netfs_stat_d(&netfs_n_folioq);
+                kfree(p);
+        }
+}
+
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.
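
For orientation, here is a rough sketch of how a caller might drain such a
buffer with the helpers added above, putting marked folios and dropping each
exhausted segment with netfs_delete_buffer_head(); the real write collector
does considerably more, so example_drain_buffer() is only illustrative and
assumes the fs/netfs "internal.h" declarations are in scope.

```c
#include <linux/folio_queue.h>
#include <linux/mm.h>
#include "internal.h"        /* for struct netfs_io_request and the helpers above */

/*
 * Illustrative only: consume the rolling buffer from the head.  Folios whose
 * slots carry the primary mark get a folio_put(), mirroring what
 * netfs_clear_buffer() does, then netfs_delete_buffer_head() frees the
 * segment and advances wreq->buffer to the next one.
 */
static void example_drain_buffer(struct netfs_io_request *wreq)
{
        while (wreq->buffer) {
                struct folio_queue *head = wreq->buffer;

                for (int slot = 0; slot < folioq_nr_slots(head); slot++) {
                        struct folio *folio = folioq_folio(head, slot);

                        if (folio && folioq_is_marked(head, slot))
                                folio_put(folio);
                }
                netfs_delete_buffer_head(wreq);
        }
}
```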
