summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristian Brauner <brauner@kernel.org>2025-07-14 11:05:07 +0200
committerChristian Brauner <brauner@kernel.org>2025-07-14 11:05:07 +0200
commit86ab0c10090b26e789b7bf477d4b673b5e18e55b (patch)
treed1a47a7ed3f64fbacf3efe00c490f5b6afb463e7
parentfdfe0133473a528e3f5da69c35419ce6711d6b89 (diff)
parent89635eae076cd8eaa5cb752f66538c9dc6c9fdc3 (diff)
Merge patch series "netfs: Fix use of fscache with ceph"
David Howells <dhowells@redhat.com> says: Here are a couple of patches that fix the use of fscache with ceph: (1) Fix the read collector to mark the write request that it creates to copy data to the cache with NETFS_RREQ_OFFLOAD_COLLECTION so that it will run the write collector on a workqueue as it's meant to run in the background and the app isn't going to wait for it. (2) Fix the read collector to wake up the copy-to-cache write request after it sets NETFS_RREQ_ALL_QUEUED if the write request doesn't have any subrequests left on it. ALL_QUEUED indicates that there won't be any more subreqs coming and the collector should clean up - except that an event is needed to trigger that, but it only gets events from subreq termination and so the last event can beat us to setting ALL_QUEUED. * patches from https://lore.kernel.org/20250711151005.2956810-1-dhowells@redhat.com: netfs: Fix race between cache write completion and ALL_QUEUED being set netfs: Fix copy-to-cache so that it performs collection with ceph+fscache Link: https://lore.kernel.org/20250711151005.2956810-1-dhowells@redhat.com Signed-off-by: Christian Brauner <brauner@kernel.org>
-rw-r--r--fs/netfs/read_pgpriv2.c5
-rw-r--r--include/trace/events/netfs.h30
2 files changed, 35 insertions, 0 deletions
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index 5bbe906a551d..8097bc069c1d 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -110,6 +110,8 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
if (!creq->io_streams[1].avail)
goto cancel_put;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
+ trace_netfs_copy2cache(rreq, creq);
trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
rreq->copy_to_cache = creq;
@@ -154,6 +156,9 @@ void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
netfs_issue_write(creq, &creq->io_streams[1]);
smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
+ if (list_empty_careful(&creq->io_streams[1].subrequests))
+ netfs_wake_collector(creq);
netfs_put_request(creq, netfs_rreq_trace_put_return);
creq->copy_to_cache = NULL;
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 73e96ccbe830..64a382fbc31a 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -55,6 +55,7 @@
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_dirty, "DIRTY ") \
EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
EM(netfs_rreq_trace_free, "FREE ") \
EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
EM(netfs_rreq_trace_recollect, "RECLLCT") \
@@ -559,6 +560,35 @@ TRACE_EVENT(netfs_write,
__entry->start, __entry->start + __entry->len - 1)
);
+TRACE_EVENT(netfs_copy2cache,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_request *creq),
+
+ TP_ARGS(rreq, creq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, creq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(rreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->rreq = rreq->debug_id;
+ __entry->creq = creq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = rreq->inode->i_ino;
+ ),
+
+ TP_printk("R=%08x CR=%08x c=%08x i=%x ",
+ __entry->rreq,
+ __entry->creq,
+ __entry->cookie,
+ __entry->ino)
+ );
+
TRACE_EVENT(netfs_collect,
TP_PROTO(const struct netfs_io_request *wreq),