summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2026-01-07 08:27:14 +0100
committerAnna Schumaker <anna.schumaker@oracle.com>2026-01-20 14:49:47 -0500
commit300ca8123c901605eda5eba33c83dc6eb03d0a3c (patch)
tree95757cd820a79ba1bc7831d96372101220be0946
parent0ebe655bd033fd84e312980c9eba199604631e7e (diff)
NFS: return delegations from the end of an LRU when over the watermark
Directly returning delegations on close when over the watermark is rather suboptimal as these delegations are much more likely to be reused than those that have been unused for a long time. Switch to returning unused delegations from a new LRU list when we are above the threshold and there are reclaimable delegations instead. Pass over referenced delegations during the first pass to give delegations that aren't in active use but are frequently used for stat() or similar another chance to not be instantly reclaimed. This scheme works the same as the referenced flags in the VFS inode and dentry caches. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/nfs/delegation.c61
-rw-r--r--include/linux/nfs_fs_sb.h1
3 files changed, 60 insertions, 3 deletions
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 65b3de91b441..62aece00f810 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1062,6 +1062,7 @@ struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->delegations);
spin_lock_init(&server->delegations_lock);
INIT_LIST_HEAD(&server->delegations_return);
+ INIT_LIST_HEAD(&server->delegations_lru);
INIT_LIST_HEAD(&server->layouts);
INIT_LIST_HEAD(&server->state_owners_lru);
INIT_LIST_HEAD(&server->ss_copies);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index d2d2dd745466..848cb55073fc 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -660,6 +660,60 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
return err;
}
+static inline bool nfs_delegations_over_limit(struct nfs_server *server)
+{
+ return !list_empty_careful(&server->delegations_lru) &&
+ atomic_long_read(&server->nr_active_delegations) >
+ nfs_delegation_watermark;
+}
+
+static void nfs_delegations_return_from_lru(struct nfs_server *server)
+{
+ struct nfs_delegation *d, *n;
+ unsigned int pass = 0;
+ bool moved = false;
+
+retry:
+ spin_lock(&server->delegations_lock);
+ list_for_each_entry_safe(d, n, &server->delegations_lru, entry) {
+ if (!nfs_delegations_over_limit(server))
+ break;
+ if (pass == 0 && test_bit(NFS_DELEGATION_REFERENCED, &d->flags))
+ continue;
+ list_move_tail(&d->entry, &server->delegations_return);
+ moved = true;
+ }
+ spin_unlock(&server->delegations_lock);
+
+ /*
+ * If we are still over the limit, try to reclaim referenced delegations
+ * as well.
+ */
+ if (pass == 0 && nfs_delegations_over_limit(server)) {
+ pass++;
+ goto retry;
+ }
+
+ if (moved) {
+ set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ nfs4_schedule_state_manager(server->nfs_client);
+ }
+}
+
+static void nfs_delegation_add_lru(struct nfs_server *server,
+ struct nfs_delegation *delegation)
+{
+ spin_lock(&server->delegations_lock);
+ if (list_empty(&delegation->entry)) {
+ list_add_tail(&delegation->entry, &server->delegations_lru);
+ refcount_inc(&delegation->refcount);
+ }
+ spin_unlock(&server->delegations_lock);
+
+ if (nfs_delegations_over_limit(server))
+ nfs_delegations_return_from_lru(server);
+}
+
static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
{
struct nfs_delegation *d;
@@ -825,6 +879,7 @@ out_unlock:
*/
void nfs4_inode_return_delegation_on_close(struct inode *inode)
{
+ struct nfs_server *server = NFS_SERVER(inode);
struct nfs_delegation *delegation;
bool return_now = false;
@@ -832,9 +887,7 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
if (!delegation)
return;
- if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
- atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
- nfs_delegation_watermark) {
+ if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
spin_lock(&delegation->lock);
if (delegation->inode &&
list_empty(&NFS_I(inode)->open_files) &&
@@ -848,6 +901,8 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
if (return_now) {
nfs_clear_verifier_delegated(inode);
nfs_end_delegation_return(inode, delegation, 0);
+ } else {
+ nfs_delegation_add_lru(server, delegation);
}
nfs_put_delegation(delegation);
}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e377b8c7086e..bb13a294b69e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -261,6 +261,7 @@ struct nfs_server {
struct list_head delegations;
spinlock_t delegations_lock;
struct list_head delegations_return;
+ struct list_head delegations_lru;
atomic_long_t nr_active_delegations;
unsigned int delegation_hash_mask;
struct hlist_head *delegation_hash_table;