From 351275d9754bf0c647ec288ac3d938c9415d52eb Mon Sep 17 00:00:00 2001
From: Don Brady
Date: Tue, 21 Mar 2023 14:50:29 -0600
Subject: [PATCH] DLPX-84995 NFSD: Never call nfsd_file_gc() in foreground paths (#24)

The checks in nfsd_file_acquire() and nfsd_file_put() that directly
invoke filecache garbage collection are intended to keep cache
occupancy between a low- and high-watermark. The reason to limit the
capacity of the filecache is to keep filecache lookups reasonably
fast.

However, invoking garbage collection at those points has some
undesirable negative impacts. Files that are held open by NFSv4
clients often push the occupancy of the filecache over these
watermarks. At that point:

- Every call to nfsd_file_acquire() and nfsd_file_put() results in an
  LRU walk. This has the same effect on lookup latency as long chains
  in the hash table.

- Garbage collection will then run on every nfsd thread, causing a lot
  of unnecessary lock contention.

- Limiting cache capacity pushes out files used only by NFSv3 clients,
  which are the type of files the filecache is supposed to help.

To address those negative impacts, remove the direct calls to the
garbage collector.
---
 fs/nfsd/filecache.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index fde013b6e05b..77ab99b2f758 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -28,8 +28,6 @@
 #define NFSD_LAUNDRETTE_DELAY		(2 * HZ)
 
 #define NFSD_FILE_SHUTDOWN		(1)
-#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
-#define NFSD_FILE_LRU_LIMIT		(NFSD_FILE_LRU_THRESHOLD << 2)
 
 /* We only care about NFSD_MAY_READ/WRITE for this cache */
 #define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
@@ -65,8 +63,6 @@ static struct delayed_work nfsd_filecache_laundrette;
 static DEFINE_SPINLOCK(laundrette_lock);
 static LIST_HEAD(laundrettes);
 
-static void nfsd_file_gc(void);
-
 static void
 nfsd_file_schedule_laundrette(void)
 {
@@ -306,8 +302,6 @@ nfsd_file_put(struct nfsd_file *nf)
 	nfsd_file_put_noref(nf);
 	if (is_hashed)
 		nfsd_file_schedule_laundrette();
-	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
-		nfsd_file_gc();
 }
 
 struct nfsd_file *
@@ -1007,8 +1001,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
 			nfsd_file_hashtbl[hashval].nfb_count);
 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
-	if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
-		nfsd_file_gc();
+	atomic_long_inc(&nfsd_filecache_count);
 
 	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
 	if (nf->nf_mark)
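
For reviewers, here is a rough standalone sketch of the watermark pattern
the removed lines implemented. The thresholds (4096UL, shifted left by 2
for the high watermark) and the check sites come from the diff above;
everything else (the plain counter, the stub gc(), the acquire()/put()
names, and the main() harness) is an illustrative userspace stand-in, not
the kernel code, which uses atomic_long_t, an LRU list, and per-bucket
locks.

/*
 * Simplified userspace model of the pre-patch behavior: once the
 * filecache occupancy sits above the watermarks, every acquire and
 * every put triggers a synchronous garbage-collection pass in the
 * calling thread.
 */
#include <stdio.h>

#define FILE_LRU_THRESHOLD	4096UL				/* low watermark  */
#define FILE_LRU_LIMIT		(FILE_LRU_THRESHOLD << 2)	/* high watermark */

static unsigned long filecache_count;
static unsigned long gc_runs;

/* Stands in for nfsd_file_gc(): a full LRU walk in the caller's context. */
static void gc(void)
{
	gc_runs++;
}

/* Pre-patch acquire path: bump the count, then GC if over the low mark. */
static void acquire(void)
{
	if (++filecache_count >= FILE_LRU_THRESHOLD)
		gc();
}

/* Pre-patch put path: GC if the count is over the high mark. */
static void put(void)
{
	if (filecache_count >= FILE_LRU_LIMIT)
		gc();
}

int main(void)
{
	/* NFSv4 clients holding 20000 files open keep occupancy above both marks. */
	for (int i = 0; i < 20000; i++)
		acquire();
	for (int i = 0; i < 20000; i++)
		put();

	printf("foreground gc invocations: %lu\n", gc_runs);
	return 0;
}

With the patch applied, nfsd_file_acquire() only increments the counter and
nfsd_file_put() still schedules the laundrette, so no request-handling
thread performs this LRU walk inline.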