xref: /linux/fs/nfs/fscache.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_FSCACHE

static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);

/*
 * Layout of the key for an NFS server cache object.
 */
struct nfs_server_key {
	struct {
		uint16_t	nfsversion;		/* NFS protocol version */
		uint16_t	family;			/* address family */
		__be16		port;			/* IP port */
	} hdr;
	union {
		struct in_addr	ipv4_addr;	/* IPv4 address */
		struct in6_addr ipv6_addr;	/* IPv6 address */
	};
} __packed;
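
/*
 * Worked example of the key sizes produced by nfs_fscache_get_client_cookie()
 * below (a sketch, assuming the usual sizes of struct in_addr and struct
 * in6_addr): the fixed header is 2 + 2 + 2 = 6 bytes, so an AF_INET server
 * yields a 6 + 4 = 10 byte key and an AF_INET6 server a 6 + 16 = 22 byte key.
 * For instance, an NFSv4 server at 192.168.1.10:2049 would be keyed roughly as:
 *
 *	key.hdr.nfsversion = 4;
 *	key.hdr.family     = AF_INET;
 *	key.hdr.port       = htons(2049);
 *	key.ipv4_addr      = sin->sin_addr;			(4 bytes)
 *	len = sizeof(key.hdr) + sizeof(key.ipv4_addr);		(== 10)
 */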

/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set.
 * - We always try to get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags.
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
	struct nfs_server_key key;
	uint16_t len = sizeof(key.hdr);

	memset(&key, 0, sizeof(key));
	key.hdr.nfsversion = clp->rpc_ops->version;
	key.hdr.family = clp->cl_addr.ss_family;

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		key.hdr.port = sin->sin_port;
		key.ipv4_addr = sin->sin_addr;
		len += sizeof(key.ipv4_addr);
		break;

	case AF_INET6:
		key.hdr.port = sin6->sin6_port;
		key.ipv6_addr = sin6->sin6_addr;
		len += sizeof(key.ipv6_addr);
		break;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		clp->fscache = NULL;
		return;
	}

	/* create a cache index for looking up filehandles */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      &key, len,
					      NULL, 0,
					      clp, 0, true);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}

/*
 * Dispose of a per-client cookie
 */
void nfs_fscache_release_client_cookie(struct nfs_client *clp)
{
	dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);

	fscache_relinquish_cookie(clp->fscache, NULL, false);
	clp->fscache = NULL;
}

/*
 * Get the cache cookie for an NFS superblock.  We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' mount option or by inheriting it from the parent
 * superblock across an automount point of some nature (see the illustration
 * after this function).
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff;

	if (!uniq) {
		uniq = "";
		ulen = 1;
	}

	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       key, sizeof(*key) + ulen,
					       NULL, 0,
					       nfss, 0, true);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS:"
	       " Cache request denied due to non-unique superblock keys\n");
}
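
/*
 * Illustration of the uniquification above (a sketch, not code that is built
 * as part of this file): superblock keys are ordered by the nfs_client
 * pointer, then by memcmp() over the mount parameter block, then by the
 * uniquifier, roughly:
 *
 *	if (a->nfs_client != b->nfs_client)
 *		return a->nfs_client < b->nfs_client ? -1 : 1;
 *	diff = memcmp(&a->key, &b->key, sizeof(a->key));
 *	if (diff)
 *		return diff;
 *	return memcmp(a->key.uniquifier, b->key.uniquifier, a->key.uniq_len);
 *
 * So two mounts of the same export with identical options collide and the
 * second is denied caching, unless they carry distinct 'fsc=xxx' uniquifiers,
 * in which case the final memcmp() tells them apart.
 */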

/*
 * Release a per-superblock cookie
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);

	fscache_relinquish_cookie(nfss->fscache, NULL, false);
	nfss->fscache = NULL;

	if (nfss->fscache_key) {
		spin_lock(&nfs_fscache_keys_lock);
		rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
		spin_unlock(&nfs_fscache_keys_lock);
		kfree(nfss->fscache_key);
		nfss->fscache_key = NULL;
	}
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
	if (!S_ISREG(inode->i_mode))
		return;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);

	if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
		auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);

	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       &nfs_fscache_inode_object_def,
					       nfsi->fh.data, nfsi->fh.size,
					       &auxdata, sizeof(auxdata),
					       nfsi, nfsi->vfs_inode.i_size, false);
}
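
/*
 * Note on the auxiliary data above: fscache stores the mtime/ctime (and, for
 * NFSv4, the raw change attribute) alongside the cached object and hands it
 * back through the check_aux method of nfs_fscache_inode_object_def so that a
 * stale object can be discarded.  A minimal sketch of such a check, assuming
 * the same struct nfs_fscache_inode_auxdata layout used here:
 *
 *	memset(&auxdata, 0, sizeof(auxdata));
 *	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
 *	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
 *	if (memcmp(data, &auxdata, datalen) != 0)
 *		return FSCACHE_CHECKAUX_OBSOLETE;
 *	return FSCACHE_CHECKAUX_OKAY;
 *
 * The real comparison belongs to the object definition, which is defined
 * outside this file.
 */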

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
	fscache_relinquish_cookie(cookie, &auxdata, false);
	nfsi->fscache = NULL;
}

static bool nfs_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	return !inode_is_open_for_write(inode);
}

/*
 * Enable or disable caching, as appropriate, for a file that is being opened.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is currently
 * open for writing (see the summary after this function).
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);

	if (inode_is_open_for_write(inode)) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, &auxdata, true);
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
				      nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
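
/*
 * Summary of the open-time decision above (derived from the code, for
 * illustration only):
 *
 *	inode currently open for write?		result
 *	no					cookie enabled; nfs_fscache_can_enable()
 *						re-checks for a racing writer first
 *	yes					cookie disabled, cached pages uncached
 *
 * NFS_INO_FSCACHE ends up set only when fscache_enable_cookie() actually
 * enabled the cookie.
 */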

/*
 * Release the caching state associated with a page, if the page isn't busy
 * interacting with the cache.
 * - Returns true (can release page) or false (page busy).
 */
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);

		BUG_ON(!cookie);
		dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
			 cookie, page, NFS_I(page->mapping->host));

		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		nfs_inc_fscache_stats(page->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED);
	}

	return 1;
}
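
/*
 * This is normally reached from the NFS ->releasepage() address_space op (a
 * sketch of the wiring, assuming the nfs_release_page() helper in
 * fs/nfs/file.c):
 *
 *	static int nfs_release_page(struct page *page, gfp_t gfp)
 *	{
 *		...
 *		return nfs_fscache_release_page(page, gfp);
 *	}
 *
 * Returning 0 here makes the VM keep the page because the cache is still
 * using it.
 */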

/*
 * Release the caching state associated with a page if undergoing complete page
 * invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_inc_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED);
}

/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* If the read completed successfully, mark the page up to date and
	 * unlock it.  If it failed, try to reissue the read to the server;
	 * only if that also fails do we unlock the page and leave it to the
	 * VM to retry the readpage.
	 */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	} else {
		error = nfs_readpage_async(context, page->mapping->host, page);
		if (error)
			unlock_page(page);
	}
}

/*
 * Retrieve a page from fscache
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache: BIO submitted\n");
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
		dfprintk(FSCACHE,
			 "NFS:    readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS:    readpage_from_fscache %d\n", ret);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
	}
	return ret;
}
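
/*
 * Sketch of how a caller consumes the return value above (illustrative only;
 * the call site is typically nfs_readpage() in fs/nfs/read.c, going through
 * the nfs_readpage_from_fscache() wrapper in fscache.h):
 *
 *	ret = nfs_readpage_from_fscache(ctx, inode, page);
 *	if (ret == 0)
 *		return ret;	// cache owns the page; completion unlocks it
 *	// ret > 0 or an error: fall back to reading from the server
 *	ret = nfs_readpage_async(ctx, inode, page);
 *
 * The 0/1/negative convention mirrors fscache_read_or_alloc_page().
 */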

/*
 * Retrieve a set of pages from fscache
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	unsigned npages = *nr_pages;
	int ret;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 nfs_i_fscache(inode), npages, inode);

	ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
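	/* On return, pages that were submitted to the cache have been removed
	 * from @pages and *nr_pages holds the count of those left over; the
	 * leftovers must be read from the server by the caller.
	 */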
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");

		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret  %d\n", ret);
	}

	return ret;
}

/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, sync);

	ret = fscache_write_page(nfs_i_fscache(inode), page,
				 inode->i_size, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);

	if (ret != 0) {
		fscache_uncache_page(nfs_i_fscache(inode), page);
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
	} else {
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
	}
}
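
/*
 * Sketch of the expected calling pattern (illustrative; the actual call site
 * is the read completion path in fs/nfs/read.c, via the
 * nfs_readpage_to_fscache() wrapper): once a page has been filled from the
 * server and marked up to date, it is copied into the cache if caching is
 * enabled for the inode, e.g.:
 *
 *	if (PageUptodate(page))
 *		nfs_readpage_to_fscache(inode, page, 0);
 *
 * On a write failure above, the page is uncached so that PG_fscache is
 * cleared and later release/invalidate paths do not treat it as cached.
 */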
507