1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/9p/vfs_file.c
4  *
 * This file contains vfs file ops for 9P2000.
6  *
7  *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
8  *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/errno.h>
13 #include <linux/fs.h>
14 #include <linux/sched.h>
15 #include <linux/file.h>
16 #include <linux/stat.h>
17 #include <linux/string.h>
18 #include <linux/inet.h>
19 #include <linux/list.h>
20 #include <linux/pagemap.h>
21 #include <linux/utsname.h>
22 #include <linux/uaccess.h>
23 #include <linux/idr.h>
24 #include <linux/uio.h>
25 #include <linux/slab.h>
26 #include <net/9p/9p.h>
27 #include <net/9p/client.h>
28 
29 #include "v9fs.h"
30 #include "v9fs_vfs.h"
31 #include "fid.h"
32 #include "cache.h"
33 
34 static const struct vm_operations_struct v9fs_file_vm_ops;
35 static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
36 
37 /**
38  * v9fs_file_open - open a file (or directory)
39  * @inode: inode to be opened
40  * @file: file being opened
41  *
42  */
43 
int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid, *writeback_fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	/* translate VFS open flags to the wire encoding of the negotiated
	 * protocol variant (9P2000.L vs. 9P2000/9P2000.u) */
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		/* no fid attached yet: clone the dentry's fid and open it
		 * on the server */
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		/* legacy 9P2000 cannot express O_APPEND on the wire, so
		 * emulate it by seeking to EOF at open time */
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during open time instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		writeback_fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(writeback_fid)) {
			err = PTR_ERR(writeback_fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) writeback_fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		v9fs_cache_inode_set_cookie(inode, file);
	v9fs_open_fid_add(inode, fid);
	return 0;
out_error:
	/* NOTE(review): this clunks the fid even when it was already present
	 * in file->private_data on entry (i.e. not cloned above) — confirm
	 * callers that pre-set private_data expect ownership transfer here */
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}
106 
107 /**
108  * v9fs_file_lock - lock a file (or directory)
109  * @filp: file to be locked
110  * @cmd: lock command
111  * @fl: file lock structure
112  *
113  * Bugs: this looks like a local only lock, we should extend into 9P
114  *       by using open exclusive
115  */
116 
v9fs_file_lock(struct file * filp,int cmd,struct file_lock * fl)117 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
118 {
119 	int res = 0;
120 	struct inode *inode = file_inode(filp);
121 
122 	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
123 
124 	/* No mandatory locks */
125 	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
126 		return -ENOLCK;
127 
128 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
129 		filemap_write_and_wait(inode->i_mapping);
130 		invalidate_mapping_pages(&inode->i_data, 0, -1);
131 	}
132 
133 	return res;
134 }
135 
/* Issue a POSIX byte-range lock over 9P2000.L (TLOCK).  The lock is first
 * taken locally; blocking requests are retried while the server reports
 * P9_LOCK_BLOCKED, and on server-side failure the local lock is reverted
 * so client and server state stay consistent.
 */
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	/* only POSIX locks may reach this helper (flock requests are
	 * converted to POSIX by the caller) */
	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	/* record the lock locally before telling the server */
	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	/* 9P encodes "to end of file" as length 0 */
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
	 * for lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		/* NOTE(review): redundant — the break above guarantees
		 * status == P9_LOCK_BLOCKED at this point */
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		/* interrupted by a signal: stop retrying */
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * incase server returned error for lock request, revert
	 * it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	/* free a server-allocated client_id left over from the last reply */
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}
241 
/* Test for a conflicting byte-range lock (F_GETLK): check local locks
 * first, then query the server via TGETLOCK if none conflicts.
 */
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	/* 9P encodes "to end of file" as length 0 */
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	/* a conflicting lock exists on the server: report its range/owner */
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		/* negated pid — presumably marks the owner as remote;
		 * confirm against fs/locks.c F_GETLK conventions */
		fl->fl_pid = -glock.proc_id;
	}
out:
	/* the server reply may have replaced client_id with an allocation */
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}
298 
299 /**
300  * v9fs_file_lock_dotl - lock a file (or directory)
301  * @filp: file to be locked
302  * @cmd: lock command
303  * @fl: file lock structure
304  *
305  */
306 
v9fs_file_lock_dotl(struct file * filp,int cmd,struct file_lock * fl)307 static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
308 {
309 	struct inode *inode = file_inode(filp);
310 	int ret = -ENOLCK;
311 
312 	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
313 		 filp, cmd, fl, filp);
314 
315 	/* No mandatory locks */
316 	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
317 		goto out_err;
318 
319 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
320 		filemap_write_and_wait(inode->i_mapping);
321 		invalidate_mapping_pages(&inode->i_data, 0, -1);
322 	}
323 
324 	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
325 		ret = v9fs_file_do_lock(filp, cmd, fl);
326 	else if (IS_GETLK(cmd))
327 		ret = v9fs_file_getlock(filp, fl);
328 	else
329 		ret = -EINVAL;
330 out_err:
331 	return ret;
332 }
333 
334 /**
335  * v9fs_file_flock_dotl - lock a file
336  * @filp: file to be locked
337  * @cmd: lock command
338  * @fl: file lock structure
339  *
340  */
341 
v9fs_file_flock_dotl(struct file * filp,int cmd,struct file_lock * fl)342 static int v9fs_file_flock_dotl(struct file *filp, int cmd,
343 	struct file_lock *fl)
344 {
345 	struct inode *inode = file_inode(filp);
346 	int ret = -ENOLCK;
347 
348 	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
349 		 filp, cmd, fl, filp);
350 
351 	/* No mandatory locks */
352 	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
353 		goto out_err;
354 
355 	if (!(fl->fl_flags & FL_FLOCK))
356 		goto out_err;
357 
358 	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
359 		filemap_write_and_wait(inode->i_mapping);
360 		invalidate_mapping_pages(&inode->i_data, 0, -1);
361 	}
362 	/* Convert flock to posix lock */
363 	fl->fl_flags |= FL_POSIX;
364 	fl->fl_flags ^= FL_FLOCK;
365 
366 	if (IS_SETLK(cmd) | IS_SETLKW(cmd))
367 		ret = v9fs_file_do_lock(filp, cmd, fl);
368 	else
369 		ret = -EINVAL;
370 out_err:
371 	return ret;
372 }
373 
/**
 * v9fs_file_read_iter - read from a file
 * @iocb: kernel I/O control block
 * @to: destination iov_iter to read data into
 *
 */
382 
383 static ssize_t
v9fs_file_read_iter(struct kiocb * iocb,struct iov_iter * to)384 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
385 {
386 	struct p9_fid *fid = iocb->ki_filp->private_data;
387 	int ret, err = 0;
388 
389 	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
390 		 iov_iter_count(to), iocb->ki_pos);
391 
392 	if (iocb->ki_filp->f_flags & O_NONBLOCK)
393 		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
394 	else
395 		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
396 	if (!ret)
397 		return err;
398 
399 	iocb->ki_pos += ret;
400 	return ret;
401 }
402 
/**
 * v9fs_file_write_iter - write to a file
 * @iocb: kernel I/O control block
 * @from: source iov_iter to write data from
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	/* standard VFS checks (limits, O_APPEND positioning, etc.) */
	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;
		/* drop cached pages covering the written range so later
		 * reads refetch current data from the server */
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		/* extend the cached size if we wrote past EOF */
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	/* nothing written: propagate the client error (or 0) */
	return err;
}
448 
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct p9_wstat wstat;
	int ret;

	/* flush dirty pages in the requested range first */
	ret = file_write_and_wait_range(filp, start, end);
	if (ret != 0)
		return ret;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	/* a blank wstat is the legacy-9P idiom asking the server to sync */
	v9fs_blank_wstat(&wstat);
	ret = p9_client_wstat(filp->private_data, &wstat);
	inode_unlock(inode);

	return ret;
}
472 
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret;

	/* flush dirty pages in the requested range first */
	ret = file_write_and_wait_range(filp, start, end);
	if (ret != 0)
		return ret;

	inode_lock(inode);
	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);

	/* 9P2000.L has a native fsync request */
	ret = p9_client_fsync(filp->private_data, datasync);
	inode_unlock(inode);

	return ret;
}
494 
495 static int
v9fs_file_mmap(struct file * filp,struct vm_area_struct * vma)496 v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
497 {
498 	int retval;
499 
500 
501 	retval = generic_file_mmap(filp, vma);
502 	if (!retval)
503 		vma->vm_ops = &v9fs_file_vm_ops;
504 
505 	return retval;
506 }
507 
508 static int
v9fs_mmap_file_mmap(struct file * filp,struct vm_area_struct * vma)509 v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
510 {
511 	int retval;
512 	struct inode *inode;
513 	struct v9fs_inode *v9inode;
514 	struct p9_fid *fid;
515 
516 	inode = file_inode(filp);
517 	v9inode = V9FS_I(inode);
518 	mutex_lock(&v9inode->v_mutex);
519 	if (!v9inode->writeback_fid &&
520 	    (vma->vm_flags & VM_SHARED) &&
521 	    (vma->vm_flags & VM_WRITE)) {
522 		/*
523 		 * clone a fid and add it to writeback_fid
524 		 * we do it during mmap instead of
525 		 * page dirty time via write_begin/page_mkwrite
526 		 * because we want write after unlink usecase
527 		 * to work.
528 		 */
529 		fid = v9fs_writeback_fid(file_dentry(filp));
530 		if (IS_ERR(fid)) {
531 			retval = PTR_ERR(fid);
532 			mutex_unlock(&v9inode->v_mutex);
533 			return retval;
534 		}
535 		v9inode->writeback_fid = (void *) fid;
536 	}
537 	mutex_unlock(&v9inode->v_mutex);
538 
539 	retval = generic_file_mmap(filp, vma);
540 	if (!retval)
541 		vma->vm_ops = &v9fs_mmap_file_vm_ops;
542 
543 	return retval;
544 }
545 
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	/* a writeback fid must have been set up at open/mmap time */
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	/* page was truncated/invalidated while we waited: retry the fault */
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	/* return with the page locked, ready to be dirtied */
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}
575 
/**
 * v9fs_mmap_file_read_iter - read from a file
 * @iocb: kernel I/O control block
 * @to: destination iov_iter to read data into
 *
 */
584 static ssize_t
v9fs_mmap_file_read_iter(struct kiocb * iocb,struct iov_iter * to)585 v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
586 {
587 	/* TODO: Check if there are dirty pages */
588 	return v9fs_file_read_iter(iocb, to);
589 }
590 
/**
 * v9fs_mmap_file_write_iter - write to a file
 * @iocb: kernel I/O control block
 * @from: source iov_iter to write data from
 *
 */
599 static ssize_t
v9fs_mmap_file_write_iter(struct kiocb * iocb,struct iov_iter * from)600 v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
601 {
602 	/*
603 	 * TODO: invalidate mmaps on filp's inode between
604 	 * offset and offset+count
605 	 */
606 	return v9fs_file_write_iter(iocb, from);
607 }
608 
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	/* write back exactly the byte range this VMA covered */
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	/* private mappings have no shared dirty state to flush */
	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);

	/* skip data writeback if the mapping cannot write back pages */
	if (!mapping_can_writeback(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	sync_inode(inode, &wbc);
}
635 
636 
/* VM ops for the cached (loose/fscache) mmap path */
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
642 
/* VM ops for the mmap cache mode; .close flushes the mapped range */
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
649 
650 
/* cached I/O (page cache), legacy 9P2000/9P2000.u */
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* cached I/O (page cache), 9P2000.L */
const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};

/* uncached (direct to server) I/O, legacy 9P2000/9P2000.u */
const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* uncached (direct to server) I/O, 9P2000.L */
const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* mmap cache mode (writable shared mappings), legacy 9P2000/9P2000.u */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};

/* mmap cache mode (writable shared mappings), 9P2000.L */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
731