// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>

static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);

/*
 * Prepare a subrequest to upload to the server.  We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

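	/*
	 * Reserve send credits up front; wait_mtu_credits() may need to wait
	 * for credits to become available and clamps subreq.max_len to what
	 * the reservation actually covers.
	 */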
	rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
}

/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

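	/*
	 * The credits were reserved against the maximum possible size in
	 * cifs_prepare_write(); trim the reservation down to the length this
	 * subrequest actually carries.
	 */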
	rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}

/*
 * Split the read up according to how many credits we can get for each piece.
 * It's okay to sleep here if we need to wait for more credit to become
 * available.
 *
 * We also choose the server and allocate an operation ID to be cleaned up
 * later.
 */
static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct TCP_Server_Info *server;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t rsize = 0;
	int rc;

	rdata->xid = get_xid();
	rdata->have_xid = true;

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

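	/*
	 * The read size is settled lazily: if the mount options didn't pin
	 * rsize, take the server's negotiated value on first use.
	 */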
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
					   &rdata->credits);
	if (rc) {
		subreq->error = rc;
		return false;
	}

	subreq->len = min_t(size_t, subreq->len, rsize);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
#endif
	return true;
}

/*
 * Issue a read operation on behalf of the netfs helper functions.  We're asked
 * to make a read of a certain size at a point in the file.  We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	pid_t pid;
	int rc = 0;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = req->cfile->pid;
	else
		pid = current->tgid; /* NB: this may run from a workqueue, not the issuing task */

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

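	/*
	 * If the handle was invalidated by a reconnect, keep retrying the
	 * reopen for as long as it reports -EAGAIN before giving up.
	 */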
	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto out;
	}

	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	rdata->pid = pid;

	rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
	if (!rc) {
		if (rdata->req->cfile->invalidHandle)
			rc = -EAGAIN;
		else
			rc = rdata->server->ops->async_readv(rdata);
	}

out:
	if (rc)
		netfs_subreq_terminated(subreq, rc, false);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}

/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;

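	/*
	 * Writeback requests arrive with no struct file; in that case
	 * cifs_begin_writeback() finds a writable handle instead.
	 */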
	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}

/*
 * Expand the size of a readahead to the size of the rsize, if at least as
 * large as a page, allowing for the possibility that rsize is not pow-2
 * aligned.
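 *
 * For example (illustrative numbers): with rsize 0x50000 (not a power of
 * two) rounded up to 0x80000, a 0x3000-byte read at file position 0x81000
 * is expanded to cover 0x80000-0xFFFFF, subject to i_size.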
 */
static void cifs_expand_readahead(struct netfs_io_request *rreq)
{
	unsigned int rsize = rreq->rsize;
	loff_t misalignment, i_size = i_size_read(rreq->inode);

	if (rsize < PAGE_SIZE)
		return;

	if (rsize < INT_MAX)
		rsize = roundup_pow_of_two(rsize);
	else
		rsize = ((unsigned int)INT_MAX + 1) / 2;

	misalignment = rreq->start & (rsize - 1);
	if (misalignment) {
		rreq->start -= misalignment;
		rreq->len += misalignment;
	}

	rreq->len = round_up(rreq->len, rsize);
	if (rreq->start < i_size && rreq->len > i_size - rreq->start)
		rreq->len = i_size - rreq->start;
}

/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}

static void cifs_post_modify(struct inode *inode)
{
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
}

static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}

static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
	if (rdata->have_xid)
		free_xid(rdata->xid);
}

const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.expand_readahead	= cifs_expand_readahead,
	.clamp_length		= cifs_clamp_length,
	.issue_read		= cifs_req_issue_read,
	.done			= cifs_rreq_done,
	.post_modify		= cifs_post_modify,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
};

/*
 * Mark all open files on the tree connection as invalid, since they were
 * closed when the session to the server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}

static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can cause
		 * unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the FILE_SUPERSEDE
 *	disposition (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than replacing it the way FILE_SUPERSEDE does
 *	(which applies the attributes / metadata passed in on the
 *	open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);

struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

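	/*
	 * Pin the superblock; the matching cifs_sb_deactive() happens in
	 * cifsFileInfo_put_final() when the last reference is dropped.
	 */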
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if this is a readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

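	/*
	 * Retry the server-side close a bounded number of times: transient
	 * -EBUSY/-EAGAIN responses are retried after a 250ms pause, up to
	 * MAX_RETRIES attempts.
	 */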
	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	queue the final put on a workqueue (false on the close and
 *		oplock break paths)
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// If the serverclose has been offloaded to the workqueue (on failure),
	// it will handle offloading the put as well. If the serverclose was
	// not offloaded, we need to handle offloading the put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
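	/* Only reuse it when the open flags match exactly; otherwise drop the reference. */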
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * We cannot take the rename sem here, because various ops (including
	 * some that already hold it) can end up causing writepage to get
	 * called. If the server was down, that is how we ended up here, and
	 * we can never tell whether the caller already holds rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout has expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}

int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
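		/*
		 * If this allocation fails, smb2_can_defer_close() below sees
		 * dclose == NULL and we simply fall back to putting the
		 * handle without deferring the close.
		 */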
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work
				 * queues new work, so increase the ref count to
				 * avoid a use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles  */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
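		/* skip locks whose byte range does not overlap the request */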
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
1664 		if (!rc)
1665 			goto try_again;
1666 		cifs_down_write(&cinode->lock_sem);
1667 		list_del_init(&lock->blist);
1668 	}
1669 
1670 	up_write(&cinode->lock_sem);
1671 	return rc;
1672 }
1673 
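/*
 * Note on the wait condition above: a blocked waiter is woken after being
 * detached from the conflicting lock's blist with list_del_init() (see
 * cifs_del_lock_waiters()), which leaves the entry pointing at itself.
 * The open-coded prev/next comparison is therefore equivalent to
 * list_empty(&lock->blist) on the re-initialized entry.
 */
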
1674 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1675 /*
1676  * Check if there is another lock that prevents us from setting the lock
1677  * (posix style). If such a lock exists, update the flock structure with
1678  * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
1679  * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1680  * request the server, or 1 otherwise.
1681  */
1682 static int
1683 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1684 {
1685 	int rc = 0;
1686 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1687 	unsigned char saved_type = flock->c.flc_type;
1688 
1689 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1690 		return 1;
1691 
1692 	down_read(&cinode->lock_sem);
1693 	posix_test_lock(file, flock);
1694 
1695 	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
1696 		flock->c.flc_type = saved_type;
1697 		rc = 1;
1698 	}
1699 
1700 	up_read(&cinode->lock_sem);
1701 	return rc;
1702 }
1703 
1704 /*
1705  * Set the byte-range lock (posix style). Returns:
1706  * 1) <0, if an error occurs while setting the lock;
1707  * 2) 0, if we set the lock and don't need to request the server;
1708  * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1709  * 4) FILE_LOCK_DEFERRED + 1, if we need to request the server.
1710  */
1711 static int
1712 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1713 {
1714 	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
1715 	int rc = FILE_LOCK_DEFERRED + 1;
1716 
1717 	if ((flock->c.flc_flags & FL_POSIX) == 0)
1718 		return rc;
1719 
1720 	cifs_down_write(&cinode->lock_sem);
1721 	if (!cinode->can_cache_brlcks) {
1722 		up_write(&cinode->lock_sem);
1723 		return rc;
1724 	}
1725 
1726 	rc = posix_lock_file(file, flock, NULL);
1727 	up_write(&cinode->lock_sem);
1728 	return rc;
1729 }
1730 
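/*
 * How a caller decodes the convention above (a sketch mirroring the check
 * in cifs_setlk() later in this file):
 */
#if 0
	rc = cifs_posix_lock_set(file, flock);
	if (rc <= FILE_LOCK_DEFERRED)
		return rc;	/* error, handled locally, or deferred */
	/* rc == FILE_LOCK_DEFERRED + 1: fall through to a server request */
#endif
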
1731 int
1732 cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1733 {
1734 	unsigned int xid;
1735 	int rc = 0, stored_rc;
1736 	struct cifsLockInfo *li, *tmp;
1737 	struct cifs_tcon *tcon;
1738 	unsigned int num, max_num, max_buf;
1739 	LOCKING_ANDX_RANGE *buf, *cur;
1740 	static const int types[] = {
1741 		LOCKING_ANDX_LARGE_FILES,
1742 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1743 	};
1744 	int i;
1745 
1746 	xid = get_xid();
1747 	tcon = tlink_tcon(cfile->tlink);
1748 
1749 	/*
1750 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1751 	 * and check it before using.
1752 	 */
1753 	max_buf = tcon->ses->server->maxBuf;
1754 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1755 		free_xid(xid);
1756 		return -EINVAL;
1757 	}
1758 
1759 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1760 		     PAGE_SIZE);
1761 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1762 			PAGE_SIZE);
1763 	max_num = (max_buf - sizeof(struct smb_hdr)) /
1764 						sizeof(LOCKING_ANDX_RANGE);
1765 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1766 	if (!buf) {
1767 		free_xid(xid);
1768 		return -ENOMEM;
1769 	}
1770 
1771 	for (i = 0; i < 2; i++) {
1772 		cur = buf;
1773 		num = 0;
1774 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
1775 			if (li->type != types[i])
1776 				continue;
1777 			cur->Pid = cpu_to_le16(li->pid);
1778 			cur->LengthLow = cpu_to_le32((u32)li->length);
1779 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1780 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
1781 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1782 			if (++num == max_num) {
1783 				stored_rc = cifs_lockv(xid, tcon,
1784 						       cfile->fid.netfid,
1785 						       (__u8)li->type, 0, num,
1786 						       buf);
1787 				if (stored_rc)
1788 					rc = stored_rc;
1789 				cur = buf;
1790 				num = 0;
1791 			} else
1792 				cur++;
1793 		}
1794 
1795 		if (num) {
1796 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
1797 					       (__u8)types[i], 0, num, buf);
1798 			if (stored_rc)
1799 				rc = stored_rc;
1800 		}
1801 	}
1802 
1803 	kfree(buf);
1804 	free_xid(xid);
1805 	return rc;
1806 }
1807 
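/*
 * Worked example of the batching arithmetic above, with illustrative
 * (assumed) sizes: maxBuf = 16644, sizeof(struct smb_hdr) = 32,
 * sizeof(LOCKING_ANDX_RANGE) = 10, PAGE_SIZE = 4096:
 *
 *   max_buf = min(16644 - 32, 4096) = 4096
 *   max_num = (4096 - 32) / 10      = 406 ranges per LOCKING_ANDX request
 *
 * so a file with, say, 1000 cached locks of one type is pushed in three
 * wire calls (406 + 406 + 188).
 */
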
1808 static __u32
1809 hash_lockowner(fl_owner_t owner)
1810 {
1811 	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1812 }
1813 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1814 
1815 struct lock_to_push {
1816 	struct list_head llist;
1817 	__u64 offset;
1818 	__u64 length;
1819 	__u32 pid;
1820 	__u16 netfid;
1821 	__u8 type;
1822 };
1823 
1824 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1825 static int
1826 cifs_push_posix_locks(struct cifsFileInfo *cfile)
1827 {
1828 	struct inode *inode = d_inode(cfile->dentry);
1829 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1830 	struct file_lock *flock;
1831 	struct file_lock_context *flctx = locks_inode_context(inode);
1832 	unsigned int count = 0, i;
1833 	int rc = 0, xid, type;
1834 	struct list_head locks_to_send, *el;
1835 	struct lock_to_push *lck, *tmp;
1836 	__u64 length;
1837 
1838 	xid = get_xid();
1839 
1840 	if (!flctx)
1841 		goto out;
1842 
1843 	spin_lock(&flctx->flc_lock);
1844 	list_for_each(el, &flctx->flc_posix) {
1845 		count++;
1846 	}
1847 	spin_unlock(&flctx->flc_lock);
1848 
1849 	INIT_LIST_HEAD(&locks_to_send);
1850 
1851 	/*
1852 	 * Allocating count locks is enough because no FL_POSIX locks can be
1853 	 * added to the list while we are holding cinode->lock_sem that
1854 	 * protects locking operations of this inode.
1855 	 */
1856 	for (i = 0; i < count; i++) {
1857 		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1858 		if (!lck) {
1859 			rc = -ENOMEM;
1860 			goto err_out;
1861 		}
1862 		list_add_tail(&lck->llist, &locks_to_send);
1863 	}
1864 
1865 	el = locks_to_send.next;
1866 	spin_lock(&flctx->flc_lock);
1867 	for_each_file_lock(flock, &flctx->flc_posix) {
1868 		unsigned char ftype = flock->c.flc_type;
1869 
1870 		if (el == &locks_to_send) {
1871 			/*
1872 			 * The list ended. We don't have enough allocated
1873 			 * structures - something is really wrong.
1874 			 */
1875 			cifs_dbg(VFS, "Can't push all brlocks!\n");
1876 			break;
1877 		}
1878 		length = cifs_flock_len(flock);
1879 		if (ftype == F_RDLCK || ftype == F_SHLCK)
1880 			type = CIFS_RDLCK;
1881 		else
1882 			type = CIFS_WRLCK;
1883 		lck = list_entry(el, struct lock_to_push, llist);
1884 		lck->pid = hash_lockowner(flock->c.flc_owner);
1885 		lck->netfid = cfile->fid.netfid;
1886 		lck->length = length;
1887 		lck->type = type;
1888 		lck->offset = flock->fl_start;
1889 	}
1890 	spin_unlock(&flctx->flc_lock);
1891 
1892 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1893 		int stored_rc;
1894 
1895 		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1896 					     lck->offset, lck->length, NULL,
1897 					     lck->type, 0);
1898 		if (stored_rc)
1899 			rc = stored_rc;
1900 		list_del(&lck->llist);
1901 		kfree(lck);
1902 	}
1903 
1904 out:
1905 	free_xid(xid);
1906 	return rc;
1907 err_out:
1908 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1909 		list_del(&lck->llist);
1910 		kfree(lck);
1911 	}
1912 	goto out;
1913 }
1914 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1915 
1916 static int
1917 cifs_push_locks(struct cifsFileInfo *cfile)
1918 {
1919 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1920 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1921 	int rc = 0;
1922 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1923 	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1924 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1925 
1926 	/* we are going to update can_cache_brlcks here - need a write access */
1927 	cifs_down_write(&cinode->lock_sem);
1928 	if (!cinode->can_cache_brlcks) {
1929 		up_write(&cinode->lock_sem);
1930 		return rc;
1931 	}
1932 
1933 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1934 	if (cap_unix(tcon->ses) &&
1935 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1936 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1937 		rc = cifs_push_posix_locks(cfile);
1938 	else
1939 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1940 		rc = tcon->ses->server->ops->push_mand_locks(cfile);
1941 
1942 	cinode->can_cache_brlcks = false;
1943 	up_write(&cinode->lock_sem);
1944 	return rc;
1945 }
1946 
1947 static void
1948 cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1949 		bool *wait_flag, struct TCP_Server_Info *server)
1950 {
1951 	if (flock->c.flc_flags & FL_POSIX)
1952 		cifs_dbg(FYI, "Posix\n");
1953 	if (flock->c.flc_flags & FL_FLOCK)
1954 		cifs_dbg(FYI, "Flock\n");
1955 	if (flock->c.flc_flags & FL_SLEEP) {
1956 		cifs_dbg(FYI, "Blocking lock\n");
1957 		*wait_flag = true;
1958 	}
1959 	if (flock->c.flc_flags & FL_ACCESS)
1960 		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1961 	if (flock->c.flc_flags & FL_LEASE)
1962 		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1963 	if (flock->c.flc_flags &
1964 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1965 	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
1966 		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1967 		         flock->c.flc_flags);
1968 
1969 	*type = server->vals->large_lock_type;
1970 	if (lock_is_write(flock)) {
1971 		cifs_dbg(FYI, "F_WRLCK\n");
1972 		*type |= server->vals->exclusive_lock_type;
1973 		*lock = 1;
1974 	} else if (lock_is_unlock(flock)) {
1975 		cifs_dbg(FYI, "F_UNLCK\n");
1976 		*type |= server->vals->unlock_lock_type;
1977 		*unlock = 1;
1978 		/* Check if unlock includes more than one lock range */
1979 	} else if (lock_is_read(flock)) {
1980 		cifs_dbg(FYI, "F_RDLCK\n");
1981 		*type |= server->vals->shared_lock_type;
1982 		*lock = 1;
1983 	} else if (flock->c.flc_type == F_EXLCK) {
1984 		cifs_dbg(FYI, "F_EXLCK\n");
1985 		*type |= server->vals->exclusive_lock_type;
1986 		*lock = 1;
1987 	} else if (flock->c.flc_type == F_SHLCK) {
1988 		cifs_dbg(FYI, "F_SHLCK\n");
1989 		*type |= server->vals->shared_lock_type;
1990 		*lock = 1;
1991 	} else
1992 		cifs_dbg(FYI, "Unknown type of lock\n");
1993 }
1994 
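/*
 * Summary of the decoding above (the server->vals bits are per-dialect):
 *
 *   F_WRLCK / F_EXLCK  ->  large | exclusive, *lock = 1
 *   F_RDLCK / F_SHLCK  ->  large | shared,    *lock = 1
 *   F_UNLCK            ->  large | unlock,    *unlock = 1
 *
 * FL_SLEEP additionally sets *wait_flag, turning the request into a
 * blocking lock on the server side.
 */
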
1995 static int
1996 cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1997 	   bool wait_flag, bool posix_lck, unsigned int xid)
1998 {
1999 	int rc = 0;
2000 	__u64 length = cifs_flock_len(flock);
2001 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2002 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2003 	struct TCP_Server_Info *server = tcon->ses->server;
2004 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2005 	__u16 netfid = cfile->fid.netfid;
2006 
2007 	if (posix_lck) {
2008 		int posix_lock_type;
2009 
2010 		rc = cifs_posix_lock_test(file, flock);
2011 		if (!rc)
2012 			return rc;
2013 
2014 		if (type & server->vals->shared_lock_type)
2015 			posix_lock_type = CIFS_RDLCK;
2016 		else
2017 			posix_lock_type = CIFS_WRLCK;
2018 		rc = CIFSSMBPosixLock(xid, tcon, netfid,
2019 				      hash_lockowner(flock->c.flc_owner),
2020 				      flock->fl_start, length, flock,
2021 				      posix_lock_type, wait_flag);
2022 		return rc;
2023 	}
2024 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2025 
2026 	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
2027 	if (!rc)
2028 		return rc;
2029 
2030 	/* BB we could chain these into one lock request BB */
2031 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2032 				    1, 0, false);
2033 	if (rc == 0) {
2034 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2035 					    type, 0, 1, false);
2036 		flock->c.flc_type = F_UNLCK;
2037 		if (rc != 0)
2038 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2039 				 rc);
2040 		return 0;
2041 	}
2042 
2043 	if (type & server->vals->shared_lock_type) {
2044 		flock->c.flc_type = F_WRLCK;
2045 		return 0;
2046 	}
2047 
2048 	type &= ~server->vals->exclusive_lock_type;
2049 
2050 	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2051 				    type | server->vals->shared_lock_type,
2052 				    1, 0, false);
2053 	if (rc == 0) {
2054 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2055 			type | server->vals->shared_lock_type, 0, 1, false);
2056 		flock->c.flc_type = F_RDLCK;
2057 		if (rc != 0)
2058 			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2059 				 rc);
2060 	} else
2061 		flock->c.flc_type = F_WRLCK;
2062 
2063 	return 0;
2064 }
2065 
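/*
 * The probe sequence above, in short: once the local list and (optionally)
 * POSIX emulation give no answer, the server is asked directly.  Taking and
 * immediately releasing a lock of the requested type means no conflict
 * (report F_UNLCK); failing an exclusive probe falls back to a shared probe
 * to distinguish "blocked by a read lock" (report F_RDLCK) from "blocked by
 * a write lock" (report F_WRLCK).
 */
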
2066 void
2067 cifs_move_llist(struct list_head *source, struct list_head *dest)
2068 {
2069 	struct list_head *li, *tmp;
2070 	list_for_each_safe(li, tmp, source)
2071 		list_move(li, dest);
2072 }
2073 
2074 void
2075 cifs_free_llist(struct list_head *llist)
2076 {
2077 	struct cifsLockInfo *li, *tmp;
2078 	list_for_each_entry_safe(li, tmp, llist, llist) {
2079 		cifs_del_lock_waiters(li);
2080 		list_del(&li->llist);
2081 		kfree(li);
2082 	}
2083 }
2084 
2085 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2086 int
2087 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2088 		  unsigned int xid)
2089 {
2090 	int rc = 0, stored_rc;
2091 	static const int types[] = {
2092 		LOCKING_ANDX_LARGE_FILES,
2093 		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2094 	};
2095 	unsigned int i;
2096 	unsigned int max_num, num, max_buf;
2097 	LOCKING_ANDX_RANGE *buf, *cur;
2098 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2099 	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2100 	struct cifsLockInfo *li, *tmp;
2101 	__u64 length = cifs_flock_len(flock);
2102 	struct list_head tmp_llist;
2103 
2104 	INIT_LIST_HEAD(&tmp_llist);
2105 
2106 	/*
2107 	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2108 	 * and check it before using.
2109 	 */
2110 	max_buf = tcon->ses->server->maxBuf;
2111 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2112 		return -EINVAL;
2113 
2114 	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2115 		     PAGE_SIZE);
2116 	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2117 			PAGE_SIZE);
2118 	max_num = (max_buf - sizeof(struct smb_hdr)) /
2119 						sizeof(LOCKING_ANDX_RANGE);
2120 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2121 	if (!buf)
2122 		return -ENOMEM;
2123 
2124 	cifs_down_write(&cinode->lock_sem);
2125 	for (i = 0; i < 2; i++) {
2126 		cur = buf;
2127 		num = 0;
2128 		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2129 			if (flock->fl_start > li->offset ||
2130 			    (flock->fl_start + length) <
2131 			    (li->offset + li->length))
2132 				continue;
2133 			if (current->tgid != li->pid)
2134 				continue;
2135 			if (types[i] != li->type)
2136 				continue;
2137 			if (cinode->can_cache_brlcks) {
2138 				/*
2139 				 * We can cache brlock requests - simply remove
2140 				 * a lock from the file's list.
2141 				 */
2142 				list_del(&li->llist);
2143 				cifs_del_lock_waiters(li);
2144 				kfree(li);
2145 				continue;
2146 			}
2147 			cur->Pid = cpu_to_le16(li->pid);
2148 			cur->LengthLow = cpu_to_le32((u32)li->length);
2149 			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2150 			cur->OffsetLow = cpu_to_le32((u32)li->offset);
2151 			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2152 			/*
2153 			 * We need to save the lock here so that we can add it
2154 			 * back to the file's list if the unlock range request
2155 			 * fails on the server.
2156 			 */
2157 			list_move(&li->llist, &tmp_llist);
2158 			if (++num == max_num) {
2159 				stored_rc = cifs_lockv(xid, tcon,
2160 						       cfile->fid.netfid,
2161 						       li->type, num, 0, buf);
2162 				if (stored_rc) {
2163 					/*
2164 					 * We failed on the unlock range
2165 					 * request - add all locks from the tmp
2166 					 * list to the head of the file's list.
2167 					 */
2168 					cifs_move_llist(&tmp_llist,
2169 							&cfile->llist->locks);
2170 					rc = stored_rc;
2171 				} else
2172 					/*
2173 					 * The unlock range request succeeded -
2174 					 * free the tmp list.
2175 					 */
2176 					cifs_free_llist(&tmp_llist);
2177 				cur = buf;
2178 				num = 0;
2179 			} else
2180 				cur++;
2181 		}
2182 		if (num) {
2183 			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2184 					       types[i], num, 0, buf);
2185 			if (stored_rc) {
2186 				cifs_move_llist(&tmp_llist,
2187 						&cfile->llist->locks);
2188 				rc = stored_rc;
2189 			} else
2190 				cifs_free_llist(&tmp_llist);
2191 		}
2192 	}
2193 
2194 	up_write(&cinode->lock_sem);
2195 	kfree(buf);
2196 	return rc;
2197 }
2198 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2199 
2200 static int
2201 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2202 	   bool wait_flag, bool posix_lck, int lock, int unlock,
2203 	   unsigned int xid)
2204 {
2205 	int rc = 0;
2206 	__u64 length = cifs_flock_len(flock);
2207 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2208 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2209 	struct TCP_Server_Info *server = tcon->ses->server;
2210 	struct inode *inode = d_inode(cfile->dentry);
2211 
2212 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2213 	if (posix_lck) {
2214 		int posix_lock_type;
2215 
2216 		rc = cifs_posix_lock_set(file, flock);
2217 		if (rc <= FILE_LOCK_DEFERRED)
2218 			return rc;
2219 
2220 		if (type & server->vals->shared_lock_type)
2221 			posix_lock_type = CIFS_RDLCK;
2222 		else
2223 			posix_lock_type = CIFS_WRLCK;
2224 
2225 		if (unlock == 1)
2226 			posix_lock_type = CIFS_UNLCK;
2227 
2228 		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2229 				      hash_lockowner(flock->c.flc_owner),
2230 				      flock->fl_start, length,
2231 				      NULL, posix_lock_type, wait_flag);
2232 		goto out;
2233 	}
2234 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2235 	if (lock) {
2236 		struct cifsLockInfo *lock;
2237 
2238 		lock = cifs_lock_init(flock->fl_start, length, type,
2239 				      flock->c.flc_flags);
2240 		if (!lock)
2241 			return -ENOMEM;
2242 
2243 		rc = cifs_lock_add_if(cfile, lock, wait_flag);
2244 		if (rc < 0) {
2245 			kfree(lock);
2246 			return rc;
2247 		}
2248 		if (!rc)
2249 			goto out;
2250 
2251 		/*
2252 		 * Windows 7 server can delay breaking lease from read to None
2253 		 * if we set a byte-range lock on a file - break it explicitly
2254 		 * before sending the lock to the server to be sure the next
2255 		 * read won't conflict with non-overlapping locks due to
2256 		 * page reads.
2257 		 */
2258 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2259 					CIFS_CACHE_READ(CIFS_I(inode))) {
2260 			cifs_zap_mapping(inode);
2261 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2262 				 inode);
2263 			CIFS_I(inode)->oplock = 0;
2264 		}
2265 
2266 		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2267 					    type, 1, 0, wait_flag);
2268 		if (rc) {
2269 			kfree(lock);
2270 			return rc;
2271 		}
2272 
2273 		cifs_lock_add(cfile, lock);
2274 	} else if (unlock)
2275 		rc = server->ops->mand_unlock_range(cfile, flock, xid);
2276 
2277 out:
2278 	if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2279 		/*
2280 		 * If this is a request to remove all locks because we
2281 		 * are closing the file, it doesn't matter if the
2282 		 * unlocking failed as both cifs.ko and the SMB server
2283 		 * remove the lock on file close
2284 		 */
2285 		if (rc) {
2286 			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2287 			if (!(flock->c.flc_flags & FL_CLOSE))
2288 				return rc;
2289 		}
2290 		rc = locks_lock_file_wait(file, flock);
2291 	}
2292 	return rc;
2293 }
2294 
2295 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2296 {
2297 	int rc, xid;
2298 	int lock = 0, unlock = 0;
2299 	bool wait_flag = false;
2300 	bool posix_lck = false;
2301 	struct cifs_sb_info *cifs_sb;
2302 	struct cifs_tcon *tcon;
2303 	struct cifsFileInfo *cfile;
2304 	__u32 type;
2305 
2306 	xid = get_xid();
2307 
2308 	if (!(fl->c.flc_flags & FL_FLOCK)) {
2309 		rc = -ENOLCK;
2310 		free_xid(xid);
2311 		return rc;
2312 	}
2313 
2314 	cfile = (struct cifsFileInfo *)file->private_data;
2315 	tcon = tlink_tcon(cfile->tlink);
2316 
2317 	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2318 			tcon->ses->server);
2319 	cifs_sb = CIFS_FILE_SB(file);
2320 
2321 	if (cap_unix(tcon->ses) &&
2322 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2323 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2324 		posix_lck = true;
2325 
2326 	if (!lock && !unlock) {
2327 		/*
2328 		 * if no lock or unlock then nothing to do since we do not
2329 		 * know what it is
2330 		 */
2331 		rc = -EOPNOTSUPP;
2332 		free_xid(xid);
2333 		return rc;
2334 	}
2335 
2336 	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2337 			xid);
2338 	free_xid(xid);
2339 	return rc;
2340 
2341 
2342 }
2343 
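/*
 * User-space view of the entry point above (illustrative sketch, not part
 * of this file): flock() arrives here with FL_FLOCK set and anything else
 * is rejected with -ENOLCK; the whole-file lock is then translated by
 * cifs_setlk() into a byte-range lock request to the server.
 */
#if 0
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

static int with_whole_file_lock(const char *path)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (flock(fd, LOCK_EX) < 0) {	/* may block: FL_SLEEP -> wait_flag */
		close(fd);
		return -1;
	}
	/* ... exclusive access to the file ... */
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}
#endif
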
2344 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2345 {
2346 	int rc, xid;
2347 	int lock = 0, unlock = 0;
2348 	bool wait_flag = false;
2349 	bool posix_lck = false;
2350 	struct cifs_sb_info *cifs_sb;
2351 	struct cifs_tcon *tcon;
2352 	struct cifsFileInfo *cfile;
2353 	__u32 type;
2354 
2355 	rc = -EACCES;
2356 	xid = get_xid();
2357 
2358 	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2359 		 flock->c.flc_flags, flock->c.flc_type,
2360 		 (long long)flock->fl_start,
2361 		 (long long)flock->fl_end);
2362 
2363 	cfile = (struct cifsFileInfo *)file->private_data;
2364 	tcon = tlink_tcon(cfile->tlink);
2365 
2366 	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2367 			tcon->ses->server);
2368 	cifs_sb = CIFS_FILE_SB(file);
2369 	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2370 
2371 	if (cap_unix(tcon->ses) &&
2372 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2373 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2374 		posix_lck = true;
2375 	/*
2376 	 * BB add code here to normalize offset and length to account for
2377 	 * negative length which we can not accept over the wire.
2378 	 * negative length, which we cannot accept over the wire.
2379 	if (IS_GETLK(cmd)) {
2380 		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2381 		free_xid(xid);
2382 		return rc;
2383 	}
2384 
2385 	if (!lock && !unlock) {
2386 		/*
2387 		 * if no lock or unlock then nothing to do since we do not
2388 		 * know what it is
2389 		 */
2390 		free_xid(xid);
2391 		return -EOPNOTSUPP;
2392 	}
2393 
2394 	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2395 			xid);
2396 	free_xid(xid);
2397 	return rc;
2398 }
2399 
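/*
 * The matching POSIX entry point sketch (illustrative, not part of this
 * file): fcntl() byte-range locks land in cifs_lock() above, where
 * IS_GETLK(cmd) routes to cifs_getlk() and set requests to cifs_setlk().
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int lock_first_page(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 4096,
	};

	/* F_SETLKW blocks on conflict (FL_SLEEP); F_SETLK fails instead */
	if (fcntl(fd, F_SETLKW, &fl) < 0)
		return -1;

	fl.l_type = F_UNLCK;		/* release the range */
	return fcntl(fd, F_SETLK, &fl);
}
#endif
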
2400 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2401 				      bool was_async)
2402 {
2403 	struct netfs_io_request *wreq = wdata->rreq;
2404 	loff_t new_server_eof;
2405 
2406 	if (result > 0) {
2407 		new_server_eof = wdata->subreq.start + wdata->subreq.transferred + result;
2408 
2409 		if (new_server_eof > netfs_inode(wreq->inode)->remote_i_size)
2410 			netfs_resize_file(netfs_inode(wreq->inode), new_server_eof, true);
2411 	}
2412 
2413 	netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
2414 }
2415 
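/*
 * Example of the EOF tracking above: a subrequest that started at file
 * offset 0x10000 with 0x2000 bytes already transferred, completing another
 * 0x1000 bytes, yields new_server_eof = 0x10000 + 0x2000 + 0x1000 = 0x13000;
 * remote_i_size is only ever grown here, never shrunk.
 */
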
2416 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2417 					bool fsuid_only)
2418 {
2419 	struct cifsFileInfo *open_file = NULL;
2420 	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2421 
2422 	/* only filter by fsuid on multiuser mounts */
2423 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2424 		fsuid_only = false;
2425 
2426 	spin_lock(&cifs_inode->open_file_lock);
2427 	/* We could simply get the first list entry since write-only entries
2428 	   are always at the end of the list, but since the first entry might
2429 	   have a close pending, we go through the whole list. */
2430 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2431 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2432 			continue;
2433 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2434 			if (!open_file->invalidHandle) {
2435 				/* found a good file */
2436 				/* lock it so it will not be closed on us */
2437 				cifsFileInfo_get(open_file);
2438 				spin_unlock(&cifs_inode->open_file_lock);
2439 				return open_file;
2440 			} /* else might as well continue, and look for
2441 			     another, or simply have the caller reopen it
2442 			     again rather than trying to fix this handle */
2443 		} else /* write only file */
2444 			break; /* write only files are last so must be done */
2445 	}
2446 	spin_unlock(&cifs_inode->open_file_lock);
2447 	return NULL;
2448 }
2449 
2450 /* Return -EBADF if no handle is found and general rc otherwise */
2451 int
2452 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2453 		       struct cifsFileInfo **ret_file)
2454 {
2455 	struct cifsFileInfo *open_file, *inv_file = NULL;
2456 	struct cifs_sb_info *cifs_sb;
2457 	bool any_available = false;
2458 	int rc = -EBADF;
2459 	unsigned int refind = 0;
2460 	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2461 	bool with_delete = flags & FIND_WR_WITH_DELETE;
2462 	*ret_file = NULL;
2463 
2464 	/*
2465 	 * Having a null inode here (because mapping->host was set to zero by
2466 	 * the VFS or MM) should not happen, but we had reports of an oops (due
2467 	 * to it being zero) during stress test cases, so we need to check for it
2468 	 */
2469 
2470 	if (cifs_inode == NULL) {
2471 		cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2472 		dump_stack();
2473 		return rc;
2474 	}
2475 
2476 	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2477 
2478 	/* only filter by fsuid on multiuser mounts */
2479 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2480 		fsuid_only = false;
2481 
2482 	spin_lock(&cifs_inode->open_file_lock);
2483 refind_writable:
2484 	if (refind > MAX_REOPEN_ATT) {
2485 		spin_unlock(&cifs_inode->open_file_lock);
2486 		return rc;
2487 	}
2488 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2489 		if (!any_available && open_file->pid != current->tgid)
2490 			continue;
2491 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2492 			continue;
2493 		if (with_delete && !(open_file->fid.access & DELETE))
2494 			continue;
2495 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2496 			if (!open_file->invalidHandle) {
2497 				/* found a good writable file */
2498 				cifsFileInfo_get(open_file);
2499 				spin_unlock(&cifs_inode->open_file_lock);
2500 				*ret_file = open_file;
2501 				return 0;
2502 			} else {
2503 				if (!inv_file)
2504 					inv_file = open_file;
2505 			}
2506 		}
2507 	}
2508 	/* couldn't find usable FH with same pid, try any available */
2509 	if (!any_available) {
2510 		any_available = true;
2511 		goto refind_writable;
2512 	}
2513 
2514 	if (inv_file) {
2515 		any_available = false;
2516 		cifsFileInfo_get(inv_file);
2517 	}
2518 
2519 	spin_unlock(&cifs_inode->open_file_lock);
2520 
2521 	if (inv_file) {
2522 		rc = cifs_reopen_file(inv_file, false);
2523 		if (!rc) {
2524 			*ret_file = inv_file;
2525 			return 0;
2526 		}
2527 
2528 		spin_lock(&cifs_inode->open_file_lock);
2529 		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2530 		spin_unlock(&cifs_inode->open_file_lock);
2531 		cifsFileInfo_put(inv_file);
2532 		++refind;
2533 		inv_file = NULL;
2534 		spin_lock(&cifs_inode->open_file_lock);
2535 		goto refind_writable;
2536 	}
2537 
2538 	return rc;
2539 }
2540 
2541 struct cifsFileInfo *
2542 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2543 {
2544 	struct cifsFileInfo *cfile;
2545 	int rc;
2546 
2547 	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2548 	if (rc)
2549 		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2550 
2551 	return cfile;
2552 }
2553 
2554 int
2555 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2556 		       int flags,
2557 		       struct cifsFileInfo **ret_file)
2558 {
2559 	struct cifsFileInfo *cfile;
2560 	void *page = alloc_dentry_path();
2561 
2562 	*ret_file = NULL;
2563 
2564 	spin_lock(&tcon->open_file_lock);
2565 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2566 		struct cifsInodeInfo *cinode;
2567 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2568 		if (IS_ERR(full_path)) {
2569 			spin_unlock(&tcon->open_file_lock);
2570 			free_dentry_path(page);
2571 			return PTR_ERR(full_path);
2572 		}
2573 		if (strcmp(full_path, name))
2574 			continue;
2575 
2576 		cinode = CIFS_I(d_inode(cfile->dentry));
2577 		spin_unlock(&tcon->open_file_lock);
2578 		free_dentry_path(page);
2579 		return cifs_get_writable_file(cinode, flags, ret_file);
2580 	}
2581 
2582 	spin_unlock(&tcon->open_file_lock);
2583 	free_dentry_path(page);
2584 	return -ENOENT;
2585 }
2586 
2587 int
2588 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2589 		       struct cifsFileInfo **ret_file)
2590 {
2591 	struct cifsFileInfo *cfile;
2592 	void *page = alloc_dentry_path();
2593 
2594 	*ret_file = NULL;
2595 
2596 	spin_lock(&tcon->open_file_lock);
2597 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2598 		struct cifsInodeInfo *cinode;
2599 		const char *full_path = build_path_from_dentry(cfile->dentry, page);
2600 		if (IS_ERR(full_path)) {
2601 			spin_unlock(&tcon->open_file_lock);
2602 			free_dentry_path(page);
2603 			return PTR_ERR(full_path);
2604 		}
2605 		if (strcmp(full_path, name))
2606 			continue;
2607 
2608 		cinode = CIFS_I(d_inode(cfile->dentry));
2609 		spin_unlock(&tcon->open_file_lock);
2610 		free_dentry_path(page);
2611 		*ret_file = find_readable_file(cinode, 0);
2612 		return *ret_file ? 0 : -ENOENT;
2613 	}
2614 
2615 	spin_unlock(&tcon->open_file_lock);
2616 	free_dentry_path(page);
2617 	return -ENOENT;
2618 }
2619 
2620 /*
2621  * Flush data on a strict file.
2622  */
2623 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2624 		      int datasync)
2625 {
2626 	unsigned int xid;
2627 	int rc = 0;
2628 	struct cifs_tcon *tcon;
2629 	struct TCP_Server_Info *server;
2630 	struct cifsFileInfo *smbfile = file->private_data;
2631 	struct inode *inode = file_inode(file);
2632 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2633 
2634 	rc = file_write_and_wait_range(file, start, end);
2635 	if (rc) {
2636 		trace_cifs_fsync_err(inode->i_ino, rc);
2637 		return rc;
2638 	}
2639 
2640 	xid = get_xid();
2641 
2642 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2643 		 file, datasync);
2644 
2645 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2646 		rc = cifs_zap_mapping(inode);
2647 		if (rc) {
2648 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2649 			rc = 0; /* don't care about it in fsync */
2650 		}
2651 	}
2652 
2653 	tcon = tlink_tcon(smbfile->tlink);
2654 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2655 		server = tcon->ses->server;
2656 		if (server->ops->flush == NULL) {
2657 			rc = -ENOSYS;
2658 			goto strict_fsync_exit;
2659 		}
2660 
2661 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2662 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2663 			if (smbfile) {
2664 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2665 				cifsFileInfo_put(smbfile);
2666 			} else
2667 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2668 		} else
2669 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2670 	}
2671 
2672 strict_fsync_exit:
2673 	free_xid(xid);
2674 	return rc;
2675 }
2676 
2677 /*
2678  * Flush data on a non-strict file.
2679  */
2680 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2681 {
2682 	unsigned int xid;
2683 	int rc = 0;
2684 	struct cifs_tcon *tcon;
2685 	struct TCP_Server_Info *server;
2686 	struct cifsFileInfo *smbfile = file->private_data;
2687 	struct inode *inode = file_inode(file);
2688 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2689 
2690 	rc = file_write_and_wait_range(file, start, end);
2691 	if (rc) {
2692 		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2693 		return rc;
2694 	}
2695 
2696 	xid = get_xid();
2697 
2698 	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2699 		 file, datasync);
2700 
2701 	tcon = tlink_tcon(smbfile->tlink);
2702 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2703 		server = tcon->ses->server;
2704 		if (server->ops->flush == NULL) {
2705 			rc = -ENOSYS;
2706 			goto fsync_exit;
2707 		}
2708 
2709 		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2710 			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2711 			if (smbfile) {
2712 				rc = server->ops->flush(xid, tcon, &smbfile->fid);
2713 				cifsFileInfo_put(smbfile);
2714 			} else
2715 				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2716 		} else
2717 			rc = server->ops->flush(xid, tcon, &smbfile->fid);
2718 	}
2719 
2720 fsync_exit:
2721 	free_xid(xid);
2722 	return rc;
2723 }
2724 
2725 /*
2726  * As file closes, flush all cached write data for this inode checking
2727  * for write behind errors.
2728  */
2729 int cifs_flush(struct file *file, fl_owner_t id)
2730 {
2731 	struct inode *inode = file_inode(file);
2732 	int rc = 0;
2733 
2734 	if (file->f_mode & FMODE_WRITE)
2735 		rc = filemap_write_and_wait(inode->i_mapping);
2736 
2737 	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2738 	if (rc) {
2739 		/* get more nuanced writeback errors */
2740 		rc = filemap_check_wb_err(file->f_mapping, 0);
2741 		trace_cifs_flush_err(inode->i_ino, rc);
2742 	}
2743 	return rc;
2744 }
2745 
2746 static ssize_t
2747 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2748 {
2749 	struct file *file = iocb->ki_filp;
2750 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2751 	struct inode *inode = file->f_mapping->host;
2752 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2753 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2754 	ssize_t rc;
2755 
2756 	rc = netfs_start_io_write(inode);
2757 	if (rc < 0)
2758 		return rc;
2759 
2760 	/*
2761 	 * We need to hold the sem to be sure nobody modifies lock list
2762 	 * with a brlock that prevents writing.
2763 	 */
2764 	down_read(&cinode->lock_sem);
2765 
2766 	rc = generic_write_checks(iocb, from);
2767 	if (rc <= 0)
2768 		goto out;
2769 
2770 	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2771 				     server->vals->exclusive_lock_type, 0,
2772 				     NULL, CIFS_WRITE_OP))
2773 		rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2774 	else
2775 		rc = -EACCES;
2776 out:
2777 	up_read(&cinode->lock_sem);
2778 	netfs_end_io_write(inode);
2779 	if (rc > 0)
2780 		rc = generic_write_sync(iocb, rc);
2781 	return rc;
2782 }
2783 
2784 ssize_t
2785 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2786 {
2787 	struct inode *inode = file_inode(iocb->ki_filp);
2788 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2789 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2790 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2791 						iocb->ki_filp->private_data;
2792 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2793 	ssize_t written;
2794 
2795 	written = cifs_get_writer(cinode);
2796 	if (written)
2797 		return written;
2798 
2799 	if (CIFS_CACHE_WRITE(cinode)) {
2800 		if (cap_unix(tcon->ses) &&
2801 		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2802 		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2803 			written = netfs_file_write_iter(iocb, from);
2804 			goto out;
2805 		}
2806 		written = cifs_writev(iocb, from);
2807 		goto out;
2808 	}
2809 	/*
2810 	 * For non-oplocked files in strict cache mode we need to write the data
2811 	 * to the server exactly from the pos to pos+len-1 rather than flush all
2812 	 * affected pages because it may cause an error with mandatory locks on
2813 	 * these pages but not on the region from pos to pos+len-1.
2814 	 */
2815 	written = netfs_file_write_iter(iocb, from);
2816 	if (CIFS_CACHE_READ(cinode)) {
2817 		/*
2818 		 * We have read level caching and we have just sent a write
2819 		 * request to the server thus making data in the cache stale.
2820 		 * Zap the cache and set oplock/lease level to NONE to avoid
2821 		 * reading stale data from the cache. All subsequent read
2822 		 * operations will read new data from the server.
2823 		 */
2824 		cifs_zap_mapping(inode);
2825 		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2826 			 inode);
2827 		cinode->oplock = 0;
2828 	}
2829 out:
2830 	cifs_put_writer(cinode);
2831 	return written;
2832 }
2833 
2834 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2835 {
2836 	ssize_t rc;
2837 	struct inode *inode = file_inode(iocb->ki_filp);
2838 
2839 	if (iocb->ki_flags & IOCB_DIRECT)
2840 		return netfs_unbuffered_read_iter(iocb, iter);
2841 
2842 	rc = cifs_revalidate_mapping(inode);
2843 	if (rc)
2844 		return rc;
2845 
2846 	return netfs_file_read_iter(iocb, iter);
2847 }
2848 
2849 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2850 {
2851 	struct inode *inode = file_inode(iocb->ki_filp);
2852 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2853 	ssize_t written;
2854 	int rc;
2855 
2856 	if (iocb->ki_filp->f_flags & O_DIRECT) {
2857 		written = netfs_unbuffered_write_iter(iocb, from);
2858 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
2859 			cifs_zap_mapping(inode);
2860 			cifs_dbg(FYI,
2861 				 "Set no oplock for inode=%p after a write operation\n",
2862 				 inode);
2863 			cinode->oplock = 0;
2864 		}
2865 		return written;
2866 	}
2867 
2868 	written = cifs_get_writer(cinode);
2869 	if (written)
2870 		return written;
2871 
2872 	written = netfs_file_write_iter(iocb, from);
2873 
2874 	if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2875 		rc = filemap_fdatawrite(inode->i_mapping);
2876 		if (rc)
2877 			cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2878 				 rc, inode);
2879 	}
2880 
2881 	cifs_put_writer(cinode);
2882 	return written;
2883 }
2884 
2885 ssize_t
2886 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2887 {
2888 	struct inode *inode = file_inode(iocb->ki_filp);
2889 	struct cifsInodeInfo *cinode = CIFS_I(inode);
2890 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2891 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2892 						iocb->ki_filp->private_data;
2893 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2894 	int rc = -EACCES;
2895 
2896 	/*
2897 	 * In strict cache mode we need to read from the server all the time
2898 	 * if we don't have level II oplock because the server can delay mtime
2899 	 * change - so we can't make a decision about inode invalidating.
2900 	 * And we can also fail page reads if there are mandatory locks
2901 	 * on pages affected by this read but not on the region from pos to
2902 	 * pos+len-1.
2903 	 */
2904 	if (!CIFS_CACHE_READ(cinode))
2905 		return netfs_unbuffered_read_iter(iocb, to);
2906 
2907 	if (cap_unix(tcon->ses) &&
2908 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2909 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2910 		if (iocb->ki_flags & IOCB_DIRECT)
2911 			return netfs_unbuffered_read_iter(iocb, to);
2912 		return netfs_buffered_read_iter(iocb, to);
2913 	}
2914 
2915 	/*
2916 	 * We need to hold the sem to be sure nobody modifies lock list
2917 	 * with a brlock that prevents reading.
2918 	 */
2919 	if (iocb->ki_flags & IOCB_DIRECT) {
2920 		rc = netfs_start_io_direct(inode);
2921 		if (rc < 0)
2922 			goto out;
2923 		down_read(&cinode->lock_sem);
2924 		if (!cifs_find_lock_conflict(
2925 			    cfile, iocb->ki_pos, iov_iter_count(to),
2926 			    tcon->ses->server->vals->shared_lock_type,
2927 			    0, NULL, CIFS_READ_OP))
2928 			rc = netfs_unbuffered_read_iter_locked(iocb, to);
2929 		up_read(&cinode->lock_sem);
2930 		netfs_end_io_direct(inode);
2931 	} else {
2932 		rc = netfs_start_io_read(inode);
2933 		if (rc < 0)
2934 			goto out;
2935 		down_read(&cinode->lock_sem);
2936 		if (!cifs_find_lock_conflict(
2937 			    cfile, iocb->ki_pos, iov_iter_count(to),
2938 			    tcon->ses->server->vals->shared_lock_type,
2939 			    0, NULL, CIFS_READ_OP))
2940 			rc = filemap_read(iocb, to, 0);
2941 		up_read(&cinode->lock_sem);
2942 		netfs_end_io_read(inode);
2943 	}
2944 out:
2945 	return rc;
2946 }
2947 
2948 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2949 {
2950 	return netfs_page_mkwrite(vmf, NULL);
2951 }
2952 
2953 static const struct vm_operations_struct cifs_file_vm_ops = {
2954 	.fault = filemap_fault,
2955 	.map_pages = filemap_map_pages,
2956 	.page_mkwrite = cifs_page_mkwrite,
2957 };
2958 
2959 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2960 {
2961 	int xid, rc = 0;
2962 	struct inode *inode = file_inode(file);
2963 
2964 	xid = get_xid();
2965 
2966 	if (!CIFS_CACHE_READ(CIFS_I(inode)))
2967 		rc = cifs_zap_mapping(inode);
2968 	if (!rc)
2969 		rc = generic_file_mmap(file, vma);
2970 	if (!rc)
2971 		vma->vm_ops = &cifs_file_vm_ops;
2972 
2973 	free_xid(xid);
2974 	return rc;
2975 }
2976 
2977 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2978 {
2979 	int rc, xid;
2980 
2981 	xid = get_xid();
2982 
2983 	rc = cifs_revalidate_file(file);
2984 	if (rc)
2985 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2986 			 rc);
2987 	if (!rc)
2988 		rc = generic_file_mmap(file, vma);
2989 	if (!rc)
2990 		vma->vm_ops = &cifs_file_vm_ops;
2991 
2992 	free_xid(xid);
2993 	return rc;
2994 }
2995 
2996 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2997 {
2998 	struct cifsFileInfo *open_file;
2999 
3000 	spin_lock(&cifs_inode->open_file_lock);
3001 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3002 		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3003 			spin_unlock(&cifs_inode->open_file_lock);
3004 			return 1;
3005 		}
3006 	}
3007 	spin_unlock(&cifs_inode->open_file_lock);
3008 	return 0;
3009 }
3010 
3011 /* We do not want to update the file size from the server for inodes
3012    open for write, to avoid races with writepage extending the file.
3013    In the future we could consider allowing refreshing the inode only
3014    on increases in the file size, but this is tricky to do without
3015    racing with writebehind page caching in the current Linux kernel
3016    design. */
3017 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3018 			    bool from_readdir)
3019 {
3020 	if (!cifsInode)
3021 		return true;
3022 
3023 	if (is_inode_writable(cifsInode) ||
3024 		((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3025 		/* This inode is open for write at least once */
3026 		struct cifs_sb_info *cifs_sb;
3027 
3028 		cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3029 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3030 			/* since no page cache to corrupt on directio
3031 			   we can change size safely */
3032 			return true;
3033 		}
3034 
3035 		if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3036 			return true;
3037 
3038 		return false;
3039 	} else
3040 		return true;
3041 }
3042 
3043 void cifs_oplock_break(struct work_struct *work)
3044 {
3045 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3046 						  oplock_break);
3047 	struct inode *inode = d_inode(cfile->dentry);
3048 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3049 	struct cifsInodeInfo *cinode = CIFS_I(inode);
3050 	struct cifs_tcon *tcon;
3051 	struct TCP_Server_Info *server;
3052 	struct tcon_link *tlink;
3053 	int rc = 0;
3054 	bool purge_cache = false, oplock_break_cancelled;
3055 	__u64 persistent_fid, volatile_fid;
3056 	__u16 net_fid;
3057 
3058 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3059 			TASK_UNINTERRUPTIBLE);
3060 
3061 	tlink = cifs_sb_tlink(cifs_sb);
3062 	if (IS_ERR(tlink))
3063 		goto out;
3064 	tcon = tlink_tcon(tlink);
3065 	server = tcon->ses->server;
3066 
3067 	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3068 				      cfile->oplock_epoch, &purge_cache);
3069 
3070 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3071 						cifs_has_mand_locks(cinode)) {
3072 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3073 			 inode);
3074 		cinode->oplock = 0;
3075 	}
3076 
3077 	if (inode && S_ISREG(inode->i_mode)) {
3078 		if (CIFS_CACHE_READ(cinode))
3079 			break_lease(inode, O_RDONLY);
3080 		else
3081 			break_lease(inode, O_WRONLY);
3082 		rc = filemap_fdatawrite(inode->i_mapping);
3083 		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3084 			rc = filemap_fdatawait(inode->i_mapping);
3085 			mapping_set_error(inode->i_mapping, rc);
3086 			cifs_zap_mapping(inode);
3087 		}
3088 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3089 		if (CIFS_CACHE_WRITE(cinode))
3090 			goto oplock_break_ack;
3091 	}
3092 
3093 	rc = cifs_push_locks(cfile);
3094 	if (rc)
3095 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3096 
3097 oplock_break_ack:
3098 	/*
3099 	 * When an oplock break is received and there are no active file
3100 	 * handles, only cached ones, schedule the deferred close immediately
3101 	 * so that a new open will not use a cached handle.
3102 	 */
3103 
3104 	if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3105 		cifs_close_deferred_file(cinode);
3106 
3107 	persistent_fid = cfile->fid.persistent_fid;
3108 	volatile_fid = cfile->fid.volatile_fid;
3109 	net_fid = cfile->fid.netfid;
3110 	oplock_break_cancelled = cfile->oplock_break_cancelled;
3111 
3112 	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3113 	/*
3114 	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3115 	 * an acknowledgment to be sent when the file has already been closed.
3116 	 */
3117 	spin_lock(&cinode->open_file_lock);
3118 	/* check list empty since can race with kill_sb calling tree disconnect */
3119 	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3120 		spin_unlock(&cinode->open_file_lock);
3121 		rc = server->ops->oplock_response(tcon, persistent_fid,
3122 						  volatile_fid, net_fid, cinode);
3123 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3124 	} else
3125 		spin_unlock(&cinode->open_file_lock);
3126 
3127 	cifs_put_tlink(tlink);
3128 out:
3129 	cifs_done_oplock_break(cinode);
3130 }
3131 
3132 static int cifs_swap_activate(struct swap_info_struct *sis,
3133 			      struct file *swap_file, sector_t *span)
3134 {
3135 	struct cifsFileInfo *cfile = swap_file->private_data;
3136 	struct inode *inode = swap_file->f_mapping->host;
3137 	unsigned long blocks;
3138 	long long isize;
3139 
3140 	cifs_dbg(FYI, "swap activate\n");
3141 
3142 	if (!swap_file->f_mapping->a_ops->swap_rw)
3143 		/* Cannot support swap */
3144 		return -EINVAL;
3145 
3146 	spin_lock(&inode->i_lock);
3147 	blocks = inode->i_blocks;
3148 	isize = inode->i_size;
3149 	spin_unlock(&inode->i_lock);
3150 	if (blocks*512 < isize) {
3151 		pr_warn("swap activate: swapfile has holes\n");
3152 		return -EINVAL;
3153 	}
3154 	*span = sis->pages;
3155 
3156 	pr_warn_once("Swap support over SMB3 is experimental\n");
3157 
3158 	/*
3159 	 * TODO: consider adding ACL (or documenting how) to prevent other
3160 	 * users (on this or other systems) from reading it
3161 	 */
3162 
3163 
3164 	/* TODO: add sk_set_memalloc(inet) or similar */
3165 
3166 	if (cfile)
3167 		cfile->swapfile = true;
3168 	/*
3169 	 * TODO: Since file already open, we can't open with DENY_ALL here
3170 	 * but we could add call to grab a byte range lock to prevent others
3171 	 * from reading or writing the file
3172 	 */
3173 
3174 	sis->flags |= SWP_FS_OPS;
3175 	return add_swap_extent(sis, 0, sis->max, 0);
3176 }
3177 
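/*
 * The holes check above compares allocated blocks against i_size.  The same
 * test from user space (illustrative sketch, not part of this file):
 */
#if 0
#include <sys/stat.h>
#include <stdbool.h>

/* true if fully allocated, i.e. acceptable to the check above */
static bool swapfile_fully_allocated(const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return false;
	/* st_blocks counts 512-byte units, like inode->i_blocks */
	return (long long)st.st_blocks * 512 >= st.st_size;
}
#endif
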
3178 static void cifs_swap_deactivate(struct file *file)
3179 {
3180 	struct cifsFileInfo *cfile = file->private_data;
3181 
3182 	cifs_dbg(FYI, "swap deactivate\n");
3183 
3184 	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3185 
3186 	if (cfile)
3187 		cfile->swapfile = false;
3188 
3189 	/* do we need to unpin (or unlock) the file */
3190 }
3191 
3192 /**
3193  * cifs_swap_rw - SMB3 address space operation for swap I/O
3194  * @iocb: target I/O control block
3195  * @iter: I/O buffer
3196  *
3197  * Perform IO to the swap-file.  This is much like direct IO.
3198  */
3199 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3200 {
3201 	ssize_t ret;
3202 
3203 	WARN_ON_ONCE(iov_iter_count(iter) != PAGE_SIZE);
3204 
3205 	if (iov_iter_rw(iter) == READ)
3206 		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3207 	else
3208 		ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3209 	if (ret < 0)
3210 		return ret;
3211 	return 0;
3212 }
3213 
3214 const struct address_space_operations cifs_addr_ops = {
3215 	.read_folio	= netfs_read_folio,
3216 	.readahead	= netfs_readahead,
3217 	.writepages	= netfs_writepages,
3218 	.dirty_folio	= netfs_dirty_folio,
3219 	.release_folio	= netfs_release_folio,
3220 	.direct_IO	= noop_direct_IO,
3221 	.invalidate_folio = netfs_invalidate_folio,
3222 	.migrate_folio	= filemap_migrate_folio,
3223 	/*
3224 	 * TODO: investigate and if useful we could add an is_dirty_writeback
3225 	 * helper if needed
3226 	 */
3227 	.swap_activate	= cifs_swap_activate,
3228 	.swap_deactivate = cifs_swap_deactivate,
3229 	.swap_rw = cifs_swap_rw,
3230 };
3231 
3232 /*
3233  * cifs_readahead requires the server to support a buffer large enough to
3234  * contain the header plus one complete page of data.  Otherwise, we need
3235  * to leave cifs_readahead out of the address space operations.
3236  */
3237 const struct address_space_operations cifs_addr_ops_smallbuf = {
3238 	.read_folio	= netfs_read_folio,
3239 	.writepages	= netfs_writepages,
3240 	.dirty_folio	= netfs_dirty_folio,
3241 	.release_folio	= netfs_release_folio,
3242 	.invalidate_folio = netfs_invalidate_folio,
3243 	.migrate_folio	= filemap_migrate_folio,
3244 };
3245