xref: /linux/fs/smb/client/cifsfs.c (revision f89ea63f)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the year field should only go to 119,
56  * which limits the maximum year to 2099, but this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
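/*
 * The macros above follow the standard DOS/FAT packing: the date word stores
 * years-since-1980 in bits 15:9, month in bits 8:5 and day in bits 4:0; the
 * time word stores hours in bits 15:11, minutes in bits 10:5 and seconds/2 in
 * bits 4:0.  Worked values: SMB_DATE_MIN = 0x0021 (1980-01-01),
 * SMB_DATE_MAX = 0xFF9F (2107-12-31), SMB_TIME_MAX = 0xBF7D (23:59:58).
 */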
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
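/*
 * Illustrative examples of overriding the parameters declared above: at load
 * time, e.g. "modprobe cifs CIFSMaxBufSize=65536", or, for the 0644 ones,
 * at runtime via /sys/module/cifs/parameters/, e.g.
 * "echo 0 > /sys/module/cifs/parameters/enable_oplocks".
 */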
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 __u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  * Note that it should only be called if a reference to the VFS super block is
165  * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
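/*
 * Every successful cifs_sb_active() must eventually be balanced by a
 * cifs_sb_deactive(); the final deactive drops the s_active reference taken
 * by the first active and allows the superblock to be torn down.
 */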
185 
186 static int
187 cifs_read_super(struct super_block *sb)
188 {
189 	struct inode *inode;
190 	struct cifs_sb_info *cifs_sb;
191 	struct cifs_tcon *tcon;
192 	struct timespec64 ts;
193 	int rc = 0;
194 
195 	cifs_sb = CIFS_SB(sb);
196 	tcon = cifs_sb_master_tcon(cifs_sb);
197 
198 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
199 		sb->s_flags |= SB_POSIXACL;
200 
201 	if (tcon->snapshot_time)
202 		sb->s_flags |= SB_RDONLY;
203 
204 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
205 		sb->s_maxbytes = MAX_LFS_FILESIZE;
206 	else
207 		sb->s_maxbytes = MAX_NON_LFS;
208 
209 	/*
210 	 * Some very old servers like DOS and OS/2 used 2 second granularity
211 	 * (while all current servers use 100ns granularity - see MS-DTYP)
212 	 * but 1 second is the maximum allowed granularity for the VFS
213 	 * so for old servers set time granularity to 1 second while for
214 	 * everything else (current servers) set it to 100ns.
215 	 */
216 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
217 	    ((tcon->ses->capabilities &
218 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
219 	    !tcon->unix_ext) {
220 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
221 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
222 		sb->s_time_min = ts.tv_sec;
223 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
224 				    cpu_to_le16(SMB_TIME_MAX), 0);
225 		sb->s_time_max = ts.tv_sec;
226 	} else {
227 		/*
228 		 * Almost every server, including all SMB2+, uses DCE TIME
229 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
230 		 */
231 		sb->s_time_gran = 100;
232 		ts = cifs_NTtimeToUnix(0);
233 		sb->s_time_min = ts.tv_sec;
234 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
235 		sb->s_time_max = ts.tv_sec;
236 	}
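	/*
	 * In practice this means roughly 1980-01-01 .. 2107-12-31 23:59:58 for
	 * the legacy DOS-time branch, and 1601-01-01 .. approximately year
	 * 30828 (S64_MAX in 100ns units) for the NT-time branch.
	 */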
237 
238 	sb->s_magic = CIFS_SUPER_MAGIC;
239 	sb->s_op = &cifs_super_ops;
240 	sb->s_xattr = cifs_xattr_handlers;
241 	rc = super_setup_bdi(sb);
242 	if (rc)
243 		goto out_no_root;
244 	/* tune readahead according to rsize if readahead size not set on mount */
245 	if (cifs_sb->ctx->rsize == 0)
246 		cifs_sb->ctx->rsize =
247 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
248 	if (cifs_sb->ctx->rasize)
249 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
250 	else
251 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
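	/*
	 * Illustrative numbers: with rsize = 4 MiB and 4 KiB pages this yields
	 * ra_pages = 2048, i.e. 8 MiB of readahead.
	 */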
252 
253 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
254 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
255 	inode = cifs_root_iget(sb);
256 
257 	if (IS_ERR(inode)) {
258 		rc = PTR_ERR(inode);
259 		goto out_no_root;
260 	}
261 
262 	if (tcon->nocase)
263 		sb->s_d_op = &cifs_ci_dentry_ops;
264 	else
265 		sb->s_d_op = &cifs_dentry_ops;
266 
267 	sb->s_root = d_make_root(inode);
268 	if (!sb->s_root) {
269 		rc = -ENOMEM;
270 		goto out_no_root;
271 	}
272 
273 #ifdef CONFIG_CIFS_NFSD_EXPORT
274 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
275 		cifs_dbg(FYI, "export ops supported\n");
276 		sb->s_export_op = &cifs_export_ops;
277 	}
278 #endif /* CONFIG_CIFS_NFSD_EXPORT */
279 
280 	return 0;
281 
282 out_no_root:
283 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
284 	return rc;
285 }
286 
287 static void cifs_kill_sb(struct super_block *sb)
288 {
289 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290 
291 	/*
292 	 * We need to release all dentries for the cached directories
293 	 * before we kill the sb.
294 	 */
295 	if (cifs_sb->root) {
296 		close_all_cached_dirs(cifs_sb);
297 
298 		/* finally release root dentry */
299 		dput(cifs_sb->root);
300 		cifs_sb->root = NULL;
301 	}
302 
303 	kill_anon_super(sb);
304 	cifs_umount(cifs_sb);
305 }
306 
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 	struct super_block *sb = dentry->d_sb;
311 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 	struct TCP_Server_Info *server = tcon->ses->server;
314 	unsigned int xid;
315 	int rc = 0;
316 
317 	xid = get_xid();
318 
319 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
320 		buf->f_namelen =
321 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
322 	else
323 		buf->f_namelen = PATH_MAX;
324 
325 	buf->f_fsid.val[0] = tcon->vol_serial_number;
326 	/* we use part of the create time for extra randomness, see man statfs */
327 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
328 
329 	buf->f_files = 0;	/* undefined */
330 	buf->f_ffree = 0;	/* unlimited */
331 
332 	if (server->ops->queryfs)
333 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
334 
335 	free_xid(xid);
336 	return rc;
337 }
338 
339 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
340 {
341 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
342 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
343 	struct TCP_Server_Info *server = tcon->ses->server;
344 
345 	if (server->ops->fallocate)
346 		return server->ops->fallocate(file, tcon, mode, off, len);
347 
348 	return -EOPNOTSUPP;
349 }
350 
351 static int cifs_permission(struct mnt_idmap *idmap,
352 			   struct inode *inode, int mask)
353 {
354 	struct cifs_sb_info *cifs_sb;
355 
356 	cifs_sb = CIFS_SB(inode->i_sb);
357 
358 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
359 		if ((mask & MAY_EXEC) && !execute_ok(inode))
360 			return -EACCES;
361 		else
362 			return 0;
363 	} else /* the file mode might have been restricted at mount time
364 		on the client (above and beyond the ACL on the server) for
365 		servers which do not support setting and viewing mode bits,
366 		so allowing the client to check permissions is useful */
367 		return generic_permission(&nop_mnt_idmap, inode, mask);
368 }
369 
370 static struct kmem_cache *cifs_inode_cachep;
371 static struct kmem_cache *cifs_req_cachep;
372 static struct kmem_cache *cifs_mid_cachep;
373 static struct kmem_cache *cifs_sm_req_cachep;
374 static struct kmem_cache *cifs_io_request_cachep;
375 static struct kmem_cache *cifs_io_subrequest_cachep;
376 mempool_t *cifs_sm_req_poolp;
377 mempool_t *cifs_req_poolp;
378 mempool_t *cifs_mid_poolp;
379 mempool_t cifs_io_request_pool;
380 mempool_t cifs_io_subrequest_pool;
381 
382 static struct inode *
383 cifs_alloc_inode(struct super_block *sb)
384 {
385 	struct cifsInodeInfo *cifs_inode;
386 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
387 	if (!cifs_inode)
388 		return NULL;
389 	cifs_inode->cifsAttrs = 0x20;	/* default */
390 	cifs_inode->time = 0;
391 	/*
392 	 * Until the file is open and we have gotten oplock info back from the
393 	 * server, can not assume caching of file data or metadata.
394 	 */
395 	cifs_set_oplock_level(cifs_inode, 0);
396 	cifs_inode->lease_granted = false;
397 	cifs_inode->flags = 0;
398 	spin_lock_init(&cifs_inode->writers_lock);
399 	cifs_inode->writers = 0;
400 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
401 	cifs_inode->netfs.remote_i_size = 0;
402 	cifs_inode->uniqueid = 0;
403 	cifs_inode->createtime = 0;
404 	cifs_inode->epoch = 0;
405 	spin_lock_init(&cifs_inode->open_file_lock);
406 	generate_random_uuid(cifs_inode->lease_key);
407 	cifs_inode->symlink_target = NULL;
408 
409 	/*
410 	 * Can not set i_flags here - they get immediately overwritten to zero
411 	 * by the VFS.
412 	 */
413 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
414 	INIT_LIST_HEAD(&cifs_inode->openFileList);
415 	INIT_LIST_HEAD(&cifs_inode->llist);
416 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
417 	spin_lock_init(&cifs_inode->deferred_lock);
418 	return &cifs_inode->netfs.inode;
419 }
420 
421 static void
422 cifs_free_inode(struct inode *inode)
423 {
424 	struct cifsInodeInfo *cinode = CIFS_I(inode);
425 
426 	if (S_ISLNK(inode->i_mode))
427 		kfree(cinode->symlink_target);
428 	kmem_cache_free(cifs_inode_cachep, cinode);
429 }
430 
431 static void
432 cifs_evict_inode(struct inode *inode)
433 {
434 	netfs_wait_for_outstanding_io(inode);
435 	truncate_inode_pages_final(&inode->i_data);
436 	if (inode->i_state & I_PINNING_NETFS_WB)
437 		cifs_fscache_unuse_inode_cookie(inode, true);
438 	cifs_fscache_release_inode_cookie(inode);
439 	clear_inode(inode);
440 }
441 
442 static void
443 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
444 {
445 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
446 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
447 
448 	seq_puts(s, ",addr=");
449 
450 	switch (server->dstaddr.ss_family) {
451 	case AF_INET:
452 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
453 		break;
454 	case AF_INET6:
455 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
456 		if (sa6->sin6_scope_id)
457 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
458 		break;
459 	default:
460 		seq_puts(s, "(unknown)");
461 	}
462 	if (server->rdma)
463 		seq_puts(s, ",rdma");
464 }
465 
466 static void
467 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
468 {
469 	if (ses->sectype == Unspecified) {
470 		if (ses->user_name == NULL)
471 			seq_puts(s, ",sec=none");
472 		return;
473 	}
474 
475 	seq_puts(s, ",sec=");
476 
477 	switch (ses->sectype) {
478 	case NTLMv2:
479 		seq_puts(s, "ntlmv2");
480 		break;
481 	case Kerberos:
482 		seq_puts(s, "krb5");
483 		break;
484 	case RawNTLMSSP:
485 		seq_puts(s, "ntlmssp");
486 		break;
487 	default:
488 		/* shouldn't ever happen */
489 		seq_puts(s, "unknown");
490 		break;
491 	}
492 
493 	if (ses->sign)
494 		seq_puts(s, "i");
495 
496 	if (ses->sectype == Kerberos)
497 		seq_printf(s, ",cruid=%u",
498 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
499 }
500 
501 static void
502 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
503 {
504 	seq_puts(s, ",cache=");
505 
506 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
507 		seq_puts(s, "strict");
508 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
509 		seq_puts(s, "none");
510 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
511 		seq_puts(s, "singleclient"); /* assume only one client access */
512 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
513 		seq_puts(s, "ro"); /* read only caching assumed */
514 	else
515 		seq_puts(s, "loose");
516 }
517 
518 /*
519  * cifs_show_devname() is used so we show the mount device name with correct
520  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
521  */
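/* e.g. a source of "\\server\share" is displayed as "//server/share" */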
522 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
523 {
524 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
525 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
526 
527 	if (devname == NULL)
528 		seq_puts(m, "none");
529 	else {
530 		convert_delimiter(devname, '/');
531 		/* escape all spaces in share names */
532 		seq_escape(m, devname, " \t");
533 		kfree(devname);
534 	}
535 	return 0;
536 }
537 
538 /*
539  * cifs_show_options() is for displaying mount options in /proc/mounts.
540  * Not all settable options are displayed but most of the important
541  * ones are.
542  */
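/*
 * Purely illustrative: the resulting /proc/mounts options for an SMB 3.1.1
 * mount might look like "vers=3.1.1,sec=ntlmssp,cache=strict,username=user,
 * uid=0,noforceuid,gid=0,noforcegid,addr=192.168.1.10,file_mode=0755,...".
 */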
543 static int
544 cifs_show_options(struct seq_file *s, struct dentry *root)
545 {
546 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
547 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
548 	struct sockaddr *srcaddr;
549 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
550 
551 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
552 	cifs_show_security(s, tcon->ses);
553 	cifs_show_cache_flavor(s, cifs_sb);
554 
555 	if (tcon->no_lease)
556 		seq_puts(s, ",nolease");
557 	if (cifs_sb->ctx->multiuser)
558 		seq_puts(s, ",multiuser");
559 	else if (tcon->ses->user_name)
560 		seq_show_option(s, "username", tcon->ses->user_name);
561 
562 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
563 		seq_show_option(s, "domain", tcon->ses->domainName);
564 
565 	if (srcaddr->sa_family != AF_UNSPEC) {
566 		struct sockaddr_in *saddr4;
567 		struct sockaddr_in6 *saddr6;
568 		saddr4 = (struct sockaddr_in *)srcaddr;
569 		saddr6 = (struct sockaddr_in6 *)srcaddr;
570 		if (srcaddr->sa_family == AF_INET6)
571 			seq_printf(s, ",srcaddr=%pI6c",
572 				   &saddr6->sin6_addr);
573 		else if (srcaddr->sa_family == AF_INET)
574 			seq_printf(s, ",srcaddr=%pI4",
575 				   &saddr4->sin_addr.s_addr);
576 		else
577 			seq_printf(s, ",srcaddr=BAD-AF:%i",
578 				   (int)(srcaddr->sa_family));
579 	}
580 
581 	seq_printf(s, ",uid=%u",
582 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
583 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
584 		seq_puts(s, ",forceuid");
585 	else
586 		seq_puts(s, ",noforceuid");
587 
588 	seq_printf(s, ",gid=%u",
589 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
590 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
591 		seq_puts(s, ",forcegid");
592 	else
593 		seq_puts(s, ",noforcegid");
594 
595 	cifs_show_address(s, tcon->ses->server);
596 
597 	if (!tcon->unix_ext)
598 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
599 					   cifs_sb->ctx->file_mode,
600 					   cifs_sb->ctx->dir_mode);
601 	if (cifs_sb->ctx->iocharset)
602 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
603 	if (tcon->seal)
604 		seq_puts(s, ",seal");
605 	else if (tcon->ses->server->ignore_signature)
606 		seq_puts(s, ",signloosely");
607 	if (tcon->nocase)
608 		seq_puts(s, ",nocase");
609 	if (tcon->nodelete)
610 		seq_puts(s, ",nodelete");
611 	if (cifs_sb->ctx->no_sparse)
612 		seq_puts(s, ",nosparse");
613 	if (tcon->local_lease)
614 		seq_puts(s, ",locallease");
615 	if (tcon->retry)
616 		seq_puts(s, ",hard");
617 	else
618 		seq_puts(s, ",soft");
619 	if (tcon->use_persistent)
620 		seq_puts(s, ",persistenthandles");
621 	else if (tcon->use_resilient)
622 		seq_puts(s, ",resilienthandles");
623 	if (tcon->posix_extensions)
624 		seq_puts(s, ",posix");
625 	else if (tcon->unix_ext)
626 		seq_puts(s, ",unix");
627 	else
628 		seq_puts(s, ",nounix");
629 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
630 		seq_puts(s, ",nodfs");
631 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
632 		seq_puts(s, ",posixpaths");
633 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
634 		seq_puts(s, ",setuids");
635 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
636 		seq_puts(s, ",idsfromsid");
637 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
638 		seq_puts(s, ",serverino");
639 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
640 		seq_puts(s, ",rwpidforward");
641 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
642 		seq_puts(s, ",forcemand");
643 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
644 		seq_puts(s, ",nouser_xattr");
645 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
646 		seq_puts(s, ",mapchars");
647 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
648 		seq_puts(s, ",mapposix");
649 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
650 		seq_puts(s, ",sfu");
651 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
652 		seq_puts(s, ",nobrl");
653 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
654 		seq_puts(s, ",nohandlecache");
655 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
656 		seq_puts(s, ",modefromsid");
657 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
658 		seq_puts(s, ",cifsacl");
659 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
660 		seq_puts(s, ",dynperm");
661 	if (root->d_sb->s_flags & SB_POSIXACL)
662 		seq_puts(s, ",acl");
663 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
664 		seq_puts(s, ",mfsymlinks");
665 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
666 		seq_puts(s, ",fsc");
667 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
668 		seq_puts(s, ",nostrictsync");
669 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
670 		seq_puts(s, ",noperm");
671 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
672 		seq_printf(s, ",backupuid=%u",
673 			   from_kuid_munged(&init_user_ns,
674 					    cifs_sb->ctx->backupuid));
675 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
676 		seq_printf(s, ",backupgid=%u",
677 			   from_kgid_munged(&init_user_ns,
678 					    cifs_sb->ctx->backupgid));
679 	seq_show_option(s, "reparse",
680 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
681 
682 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
683 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
684 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
685 	if (cifs_sb->ctx->rasize)
686 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
687 	if (tcon->ses->server->min_offload)
688 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
689 	if (tcon->ses->server->retrans)
690 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
691 	seq_printf(s, ",echo_interval=%lu",
692 			tcon->ses->server->echo_interval / HZ);
693 
694 	/* Only display the following if overridden on mount */
695 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
696 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
697 	if (tcon->ses->server->tcp_nodelay)
698 		seq_puts(s, ",tcpnodelay");
699 	if (tcon->ses->server->noautotune)
700 		seq_puts(s, ",noautotune");
701 	if (tcon->ses->server->noblocksnd)
702 		seq_puts(s, ",noblocksend");
703 	if (tcon->ses->server->nosharesock)
704 		seq_puts(s, ",nosharesock");
705 
706 	if (tcon->snapshot_time)
707 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
708 	if (tcon->handle_timeout)
709 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
710 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
711 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
712 
713 	/*
714 	 * Display the file and directory attribute timeouts in seconds.
715 	 * If the file and directory attribute timeouts are the same, then
716 	 * actimeo was likely specified on mount.
717 	 */
718 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
719 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
720 	else {
721 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
722 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
723 	}
724 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
725 
726 	if (tcon->ses->chan_max > 1)
727 		seq_printf(s, ",multichannel,max_channels=%zu",
728 			   tcon->ses->chan_max);
729 
730 	if (tcon->use_witness)
731 		seq_puts(s, ",witness");
732 
733 	return 0;
734 }
735 
736 static void cifs_umount_begin(struct super_block *sb)
737 {
738 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
739 	struct cifs_tcon *tcon;
740 
741 	if (cifs_sb == NULL)
742 		return;
743 
744 	tcon = cifs_sb_master_tcon(cifs_sb);
745 
746 	spin_lock(&cifs_tcp_ses_lock);
747 	spin_lock(&tcon->tc_lock);
748 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
749 			    netfs_trace_tcon_ref_see_umount);
750 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
751 		/* we have other mounts to the same share, or we have
752 		   already tried to umount this and woken up
753 		   all waiting network requests; nothing to do */
754 		spin_unlock(&tcon->tc_lock);
755 		spin_unlock(&cifs_tcp_ses_lock);
756 		return;
757 	}
758 	/*
759 	 * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
760 	 * fail later (e.g. due to open files).  TID_EXITING is set just before the tree disconnect (tdis) request is sent
761 	 */
762 	spin_unlock(&tcon->tc_lock);
763 	spin_unlock(&cifs_tcp_ses_lock);
764 
765 	cifs_close_all_deferred_files(tcon);
766 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
767 	/* cancel_notify_requests(tcon); */
768 	if (tcon->ses && tcon->ses->server) {
769 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
770 		wake_up_all(&tcon->ses->server->request_q);
771 		wake_up_all(&tcon->ses->server->response_q);
772 		msleep(1); /* yield */
773 		/* we have to kick the requests once more */
774 		wake_up_all(&tcon->ses->server->response_q);
775 		msleep(1);
776 	}
777 
778 	return;
779 }
780 
781 static int cifs_freeze(struct super_block *sb)
782 {
783 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
784 	struct cifs_tcon *tcon;
785 
786 	if (cifs_sb == NULL)
787 		return 0;
788 
789 	tcon = cifs_sb_master_tcon(cifs_sb);
790 
791 	cifs_close_all_deferred_files(tcon);
792 	return 0;
793 }
794 
795 #ifdef CONFIG_CIFS_STATS2
796 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
797 {
798 	/* BB FIXME */
799 	return 0;
800 }
801 #endif
802 
803 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
804 {
805 	return netfs_unpin_writeback(inode, wbc);
806 }
807 
808 static int cifs_drop_inode(struct inode *inode)
809 {
810 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
811 
812 	/* no serverino => unconditional eviction */
813 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
814 		generic_drop_inode(inode);
815 }
816 
817 static const struct super_operations cifs_super_ops = {
818 	.statfs = cifs_statfs,
819 	.alloc_inode = cifs_alloc_inode,
820 	.write_inode	= cifs_write_inode,
821 	.free_inode = cifs_free_inode,
822 	.drop_inode	= cifs_drop_inode,
823 	.evict_inode	= cifs_evict_inode,
824 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
825 	.show_devname   = cifs_show_devname,
826 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
827 	function unless later we add lazy close of inodes or unless the
828 	kernel forgets to call us with the same number of releases (closes)
829 	as opens */
830 	.show_options = cifs_show_options,
831 	.umount_begin   = cifs_umount_begin,
832 	.freeze_fs      = cifs_freeze,
833 #ifdef CONFIG_CIFS_STATS2
834 	.show_stats = cifs_show_stats,
835 #endif
836 };
837 
838 /*
839  * Get root dentry from superblock according to prefix path mount option.
840  * Return dentry with refcount + 1 on success and NULL otherwise.
841  */
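/*
 * e.g. (illustrative) mounting //server/share/a/b makes "a/b" the prefix
 * path; the loop below walks it one component at a time starting from
 * sb->s_root.
 */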
842 static struct dentry *
843 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
844 {
845 	struct dentry *dentry;
846 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
847 	char *full_path = NULL;
848 	char *s, *p;
849 	char sep;
850 
851 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
852 		return dget(sb->s_root);
853 
854 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
855 				cifs_sb_master_tcon(cifs_sb), 0);
856 	if (full_path == NULL)
857 		return ERR_PTR(-ENOMEM);
858 
859 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
860 
861 	sep = CIFS_DIR_SEP(cifs_sb);
862 	dentry = dget(sb->s_root);
863 	s = full_path;
864 
865 	do {
866 		struct inode *dir = d_inode(dentry);
867 		struct dentry *child;
868 
869 		if (!S_ISDIR(dir->i_mode)) {
870 			dput(dentry);
871 			dentry = ERR_PTR(-ENOTDIR);
872 			break;
873 		}
874 
875 		/* skip separators */
876 		while (*s == sep)
877 			s++;
878 		if (!*s)
879 			break;
880 		p = s++;
881 		/* next separator */
882 		while (*s && *s != sep)
883 			s++;
884 
885 		child = lookup_positive_unlocked(p, dentry, s - p);
886 		dput(dentry);
887 		dentry = child;
888 	} while (!IS_ERR(dentry));
889 	kfree(full_path);
890 	return dentry;
891 }
892 
893 static int cifs_set_super(struct super_block *sb, void *data)
894 {
895 	struct cifs_mnt_data *mnt_data = data;
896 	sb->s_fs_info = mnt_data->cifs_sb;
897 	return set_anon_super(sb, NULL);
898 }
899 
900 struct dentry *
901 cifs_smb3_do_mount(struct file_system_type *fs_type,
902 	      int flags, struct smb3_fs_context *old_ctx)
903 {
904 	struct cifs_mnt_data mnt_data;
905 	struct cifs_sb_info *cifs_sb;
906 	struct super_block *sb;
907 	struct dentry *root;
908 	int rc;
909 
910 	if (cifsFYI) {
911 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
912 			 old_ctx->source, flags);
913 	} else {
914 		cifs_info("Attempting to mount %s\n", old_ctx->source);
915 	}
916 
917 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
918 	if (!cifs_sb)
919 		return ERR_PTR(-ENOMEM);
920 
921 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
922 	if (!cifs_sb->ctx) {
923 		root = ERR_PTR(-ENOMEM);
924 		goto out;
925 	}
926 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
927 	if (rc) {
928 		root = ERR_PTR(rc);
929 		goto out;
930 	}
931 
932 	rc = cifs_setup_cifs_sb(cifs_sb);
933 	if (rc) {
934 		root = ERR_PTR(rc);
935 		goto out;
936 	}
937 
938 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
939 	if (rc) {
940 		if (!(flags & SB_SILENT))
941 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
942 				 rc);
943 		root = ERR_PTR(rc);
944 		goto out;
945 	}
946 
947 	mnt_data.ctx = cifs_sb->ctx;
948 	mnt_data.cifs_sb = cifs_sb;
949 	mnt_data.flags = flags;
950 
951 	/* BB should we make this contingent on mount parm? */
952 	flags |= SB_NODIRATIME | SB_NOATIME;
953 
954 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
955 	if (IS_ERR(sb)) {
956 		cifs_umount(cifs_sb);
957 		return ERR_CAST(sb);
958 	}
959 
960 	if (sb->s_root) {
961 		cifs_dbg(FYI, "Use existing superblock\n");
962 		cifs_umount(cifs_sb);
963 		cifs_sb = NULL;
964 	} else {
965 		rc = cifs_read_super(sb);
966 		if (rc) {
967 			root = ERR_PTR(rc);
968 			goto out_super;
969 		}
970 
971 		sb->s_flags |= SB_ACTIVE;
972 	}
973 
974 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
975 	if (IS_ERR(root))
976 		goto out_super;
977 
978 	if (cifs_sb)
979 		cifs_sb->root = dget(root);
980 
981 	cifs_dbg(FYI, "dentry root is: %p\n", root);
982 	return root;
983 
984 out_super:
985 	deactivate_locked_super(sb);
986 	return root;
987 out:
988 	kfree(cifs_sb->prepath);
989 	smb3_cleanup_fs_context(cifs_sb->ctx);
990 	kfree(cifs_sb);
991 	return root;
992 }
993 
994 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
995 {
996 	struct cifsFileInfo *cfile = file->private_data;
997 	struct cifs_tcon *tcon;
998 
999 	/*
1000 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1001 	 * the cached file length
1002 	 */
1003 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1004 		int rc;
1005 		struct inode *inode = file_inode(file);
1006 
1007 		/*
1008 		 * We need to be sure that all dirty pages are written and the
1009 		 * server has the newest file length.
1010 		 */
1011 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1012 		    inode->i_mapping->nrpages != 0) {
1013 			rc = filemap_fdatawait(inode->i_mapping);
1014 			if (rc) {
1015 				mapping_set_error(inode->i_mapping, rc);
1016 				return rc;
1017 			}
1018 		}
1019 		/*
1020 		 * Some applications poll for the file length in this strange
1021 		 * way so we must seek to end on non-oplocked files by
1022 		 * setting the revalidate time to zero.
1023 		 */
1024 		CIFS_I(inode)->time = 0;
1025 
1026 		rc = cifs_revalidate_file_attr(file);
1027 		if (rc < 0)
1028 			return (loff_t)rc;
1029 	}
1030 	if (cfile && cfile->tlink) {
1031 		tcon = tlink_tcon(cfile->tlink);
1032 		if (tcon->ses->server->ops->llseek)
1033 			return tcon->ses->server->ops->llseek(file, tcon,
1034 							      offset, whence);
1035 	}
1036 	return generic_file_llseek(file, offset, whence);
1037 }
1038 
1039 static int
1040 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1041 {
1042 	/*
1043 	 * Note that this is called by vfs setlease with i_lock held to
1044 	 * protect *lease from going away.
1045 	 */
1046 	struct inode *inode = file_inode(file);
1047 	struct cifsFileInfo *cfile = file->private_data;
1048 
1049 	/* Check if file is oplocked if this is request for new lease */
1050 	if (arg == F_UNLCK ||
1051 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1052 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1053 		return generic_setlease(file, arg, lease, priv);
1054 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1055 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1056 		/*
1057 		 * If the server claims to support oplock on this file, then we
1058 		 * still need to check oplock even if the local_lease mount
1059 		 * option is set, but there are servers which do not support
1060 		 * oplock for which this mount option may be useful if the user
1061 		 * knows that the file won't be changed on the server by anyone
1062 		 * else.
1063 		 */
1064 		return generic_setlease(file, arg, lease, priv);
1065 	else
1066 		return -EAGAIN;
1067 }
1068 
1069 struct file_system_type cifs_fs_type = {
1070 	.owner = THIS_MODULE,
1071 	.name = "cifs",
1072 	.init_fs_context = smb3_init_fs_context,
1073 	.parameters = smb3_fs_parameters,
1074 	.kill_sb = cifs_kill_sb,
1075 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1076 };
1077 MODULE_ALIAS_FS("cifs");
1078 
1079 struct file_system_type smb3_fs_type = {
1080 	.owner = THIS_MODULE,
1081 	.name = "smb3",
1082 	.init_fs_context = smb3_init_fs_context,
1083 	.parameters = smb3_fs_parameters,
1084 	.kill_sb = cifs_kill_sb,
1085 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1086 };
1087 MODULE_ALIAS_FS("smb3");
1088 MODULE_ALIAS("smb3");
1089 
1090 const struct inode_operations cifs_dir_inode_ops = {
1091 	.create = cifs_create,
1092 	.atomic_open = cifs_atomic_open,
1093 	.lookup = cifs_lookup,
1094 	.getattr = cifs_getattr,
1095 	.unlink = cifs_unlink,
1096 	.link = cifs_hardlink,
1097 	.mkdir = cifs_mkdir,
1098 	.rmdir = cifs_rmdir,
1099 	.rename = cifs_rename2,
1100 	.permission = cifs_permission,
1101 	.setattr = cifs_setattr,
1102 	.symlink = cifs_symlink,
1103 	.mknod   = cifs_mknod,
1104 	.listxattr = cifs_listxattr,
1105 	.get_acl = cifs_get_acl,
1106 	.set_acl = cifs_set_acl,
1107 };
1108 
1109 const struct inode_operations cifs_file_inode_ops = {
1110 	.setattr = cifs_setattr,
1111 	.getattr = cifs_getattr,
1112 	.permission = cifs_permission,
1113 	.listxattr = cifs_listxattr,
1114 	.fiemap = cifs_fiemap,
1115 	.get_acl = cifs_get_acl,
1116 	.set_acl = cifs_set_acl,
1117 };
1118 
1119 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1120 			    struct delayed_call *done)
1121 {
1122 	char *target_path;
1123 
1124 	if (!dentry)
1125 		return ERR_PTR(-ECHILD);
1126 
1127 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1128 	if (!target_path)
1129 		return ERR_PTR(-ENOMEM);
1130 
1131 	spin_lock(&inode->i_lock);
1132 	if (likely(CIFS_I(inode)->symlink_target)) {
1133 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1134 	} else {
1135 		kfree(target_path);
1136 		target_path = ERR_PTR(-EOPNOTSUPP);
1137 	}
1138 	spin_unlock(&inode->i_lock);
1139 
1140 	if (!IS_ERR(target_path))
1141 		set_delayed_call(done, kfree_link, target_path);
1142 
1143 	return target_path;
1144 }
1145 
1146 const struct inode_operations cifs_symlink_inode_ops = {
1147 	.get_link = cifs_get_link,
1148 	.setattr = cifs_setattr,
1149 	.permission = cifs_permission,
1150 	.listxattr = cifs_listxattr,
1151 };
1152 
1153 /*
1154  * Advance the EOF marker to after the source range.
1155  */
1156 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1157 				struct cifs_tcon *src_tcon,
1158 				unsigned int xid, loff_t src_end)
1159 {
1160 	struct cifsFileInfo *writeable_srcfile;
1161 	int rc = -EINVAL;
1162 
1163 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1164 	if (writeable_srcfile) {
1165 		if (src_tcon->ses->server->ops->set_file_size)
1166 			rc = src_tcon->ses->server->ops->set_file_size(
1167 				xid, src_tcon, writeable_srcfile,
1168 				src_inode->i_size, true /* no need to set sparse */);
1169 		else
1170 			rc = -ENOSYS;
1171 		cifsFileInfo_put(writeable_srcfile);
1172 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1173 	}
1174 
1175 	if (rc < 0)
1176 		goto set_failed;
1177 
1178 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1179 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1180 	return 0;
1181 
1182 set_failed:
1183 	return filemap_write_and_wait(src_inode->i_mapping);
1184 }
1185 
1186 /*
1187  * Flush out either the folio that overlaps the beginning of a range in which
1188  * pos resides or the folio that overlaps the end of a range unless that folio
1189  * is entirely within the range we're going to invalidate.  We extend the flush
1190  * bounds to encompass the folio.
1191  */
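/*
 * Illustrative example: with 4 KiB folios, pos = 0x1800 falls in the folio
 * covering [0x1000, 0x1fff], so *_fstart/*_fend are widened to those bounds
 * and the folio is written back unless pos sits exactly on the relevant edge
 * (the start for the first folio, the end for the last).
 */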
1192 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1193 			    bool first)
1194 {
1195 	struct folio *folio;
1196 	unsigned long long fpos, fend;
1197 	pgoff_t index = pos / PAGE_SIZE;
1198 	size_t size;
1199 	int rc = 0;
1200 
1201 	folio = filemap_get_folio(inode->i_mapping, index);
1202 	if (IS_ERR(folio))
1203 		return 0;
1204 
1205 	size = folio_size(folio);
1206 	fpos = folio_pos(folio);
1207 	fend = fpos + size - 1;
1208 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1209 	*_fend   = max_t(unsigned long long, *_fend, fend);
1210 	if ((first && pos == fpos) || (!first && pos == fend))
1211 		goto out;
1212 
1213 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1214 out:
1215 	folio_put(folio);
1216 	return rc;
1217 }
1218 
1219 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1220 		struct file *dst_file, loff_t destoff, loff_t len,
1221 		unsigned int remap_flags)
1222 {
1223 	struct inode *src_inode = file_inode(src_file);
1224 	struct inode *target_inode = file_inode(dst_file);
1225 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1226 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1227 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1228 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1229 	struct cifs_tcon *target_tcon, *src_tcon;
1230 	unsigned long long destend, fstart, fend, old_size, new_size;
1231 	unsigned int xid;
1232 	int rc;
1233 
1234 	if (remap_flags & REMAP_FILE_DEDUP)
1235 		return -EOPNOTSUPP;
1236 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1237 		return -EINVAL;
1238 
1239 	cifs_dbg(FYI, "clone range\n");
1240 
1241 	xid = get_xid();
1242 
1243 	if (!smb_file_src || !smb_file_target) {
1244 		rc = -EBADF;
1245 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1246 		goto out;
1247 	}
1248 
1249 	src_tcon = tlink_tcon(smb_file_src->tlink);
1250 	target_tcon = tlink_tcon(smb_file_target->tlink);
1251 
1252 	/*
1253 	 * Note: the cifs case is easier than btrfs since the server is responsible
1254 	 * for checking for proper open modes and file type, and if it wants,
1255 	 * the server could even support copying a range where source == target
1256 	 */
1257 	lock_two_nondirectories(target_inode, src_inode);
1258 
1259 	if (len == 0)
1260 		len = src_inode->i_size - off;
1261 
1262 	cifs_dbg(FYI, "clone range\n");
1263 
1264 	/* Flush the source buffer */
1265 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1266 					  off + len - 1);
1267 	if (rc)
1268 		goto unlock;
1269 
1270 	/* The server-side copy will fail if the source crosses the EOF marker.
1271 	 * Advance the EOF marker after the flush above to the end of the range
1272 	 * if it's short of that.
1273 	 */
1274 	if (src_cifsi->netfs.remote_i_size < off + len) {
1275 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1276 		if (rc < 0)
1277 			goto unlock;
1278 	}
1279 
1280 	new_size = destoff + len;
1281 	destend = destoff + len - 1;
1282 
1283 	/* Flush the folios at either end of the destination range to prevent
1284 	 * accidental loss of dirty data outside of the range.
1285 	 */
1286 	fstart = destoff;
1287 	fend = destend;
1288 
1289 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1290 	if (rc)
1291 		goto unlock;
1292 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1293 	if (rc)
1294 		goto unlock;
1295 	if (fend > target_cifsi->netfs.zero_point)
1296 		target_cifsi->netfs.zero_point = fend + 1;
1297 	old_size = target_cifsi->netfs.remote_i_size;
1298 
1299 	/* Discard all the folios that overlap the destination region. */
1300 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1301 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1302 
1303 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1304 			   i_size_read(target_inode), 0);
1305 
1306 	rc = -EOPNOTSUPP;
1307 	if (target_tcon->ses->server->ops->duplicate_extents) {
1308 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1309 			smb_file_src, smb_file_target, off, len, destoff);
1310 		if (rc == 0 && new_size > old_size) {
1311 			truncate_setsize(target_inode, new_size);
1312 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1313 					      new_size);
1314 		}
1315 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1316 			target_cifsi->netfs.zero_point = new_size;
1317 	}
1318 
1319 	/* force revalidate of size and timestamps of target file now
1320 	   that target is updated on the server */
1321 	CIFS_I(target_inode)->time = 0;
1322 unlock:
1323 	/* although unlocking in the reverse order from locking is not
1324 	   strictly necessary here it is a little cleaner to be consistent */
1325 	unlock_two_nondirectories(src_inode, target_inode);
1326 out:
1327 	free_xid(xid);
1328 	return rc < 0 ? rc : len;
1329 }
1330 
1331 ssize_t cifs_file_copychunk_range(unsigned int xid,
1332 				struct file *src_file, loff_t off,
1333 				struct file *dst_file, loff_t destoff,
1334 				size_t len, unsigned int flags)
1335 {
1336 	struct inode *src_inode = file_inode(src_file);
1337 	struct inode *target_inode = file_inode(dst_file);
1338 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1339 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1340 	struct cifsFileInfo *smb_file_src;
1341 	struct cifsFileInfo *smb_file_target;
1342 	struct cifs_tcon *src_tcon;
1343 	struct cifs_tcon *target_tcon;
1344 	unsigned long long destend, fstart, fend;
1345 	ssize_t rc;
1346 
1347 	cifs_dbg(FYI, "copychunk range\n");
1348 
1349 	if (!src_file->private_data || !dst_file->private_data) {
1350 		rc = -EBADF;
1351 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1352 		goto out;
1353 	}
1354 
1355 	rc = -EXDEV;
1356 	smb_file_target = dst_file->private_data;
1357 	smb_file_src = src_file->private_data;
1358 	src_tcon = tlink_tcon(smb_file_src->tlink);
1359 	target_tcon = tlink_tcon(smb_file_target->tlink);
1360 
1361 	if (src_tcon->ses != target_tcon->ses) {
1362 		cifs_dbg(VFS, "source and target of copy not on same server\n");
1363 		goto out;
1364 	}
1365 
1366 	rc = -EOPNOTSUPP;
1367 	if (!target_tcon->ses->server->ops->copychunk_range)
1368 		goto out;
1369 
1370 	/*
1371 	 * Note: the cifs case is easier than btrfs since the server is responsible
1372 	 * for checking for proper open modes and file type, and if it wants,
1373 	 * the server could even support copying a range where source == target
1374 	 */
1375 	lock_two_nondirectories(target_inode, src_inode);
1376 
1377 	cifs_dbg(FYI, "about to flush pages\n");
1378 
1379 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1380 					  off + len - 1);
1381 	if (rc)
1382 		goto unlock;
1383 
1384 	/* The server-side copy will fail if the source crosses the EOF marker.
1385 	 * Advance the EOF marker after the flush above to the end of the range
1386 	 * if it's short of that.
1387 	 */
1388 	if (src_cifsi->netfs.remote_i_size < off + len) {
1389 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1390 		if (rc < 0)
1391 			goto unlock;
1392 	}
1393 
1394 	destend = destoff + len - 1;
1395 
1396 	/* Flush the folios at either end of the destination range to prevent
1397 	 * accidental loss of dirty data outside of the range.
1398 	 */
1399 	fstart = destoff;
1400 	fend = destend;
1401 
1402 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1403 	if (rc)
1404 		goto unlock;
1405 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1406 	if (rc)
1407 		goto unlock;
1408 	if (fend > target_cifsi->netfs.zero_point)
1409 		target_cifsi->netfs.zero_point = fend + 1;
1410 
1411 	/* Discard all the folios that overlap the destination region. */
1412 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1413 
1414 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1415 			   i_size_read(target_inode), 0);
1416 
1417 	rc = file_modified(dst_file);
1418 	if (!rc) {
1419 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1420 			smb_file_src, smb_file_target, off, len, destoff);
1421 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1422 			truncate_setsize(target_inode, destoff + rc);
1423 			netfs_resize_file(&target_cifsi->netfs,
1424 					  i_size_read(target_inode), true);
1425 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1426 					      i_size_read(target_inode));
1427 		}
1428 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1429 			target_cifsi->netfs.zero_point = destoff + rc;
1430 	}
1431 
1432 	file_accessed(src_file);
1433 
1434 	/* force revalidate of size and timestamps of target file now
1435 	 * that target is updated on the server
1436 	 */
1437 	CIFS_I(target_inode)->time = 0;
1438 
1439 unlock:
1440 	/* although unlocking in the reverse order from locking is not
1441 	 * strictly necessary here it is a little cleaner to be consistent
1442 	 */
1443 	unlock_two_nondirectories(src_inode, target_inode);
1444 
1445 out:
1446 	return rc;
1447 }
1448 
1449 /*
1450  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1451  * is a dummy operation.
1452  */
1453 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1454 {
1455 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1456 		 file, datasync);
1457 
1458 	return 0;
1459 }
1460 
1461 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1462 				struct file *dst_file, loff_t destoff,
1463 				size_t len, unsigned int flags)
1464 {
1465 	unsigned int xid = get_xid();
1466 	ssize_t rc;
1467 	struct cifsFileInfo *cfile = dst_file->private_data;
1468 
1469 	if (cfile->swapfile) {
1470 		rc = -EOPNOTSUPP;
1471 		free_xid(xid);
1472 		return rc;
1473 	}
1474 
1475 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1476 					len, flags);
1477 	free_xid(xid);
1478 
1479 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1480 		rc = splice_copy_file_range(src_file, off, dst_file,
1481 					    destoff, len);
1482 	return rc;
1483 }
1484 
1485 const struct file_operations cifs_file_ops = {
1486 	.read_iter = cifs_loose_read_iter,
1487 	.write_iter = cifs_file_write_iter,
1488 	.open = cifs_open,
1489 	.release = cifs_close,
1490 	.lock = cifs_lock,
1491 	.flock = cifs_flock,
1492 	.fsync = cifs_fsync,
1493 	.flush = cifs_flush,
1494 	.mmap  = cifs_file_mmap,
1495 	.splice_read = filemap_splice_read,
1496 	.splice_write = iter_file_splice_write,
1497 	.llseek = cifs_llseek,
1498 	.unlocked_ioctl	= cifs_ioctl,
1499 	.copy_file_range = cifs_copy_file_range,
1500 	.remap_file_range = cifs_remap_file_range,
1501 	.setlease = cifs_setlease,
1502 	.fallocate = cifs_fallocate,
1503 };
1504 
1505 const struct file_operations cifs_file_strict_ops = {
1506 	.read_iter = cifs_strict_readv,
1507 	.write_iter = cifs_strict_writev,
1508 	.open = cifs_open,
1509 	.release = cifs_close,
1510 	.lock = cifs_lock,
1511 	.flock = cifs_flock,
1512 	.fsync = cifs_strict_fsync,
1513 	.flush = cifs_flush,
1514 	.mmap = cifs_file_strict_mmap,
1515 	.splice_read = filemap_splice_read,
1516 	.splice_write = iter_file_splice_write,
1517 	.llseek = cifs_llseek,
1518 	.unlocked_ioctl	= cifs_ioctl,
1519 	.copy_file_range = cifs_copy_file_range,
1520 	.remap_file_range = cifs_remap_file_range,
1521 	.setlease = cifs_setlease,
1522 	.fallocate = cifs_fallocate,
1523 };
1524 
1525 const struct file_operations cifs_file_direct_ops = {
1526 	.read_iter = netfs_unbuffered_read_iter,
1527 	.write_iter = netfs_file_write_iter,
1528 	.open = cifs_open,
1529 	.release = cifs_close,
1530 	.lock = cifs_lock,
1531 	.flock = cifs_flock,
1532 	.fsync = cifs_fsync,
1533 	.flush = cifs_flush,
1534 	.mmap = cifs_file_mmap,
1535 	.splice_read = copy_splice_read,
1536 	.splice_write = iter_file_splice_write,
1537 	.unlocked_ioctl  = cifs_ioctl,
1538 	.copy_file_range = cifs_copy_file_range,
1539 	.remap_file_range = cifs_remap_file_range,
1540 	.llseek = cifs_llseek,
1541 	.setlease = cifs_setlease,
1542 	.fallocate = cifs_fallocate,
1543 };
1544 
1545 const struct file_operations cifs_file_nobrl_ops = {
1546 	.read_iter = cifs_loose_read_iter,
1547 	.write_iter = cifs_file_write_iter,
1548 	.open = cifs_open,
1549 	.release = cifs_close,
1550 	.fsync = cifs_fsync,
1551 	.flush = cifs_flush,
1552 	.mmap  = cifs_file_mmap,
1553 	.splice_read = filemap_splice_read,
1554 	.splice_write = iter_file_splice_write,
1555 	.llseek = cifs_llseek,
1556 	.unlocked_ioctl	= cifs_ioctl,
1557 	.copy_file_range = cifs_copy_file_range,
1558 	.remap_file_range = cifs_remap_file_range,
1559 	.setlease = cifs_setlease,
1560 	.fallocate = cifs_fallocate,
1561 };
1562 
1563 const struct file_operations cifs_file_strict_nobrl_ops = {
1564 	.read_iter = cifs_strict_readv,
1565 	.write_iter = cifs_strict_writev,
1566 	.open = cifs_open,
1567 	.release = cifs_close,
1568 	.fsync = cifs_strict_fsync,
1569 	.flush = cifs_flush,
1570 	.mmap = cifs_file_strict_mmap,
1571 	.splice_read = filemap_splice_read,
1572 	.splice_write = iter_file_splice_write,
1573 	.llseek = cifs_llseek,
1574 	.unlocked_ioctl	= cifs_ioctl,
1575 	.copy_file_range = cifs_copy_file_range,
1576 	.remap_file_range = cifs_remap_file_range,
1577 	.setlease = cifs_setlease,
1578 	.fallocate = cifs_fallocate,
1579 };
1580 
1581 const struct file_operations cifs_file_direct_nobrl_ops = {
1582 	.read_iter = netfs_unbuffered_read_iter,
1583 	.write_iter = netfs_file_write_iter,
1584 	.open = cifs_open,
1585 	.release = cifs_close,
1586 	.fsync = cifs_fsync,
1587 	.flush = cifs_flush,
1588 	.mmap = cifs_file_mmap,
1589 	.splice_read = copy_splice_read,
1590 	.splice_write = iter_file_splice_write,
1591 	.unlocked_ioctl  = cifs_ioctl,
1592 	.copy_file_range = cifs_copy_file_range,
1593 	.remap_file_range = cifs_remap_file_range,
1594 	.llseek = cifs_llseek,
1595 	.setlease = cifs_setlease,
1596 	.fallocate = cifs_fallocate,
1597 };
1598 
1599 const struct file_operations cifs_dir_ops = {
1600 	.iterate_shared = cifs_readdir,
1601 	.release = cifs_closedir,
1602 	.read    = generic_read_dir,
1603 	.unlocked_ioctl  = cifs_ioctl,
1604 	.copy_file_range = cifs_copy_file_range,
1605 	.remap_file_range = cifs_remap_file_range,
1606 	.llseek = generic_file_llseek,
1607 	.fsync = cifs_dir_fsync,
1608 };
1609 
1610 static void
1611 cifs_init_once(void *inode)
1612 {
1613 	struct cifsInodeInfo *cifsi = inode;
1614 
1615 	inode_init_once(&cifsi->netfs.inode);
1616 	init_rwsem(&cifsi->lock_sem);
1617 }
1618 
1619 static int __init
1620 cifs_init_inodecache(void)
1621 {
1622 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1623 					      sizeof(struct cifsInodeInfo),
1624 					      0, (SLAB_RECLAIM_ACCOUNT|
1625 						SLAB_ACCOUNT),
1626 					      cifs_init_once);
1627 	if (cifs_inode_cachep == NULL)
1628 		return -ENOMEM;
1629 
1630 	return 0;
1631 }
1632 
1633 static void
1634 cifs_destroy_inodecache(void)
1635 {
1636 	/*
1637 	 * Make sure all delayed rcu free inodes are flushed before we
1638 	 * destroy cache.
1639 	 */
1640 	rcu_barrier();
1641 	kmem_cache_destroy(cifs_inode_cachep);
1642 }
1643 
1644 static int
1645 cifs_init_request_bufs(void)
1646 {
1647 	/*
1648 	 * The SMB2 maximum header size is bigger than the CIFS one - no problem
1649 	 * allocating some more bytes for CIFS.
1650 	 */
1651 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1652 
1653 	if (CIFSMaxBufSize < 8192) {
1654 	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1655 	Unicode path name has to fit in any SMB/CIFS path-based frame */
1656 		CIFSMaxBufSize = 8192;
1657 	} else if (CIFSMaxBufSize > 1024*127) {
1658 		CIFSMaxBufSize = 1024 * 127;
1659 	} else {
1660 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1661 	}
1662 /*
1663 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1664 		 CIFSMaxBufSize, CIFSMaxBufSize);
1665 */
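	/*
	 * The mask above keeps bits 9-16, i.e. it rounds the buffer size down
	 * to a multiple of 512 within the accepted 8192..130048 range.  For
	 * example, a hypothetical CIFSMaxBufSize=66000 passed at module load
	 * would become 66000 & 0x1FE00 = 65536, while 130048 (1024 * 127 =
	 * 0x1FC00) is already aligned and passes through unchanged.
	 */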
1666 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1667 					    CIFSMaxBufSize + max_hdr_size, 0,
1668 					    SLAB_HWCACHE_ALIGN, 0,
1669 					    CIFSMaxBufSize + max_hdr_size,
1670 					    NULL);
1671 	if (cifs_req_cachep == NULL)
1672 		return -ENOMEM;
1673 
1674 	if (cifs_min_rcv < 1)
1675 		cifs_min_rcv = 1;
1676 	else if (cifs_min_rcv > 64) {
1677 		cifs_min_rcv = 64;
1678 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1679 	}
1680 
1681 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1682 						  cifs_req_cachep);
1683 
1684 	if (cifs_req_poolp == NULL) {
1685 		kmem_cache_destroy(cifs_req_cachep);
1686 		return -ENOMEM;
1687 	}
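	/*
	 * The mempool keeps at least cifs_min_rcv request buffers in reserve,
	 * so the receive path can always fall back to a reserved buffer (or
	 * wait for one to be returned) rather than fail outright when the slab
	 * allocator is under memory pressure.
	 */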
1688 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1689 	 * almost all handle-based requests (but not write responses, nor is it
1690 	 * sufficient for path-based requests).  A smaller size would pack more
1691 	 * slab items onto one 4K page when debugging is enabled, but this
1692 	 * larger size lets more SMBs use the small-buffer allocator, and one
1693 	 * allocation per page from the slab is still far cheaper than the ~17K
1694 	 * (5-page) allocation needed for a large cifs buffer, even with page
1695 	 * debugging on */
1696 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1697 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1698 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1699 	if (cifs_sm_req_cachep == NULL) {
1700 		mempool_destroy(cifs_req_poolp);
1701 		kmem_cache_destroy(cifs_req_cachep);
1702 		return -ENOMEM;
1703 	}
1704 
1705 	if (cifs_min_small < 2)
1706 		cifs_min_small = 2;
1707 	else if (cifs_min_small > 256) {
1708 		cifs_min_small = 256;
1709 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1710 	}
1711 
1712 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1713 						     cifs_sm_req_cachep);
1714 
1715 	if (cifs_sm_req_poolp == NULL) {
1716 		mempool_destroy(cifs_req_poolp);
1717 		kmem_cache_destroy(cifs_req_cachep);
1718 		kmem_cache_destroy(cifs_sm_req_cachep);
1719 		return -ENOMEM;
1720 	}
1721 
1722 	return 0;
1723 }
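/*
 * The three tunables clamped above (CIFSMaxBufSize, cifs_min_rcv and
 * cifs_min_small) are exposed as module parameters of cifs.ko, so -- as a
 * hypothetical example -- a load along the lines of:
 *
 *	modprobe cifs CIFSMaxBufSize=130048 cifs_min_rcv=8 cifs_min_small=30
 *
 * would request the largest network buffer size and deeper receive pools;
 * out-of-range values are pulled back into range by the checks in
 * cifs_init_request_bufs().
 */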
1724 
1725 static void
1726 cifs_destroy_request_bufs(void)
1727 {
1728 	mempool_destroy(cifs_req_poolp);
1729 	kmem_cache_destroy(cifs_req_cachep);
1730 	mempool_destroy(cifs_sm_req_poolp);
1731 	kmem_cache_destroy(cifs_sm_req_cachep);
1732 }
1733 
1734 static int init_mids(void)
1735 {
1736 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1737 					    sizeof(struct mid_q_entry), 0,
1738 					    SLAB_HWCACHE_ALIGN, NULL);
1739 	if (cifs_mid_cachep == NULL)
1740 		return -ENOMEM;
1741 
1742 	/* 3 is a reasonable minimum number of simultaneous operations */
1743 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1744 	if (cifs_mid_poolp == NULL) {
1745 		kmem_cache_destroy(cifs_mid_cachep);
1746 		return -ENOMEM;
1747 	}
1748 
1749 	return 0;
1750 }
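/*
 * Each mid_q_entry tracks one in-flight request by its multiplex/message id
 * so the demultiplex thread can match server responses to waiters; the pool
 * above only guarantees three entries -- the minimal number of simultaneous
 * operations noted in the comment -- and further entries come from the slab
 * as normal.
 */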
1751 
1752 static void destroy_mids(void)
1753 {
1754 	mempool_destroy(cifs_mid_poolp);
1755 	kmem_cache_destroy(cifs_mid_cachep);
1756 }
1757 
1758 static int cifs_init_netfs(void)
1759 {
1760 	cifs_io_request_cachep =
1761 		kmem_cache_create("cifs_io_request",
1762 				  sizeof(struct cifs_io_request), 0,
1763 				  SLAB_HWCACHE_ALIGN, NULL);
1764 	if (!cifs_io_request_cachep)
1765 		goto nomem_req;
1766 
1767 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1768 		goto nomem_reqpool;
1769 
1770 	cifs_io_subrequest_cachep =
1771 		kmem_cache_create("cifs_io_subrequest",
1772 				  sizeof(struct cifs_io_subrequest), 0,
1773 				  SLAB_HWCACHE_ALIGN, NULL);
1774 	if (!cifs_io_subrequest_cachep)
1775 		goto nomem_subreq;
1776 
1777 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1778 		goto nomem_subreqpool;
1779 
1780 	return 0;
1781 
1782 nomem_subreqpool:
1783 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1784 nomem_subreq:
1785 	mempool_exit(&cifs_io_request_pool);
1786 nomem_reqpool:
1787 	kmem_cache_destroy(cifs_io_request_cachep);
1788 nomem_req:
1789 	return -ENOMEM;
1790 }
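/*
 * Unlike cifs_req_poolp above, the two netfs pools are embedded mempool_t
 * objects initialized in place with mempool_init_slab_pool(); their teardown
 * is therefore mempool_exit(), which releases the reserved elements without
 * freeing the pool structure itself, both in the error path above and in
 * cifs_destroy_netfs() below.
 */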
1791 
1792 static void cifs_destroy_netfs(void)
1793 {
1794 	mempool_exit(&cifs_io_subrequest_pool);
1795 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1796 	mempool_exit(&cifs_io_request_pool);
1797 	kmem_cache_destroy(cifs_io_request_cachep);
1798 }
1799 
1800 static int __init
1801 init_cifs(void)
1802 {
1803 	int rc = 0;
1804 	cifs_proc_init();
1805 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1806 /*
1807  *  Initialize Global counters
1808  */
1809 	atomic_set(&sesInfoAllocCount, 0);
1810 	atomic_set(&tconInfoAllocCount, 0);
1811 	atomic_set(&tcpSesNextId, 0);
1812 	atomic_set(&tcpSesAllocCount, 0);
1813 	atomic_set(&tcpSesReconnectCount, 0);
1814 	atomic_set(&tconInfoReconnectCount, 0);
1815 
1816 	atomic_set(&buf_alloc_count, 0);
1817 	atomic_set(&small_buf_alloc_count, 0);
1818 #ifdef CONFIG_CIFS_STATS2
1819 	atomic_set(&total_buf_alloc_count, 0);
1820 	atomic_set(&total_small_buf_alloc_count, 0);
1821 	if (slow_rsp_threshold < 1)
1822 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1823 	else if (slow_rsp_threshold > 32767)
1824 		cifs_dbg(VFS,
1825 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1826 #endif /* CONFIG_CIFS_STATS2 */
1827 
1828 	atomic_set(&mid_count, 0);
1829 	GlobalCurrentXid = 0;
1830 	GlobalTotalActiveXid = 0;
1831 	GlobalMaxActiveXid = 0;
1832 	spin_lock_init(&cifs_tcp_ses_lock);
1833 	spin_lock_init(&GlobalMid_Lock);
1834 
1835 	cifs_lock_secret = get_random_u32();
1836 
1837 	if (cifs_max_pending < 2) {
1838 		cifs_max_pending = 2;
1839 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1840 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1841 		cifs_max_pending = CIFS_MAX_REQ;
1842 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1843 			 CIFS_MAX_REQ);
1844 	}
1845 
1846 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1847 	if (dir_cache_timeout > 65000) {
1848 		dir_cache_timeout = 65000;
1849 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1850 	}
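	/*
	 * cifs_max_pending and dir_cache_timeout are likewise module
	 * parameters; as a hypothetical example,
	 * "modprobe cifs cifs_max_pending=256 dir_cache_timeout=0" would raise
	 * the number of simultaneously outstanding requests per server (capped
	 * at CIFS_MAX_REQ) and disable directory entry caching entirely, per
	 * the checks above.
	 */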
1851 
1852 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1853 	if (!cifsiod_wq) {
1854 		rc = -ENOMEM;
1855 		goto out_clean_proc;
1856 	}
1857 
1858 	/*
1859 	 * Consider setting limit != 0 in the future, perhaps to
1860 	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
1861 	 * threads; note that Documentation/core-api/workqueue.rst recommends 0.
1862 	 */
1863 
1864 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1865 	decrypt_wq = alloc_workqueue("smb3decryptd",
1866 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1867 	if (!decrypt_wq) {
1868 		rc = -ENOMEM;
1869 		goto out_destroy_cifsiod_wq;
1870 	}
1871 
1872 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1873 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1874 	if (!fileinfo_put_wq) {
1875 		rc = -ENOMEM;
1876 		goto out_destroy_decrypt_wq;
1877 	}
1878 
1879 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1880 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1881 	if (!cifsoplockd_wq) {
1882 		rc = -ENOMEM;
1883 		goto out_destroy_fileinfo_put_wq;
1884 	}
1885 
1886 	deferredclose_wq = alloc_workqueue("deferredclose",
1887 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1888 	if (!deferredclose_wq) {
1889 		rc = -ENOMEM;
1890 		goto out_destroy_cifsoplockd_wq;
1891 	}
1892 
1893 	serverclose_wq = alloc_workqueue("serverclose",
1894 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1895 	if (!serverclose_wq) {
1896 		rc = -ENOMEM;
1897 		goto out_destroy_deferredclose_wq;
1898 	}
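	/*
	 * All of these workqueues are created with WQ_MEM_RECLAIM so that each
	 * gets a rescuer thread and can make forward progress even when no new
	 * kworkers can be spawned under memory pressure, and with WQ_FREEZABLE
	 * so queued work is quiesced across system suspend.  WQ_UNBOUND (on the
	 * decrypt and fileinfo-put queues) additionally lets those items run on
	 * any CPU rather than the submitting one.
	 */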
1899 
1900 	rc = cifs_init_inodecache();
1901 	if (rc)
1902 		goto out_destroy_serverclose_wq;
1903 
1904 	rc = cifs_init_netfs();
1905 	if (rc)
1906 		goto out_destroy_inodecache;
1907 
1908 	rc = init_mids();
1909 	if (rc)
1910 		goto out_destroy_netfs;
1911 
1912 	rc = cifs_init_request_bufs();
1913 	if (rc)
1914 		goto out_destroy_mids;
1915 
1916 #ifdef CONFIG_CIFS_DFS_UPCALL
1917 	rc = dfs_cache_init();
1918 	if (rc)
1919 		goto out_destroy_request_bufs;
1920 #endif /* CONFIG_CIFS_DFS_UPCALL */
1921 #ifdef CONFIG_CIFS_UPCALL
1922 	rc = init_cifs_spnego();
1923 	if (rc)
1924 		goto out_destroy_dfs_cache;
1925 #endif /* CONFIG_CIFS_UPCALL */
1926 #ifdef CONFIG_CIFS_SWN_UPCALL
1927 	rc = cifs_genl_init();
1928 	if (rc)
1929 		goto out_register_key_type;
1930 #endif /* CONFIG_CIFS_SWN_UPCALL */
1931 
1932 	rc = init_cifs_idmap();
1933 	if (rc)
1934 		goto out_cifs_swn_init;
1935 
1936 	rc = register_filesystem(&cifs_fs_type);
1937 	if (rc)
1938 		goto out_init_cifs_idmap;
1939 
1940 	rc = register_filesystem(&smb3_fs_type);
1941 	if (rc) {
1942 		unregister_filesystem(&cifs_fs_type);
1943 		goto out_init_cifs_idmap;
1944 	}
1945 
1946 	return 0;
1947 
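	/*
	 * The labels below unwind in the reverse of the order the resources
	 * were set up, and each goto above targets the label that undoes the
	 * most recently successful step, so a failure at any point releases
	 * exactly what has been allocated so far before falling through to
	 * cifs_proc_clean().
	 */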
1948 out_init_cifs_idmap:
1949 	exit_cifs_idmap();
1950 out_cifs_swn_init:
1951 #ifdef CONFIG_CIFS_SWN_UPCALL
1952 	cifs_genl_exit();
1953 out_register_key_type:
1954 #endif
1955 #ifdef CONFIG_CIFS_UPCALL
1956 	exit_cifs_spnego();
1957 out_destroy_dfs_cache:
1958 #endif
1959 #ifdef CONFIG_CIFS_DFS_UPCALL
1960 	dfs_cache_destroy();
1961 out_destroy_request_bufs:
1962 #endif
1963 	cifs_destroy_request_bufs();
1964 out_destroy_mids:
1965 	destroy_mids();
1966 out_destroy_netfs:
1967 	cifs_destroy_netfs();
1968 out_destroy_inodecache:
1969 	cifs_destroy_inodecache();
1970 out_destroy_serverclose_wq:
1971 	destroy_workqueue(serverclose_wq);
1972 out_destroy_deferredclose_wq:
1973 	destroy_workqueue(deferredclose_wq);
1974 out_destroy_cifsoplockd_wq:
1975 	destroy_workqueue(cifsoplockd_wq);
1976 out_destroy_fileinfo_put_wq:
1977 	destroy_workqueue(fileinfo_put_wq);
1978 out_destroy_decrypt_wq:
1979 	destroy_workqueue(decrypt_wq);
1980 out_destroy_cifsiod_wq:
1981 	destroy_workqueue(cifsiod_wq);
1982 out_clean_proc:
1983 	cifs_proc_clean();
1984 	return rc;
1985 }
1986 
1987 static void __exit
1988 exit_cifs(void)
1989 {
1990 	cifs_dbg(NOISY, "exit_smb3\n");
1991 	unregister_filesystem(&cifs_fs_type);
1992 	unregister_filesystem(&smb3_fs_type);
1993 	cifs_release_automount_timer();
1994 	exit_cifs_idmap();
1995 #ifdef CONFIG_CIFS_SWN_UPCALL
1996 	cifs_genl_exit();
1997 #endif
1998 #ifdef CONFIG_CIFS_UPCALL
1999 	exit_cifs_spnego();
2000 #endif
2001 #ifdef CONFIG_CIFS_DFS_UPCALL
2002 	dfs_cache_destroy();
2003 #endif
2004 	cifs_destroy_request_bufs();
2005 	destroy_mids();
2006 	cifs_destroy_netfs();
2007 	cifs_destroy_inodecache();
2008 	destroy_workqueue(deferredclose_wq);
2009 	destroy_workqueue(cifsoplockd_wq);
2010 	destroy_workqueue(decrypt_wq);
2011 	destroy_workqueue(fileinfo_put_wq);
2012 	destroy_workqueue(serverclose_wq);
2013 	destroy_workqueue(cifsiod_wq);
2014 	cifs_proc_clean();
2015 }
2016 
2017 MODULE_AUTHOR("Steve French");
2018 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2019 MODULE_DESCRIPTION
2020 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2021 	"also older servers complying with the SNIA CIFS Specification)");
2022 MODULE_VERSION(CIFS_VERSION);
2023 MODULE_SOFTDEP("ecb");
2024 MODULE_SOFTDEP("hmac");
2025 MODULE_SOFTDEP("md5");
2026 MODULE_SOFTDEP("nls");
2027 MODULE_SOFTDEP("aes");
2028 MODULE_SOFTDEP("cmac");
2029 MODULE_SOFTDEP("sha256");
2030 MODULE_SOFTDEP("sha512");
2031 MODULE_SOFTDEP("aead2");
2032 MODULE_SOFTDEP("ccm");
2033 MODULE_SOFTDEP("gcm");
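/*
 * The MODULE_SOFTDEP() entries above record soft dependencies in the module
 * metadata (visible via modinfo) for the crypto algorithms that SMB signing
 * and encryption rely on, so packaging tools and initramfs generators can
 * make those modules available alongside cifs.ko.
 */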
2034 module_init(init_cifs)
2035 module_exit(exit_cifs)
2036