/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2015 Jörg Thalheim.
 */

#ifndef _ZFS_VFS_H
#define	_ZFS_VFS_H

#include <sys/taskq.h>
#include <sys/cred.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
/*
 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
 * 4.12 - x.y, super_setup_bdi_name() new interface.
 */
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
extern atomic_long_t zfs_bdi_seq;

static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	return (super_setup_bdi_name(sb, "%.28s-%ld", name,
	    atomic_long_inc_return(&zfs_bdi_seq)));
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
}
#elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	error = bdi_setup_and_register(bdi, name);
	if (error) {
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	bdi_destroy(bdi);
	kmem_free(bdi, sizeof (struct backing_dev_info));
	sb->s_bdi = NULL;
}
#elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
static inline int
zpl_bdi_setup(struct super_block *sb, char *name)
{
	struct backing_dev_info *bdi;
	int error;

	bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
	error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
	if (error) {
		/* sb->s_bdi has not been assigned yet, free the local bdi */
		kmem_free(bdi, sizeof (struct backing_dev_info));
		return (error);
	}

	sb->s_bdi = bdi;

	return (0);
}
static inline void
zpl_bdi_destroy(struct super_block *sb)
{
	struct backing_dev_info *bdi = sb->s_bdi;

	bdi_destroy(bdi);
	kmem_free(bdi, sizeof (struct backing_dev_info));
	sb->s_bdi = NULL;
}
#else
#error "Unsupported kernel"
#endif
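
/*
 * Illustration only (not part of the compatibility interface): a
 * hypothetical filesystem would pair zpl_bdi_setup() in its fill_super
 * callback with zpl_bdi_destroy() in its teardown path, e.g.:
 *
 *	static int my_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		int error = zpl_bdi_setup(sb, "my_fs");
 *		if (error)
 *			return (error);
 *		...
 *		return (0);
 *	}
 *
 *	static void my_kill_sb(struct super_block *sb)
 *	{
 *		...
 *		zpl_bdi_destroy(sb);
 *	}
 */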

/*
 * 4.14 adds the SB_* flag definitions; define them to their MS_*
 * equivalents if the kernel does not already provide them.
 */
#ifndef	SB_RDONLY
#define	SB_RDONLY	MS_RDONLY
#endif

#ifndef	SB_SILENT
#define	SB_SILENT	MS_SILENT
#endif

#ifndef	SB_ACTIVE
#define	SB_ACTIVE	MS_ACTIVE
#endif

#ifndef	SB_POSIXACL
#define	SB_POSIXACL	MS_POSIXACL
#endif

#ifndef	SB_MANDLOCK
#define	SB_MANDLOCK	MS_MANDLOCK
#endif

#ifndef	SB_NOATIME
#define	SB_NOATIME	MS_NOATIME
#endif
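
/*
 * Illustration only: with the fallback definitions above, superblock flags
 * can be tested uniformly on every supported kernel, e.g. a hypothetical
 * read-only check:
 *
 *	if (sb->s_flags & SB_RDONLY)
 *		return (-EROFS);
 */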

/*
 * 3.5 API change,
 * The clear_inode() function replaces end_writeback() and introduces an
 * ordering change regarding when the inode_sync_wait() occurs.  See the
 * configure check in config/kernel-clear-inode.m4 for full details.
 */
#if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
#define	clear_inode(ip)		end_writeback(ip)
#endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */

#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
static inline loff_t
lseek_execute(
	struct file *filp,
	struct inode *inode,
	loff_t offset,
	loff_t maxsize)
{
	if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
		return (-EINVAL);

	if (offset > maxsize)
		return (-EINVAL);

	if (offset != filp->f_pos) {
		spin_lock(&filp->f_lock);
		filp->f_pos = offset;
		filp->f_version = 0;
		spin_unlock(&filp->f_lock);
	}

	return (offset);
}
#endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
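
/*
 * Illustration only: a hypothetical llseek handler implementing
 * SEEK_HOLE/SEEK_DATA could commit its resolved offset through
 * lseek_execute() (or the kernel's native helper when available):
 *
 *	static loff_t my_llseek(struct file *filp, loff_t offset, int whence)
 *	{
 *		struct inode *ip = file_inode(filp);
 *		loff_t resolved = offset;	// result of hole/data lookup
 *
 *		return (lseek_execute(filp, ip, resolved, i_size_read(ip)));
 *	}
 */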

#if defined(CONFIG_FS_POSIX_ACL)
/*
 * These functions safely approximate the behavior of posix_acl_release(),
 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
 * The in-kernel version, which can use the RCU machinery, frees the ACLs
 * after the grace period expires.  Because we're unsure how long that grace
 * period may be, this implementation conservatively delays for 60 seconds.
 * This is several orders of magnitude larger than the expected grace period.
 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
 */

#include <linux/posix_acl.h>

#if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
#define	zpl_posix_acl_release(arg)		posix_acl_release(arg)
#else
void zpl_posix_acl_release_impl(struct posix_acl *);

static inline void
zpl_posix_acl_release(struct posix_acl *acl)
{
	if ((acl == NULL) || (acl == ACL_NOT_CACHED))
		return;
#ifdef HAVE_ACL_REFCOUNT
	if (refcount_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#else
	if (atomic_dec_and_test(&acl->a_refcount))
		zpl_posix_acl_release_impl(acl);
#endif
}
#endif /* HAVE_POSIX_ACL_RELEASE */
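
/*
 * Illustration only: any posix_acl reference taken in this code base should
 * be dropped with zpl_posix_acl_release() rather than posix_acl_release(),
 * e.g. a hypothetical helper:
 *
 *	struct posix_acl *acl = posix_acl_alloc(count, GFP_KERNEL);
 *	if (acl == NULL)
 *		return (-ENOMEM);
 *	...
 *	zpl_posix_acl_release(acl);
 */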

#ifdef HAVE_SET_CACHED_ACL_USABLE
#define	zpl_set_cached_acl(ip, ty, n)		set_cached_acl(ip, ty, n)
#define	zpl_forget_cached_acl(ip, ty)		forget_cached_acl(ip, ty)
#else
static inline void
zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
{
	struct posix_acl *older = NULL;

	spin_lock(&ip->i_lock);

	if ((newer != ACL_NOT_CACHED) && (newer != NULL))
		posix_acl_dup(newer);

	switch (type) {
	case ACL_TYPE_ACCESS:
		older = ip->i_acl;
		rcu_assign_pointer(ip->i_acl, newer);
		break;
	case ACL_TYPE_DEFAULT:
		older = ip->i_default_acl;
		rcu_assign_pointer(ip->i_default_acl, newer);
		break;
	}

	spin_unlock(&ip->i_lock);

	zpl_posix_acl_release(older);
}

static inline void
zpl_forget_cached_acl(struct inode *ip, int type)
{
	zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
}
#endif /* HAVE_SET_CACHED_ACL_USABLE */
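
/*
 * Illustration only: after persisting an ACL, a hypothetical setter would
 * update the inode's ACL cache, or invalidate it on failure:
 *
 *	error = my_store_acl(ip, acl, type);	// hypothetical helper
 *	if (error == 0)
 *		zpl_set_cached_acl(ip, type, acl);
 *	else
 *		zpl_forget_cached_acl(ip, type);
 */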

/*
 * 3.1 API change,
 * posix_acl_chmod() was added as the preferred interface.
 *
 * 3.14 API change,
 * posix_acl_chmod() was changed to __posix_acl_chmod()
 */
#ifndef HAVE___POSIX_ACL_CHMOD
#ifdef HAVE_POSIX_ACL_CHMOD
#define	__posix_acl_chmod(acl, gfp, mode)	posix_acl_chmod(acl, gfp, mode)
#define	__posix_acl_create(acl, gfp, mode)	posix_acl_create(acl, gfp, mode)
#else
#error "Unsupported kernel"
#endif /* HAVE_POSIX_ACL_CHMOD */
#endif /* HAVE___POSIX_ACL_CHMOD */
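
/*
 * Illustration only: with the mapping above, callers can use the 3.14+
 * spelling unconditionally, e.g. a hypothetical chmod path rewriting the
 * access ACL for a new mode:
 *
 *	error = __posix_acl_chmod(&acl, GFP_KERNEL, ip->i_mode);
 *	if (error)
 *		return (error);
 */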

/*
 * 4.8 API change,
 * posix_acl_valid() now must be passed a user namespace; the namespace
 * from the super block associated with the given inode is used for this
 * purpose.
 */
#ifdef HAVE_POSIX_ACL_VALID_WITH_NS
#define	zpl_posix_acl_valid(ip, acl)  posix_acl_valid(ip->i_sb->s_user_ns, acl)
#else
#define	zpl_posix_acl_valid(ip, acl)  posix_acl_valid(acl)
#endif
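
/*
 * Illustration only: a hypothetical ACL xattr handler would validate a
 * decoded ACL against the inode before caching or storing it:
 *
 *	error = zpl_posix_acl_valid(ip, acl);
 *	if (error)
 *		return (error);
 */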

#endif /* CONFIG_FS_POSIX_ACL */

/*
 * 3.19 API change
 * The struct member access f->f_dentry->d_inode was replaced by the
 * accessor function file_inode(f).
 */
#ifndef HAVE_FILE_INODE
static inline struct inode *file_inode(const struct file *f)
{
	return (f->f_dentry->d_inode);
}
#endif /* HAVE_FILE_INODE */

/*
 * 4.1 API change
 * The struct member access file->f_path.dentry was replaced by the
 * accessor function file_dentry(f).
 */
#ifndef HAVE_FILE_DENTRY
static inline struct dentry *file_dentry(const struct file *f)
{
	return (f->f_path.dentry);
}
#endif /* HAVE_FILE_DENTRY */
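
/*
 * Illustration only: with the fallbacks above, file-based callbacks can use
 * the accessor style on every supported kernel:
 *
 *	struct inode *ip = file_inode(filp);
 *	struct dentry *dentry = file_dentry(filp);
 */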

static inline uid_t zfs_uid_read_impl(struct inode *ip)
{
	return (from_kuid(kcred->user_ns, ip->i_uid));
}

static inline uid_t zfs_uid_read(struct inode *ip)
{
	return (zfs_uid_read_impl(ip));
}

static inline gid_t zfs_gid_read_impl(struct inode *ip)
{
	return (from_kgid(kcred->user_ns, ip->i_gid));
}

static inline gid_t zfs_gid_read(struct inode *ip)
{
	return (zfs_gid_read_impl(ip));
}

static inline void zfs_uid_write(struct inode *ip, uid_t uid)
{
	ip->i_uid = make_kuid(kcred->user_ns, uid);
}

static inline void zfs_gid_write(struct inode *ip, gid_t gid)
{
	ip->i_gid = make_kgid(kcred->user_ns, gid);
}
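
/*
 * Illustration only: these helpers convert between plain uid_t/gid_t values
 * and the kernel's kuid_t/kgid_t inode fields, e.g. when loading or storing
 * ownership (hypothetical field names):
 *
 *	zfs_uid_write(ip, stored_uid);
 *	zfs_gid_write(ip, stored_gid);
 *	uid_t uid = zfs_uid_read(ip);
 */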

/*
 * 3.15 API change
 */
#ifndef RENAME_NOREPLACE
#define	RENAME_NOREPLACE	(1 << 0) /* Don't overwrite target */
#endif
#ifndef RENAME_EXCHANGE
#define	RENAME_EXCHANGE		(1 << 1) /* Exchange source and dest */
#endif
#ifndef RENAME_WHITEOUT
#define	RENAME_WHITEOUT		(1 << 2) /* Whiteout source */
#endif
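
/*
 * Illustration only: a hypothetical flag-aware rename handler would reject
 * any flag it does not implement before doing work:
 *
 *	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
 *		return (-EINVAL);
 */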

/*
 * 4.9 API change
 * setattr_prepare() replaced inode_change_ok().
 */
#if !(defined(HAVE_SETATTR_PREPARE_NO_USERNS) || \
    defined(HAVE_SETATTR_PREPARE_USERNS) || \
    defined(HAVE_SETATTR_PREPARE_IDMAP))
static inline int
setattr_prepare(struct dentry *dentry, struct iattr *ia)
{
	return (inode_change_ok(dentry->d_inode, ia));
}
#endif
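
/*
 * Illustration only: a hypothetical setattr callback would validate the
 * requested change before applying it:
 *
 *	static int my_setattr(struct dentry *dentry, struct iattr *ia)
 *	{
 *		int error = setattr_prepare(dentry, ia);
 *		if (error)
 *			return (error);
 *		...
 *	}
 */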

/*
 * 4.11 API change
 * These macros are defined by kernel 4.11.  We define them here so that the
 * same code builds under kernels < 4.11 and >= 4.11.  The placeholders are
 * set to 0 so that accidentally relying on them produces an obvious failure
 * rather than silently wrong behavior.
 */

#ifndef STATX_BASIC_STATS
#define	STATX_BASIC_STATS	0
#endif

#ifndef AT_STATX_SYNC_AS_STAT
#define	AT_STATX_SYNC_AS_STAT	0
#endif

/*
 * 4.11 API change
 * 4.11 getattr takes a struct path *; earlier kernels take a vfsmount *.
 */

#ifdef HAVE_VFSMOUNT_IOPS_GETATTR
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)	\
{									\
	struct path path = { .mnt = mnt, .dentry = dentry };		\
	return (func##_impl(&path, stat, STATX_BASIC_STATS,		\
	    AT_STATX_SYNC_AS_STAT));					\
}
#elif defined(HAVE_PATH_IOPS_GETATTR)
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(const struct path *path, struct kstat *stat, u32 request_mask,	\
    unsigned int query_flags)						\
{									\
	return (func##_impl(path, stat, request_mask, query_flags));	\
}
#elif defined(HAVE_USERNS_IOPS_GETATTR)
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(struct user_namespace *user_ns, const struct path *path,		\
    struct kstat *stat, u32 request_mask, unsigned int query_flags)	\
{									\
	return (func##_impl(user_ns, path, stat, request_mask,		\
	    query_flags));						\
}
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(struct mnt_idmap *idmap, const struct path *path,			\
    struct kstat *stat, u32 request_mask, unsigned int query_flags)	\
{									\
	return (func##_impl(idmap, path, stat, request_mask,		\
	    query_flags));						\
}
#else
#error "Unsupported kernel"
#endif
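
/*
 * Illustration only: the wrapper expects a companion func##_impl whose
 * signature matches the branch selected by the configure checks.  A
 * hypothetical consumer on a HAVE_PATH_IOPS_GETATTR kernel:
 *
 *	static int
 *	my_getattr_impl(const struct path *path, struct kstat *stat,
 *	    u32 request_mask, unsigned int query_flags)
 *	{
 *		...
 *	}
 *	ZPL_GETATTR_WRAPPER(my_getattr);
 */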

/*
 * 4.9 API change
 * current_time() is the preferred interface for obtaining the current
 * filesystem time, truncated to the superblock's time granularity.
 */
#if !defined(HAVE_CURRENT_TIME)
static inline struct timespec
current_time(struct inode *ip)
{
	return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
}
#endif
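
/*
 * Illustration only: on the kernel generations this shim targets, timestamp
 * updates could then be written uniformly, e.g. in a hypothetical write path:
 *
 *	ip->i_mtime = current_time(ip);
 */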

/*
 * 4.16 API change
 * Added iversion interface for managing inode version field.
 */
#ifdef HAVE_INODE_SET_IVERSION
#include <linux/iversion.h>
#else
static inline void
inode_set_iversion(struct inode *ip, u64 val)
{
	ip->i_version = val;
}
#endif

/*
 * Returns true when called in the context of a 32-bit system call.
 */
static inline int
zpl_is_32bit_api(void)
{
#ifdef CONFIG_COMPAT
#ifdef HAVE_IN_COMPAT_SYSCALL
	return (in_compat_syscall());
#else
	return (is_compat_task());
#endif
#else
	return (BITS_PER_LONG == 32);
#endif
}
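
/*
 * Illustration only: a hypothetical readdir or ioctl path might use this to
 * decide whether a 64-bit value must be clamped for a 32-bit caller:
 *
 *	if (zpl_is_32bit_api() && offset > INT_MAX)
 *		return (-EOVERFLOW);
 */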

/*
 * 5.12 API change
 * To support id-mapped mounts, generic_fillattr() was modified to
 * accept a new struct user_namespace * as its first arg.
 *
 * 6.3 API change
 * generic_fillattr() first arg is changed to struct mnt_idmap *
 *
 * 6.6 API change
 * generic_fillattr() gets new second arg request_mask, a u32 type
 */
#ifdef HAVE_GENERIC_FILLATTR_IDMAP
#define	zpl_generic_fillattr(idmap, ip, sp)	\
    generic_fillattr(idmap, ip, sp)
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP_REQMASK)
#define	zpl_generic_fillattr(idmap, rqm, ip, sp)	\
    generic_fillattr(idmap, rqm, ip, sp)
#elif defined(HAVE_GENERIC_FILLATTR_USERNS)
#define	zpl_generic_fillattr(user_ns, ip, sp)	\
    generic_fillattr(user_ns, ip, sp)
#else
#define	zpl_generic_fillattr(user_ns, ip, sp)	generic_fillattr(ip, sp)
#endif
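
/*
 * Illustration only: a getattr implementation forwards whatever identity
 * mapping it was given and lets the macro above drop or reorder arguments
 * as the running kernel requires (hypothetical impl on a pre-6.6 kernel):
 *
 *	static int
 *	my_getattr_impl(struct user_namespace *user_ns,
 *	    const struct path *path, struct kstat *stat,
 *	    u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *ip = path->dentry->d_inode;
 *
 *		zpl_generic_fillattr(user_ns, ip, stat);
 *		return (0);
 *	}
 */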

#endif /* _ZFS_VFS_H */