/*	$NetBSD: uaccess.h,v 1.11 2022/10/25 23:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_UACCESS_H_
#define	_ASM_UACCESS_H_

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <linux/compiler.h>

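/*
 * NetBSD implementation of Linux's <asm/uaccess.h> user-space memory
 * access API, for the drm2 Linux compatibility layer.  Accesses to
 * user memory are built on the native copyin(9)/copyout(9) and
 * ustore(9) primitives.
 */
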
/* XXX This is a cop-out.  */
#define	VERIFY_READ	0
#define	VERIFY_WRITE	1
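/*
 * On Linux, access_ok pre-validates that a user pointer lies in the
 * user address range.  Here we can always answer true: copyin(9) and
 * copyout(9) validate the address on every access and fail safely
 * with EFAULT, so there is nothing useful to check up front.
 */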
static inline bool
access_ok(const void *uaddr __unused, size_t nbytes __unused)
{
	return true;
}

#define	__copy_from_user	copy_from_user
#define	__copy_to_user		copy_to_user

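/*
 * In Linux, the __-prefixed variants skip the access_ok check; since
 * our access_ok is vacuous, aliasing them to the checked versions is
 * harmless.
 *
 * Note a deliberate deviation: Linux's copy_from_user/copy_to_user
 * return the number of bytes left uncopied, not a negative errno.
 * These shims return 0 or -EFAULT instead, which still satisfies the
 * usual `if (copy_from_user(...)) return -EFAULT;' idiom in the drm
 * code.
 */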
static inline int
copy_from_user(void *kernel_addr, const void *user_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	return -copyin(user_addr, kernel_addr, len);
}

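/*
 * Linux's strncpy_from_user copies a NUL-terminated string from user
 * space and returns the length copied, excluding the terminating NUL,
 * or a negative errno on fault.  copyinstr(9) reports the length
 * including the NUL in `done', hence the `done - 1' below.  On
 * truncation Linux would return `len'; this shim returns
 * -ENAMETOOLONG instead, via the errno conversion.
 */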
static inline long
strncpy_from_user(char *kernel_addr, const void *user_addr, long len)
{
	size_t done;
	int error;

	if (len <= 0)
		return 0;
	/* XXX errno NetBSD->Linux */
	error = copyinstr(user_addr, kernel_addr, len, &done);
	if (error)
		return -error;
	KASSERT(done <= (size_t)len);
	KASSERT(done >= 1);
	return done - 1;
}

static inline int
copy_to_user(void *user_addr, const void *kernel_addr, size_t len)
{
	/* XXX errno NetBSD->Linux */
	return -copyout(kernel_addr, user_addr, len);
}

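/*
 * The `0*sizeof(&(KERNEL_LVAL) - (USER_PTR))' term contributes
 * nothing to the size, but the pointer subtraction compiles only if
 * &KERNEL_LVAL and USER_PTR point at compatible types, giving a
 * compile-time check that the destination matches the user pointer's
 * type.
 */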
#define	get_user(KERNEL_LVAL, USER_PTR)					      \
	copy_from_user(&(KERNEL_LVAL), (USER_PTR), sizeof(*(USER_PTR)) +      \
	    0*sizeof(&(KERNEL_LVAL) - (USER_PTR)))

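/*
 * put_user evaluates KERNEL_RVAL once into a temporary of the user
 * object's type (via typeof), so a kernel-space object of the right
 * size is handed to copy_to_user.
 */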
#define	put_user(KERNEL_RVAL, USER_PTR)	({				      \
	const typeof(*(USER_PTR)) __put_user_tmp = (KERNEL_RVAL);	      \
	copy_to_user((USER_PTR), &__put_user_tmp, sizeof(__put_user_tmp));    \
})

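/*
 * Example usage (hypothetical ioctl handler; `data' is a user
 * pointer):
 *
 *	int val;
 *
 *	if (get_user(val, (int *)data))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *)data))
 *		return -EFAULT;
 */
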
#define	__get_user	get_user
#define	__put_user	put_user

#define	user_access_begin(P,N)	access_ok(P,N)
#define	user_access_end()	__nothing

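/*
 * On Linux, user_access_begin/user_access_end bracket a region where
 * unsafe_put_user may store to user space without per-access checks,
 * jumping to LABEL on fault.  Here unsafe_put_user is just __put_user
 * with the error turned into a goto, so the bracketing is vacuous.
 */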
#define	unsafe_put_user(KERNEL_RVAL, USER_PTR, LABEL)	do {		      \
	if (__put_user(KERNEL_RVAL, USER_PTR))				      \
		goto LABEL;						      \
} while (0)

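/*
 * Linux's clear_user zeroes `size' bytes of user memory and returns
 * the number of bytes that could not be cleared, so 0 means success.
 */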
static inline size_t
clear_user(void __user *user_ptr, size_t size)
{
	char __user *p = user_ptr;
	size_t n = size;

	/*
	 * This loop, which sets up a fault handler on every iteration,
	 * is not going to win any speed records, but it'll do to copy
	 * out an int.
	 */
	while (n-- > 0) {
		if (ustore_char(p++, 0) != 0)
			return n + 1;
	}

	return 0;
}

#if 0
/*
 * XXX These `inatomic' versions are a cop-out, but they should do for
 * now -- they are used only in fast paths which can't fault but which
 * can fall back to slower paths that arrange things so faulting is OK.
 */

static inline int
__copy_from_user_inatomic(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

static inline int
__copy_to_user_inatomic(void *user_addr __unused,
    const void *kernel_addr __unused, size_t len __unused)
{
	return -EFAULT;
}
#endif	/* 0 */

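/*
 * The nocache variant is used by drm in fast paths that must not
 * fault.  Unconditionally returning -EFAULT makes the fast path fail
 * every time, forcing callers onto their slower fallback paths, which
 * arrange for faults to be safe.
 */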
static inline int
__copy_from_user_inatomic_nocache(void *kernel_addr __unused,
    const void *user_addr __unused, size_t len __unused)
{
	return -EFAULT;
}

#endif  /* _ASM_UACCESS_H_ */