/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions. These should work on any
 * machine that has kernel and user data in the same address space,
 * e.g. all NOMMU machines.
 */
#include <linux/string.h>
#include <asm-generic/access_ok.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	/* Kernel and user data share one address space; plain (possibly
	 * unaligned) loads are all that is needed. */
	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
/* Mark the inline version as present so the #ifndef fallback below is skipped. */
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
/* Mark the inline version as present so the #ifndef fallback below is skipped. */
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)
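
/*
 * Illustrative use (an assumption, not something this header requires):
 * the generic copy_{from,to}_kernel_nofault() helpers expand these once
 * per native-word chunk, roughly like:
 *
 *	u32 val;
 *
 *	__get_kernel_nofault(&val, src, u32, Efault);
 *	return 0;
 * Efault:
 *	return -EFAULT;
 *
 * With CONFIG_UACCESS_MEMCPY the access cannot fault, so the error label
 * is only reachable from the dead "if (0)" branch, which keeps the
 * compiler from flagging it as unused.
 */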

/*
 * raw_copy_{from,to}_user() conventionally return the number of bytes
 * that could not be copied.  This generic version does no fault handling
 * at all (kernel and user data share one address space), so it always
 * reports 0.
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
/*
 * Defining these lets <linux/uaccess.h> provide copy_{from,to}_user() as
 * inline wrappers instead of the out-of-line library versions.
 */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})
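
/*
 * Illustrative use (not part of this header): storing one value through a
 * user-supplied pointer, e.g. from an ioctl handler.  put_user() evaluates
 * to 0 on success and -EFAULT on a bad user address:
 *
 *	int __user *uptr = (int __user *)arg;
 *	int val = 42;
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */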

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	}							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*(ptr))) ?			\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})
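
/*
 * Illustrative use (not part of this header): fetching one value from a
 * user-supplied pointer.  On failure the macro above zeroes the
 * destination and evaluates to -EFAULT:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */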

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}
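
/*
 * Illustrative use (not part of this header): zeroing the unused tail of a
 * user buffer.  clear_user() returns the number of bytes that could not be
 * cleared, so a nonzero result is normally turned into -EFAULT:
 *
 *	if (clear_user(buf + len, size - len))
 *		return -EFAULT;
 */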

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);
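
/*
 * Illustrative use (not part of this header; the bounds handling is the
 * caller's choice): copying a NUL-terminated string from user space into
 * a fixed-size kernel buffer.  strncpy_from_user() returns the string
 * length on success, the full count if the string did not fit, or -EFAULT:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */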

#endif /* __ASM_GENERIC_UACCESS_H */