xref: /linux/arch/s390/pci/pci_mmio.c (revision 29ae7d96)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

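/*
 * Store @len bytes from the user buffer @src to the PCI MMIO area at
 * @ioaddr with a single PCISTB.  sacf 256 selects the secondary address
 * space so that @src is translated through the user mappings; on a fault
 * the exception table branches to the closing sacf and the initial
 * cc of -ENXIO is returned.
 */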
static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       sacf 256\n"
		"0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"2:     sacf 768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @ulen <= 8 bytes from @src into the rightmost bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     llgc    %[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"       aghi    %[src],1\n"
		"       ogr     %[val],%[tmp]\n"
		"       brctg   %[cnt],0b\n"
		"1:     .insn   rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"3:     sacf    768\n"
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
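
/*
 * Illustrative plain-C sketch (not part of the build) of what the inline
 * assembly above does, ignoring the address-space switch and the fault
 * handling; pcistg() stands in for the PCISTG instruction and is a
 * hypothetical helper:
 *
 *	u64 val = 0;
 *
 *	while (cnt--) {
 *		val <<= 8;
 *		val |= *src++;			// accumulate bytes MSB first
 *	}
 *	cc = pcistg(val, ioaddr, ulen);		// hypothetical helper
 */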
89 
__memcpy_toio_inuser(void __iomem * dst,const void __user * src,size_t n)90 static inline int __memcpy_toio_inuser(void __iomem *dst,
91 				   const void __user *src, size_t n)
92 {
93 	int size, rc = 0;
94 	u8 status = 0;
95 
96 	if (!src)
97 		return -EINVAL;
98 
99 	while (n > 0) {
100 		size = zpci_get_max_io_size((u64 __force) dst,
101 					    (u64 __force) src, n,
102 					    ZPCI_MAX_WRITE_SIZE);
103 		if (size > 8) /* main path */
104 			rc = __pcistb_mio_inuser(dst, src, size, &status);
105 		else
106 			rc = __pcistg_mio_inuser(dst, src, size, &status);
107 		if (rc)
108 			break;
109 		src += size;
110 		dst += size;
111 		n -= size;
112 	}
113 	if (rc)
114 		zpci_err_mmio(rc, status, (__force u64) dst);
115 	return rc;
116 }
117 
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address whether it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup, which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

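/*
 * Illustrative userspace usage, not part of this file: @bar is assumed to be
 * the pointer returned by mmap()ing the device BAR (e.g. via the sysfs
 * resource file) and __NR_s390_pci_mmio_write to come from the libc syscall
 * headers:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	long rc = syscall(__NR_s390_pci_mmio_write,
 *			  (unsigned long)bar + offset, buf, len);
 */
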
static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * read 0 < @ulen <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     .insn   rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"       ltr     %[cc],%[cc]\n"
		"       jne     4f\n"
		"2:     ahi     %[shift],-8\n"
		"       srlg    %[tmp],%[val],0(%[shift])\n"
		"3:     stc     %[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"       brctg   %[cnt],2b\n"
		"4:     sacf    768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		:
		[ioaddr_len] "+&d" (ioaddr_len.pair),
		[cc] "+d" (cc), [val] "=d" (val),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

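/*
 * Illustrative plain-C sketch (not part of the build) of the store loop
 * above, ignoring the address-space switch and the fault handling;
 * pcilg() stands in for the PCILG instruction and is a hypothetical helper:
 *
 *	val = pcilg(ioaddr, ulen);		// hypothetical helper
 *	while (cnt--) {
 *		shift -= 8;
 *		*dst++ = val >> shift;		// store bytes MSB first
 *	}
 */
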
static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address whether it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup, which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
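
/*
 * Illustrative userspace usage, not part of this file; mirrors the write
 * example above and assumes __NR_s390_pci_mmio_read is provided by the
 * libc syscall headers:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	long rc = syscall(__NR_s390_pci_mmio_read,
 *			  (unsigned long)bar + offset, buf, len);
 */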