xref: /dragonfly/sys/dev/drm/linux_shmem.c (revision 655933d6)
/*-
 * Copyright (c) 2011 The FreeBSD Foundation
 * Copyright (c) 2014-2020 François Tigeot <ftigeot@wolfpond.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <linux/err.h>
#include <linux/shmem_fs.h>

/*
 * This code is typically called with a normal VM object to access
 * data from a userspace shared memory mapping.  However, handle the
 * case where it might be called with OBJT_MGTDEVICE anyway.
 */
struct page *
shmem_read_mapping_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_MGTDEVICE) {
		m = NULL;
		rv = vm_pager_get_page(object, pindex, &m, 1);
		if (m == NULL)
			return ERR_PTR(-ENOMEM);
		if (rv != VM_PAGER_OK) {
			vm_page_free(m);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		m = vm_page_grab(object, pindex,
				 VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(object, pindex)) {
				rv = vm_pager_get_page(object, pindex, &m, 1);
				m = vm_page_lookup(object, pindex);
				if (m == NULL)
					return ERR_PTR(-ENOMEM);
				if (rv != VM_PAGER_OK) {
					vm_page_free(m);
					return ERR_PTR(-ENOMEM);
				}
			} else {
				pmap_zero_page(VM_PAGE_TO_PHYS(m));
				m->valid = VM_PAGE_BITS_ALL;
				m->dirty = 0;
			}
		}
	}
	vm_page_wire(m);		/* put_page() undoes this */
	vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(object);

	return (struct page *)m;
}
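
/*
 * Illustrative sketch (compiled out): how a GEM-style caller might
 * populate a page array from a shmem-backed VM object using
 * shmem_read_mapping_page().  example_populate_pages() is hypothetical;
 * each page returned above comes back wired, so put_page() is used to
 * unwind on failure.
 */
#if 0
static int
example_populate_pages(vm_object_t obj, struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct page *p = shmem_read_mapping_page(obj, i);

		if (IS_ERR(p)) {
			/* Drop the pages wired so far. */
			while (--i >= 0)
				put_page(pages[i]);
			return PTR_ERR(p);
		}
		pages[i] = p;
	}

	return 0;
}
#endif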

struct page *
shmem_read_mapping_page_gfp(struct vm_object *mapping,
    pgoff_t index, gfp_t gfp_mask)
{
	return shmem_read_mapping_page(mapping, index);
}

#include <linux/fs.h>

int
pagecache_write_begin(struct vm_object *obj, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata)
{
	*pagep = shmem_read_mapping_page(obj, OFF_TO_IDX(pos));

	return 0;
}

/* This is really shmem_write_end() for the i915 gem code */
int
pagecache_write_end(struct vm_object *obj, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	set_page_dirty(page);
	put_page(page);

	return copied;
}
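
/*
 * Illustrative sketch (compiled out): pagecache_write_begin() and
 * pagecache_write_end() are meant to bracket a per-page copy, roughly
 * the way the i915 pwrite path uses shmem write_begin()/write_end() on
 * Linux.  example_write_page() is hypothetical; it assumes 'pos' is
 * page aligned, 'len' fits in one page, and that the compatibility
 * layer provides kmap()/kunmap().  Since pagecache_write_begin() above
 * only looks the page up, the sketch checks the returned page rather
 * than the return value.
 */
#if 0
static int
example_write_page(struct vm_object *obj, loff_t pos,
		   const void *src, unsigned len)
{
	struct page *page;
	void *dst;

	pagecache_write_begin(obj, NULL, pos, len, 0, &page, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	dst = kmap(page);
	memcpy(dst, src, len);
	kunmap(page);

	/* Marks the page dirty and drops the reference from write_begin. */
	return pagecache_write_end(obj, NULL, pos, len, len, page, NULL);
}
#endif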

/*
 * userptr support
 */
long
get_user_pages(unsigned long start, unsigned long nr_pages,
	       unsigned int gup_flags, struct page **pages,
	       struct vm_area_struct **vmas)
{
	thread_t td;
	vm_page_t m;
	vm_map_t map;
	long i;
	int error;
	int busied;
	int fault_type = VM_PROT_READ;

	/* returning related vmas not yet supported */
	td = curthread;
	KKASSERT(vmas == NULL);
	KKASSERT(td->td_proc != NULL);
	map = &td->td_proc->p_vmspace->vm_map;

	if (gup_flags)
		fault_type |= VM_PROT_WRITE;

	error = 0;
	for (i = 0; i < nr_pages; ++i) {
		m = vm_fault_page(map, start + i * PAGE_SIZE,
				  fault_type, VM_FAULT_NORMAL,
				  &error, &busied);
		if (error)
			break;
		if (busied) {
			vm_page_wire(m);
		} else {
			vm_page_busy_wait(m, TRUE, "drmgup");
			vm_page_wire(m);
			vm_page_unhold(m);
		}
		vm_page_wakeup(m);
		pages[i] = (void *)m;
	}
	if (error) {
		while (--i >= 0) {
			put_page(pages[i]);
			pages[i] = NULL;
		}
		i = -error;
	}
	return i;
}

void
release_pages(struct page **pages, unsigned long nr_pages)
{
	while (nr_pages > 0) {
		--nr_pages;
		put_page(pages[nr_pages]);
		pages[nr_pages] = NULL;
	}
}
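
/*
 * Illustrative sketch (compiled out): the usual pin/use/unpin pattern
 * for the userptr helpers above.  example_with_user_pages() and its
 * arguments are hypothetical.  With the implementation above, any
 * non-zero gup_flags requests write access, a negative return is an
 * error, and a non-negative return means all nr_pages pages were
 * faulted in and wired; release_pages() drops those wirings again.
 */
#if 0
static long
example_with_user_pages(unsigned long uaddr, unsigned long nr_pages,
			struct page **pages, int writable)
{
	long pinned;

	pinned = get_user_pages(uaddr, nr_pages, writable ? 1 : 0,
				pages, NULL);
	if (pinned < 0)
		return pinned;

	/* ... access the wired pages here ... */

	release_pages(pages, pinned);

	return pinned;
}
#endif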
179