/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

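/*
 * Bounds, in dwords, on the IBs allocated for page-table updates: jobs start
 * at the minimum and the size estimate in amdgpu_vm_sdma_update() is clamped
 * to the maximum.
 */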
#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * 0 on success, negative errno on failure.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->tbo);
	if (r)
		return r;

	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @owner: owner we need to sync to
 * @exclusive: exclusive move fence we need to sync to
 *
 * Returns:
 * 0 on success, negative errno on failure.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  void *owner, struct dma_fence *exclusive)
{
	struct amdgpu_bo *root = p->vm->root.base.bo;
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

	/* The IB size is given in bytes, four per dword */
	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
	if (r)
		return r;

	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
			     owner, false);
	if (r)
		return r;

	p->num_dw_left = ndw;
	return 0;
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * 0 on success, negative errno on failure.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_bo *root = p->vm->root.base.bo;
	struct amdgpu_ib *ib = p->job->ibs;
	struct amdgpu_ring *ring;
	struct dma_fence *f;
	int r;

	/* Derive the SDMA ring from the scheduler the VM entity runs on */
	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, &p->vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

	amdgpu_bo_fence(root, f, true);
	if (fence)
		swap(*fence, f);
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

	/* The PTEs to copy were placed at the end of the IB by the caller */
	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to set up the page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
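	/*
	 * For very few entries a plain write of the data is usually cheaper
	 * than a SET_PTE_PDE command.
	 */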
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, set up the mapping buffer on demand and write
 * commands to the IB.
 *
 * Returns:
 * 0 on success, negative errno on failure.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo *bo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	unsigned int i, ndw, nptes;
	uint64_t *pte;
	int r;

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		/* Not enough room left in the IB? Submit it and start a new one */
		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/* estimate how many dw we need */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

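		/*
		 * Without a pages_addr table the address range is linear, so
		 * the SDMA can generate the PTEs itself; otherwise every PTE
		 * has to be looked up on the CPU and copied into place from
		 * the end of the IB.
		 */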
		if (!p->pages_addr) {
			/* set page commands needed */
			if (bo->shadow)
				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(bo->shadow ? 2 : 1);

		/* reserve room for the padding amdgpu_ring_pad_ib() adds on commit */
		ndw -= 7;

		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (bo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

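/*
 * Callers do not invoke the functions above directly but go through this
 * table via vm->update_funcs. A rough sketch of the usual sequence, as
 * driven from amdgpu_vm.c (the surrounding names are assumptions, not
 * defined in this file):
 *
 *	params.vm->update_funcs->prepare(&params, owner, exclusive);
 *	params.vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
 *	params.vm->update_funcs->commit(&params, fence);
 *
 * prepare() allocates the first IB, update() transparently commits full IBs
 * and allocates new ones as needed, and the final commit() submits whatever
 * commands are left.
 */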
const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};