/*	$NetBSD: radeon_gart.c,v 1.14 2021/12/19 11:26:26 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_gart.c,v 1.14 2021/12/19 11:26:26 riastradh Exp $");

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <drm/radeon_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "radeon.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
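
/*
 * For illustration, assuming the usual 4 KiB GPU page size: a GPU
 * address that falls inside the GART aperture selects page table entry
 *
 *	i = (gpu_addr - rdev->mc.gtt_start) / RADEON_GPU_PAGE_SIZE
 *
 * and radeon_gart_set_page(rdev, i, entry) is what installs the bus
 * address of the backing system page in that slot.
 */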

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
#ifdef __NetBSD__
	int rsegs;
	int error;

	error = bus_dmamem_alloc(rdev->ddev->dmat, rdev->gart.table_size,
	    PAGE_SIZE, 0, &rdev->gart.rg_table_seg, 1, &rsegs, BUS_DMA_WAITOK);
	if (error)
		goto fail0;
	KASSERT(rsegs == 1);
	error = bus_dmamap_create(rdev->ddev->dmat, rdev->gart.table_size, 1,
	    rdev->gart.table_size, 0, BUS_DMA_WAITOK,
	    &rdev->gart.rg_table_map);
	if (error)
		goto fail1;
	error = bus_dmamem_map(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1,
	    rdev->gart.table_size, &rdev->gart.ptr,
	    BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
	if (error)
		goto fail2;
	error = bus_dmamap_load(rdev->ddev->dmat, rdev->gart.rg_table_map,
	    rdev->gart.ptr, rdev->gart.table_size, NULL, BUS_DMA_WAITOK);
	if (error)
		goto fail3;

	memset(rdev->gart.ptr, 0, rdev->gart.table_size);
	bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map, 0,
	    rdev->gart.table_size, BUS_DMASYNC_PREWRITE);

	/* Success! */
	rdev->gart.table_addr = rdev->gart.rg_table_map->dm_segs[0].ds_addr;
	return 0;

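/*
 * Nothing jumps to fail4 below; the label is kept (and marked __unused)
 * so the unwind chain mirrors the teardown in radeon_gart_table_ram_free().
 */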
fail4: __unused
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail3:	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
fail2:	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail1:	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
fail0:	KASSERT(error);
	/* XXX errno NetBSD->Linux */
	return -error;
#else
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
	    &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
		    rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
#endif
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef __NetBSD__
	bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
	    rdev->gart.table_size);
	bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
	bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
#else
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
		    rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
	    (void *)rdev->gart.ptr,
	    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
#endif
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
		    PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
		    0, NULL, NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
	    RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;

	if (!r) {
		int i;

		/* We might have dropped some GART table updates while it wasn't
		 * mapped, restore all entries
		 */
		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
		mb();
		radeon_gart_tlb_flush(rdev);
	}

	return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_bo_unref(&rdev->gart.robj);
}

#ifdef __NetBSD__
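/*
 * NetBSD-only helpers that bracket CPU updates of an in-RAM GART table
 * with bus_dmamap_sync(9): radeon_gart_pre_update syncs POSTWRITE so
 * the CPU may rewrite entries it previously handed to the device, and
 * radeon_gart_post_update syncs PREWRITE and flushes the GPU TLB so
 * the device observes the new entries.  When the table lives in VRAM
 * (rg_table_map == NULL) only the TLB flush applies.
 */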
static void
radeon_gart_pre_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_POSTWRITE);
	}
}

static void
radeon_gart_post_update(struct radeon_device *rdev, unsigned gpu_pgstart,
    unsigned gpu_npages)
{

	if (rdev->gart.rg_table_map != NULL) {
		const unsigned entsize =
		    rdev->gart.table_size / rdev->gart.num_gpu_pages;

		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    gpu_pgstart*entsize, gpu_npages*entsize,
		    BUS_DMASYNC_PREWRITE);
	}
	if (rdev->gart.ptr != NULL) {
		mb();
		radeon_gart_tlb_flush(rdev);
	}
}
#endif

/*
 * Common gart functions.
 */
#ifdef __NetBSD__
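/**
 * radeon_gart_unbind - unbind pages from the gart page table (NetBSD)
 *
 * @rdev: radeon_device pointer
 * @gpu_start: byte offset into the GPU's gart aperture
 * @npages: number of CPU pages to unbind
 *
 * Replaces the requested entries with the dummy page, as in the Linux
 * variant below.
 */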
void
radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		if (rdev->gart.pages[pgstart + pgno] == NULL)
			continue;
		rdev->gart.pages[pgstart + pgno] = NULL;
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			const unsigned t = gpu_pgstart + gpu_per_cpu*pgno +
			    gpu_pgno;
			rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
			if (rdev->gart.ptr == NULL)
				continue;
			radeon_gart_set_page(rdev, t, rdev->dummy_page.entry);
		}
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
}
#else
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
    int pages)
{
	unsigned t;
	unsigned p;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t,
					    rdev->dummy_page.entry);
				}
			}
		}
	}
	if (rdev->gart.ptr) {
		mb();
		radeon_gart_tlb_flush(rdev);
	}
}
#endif

#ifdef __NetBSD__
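/**
 * radeon_gart_bind - bind pages into the gart page table (NetBSD)
 *
 * @rdev: radeon_device pointer
 * @gpu_start: byte offset into the GPU's gart aperture
 * @npages: number of CPU pages to bind
 * @pages: pages to bind
 * @dmamap: loaded DMA map with one PAGE_SIZE segment per page
 * @flags: RADEON_GART_PAGE_* flags
 *
 * Binds the requested pages to the gart page table, as in the Linux
 * variant below.
 * Returns 0 for success, -EINVAL for failure.
 */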
int
radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
    unsigned npages, struct page **pages, bus_dmamap_t dmamap, uint32_t flags)
{
	const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	const unsigned gpu_npages = (npages * gpu_per_cpu);
	const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
	const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
	unsigned pgno, gpu_pgno;
	uint64_t page_entry;

	KASSERT(pgstart == (gpu_start / PAGE_SIZE));
	KASSERT(npages == dmamap->dm_nsegs);
	KASSERT(npages <= rdev->gart.num_cpu_pages);
	KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

	radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
	for (pgno = 0; pgno < npages; pgno++) {
		const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;

		KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
		rdev->gart.pages[pgstart + pgno] = pages[pgno];
		for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++) {
			const unsigned i = gpu_pgstart + gpu_per_cpu*pgno +
			    gpu_pgno;
			page_entry = radeon_gart_get_page_entry(
			    addr + gpu_pgno*RADEON_GPU_PAGE_SIZE, flags);
			rdev->gart.pages_entry[i] = page_entry;
			if (rdev->gart.ptr == NULL)
				continue;
			radeon_gart_set_page(rdev, i, page_entry);
		}
	}
	radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);

	return 0;
}
#else
/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: RADEON_GART_PAGE_* flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
    int pages, struct page **pagelist, dma_addr_t *dma_addr,
    uint32_t flags)
{
	unsigned t;
	unsigned p;
	uint64_t page_base, page_entry;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages[p] = pagelist[i];
		page_base = dma_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			page_entry = radeon_gart_get_page_entry(page_base, flags);
			rdev->gart.pages_entry[t] = page_entry;
			if (rdev->gart.ptr) {
				radeon_gart_set_page(rdev, t, page_entry);
			}
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	if (rdev->gart.ptr) {
		mb();
		radeon_gart_tlb_flush(rdev);
	}
	return 0;
}
#endif

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
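	/*
	 * For example, assuming 4 KiB CPU and GPU pages, a 256 MiB GTT
	 * yields 65536 entries of each kind.
	 */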
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
	    rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = vzalloc(array_size(sizeof(void *),
	    rdev->gart.num_cpu_pages));
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
	    rdev->gart.num_gpu_pages));
	if (rdev->gart.pages_entry == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
	return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	vfree(rdev->gart.pages);
	vfree(rdev->gart.pages_entry);
	rdev->gart.pages = NULL;
	rdev->gart.pages_entry = NULL;

	radeon_dummy_page_fini(rdev);
}