1 /*	$NetBSD: nouveau_bo.c,v 1.7 2016/04/24 04:26:12 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2007 Dave Airlied
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11  * and/or sell copies of the Software, and to permit persons to whom the
12  * Software is furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the next
15  * paragraph) shall be included in all copies or substantial portions of the
16  * Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
21  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24  * OTHER DEALINGS IN THE SOFTWARE.
25  */
26 /*
27  * Authors: Dave Airlied <airlied@linux.ie>
28  *	    Ben Skeggs   <darktama@iinet.net.au>
29  *	    Jeremy Kolb  <jkolb@brandeis.edu>
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: nouveau_bo.c,v 1.7 2016/04/24 04:26:12 riastradh Exp $");
34 
35 #include <core/engine.h>
36 #include <linux/swiotlb.h>
37 
38 #include <subdev/fb.h>
39 #include <subdev/vm.h>
40 #include <subdev/bar.h>
41 
42 #include "nouveau_drm.h"
43 #include "nouveau_dma.h"
44 #include "nouveau_fence.h"
45 
46 #include "nouveau_bo.h"
47 #include "nouveau_ttm.h"
48 #include "nouveau_gem.h"
49 
50 /*
51  * NV10-NV40 tiling helpers
52  */
53 
54 static void
55 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
56 			   u32 addr, u32 size, u32 pitch, u32 flags)
57 {
58 	struct nouveau_drm *drm = nouveau_drm(dev);
59 	int i = reg - drm->tile.reg;
60 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
61 	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
62 	struct nouveau_engine *engine;
63 
64 	nouveau_fence_unref(&reg->fence);
65 
66 	if (tile->pitch)
67 		pfb->tile.fini(pfb, i, tile);
68 
69 	if (pitch)
70 		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
71 
72 	pfb->tile.prog(pfb, i, tile);
73 
74 	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
75 		engine->tile_prog(engine, i);
76 	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
77 		engine->tile_prog(engine, i);
78 }
79 
80 static struct nouveau_drm_tile *
81 nv10_bo_get_tile_region(struct drm_device *dev, int i)
82 {
83 	struct nouveau_drm *drm = nouveau_drm(dev);
84 	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
85 
86 	spin_lock(&drm->tile.lock);
87 
88 	if (!tile->used &&
89 	    (!tile->fence || nouveau_fence_done(tile->fence)))
90 		tile->used = true;
91 	else
92 		tile = NULL;
93 
94 	spin_unlock(&drm->tile.lock);
95 	return tile;
96 }
97 
98 static void
99 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
100 			struct nouveau_fence *fence)
101 {
102 	struct nouveau_drm *drm = nouveau_drm(dev);
103 
104 	if (tile) {
105 		spin_lock(&drm->tile.lock);
106 		tile->fence = nouveau_fence_ref(fence);
107 		tile->used = false;
108 		spin_unlock(&drm->tile.lock);
109 	}
110 }
111 
112 static struct nouveau_drm_tile *
113 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
114 		   u32 size, u32 pitch, u32 flags)
115 {
116 	struct nouveau_drm *drm = nouveau_drm(dev);
117 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
118 	struct nouveau_drm_tile *tile, *found = NULL;
119 	int i;
120 
121 	for (i = 0; i < pfb->tile.regions; i++) {
122 		tile = nv10_bo_get_tile_region(dev, i);
123 
124 		if (pitch && !found) {
125 			found = tile;
126 			continue;
127 
128 		} else if (tile && pfb->tile.region[i].pitch) {
129 			/* Kill an unused tile region. */
130 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
131 		}
132 
133 		nv10_bo_put_tile_region(dev, tile, NULL);
134 	}
135 
136 	if (found)
137 		nv10_bo_update_tile_region(dev, found, addr, size,
138 					    pitch, flags);
139 	return found;
140 }
141 
142 static void
143 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
144 {
145 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
146 	struct drm_device *dev = drm->dev;
147 	struct nouveau_bo *nvbo = nouveau_bo(bo);
148 
149 #ifdef __NetBSD__
150 	if (unlikely(nvbo->gem.gemo_shm_uao))
151 #else
152 	if (unlikely(nvbo->gem.filp))
153 #endif
154 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
155 	WARN_ON(nvbo->pin_refcnt > 0);
156 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
157 	kfree(nvbo);
158 }
159 
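/*
 * Adjust the requested size and alignment so the buffer satisfies the
 * tiling constraints of pre-NV50 chipsets, or the selected page size on
 * NV50 and later, then round the size up to a whole page.
 */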
160 static void
161 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
162 		       int *align, int *size)
163 {
164 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
165 	struct nouveau_device *device = nv_device(drm->device);
166 
167 	if (device->card_type < NV_50) {
168 		if (nvbo->tile_mode) {
169 			if (device->chipset >= 0x40) {
170 				*align = 65536;
171 				*size = roundup(*size, 64 * nvbo->tile_mode);
172 
173 			} else if (device->chipset >= 0x30) {
174 				*align = 32768;
175 				*size = roundup(*size, 64 * nvbo->tile_mode);
176 
177 			} else if (device->chipset >= 0x20) {
178 				*align = 16384;
179 				*size = roundup(*size, 64 * nvbo->tile_mode);
180 
181 			} else if (device->chipset >= 0x10) {
182 				*align = 16384;
183 				*size = roundup(*size, 32 * nvbo->tile_mode);
184 			}
185 		}
186 	} else {
187 		*size = roundup(*size, (1 << nvbo->page_shift));
188 		*align = max((1 <<  nvbo->page_shift), *align);
189 	}
190 
191 	*size = roundup(*size, PAGE_SIZE);
192 }
193 
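/*
 * Allocate and initialize a nouveau_bo: pick small or large GPU pages,
 * fix up size/alignment, set the initial placement and hand the object
 * to TTM.  A rough usage sketch (not taken from this file):
 *
 *	ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM, 0, 0,
 *			     NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */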
194 int
195 nouveau_bo_new(struct drm_device *dev, int size, int align,
196 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
197 	       struct sg_table *sg,
198 	       struct nouveau_bo **pnvbo)
199 {
200 	struct nouveau_drm *drm = nouveau_drm(dev);
201 	struct nouveau_bo *nvbo;
202 	size_t acc_size;
203 	int ret;
204 	int type = ttm_bo_type_device;
205 	int lpg_shift = 12;
206 	int max_size;
207 
208 	if (drm->client.base.vm)
209 		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
210 	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
211 
212 	if (size <= 0 || size > max_size) {
213 		nv_warn(drm, "skipped size %x\n", (u32)size);
214 		return -EINVAL;
215 	}
216 
217 	if (sg)
218 		type = ttm_bo_type_sg;
219 
220 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
221 	if (!nvbo)
222 		return -ENOMEM;
223 	INIT_LIST_HEAD(&nvbo->head);
224 	INIT_LIST_HEAD(&nvbo->entry);
225 	INIT_LIST_HEAD(&nvbo->vma_list);
226 	nvbo->tile_mode = tile_mode;
227 	nvbo->tile_flags = tile_flags;
228 	nvbo->bo.bdev = &drm->ttm.bdev;
229 
230 	nvbo->page_shift = 12;
231 	if (drm->client.base.vm) {
232 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
233 			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
234 	}
235 
236 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
237 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
238 	nouveau_bo_placement_set(nvbo, flags, 0);
239 
240 	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
241 				       sizeof(struct nouveau_bo));
242 
243 	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
244 			  type, &nvbo->placement,
245 			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
246 			  nouveau_bo_del_ttm);
247 	if (ret) {
248 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
249 		return ret;
250 	}
251 
252 	*pnvbo = nvbo;
253 	return 0;
254 }
255 
256 static void
257 set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
258 {
259 	*n = 0;
260 
261 	if (type & TTM_PL_FLAG_VRAM)
262 		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
263 	if (type & TTM_PL_FLAG_TT)
264 		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
265 	if (type & TTM_PL_FLAG_SYSTEM)
266 		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
267 }
268 
269 static void
270 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
271 {
272 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
273 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
274 	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
275 
276 	if ((nv_device(drm->device)->card_type == NV_10 ||
277 	     nv_device(drm->device)->card_type == NV_11) &&
278 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
279 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
280 		/*
281 		 * Make sure that the color and depth buffers are handled
282 		 * by independent memory controller units. Up to a 9x
283 		 * speed up when alpha-blending and depth-test are enabled
284 		 * at the same time.
285 		 */
286 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
287 			nvbo->placement.fpfn = vram_pages / 2;
288 			nvbo->placement.lpfn = ~0;
289 		} else {
290 			nvbo->placement.fpfn = 0;
291 			nvbo->placement.lpfn = vram_pages / 2;
292 		}
293 	}
294 }
295 
296 void
297 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
298 {
299 	struct ttm_placement *pl = &nvbo->placement;
300 	uint32_t flags = TTM_PL_MASK_CACHING |
301 		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
302 
303 	pl->placement = nvbo->placements;
304 	set_placement_list(nvbo->placements, &pl->num_placement,
305 			   type, flags);
306 
307 	pl->busy_placement = nvbo->busy_placements;
308 	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
309 			   type | busy, flags);
310 
311 	set_placement_range(nvbo, type);
312 }
313 
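/*
 * Pin the buffer into the requested memory type.  Pins are refcounted;
 * only the first pin validates the placement and updates the VRAM/GART
 * accounting.
 */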
314 int
315 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
316 {
317 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
318 	struct ttm_buffer_object *bo = &nvbo->bo;
319 	int ret;
320 
321 	ret = ttm_bo_reserve(bo, false, false, false, 0);
322 	if (ret)
323 		goto out;
324 
325 	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
326 		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
327 			 1 << bo->mem.mem_type, memtype);
328 		ret = -EINVAL;
329 		goto out;
330 	}
331 
332 	if (nvbo->pin_refcnt++)
333 		goto out;
334 
335 	nouveau_bo_placement_set(nvbo, memtype, 0);
336 
337 	ret = nouveau_bo_validate(nvbo, false, false);
338 	if (ret == 0) {
339 		switch (bo->mem.mem_type) {
340 		case TTM_PL_VRAM:
341 			drm->gem.vram_available -= bo->mem.size;
342 			break;
343 		case TTM_PL_TT:
344 			drm->gem.gart_available -= bo->mem.size;
345 			break;
346 		default:
347 			break;
348 		}
349 	}
350 out:
351 	ttm_bo_unreserve(bo);
352 	return ret;
353 }
354 
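/*
 * Drop one pin reference.  When the last reference goes away the buffer
 * becomes evictable again and the VRAM/GART accounting is restored.
 */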
355 int
356 nouveau_bo_unpin(struct nouveau_bo *nvbo)
357 {
358 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
359 	struct ttm_buffer_object *bo = &nvbo->bo;
360 	int ret, ref;
361 
362 	ret = ttm_bo_reserve(bo, false, false, false, 0);
363 	if (ret)
364 		return ret;
365 
366 	ref = --nvbo->pin_refcnt;
367 	WARN_ON_ONCE(ref < 0);
368 	if (ref)
369 		goto out;
370 
371 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
372 
373 	ret = nouveau_bo_validate(nvbo, false, false);
374 	if (ret == 0) {
375 		switch (bo->mem.mem_type) {
376 		case TTM_PL_VRAM:
377 			drm->gem.vram_available += bo->mem.size;
378 			break;
379 		case TTM_PL_TT:
380 			drm->gem.gart_available += bo->mem.size;
381 			break;
382 		default:
383 			break;
384 		}
385 	}
386 
387 out:
388 	ttm_bo_unreserve(bo);
389 	return ret;
390 }
391 
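/*
 * Map the whole buffer into kernel virtual address space via ttm_bo_kmap
 * so it can be accessed with the rd/wr helpers below.
 */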
392 int
393 nouveau_bo_map(struct nouveau_bo *nvbo)
394 {
395 	int ret;
396 
397 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
398 	if (ret)
399 		return ret;
400 
401 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
402 	ttm_bo_unreserve(&nvbo->bo);
403 	return ret;
404 }
405 
406 void
407 nouveau_bo_unmap(struct nouveau_bo *nvbo)
408 {
409 	if (nvbo)
410 		ttm_bo_kunmap(&nvbo->kmap);
411 }
412 
413 int
414 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
415 		    bool no_wait_gpu)
416 {
417 	int ret;
418 
419 	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
420 			      interruptible, no_wait_gpu);
421 	if (ret)
422 		return ret;
423 
424 	return 0;
425 }
426 
427 #ifdef __NetBSD__
428 /*
429  * XXX Can't use bus_space here because this is all mapped through the
430  * nouveau_bo abstraction.  Can't assume we're x86 because this is
431  * Nouveau, not Intel.
432  */
433 
434 #  define	__iomem			volatile
435 #  define	__force
436 #  define	ioread16_native		fake_ioread16_native
437 #  define	ioread32_native		fake_ioread32_native
438 #  define	iowrite16_native	fake_iowrite16_native
439 #  define	iowrite32_native	fake_iowrite32_native
440 
441 static inline uint16_t
442 ioread16_native(const void __iomem *ptr)
443 {
444 	uint16_t v;
445 
446 	v = *(const uint16_t __iomem *)ptr;
447 	membar_consumer();
448 
449 	return htole16(v);
450 }
451 
452 static inline uint32_t
453 ioread32_native(const void __iomem *ptr)
454 {
455 	uint32_t v;
456 
457 	v = *(const uint32_t __iomem *)ptr;
458 	membar_consumer();
459 
460 	return htole32(v);
461 }
462 
463 static inline void
464 iowrite16_native(uint16_t v, void __iomem *ptr)
465 {
466 
467 	membar_producer();
468 	*(uint16_t __iomem *)ptr = le16toh(v);
469 }
470 
471 static inline void
472 iowrite32_native(uint32_t v, void __iomem *ptr)
473 {
474 
475 	membar_producer();
476 	*(uint32_t __iomem *)ptr = le32toh(v);
477 }
478 #endif
479 
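/*
 * 16/32-bit accessors for a kmapped buffer.  ttm_kmap_obj_virtual()
 * reports whether the mapping is I/O memory; if so, go through the
 * io{read,write}*_native helpers, otherwise access it directly,
 * e.g. nouveau_bo_wr32(nvbo, 0, 0xdeadbeef).
 */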
480 u16
481 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
482 {
483 	bool is_iomem;
484 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
485 	mem = &mem[index];
486 	if (is_iomem)
487 		return ioread16_native((void __force __iomem *)mem);
488 	else
489 		return *mem;
490 }
491 
492 void
493 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
494 {
495 	bool is_iomem;
496 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
497 	mem = &mem[index];
498 	if (is_iomem)
499 		iowrite16_native(val, (void __force __iomem *)mem);
500 	else
501 		*mem = val;
502 }
503 
504 u32
505 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
506 {
507 	bool is_iomem;
508 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
509 	mem = &mem[index];
510 	if (is_iomem)
511 		return ioread32_native((void __force __iomem *)mem);
512 	else
513 		return *mem;
514 }
515 
516 void
517 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
518 {
519 	bool is_iomem;
520 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
521 	mem = &mem[index];
522 	if (is_iomem)
523 		iowrite32_native(val, (void __force __iomem *)mem);
524 	else
525 		*mem = val;
526 }
527 
528 #ifdef __NetBSD__
529 #  undef	__iomem
530 #  undef	__force
531 #  undef	ioread16_native
532 #  undef	ioread32_native
533 #  undef	iowrite16_native
534 #  undef	iowrite32_native
535 #endif
536 
537 static struct ttm_tt *
538 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
539 		      uint32_t page_flags, struct page *dummy_read)
540 {
541 #if __OS_HAS_AGP
542 	struct nouveau_drm *drm = nouveau_bdev(bdev);
543 	struct drm_device *dev = drm->dev;
544 
545 	if (drm->agp.stat == ENABLED) {
546 		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
547 					 page_flags, dummy_read);
548 	}
549 #endif
550 
551 	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
552 }
553 
554 static int
555 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
556 {
557 	/* We'll do this from user space. */
558 	return 0;
559 }
560 
561 static int
562 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
563 			 struct ttm_mem_type_manager *man)
564 {
565 	struct nouveau_drm *drm = nouveau_bdev(bdev);
566 
567 	switch (type) {
568 	case TTM_PL_SYSTEM:
569 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
570 		man->available_caching = TTM_PL_MASK_CACHING;
571 		man->default_caching = TTM_PL_FLAG_CACHED;
572 		break;
573 	case TTM_PL_VRAM:
574 		if (nv_device(drm->device)->card_type >= NV_50) {
575 			man->func = &nouveau_vram_manager;
576 			man->io_reserve_fastpath = false;
577 			man->use_io_reserve_lru = true;
578 		} else {
579 			man->func = &ttm_bo_manager_func;
580 		}
581 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
582 			     TTM_MEMTYPE_FLAG_MAPPABLE;
583 		man->available_caching = TTM_PL_FLAG_UNCACHED |
584 					 TTM_PL_FLAG_WC;
585 		man->default_caching = TTM_PL_FLAG_WC;
586 		break;
587 	case TTM_PL_TT:
588 		if (nv_device(drm->device)->card_type >= NV_50)
589 			man->func = &nouveau_gart_manager;
590 		else
591 		if (drm->agp.stat != ENABLED)
592 			man->func = &nv04_gart_manager;
593 		else
594 			man->func = &ttm_bo_manager_func;
595 
596 		if (drm->agp.stat == ENABLED) {
597 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
598 			man->available_caching = TTM_PL_FLAG_UNCACHED |
599 				TTM_PL_FLAG_WC;
600 			man->default_caching = TTM_PL_FLAG_WC;
601 		} else {
602 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
603 				     TTM_MEMTYPE_FLAG_CMA;
604 			man->available_caching = TTM_PL_MASK_CACHING;
605 			man->default_caching = TTM_PL_FLAG_CACHED;
606 		}
607 
608 		break;
609 	default:
610 		return -EINVAL;
611 	}
612 	return 0;
613 }
614 
615 static void
616 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
617 {
618 	struct nouveau_bo *nvbo = nouveau_bo(bo);
619 
620 	switch (bo->mem.mem_type) {
621 	case TTM_PL_VRAM:
622 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
623 					 TTM_PL_FLAG_SYSTEM);
624 		break;
625 	default:
626 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
627 		break;
628 	}
629 
630 	*pl = nvbo->placement;
631 }
632 
633 
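/*
 * Hardware-assisted buffer moves.  Each GPU generation gets a pair of
 * routines: an *_init() that binds the copy class to the channel and a
 * move/exec routine that queues the actual copy methods.  Which pair is
 * used is decided by nouveau_bo_move_init() below.
 */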
634 static int
635 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
636 {
637 	int ret = RING_SPACE(chan, 2);
638 	if (ret == 0) {
639 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
640 		OUT_RING  (chan, handle & 0x0000ffff);
641 		FIRE_RING (chan);
642 	}
643 	return ret;
644 }
645 
646 static int
647 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
648 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
649 {
650 	struct nouveau_mem *node = old_mem->mm_node;
651 	int ret = RING_SPACE(chan, 10);
652 	if (ret == 0) {
653 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
654 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
655 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
656 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
657 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
658 		OUT_RING  (chan, PAGE_SIZE);
659 		OUT_RING  (chan, PAGE_SIZE);
660 		OUT_RING  (chan, PAGE_SIZE);
661 		OUT_RING  (chan, new_mem->num_pages);
662 		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
663 	}
664 	return ret;
665 }
666 
667 static int
668 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
669 {
670 	int ret = RING_SPACE(chan, 2);
671 	if (ret == 0) {
672 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
673 		OUT_RING  (chan, handle);
674 	}
675 	return ret;
676 }
677 
678 static int
679 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
680 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
681 {
682 	struct nouveau_mem *node = old_mem->mm_node;
683 	u64 src_offset = node->vma[0].offset;
684 	u64 dst_offset = node->vma[1].offset;
685 	u32 page_count = new_mem->num_pages;
686 	int ret;
687 
688 	page_count = new_mem->num_pages;
689 	while (page_count) {
690 		int line_count = (page_count > 8191) ? 8191 : page_count;
691 
692 		ret = RING_SPACE(chan, 11);
693 		if (ret)
694 			return ret;
695 
696 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
697 		OUT_RING  (chan, upper_32_bits(src_offset));
698 		OUT_RING  (chan, lower_32_bits(src_offset));
699 		OUT_RING  (chan, upper_32_bits(dst_offset));
700 		OUT_RING  (chan, lower_32_bits(dst_offset));
701 		OUT_RING  (chan, PAGE_SIZE);
702 		OUT_RING  (chan, PAGE_SIZE);
703 		OUT_RING  (chan, PAGE_SIZE);
704 		OUT_RING  (chan, line_count);
705 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
706 		OUT_RING  (chan, 0x00000110);
707 
708 		page_count -= line_count;
709 		src_offset += (PAGE_SIZE * line_count);
710 		dst_offset += (PAGE_SIZE * line_count);
711 	}
712 
713 	return 0;
714 }
715 
716 static int
717 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
718 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
719 {
720 	struct nouveau_mem *node = old_mem->mm_node;
721 	u64 src_offset = node->vma[0].offset;
722 	u64 dst_offset = node->vma[1].offset;
723 	u32 page_count = new_mem->num_pages;
724 	int ret;
725 
726 	page_count = new_mem->num_pages;
727 	while (page_count) {
728 		int line_count = (page_count > 2047) ? 2047 : page_count;
729 
730 		ret = RING_SPACE(chan, 12);
731 		if (ret)
732 			return ret;
733 
734 		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
735 		OUT_RING  (chan, upper_32_bits(dst_offset));
736 		OUT_RING  (chan, lower_32_bits(dst_offset));
737 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
738 		OUT_RING  (chan, upper_32_bits(src_offset));
739 		OUT_RING  (chan, lower_32_bits(src_offset));
740 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
741 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
742 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
743 		OUT_RING  (chan, line_count);
744 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
745 		OUT_RING  (chan, 0x00100110);
746 
747 		page_count -= line_count;
748 		src_offset += (PAGE_SIZE * line_count);
749 		dst_offset += (PAGE_SIZE * line_count);
750 	}
751 
752 	return 0;
753 }
754 
755 static int
756 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
757 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
758 {
759 	struct nouveau_mem *node = old_mem->mm_node;
760 	u64 src_offset = node->vma[0].offset;
761 	u64 dst_offset = node->vma[1].offset;
762 	u32 page_count = new_mem->num_pages;
763 	int ret;
764 
765 	page_count = new_mem->num_pages;
766 	while (page_count) {
767 		int line_count = (page_count > 8191) ? 8191 : page_count;
768 
769 		ret = RING_SPACE(chan, 11);
770 		if (ret)
771 			return ret;
772 
773 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
774 		OUT_RING  (chan, upper_32_bits(src_offset));
775 		OUT_RING  (chan, lower_32_bits(src_offset));
776 		OUT_RING  (chan, upper_32_bits(dst_offset));
777 		OUT_RING  (chan, lower_32_bits(dst_offset));
778 		OUT_RING  (chan, PAGE_SIZE);
779 		OUT_RING  (chan, PAGE_SIZE);
780 		OUT_RING  (chan, PAGE_SIZE);
781 		OUT_RING  (chan, line_count);
782 		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
783 		OUT_RING  (chan, 0x00000110);
784 
785 		page_count -= line_count;
786 		src_offset += (PAGE_SIZE * line_count);
787 		dst_offset += (PAGE_SIZE * line_count);
788 	}
789 
790 	return 0;
791 }
792 
793 static int
794 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
795 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
796 {
797 	struct nouveau_mem *node = old_mem->mm_node;
798 	int ret = RING_SPACE(chan, 7);
799 	if (ret == 0) {
800 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
801 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
802 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
803 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
804 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
805 		OUT_RING  (chan, 0x00000000 /* COPY */);
806 		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
807 	}
808 	return ret;
809 }
810 
811 static int
812 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
813 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
814 {
815 	struct nouveau_mem *node = old_mem->mm_node;
816 	int ret = RING_SPACE(chan, 7);
817 	if (ret == 0) {
818 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
819 		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
820 		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
821 		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
822 		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
823 		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
824 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
825 	}
826 	return ret;
827 }
828 
829 static int
830 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
831 {
832 	int ret = RING_SPACE(chan, 6);
833 	if (ret == 0) {
834 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
835 		OUT_RING  (chan, handle);
836 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
837 		OUT_RING  (chan, NvNotify0);
838 		OUT_RING  (chan, NvDmaFB);
839 		OUT_RING  (chan, NvDmaFB);
840 	}
841 
842 	return ret;
843 }
844 
845 static int
846 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
847 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
848 {
849 	struct nouveau_mem *node = old_mem->mm_node;
850 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
851 	u64 src_offset = node->vma[0].offset;
852 	u64 dst_offset = node->vma[1].offset;
853 	int src_tiled = !!node->memtype;
854 	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
855 	int ret;
856 
857 	while (length) {
858 		u32 amount, stride, height;
859 
860 		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
861 		if (ret)
862 			return ret;
863 
864 		amount  = min(length, (u64)(4 * 1024 * 1024));
865 		stride  = 16 * 4;
866 		height  = amount / stride;
867 
868 		if (src_tiled) {
869 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
870 			OUT_RING  (chan, 0);
871 			OUT_RING  (chan, 0);
872 			OUT_RING  (chan, stride);
873 			OUT_RING  (chan, height);
874 			OUT_RING  (chan, 1);
875 			OUT_RING  (chan, 0);
876 			OUT_RING  (chan, 0);
877 		} else {
878 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
879 			OUT_RING  (chan, 1);
880 		}
881 		if (dst_tiled) {
882 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
883 			OUT_RING  (chan, 0);
884 			OUT_RING  (chan, 0);
885 			OUT_RING  (chan, stride);
886 			OUT_RING  (chan, height);
887 			OUT_RING  (chan, 1);
888 			OUT_RING  (chan, 0);
889 			OUT_RING  (chan, 0);
890 		} else {
891 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
892 			OUT_RING  (chan, 1);
893 		}
894 
895 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
896 		OUT_RING  (chan, upper_32_bits(src_offset));
897 		OUT_RING  (chan, upper_32_bits(dst_offset));
898 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
899 		OUT_RING  (chan, lower_32_bits(src_offset));
900 		OUT_RING  (chan, lower_32_bits(dst_offset));
901 		OUT_RING  (chan, stride);
902 		OUT_RING  (chan, stride);
903 		OUT_RING  (chan, stride);
904 		OUT_RING  (chan, height);
905 		OUT_RING  (chan, 0x00000101);
906 		OUT_RING  (chan, 0x00000000);
907 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
908 		OUT_RING  (chan, 0);
909 
910 		length -= amount;
911 		src_offset += amount;
912 		dst_offset += amount;
913 	}
914 
915 	return 0;
916 }
917 
918 static int
919 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
920 {
921 	int ret = RING_SPACE(chan, 4);
922 	if (ret == 0) {
923 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
924 		OUT_RING  (chan, handle);
925 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
926 		OUT_RING  (chan, NvNotify0);
927 	}
928 
929 	return ret;
930 }
931 
932 static inline uint32_t
933 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
934 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
935 {
936 	if (mem->mem_type == TTM_PL_TT)
937 		return NvDmaTT;
938 	return NvDmaFB;
939 }
940 
941 static int
942 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
943 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
944 {
945 	u32 src_offset = old_mem->start << PAGE_SHIFT;
946 	u32 dst_offset = new_mem->start << PAGE_SHIFT;
947 	u32 page_count = new_mem->num_pages;
948 	int ret;
949 
950 	ret = RING_SPACE(chan, 3);
951 	if (ret)
952 		return ret;
953 
954 	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
955 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
956 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
957 
958 	page_count = new_mem->num_pages;
959 	while (page_count) {
960 		int line_count = (page_count > 2047) ? 2047 : page_count;
961 
962 		ret = RING_SPACE(chan, 11);
963 		if (ret)
964 			return ret;
965 
966 		BEGIN_NV04(chan, NvSubCopy,
967 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
968 		OUT_RING  (chan, src_offset);
969 		OUT_RING  (chan, dst_offset);
970 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
971 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
972 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
973 		OUT_RING  (chan, line_count);
974 		OUT_RING  (chan, 0x00000101);
975 		OUT_RING  (chan, 0x00000000);
976 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
977 		OUT_RING  (chan, 0);
978 
979 		page_count -= line_count;
980 		src_offset += (PAGE_SIZE * line_count);
981 		dst_offset += (PAGE_SIZE * line_count);
982 	}
983 
984 	return 0;
985 }
986 
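/*
 * Create temporary VMAs in the client's VM for the source and
 * destination of a move and map both memory nodes through them, so the
 * copy engine can address either end regardless of where it lives.
 */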
987 static int
988 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
989 		     struct ttm_mem_reg *mem)
990 {
991 	struct nouveau_mem *old_node = bo->mem.mm_node;
992 	struct nouveau_mem *new_node = mem->mm_node;
993 	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
994 	int ret;
995 
996 	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
997 			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
998 	if (ret)
999 		return ret;
1000 
1001 	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
1002 			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
1003 	if (ret) {
1004 		nouveau_vm_put(&old_node->vma[0]);
1005 		return ret;
1006 	}
1007 
1008 	nouveau_vm_map(&old_node->vma[0], old_node);
1009 	nouveau_vm_map(&old_node->vma[1], new_node);
1010 	return 0;
1011 }
1012 
1013 static int
1014 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1015 		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1016 {
1017 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1018 	struct nouveau_channel *chan = drm->ttm.chan;
1019 	struct nouveau_fence *fence;
1020 	int ret;
1021 
1022 	/* create temporary vmas for the transfer and attach them to the
1023 	 * old nouveau_mem node, these will get cleaned up after ttm has
1024 	 * destroyed the ttm_mem_reg
1025 	 */
1026 	if (nv_device(drm->device)->card_type >= NV_50) {
1027 		ret = nouveau_bo_move_prep(drm, bo, new_mem);
1028 		if (ret)
1029 			return ret;
1030 	}
1031 
1032 	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
1033 	ret = nouveau_fence_sync(bo->sync_obj, chan);
1034 	if (ret == 0) {
1035 		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1036 		if (ret == 0) {
1037 			ret = nouveau_fence_new(chan, false, &fence);
1038 			if (ret == 0) {
1039 				ret = ttm_bo_move_accel_cleanup(bo, fence,
1040 								evict,
1041 								no_wait_gpu,
1042 								new_mem);
1043 				nouveau_fence_unref(&fence);
1044 			}
1045 		}
1046 	}
1047 	mutex_unlock(&chan->cli->mutex);
1048 	return ret;
1049 }
1050 
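/*
 * Probe the method table from newest to oldest and remember the first
 * copy class the hardware accepts; if none works, buffer moves fall
 * back to CPU copies.
 */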
1051 void
1052 nouveau_bo_move_init(struct nouveau_drm *drm)
1053 {
1054 	static const struct {
1055 		const char *name;
1056 		int engine;
1057 		u32 oclass;
1058 		int (*exec)(struct nouveau_channel *,
1059 			    struct ttm_buffer_object *,
1060 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
1061 		int (*init)(struct nouveau_channel *, u32 handle);
1062 	} _methods[] = {
1063 		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1064 		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1065 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1066 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1067 		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1068 		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1069 		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1070 		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1071 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1072 		{},
1073 		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1074 	}, *mthd = _methods;
1075 	const char *name = "CPU";
1076 	int ret;
1077 
1078 	do {
1079 		struct nouveau_object *object;
1080 		struct nouveau_channel *chan;
1081 		u32 handle = (mthd->engine << 16) | mthd->oclass;
1082 
1083 		if (mthd->engine)
1084 			chan = drm->cechan;
1085 		else
1086 			chan = drm->channel;
1087 		if (chan == NULL)
1088 			continue;
1089 
1090 		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
1091 					 mthd->oclass, NULL, 0, &object);
1092 		if (ret == 0) {
1093 			ret = mthd->init(chan, handle);
1094 			if (ret) {
1095 				nouveau_object_del(nv_object(drm),
1096 						   chan->handle, handle);
1097 				continue;
1098 			}
1099 
1100 			drm->ttm.move = mthd->exec;
1101 			drm->ttm.chan = chan;
1102 			name = mthd->name;
1103 			break;
1104 		}
1105 	} while ((++mthd)->exec);
1106 
1107 	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1108 }
1109 
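/*
 * Moves between VRAM and system memory go through a GART bounce buffer:
 * flipd copies VRAM->GART and then lets TTM finish GART->system, flips
 * does the reverse.
 */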
1110 static int
1111 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1112 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1113 {
1114 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1115 	struct ttm_placement placement;
1116 	struct ttm_mem_reg tmp_mem;
1117 	int ret;
1118 
1119 	placement.fpfn = placement.lpfn = 0;
1120 	placement.num_placement = placement.num_busy_placement = 1;
1121 	placement.placement = placement.busy_placement = &placement_memtype;
1122 
1123 	tmp_mem = *new_mem;
1124 	tmp_mem.mm_node = NULL;
1125 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1126 	if (ret)
1127 		return ret;
1128 
1129 	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
1130 	if (ret)
1131 		goto out;
1132 
1133 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1134 	if (ret)
1135 		goto out;
1136 
1137 	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1138 out:
1139 	ttm_bo_mem_put(bo, &tmp_mem);
1140 	return ret;
1141 }
1142 
1143 static int
1144 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1145 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1146 {
1147 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1148 	struct ttm_placement placement;
1149 	struct ttm_mem_reg tmp_mem;
1150 	int ret;
1151 
1152 	placement.fpfn = placement.lpfn = 0;
1153 	placement.num_placement = placement.num_busy_placement = 1;
1154 	placement.placement = placement.busy_placement = &placement_memtype;
1155 
1156 	tmp_mem = *new_mem;
1157 	tmp_mem.mm_node = NULL;
1158 	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1159 	if (ret)
1160 		return ret;
1161 
1162 	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1163 	if (ret)
1164 		goto out;
1165 
1166 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1167 	if (ret)
1168 		goto out;
1169 
1170 out:
1171 	ttm_bo_mem_put(bo, &tmp_mem);
1172 	return ret;
1173 }
1174 
1175 static void
1176 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1177 {
1178 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1179 	struct nouveau_vma *vma;
1180 
1181 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1182 	if (bo->destroy != nouveau_bo_del_ttm)
1183 		return;
1184 
1185 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1186 		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
1187 			      (new_mem->mem_type == TTM_PL_VRAM ||
1188 			       nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
1189 			nouveau_vm_map(vma, new_mem->mm_node);
1190 		} else {
1191 			nouveau_vm_unmap(vma);
1192 		}
1193 	}
1194 }
1195 
1196 static int
1197 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1198 		   struct nouveau_drm_tile **new_tile)
1199 {
1200 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1201 	struct drm_device *dev = drm->dev;
1202 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1203 	u64 offset = new_mem->start << PAGE_SHIFT;
1204 
1205 	*new_tile = NULL;
1206 	if (new_mem->mem_type != TTM_PL_VRAM)
1207 		return 0;
1208 
1209 	if (nv_device(drm->device)->card_type >= NV_10) {
1210 		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1211 						nvbo->tile_mode,
1212 						nvbo->tile_flags);
1213 	}
1214 
1215 	return 0;
1216 }
1217 
1218 static void
1219 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1220 		      struct nouveau_drm_tile *new_tile,
1221 		      struct nouveau_drm_tile **old_tile)
1222 {
1223 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1224 	struct drm_device *dev = drm->dev;
1225 
1226 	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1227 	*old_tile = new_tile;
1228 }
1229 
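/*
 * Main TTM move hook: handle pre-NV50 tile-region bookkeeping, short-
 * circuit moves of unpopulated system buffers, try the hardware copy
 * path if one was initialized, and fall back to a CPU memcpy otherwise.
 */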
1230 static int
1231 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1232 		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1233 {
1234 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1235 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1236 	struct ttm_mem_reg *old_mem = &bo->mem;
1237 	struct nouveau_drm_tile *new_tile = NULL;
1238 	int ret = 0;
1239 
1240 	if (nv_device(drm->device)->card_type < NV_50) {
1241 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1242 		if (ret)
1243 			return ret;
1244 	}
1245 
1246 	/* Fake bo copy. */
1247 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1248 		BUG_ON(bo->mem.mm_node != NULL);
1249 		bo->mem = *new_mem;
1250 		new_mem->mm_node = NULL;
1251 		goto out;
1252 	}
1253 
1254 	/* Hardware assisted copy. */
1255 	if (drm->ttm.move) {
1256 		if (new_mem->mem_type == TTM_PL_SYSTEM)
1257 			ret = nouveau_bo_move_flipd(bo, evict, intr,
1258 						    no_wait_gpu, new_mem);
1259 		else if (old_mem->mem_type == TTM_PL_SYSTEM)
1260 			ret = nouveau_bo_move_flips(bo, evict, intr,
1261 						    no_wait_gpu, new_mem);
1262 		else
1263 			ret = nouveau_bo_move_m2mf(bo, evict, intr,
1264 						   no_wait_gpu, new_mem);
1265 		if (!ret)
1266 			goto out;
1267 	}
1268 
1269 	/* Fallback to software copy. */
1270 	spin_lock(&bo->bdev->fence_lock);
1271 	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
1272 	spin_unlock(&bo->bdev->fence_lock);
1273 	if (ret == 0)
1274 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1275 
1276 out:
1277 	if (nv_device(drm->device)->card_type < NV_50) {
1278 		if (ret)
1279 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1280 		else
1281 			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1282 	}
1283 
1284 	return ret;
1285 }
1286 
1287 static int
1288 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1289 {
1290 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1291 
1292 	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
1293 }
1294 
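/*
 * Translate a ttm_mem_reg into a bus address for CPU mappings.  AGP and
 * linear VRAM are plain offsets from the aperture; on NV50 and later,
 * VRAM and tiled GART memory are mapped on demand through BAR1.
 */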
1295 static int
1296 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1297 {
1298 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1299 	struct nouveau_drm *drm = nouveau_bdev(bdev);
1300 	struct nouveau_mem *node = mem->mm_node;
1301 	struct drm_device *dev = drm->dev;
1302 	int ret;
1303 
1304 	mem->bus.addr = NULL;
1305 	mem->bus.offset = 0;
1306 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
1307 	mem->bus.base = 0;
1308 	mem->bus.is_iomem = false;
1309 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1310 		return -EINVAL;
1311 	switch (mem->mem_type) {
1312 	case TTM_PL_SYSTEM:
1313 		/* System memory */
1314 		return 0;
1315 	case TTM_PL_TT:
1316 #if __OS_HAS_AGP
1317 		if (drm->agp.stat == ENABLED) {
1318 			mem->bus.offset = mem->start << PAGE_SHIFT;
1319 			mem->bus.base = drm->agp.base;
1320 			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1321 		}
1322 #endif
1323 		if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
1324 			/* untiled */
1325 			break;
1326 		/* fallthrough, tiled memory */
1327 	case TTM_PL_VRAM:
1328 		mem->bus.offset = mem->start << PAGE_SHIFT;
1329 		mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
1330 		mem->bus.is_iomem = true;
1331 		if (nv_device(drm->device)->card_type >= NV_50) {
1332 			struct nouveau_bar *bar = nouveau_bar(drm->device);
1333 
1334 			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
1335 					&node->bar_vma);
1336 			if (ret)
1337 				return ret;
1338 
1339 			mem->bus.offset = node->bar_vma.offset;
1340 		}
1341 		break;
1342 	default:
1343 		return -EINVAL;
1344 	}
1345 	return 0;
1346 }
1347 
1348 static void
1349 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1350 {
1351 	struct nouveau_drm *drm = nouveau_bdev(bdev);
1352 	struct nouveau_bar *bar = nouveau_bar(drm->device);
1353 	struct nouveau_mem *node = mem->mm_node;
1354 
1355 	if (!node->bar_vma.node)
1356 		return;
1357 
1358 	bar->unmap(bar, &node->bar_vma);
1359 }
1360 
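/*
 * Called before a CPU fault is serviced: migrate tiled NV50+ buffers
 * sitting in system memory into GART, and restrict pre-NV50 VRAM
 * buffers to the CPU-mappable part of VRAM.
 */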
1361 static int
1362 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1363 {
1364 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1365 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1366 	struct nouveau_device *device = nv_device(drm->device);
1367 	u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
1368 	int ret;
1369 
1370 	/* as long as the bo isn't in vram, and isn't tiled, we've got
1371 	 * nothing to do here.
1372 	 */
1373 	if (bo->mem.mem_type != TTM_PL_VRAM) {
1374 		if (nv_device(drm->device)->card_type < NV_50 ||
1375 		    !nouveau_bo_tile_layout(nvbo))
1376 			return 0;
1377 
1378 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1379 			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1380 
1381 			ret = nouveau_bo_validate(nvbo, false, false);
1382 			if (ret)
1383 				return ret;
1384 		}
1385 		return 0;
1386 	}
1387 
1388 	/* make sure bo is in mappable vram */
1389 	if (nv_device(drm->device)->card_type >= NV_50 ||
1390 	    bo->mem.start + bo->mem.num_pages < mappable)
1391 		return 0;
1392 
1393 
1394 	nvbo->placement.fpfn = 0;
1395 	nvbo->placement.lpfn = mappable;
1396 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1397 	return nouveau_bo_validate(nvbo, false, false);
1398 }
1399 
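/*
 * Populate/unpopulate the TT backing pages: via AGP when enabled, the
 * bus_dma helpers on NetBSD, swiotlb when active, or the TTM page pool
 * with a per-page DMA mapping otherwise.
 */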
1400 static int
1401 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1402 {
1403 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1404 #if defined(__OS_HAS_AGP) || !defined(__NetBSD__)
1405 	struct nouveau_drm *drm;
1406 #endif
1407 #ifndef __NetBSD__
1408 	struct nouveau_device *device;
1409 	struct drm_device *dev;
1410 	unsigned i;
1411 	int r;
1412 #endif
1413 #ifndef __NetBSD__		/* XXX drm prime */
1414 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1415 #endif
1416 
1417 	if (ttm->state != tt_unpopulated)
1418 		return 0;
1419 
1420 #ifndef __NetBSD__		/* XXX drm prime */
1421 	if (slave && ttm->sg) {
1422 		/* make userspace faulting work */
1423 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1424 						 ttm_dma->dma_address, ttm->num_pages);
1425 		ttm->state = tt_unbound;
1426 		return 0;
1427 	}
1428 #endif
1429 
1430 #if defined(__OS_HAS_AGP) || !defined(__NetBSD__)
1431 	drm = nouveau_bdev(ttm->bdev);
1432 #endif
1433 #ifndef __NetBSD__
1434 	device = nv_device(drm->device);
1435 	dev = drm->dev;
1436 #endif
1437 
1438 #if __OS_HAS_AGP
1439 	if (drm->agp.stat == ENABLED) {
1440 		return ttm_agp_tt_populate(ttm);
1441 	}
1442 #endif
1443 
1444 #ifdef __NetBSD__
1445 	return ttm_bus_dma_populate(ttm_dma);
1446 #else
1447 #ifdef CONFIG_SWIOTLB
1448 	if (swiotlb_nr_tbl()) {
1449 		return ttm_dma_populate((void *)ttm, dev->dev);
1450 	}
1451 #endif
1452 
1453 	r = ttm_pool_populate(ttm);
1454 	if (r) {
1455 		return r;
1456 	}
1457 
1458 	for (i = 0; i < ttm->num_pages; i++) {
1459 		ttm_dma->dma_address[i] = nv_device_map_page(device,
1460 							     ttm->pages[i]);
1461 		if (!ttm_dma->dma_address[i]) {
1462 			while (--i) {
1463 				nv_device_unmap_page(device,
1464 						     ttm_dma->dma_address[i]);
1465 				ttm_dma->dma_address[i] = 0;
1466 			}
1467 			ttm_pool_unpopulate(ttm);
1468 			return -EFAULT;
1469 		}
1470 	}
1471 	return 0;
1472 #endif
1473 }
1474 
1475 static void
1476 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1477 {
1478 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1479 #if defined(__OS_HAS_AGP) || !defined(__NetBSD__)
1480 	struct nouveau_drm *drm;
1481 #endif
1482 #ifndef __NetBSD__
1483 	struct nouveau_device *device;
1484 	struct drm_device *dev;
1485 	unsigned i;
1486 #endif
1487 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1488 
1489 	if (slave)
1490 		return;
1491 
1492 #if defined(__OS_HAS_AGP) || !defined(__NetBSD__)
1493 	drm = nouveau_bdev(ttm->bdev);
1494 #endif
1495 #ifndef __NetBSD__
1496 	device = nv_device(drm->device);
1497 	dev = drm->dev;
1498 #endif
1499 
1500 #if __OS_HAS_AGP
1501 	if (drm->agp.stat == ENABLED) {
1502 		ttm_agp_tt_unpopulate(ttm);
1503 		return;
1504 	}
1505 #endif
1506 
1507 #ifdef __NetBSD__
1508 	ttm_bus_dma_unpopulate(ttm_dma);
1509 #else
1510 #ifdef CONFIG_SWIOTLB
1511 	if (swiotlb_nr_tbl()) {
1512 		ttm_dma_unpopulate((void *)ttm, dev->dev);
1513 		return;
1514 	}
1515 #endif
1516 
1517 	for (i = 0; i < ttm->num_pages; i++) {
1518 		if (ttm_dma->dma_address[i]) {
1519 			nv_device_unmap_page(device, ttm_dma->dma_address[i]);
1520 		}
1521 	}
1522 
1523 	ttm_pool_unpopulate(ttm);
1524 #endif
1525 }
1526 
1527 #ifdef __NetBSD__
1528 static void
1529 nouveau_ttm_tt_swapout(struct ttm_tt *ttm)
1530 {
1531 	struct ttm_dma_tt *ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm);
1532 
1533 	ttm_bus_dma_swapout(ttm_dma);
1534 }
1535 #endif
1536 
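/*
 * Attach a fence to the buffer as its TTM sync object, dropping the
 * reference to whatever fence was attached before.
 */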
1537 void
1538 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1539 {
1540 	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
1541 	struct nouveau_fence *old_fence = NULL;
1542 
1543 	spin_lock(&nvbo->bo.bdev->fence_lock);
1544 	old_fence = nvbo->bo.sync_obj;
1545 	nvbo->bo.sync_obj = new_fence;
1546 	spin_unlock(&nvbo->bo.bdev->fence_lock);
1547 
1548 	nouveau_fence_unref(&old_fence);
1549 }
1550 
1551 static void
1552 nouveau_bo_fence_unref(void **sync_obj)
1553 {
1554 	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
1555 }
1556 
1557 static void *
1558 nouveau_bo_fence_ref(void *sync_obj)
1559 {
1560 	return nouveau_fence_ref(sync_obj);
1561 }
1562 
1563 static bool
1564 nouveau_bo_fence_signalled(void *sync_obj)
1565 {
1566 	return nouveau_fence_done(sync_obj);
1567 }
1568 
1569 static int
1570 nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
1571 {
1572 	return nouveau_fence_wait(sync_obj, lazy, intr);
1573 }
1574 
1575 static int
1576 nouveau_bo_fence_flush(void *sync_obj)
1577 {
1578 	return 0;
1579 }
1580 
1581 #ifdef __NetBSD__
1582 static const struct uvm_pagerops nouveau_uvm_ops = {
1583 	.pgo_reference = &ttm_bo_uvm_reference,
1584 	.pgo_detach = &ttm_bo_uvm_detach,
1585 	.pgo_fault = &ttm_bo_uvm_fault,
1586 };
1587 #endif
1588 
1589 struct ttm_bo_driver nouveau_bo_driver = {
1590 	.ttm_tt_create = &nouveau_ttm_tt_create,
1591 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1592 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1593 #ifdef __NetBSD__
1594 	.ttm_tt_swapout = &nouveau_ttm_tt_swapout,
1595 	.ttm_uvm_ops = &nouveau_uvm_ops,
1596 #endif
1597 	.invalidate_caches = nouveau_bo_invalidate_caches,
1598 	.init_mem_type = nouveau_bo_init_mem_type,
1599 	.evict_flags = nouveau_bo_evict_flags,
1600 	.move_notify = nouveau_bo_move_ntfy,
1601 	.move = nouveau_bo_move,
1602 	.verify_access = nouveau_bo_verify_access,
1603 	.sync_obj_signaled = nouveau_bo_fence_signalled,
1604 	.sync_obj_wait = nouveau_bo_fence_wait,
1605 	.sync_obj_flush = nouveau_bo_fence_flush,
1606 	.sync_obj_unref = nouveau_bo_fence_unref,
1607 	.sync_obj_ref = nouveau_bo_fence_ref,
1608 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1609 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1610 	.io_mem_free = &nouveau_ttm_io_mem_free,
1611 };
1612 
1613 struct nouveau_vma *
1614 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1615 {
1616 	struct nouveau_vma *vma;
1617 	list_for_each_entry(vma, &nvbo->vma_list, head) {
1618 		if (vma->vm == vm)
1619 			return vma;
1620 	}
1621 
1622 	return NULL;
1623 }
1624 
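/*
 * Allocate GPU virtual address space for the buffer in the given VM and
 * map it right away if its backing store is already resident.
 */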
1625 int
1626 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1627 		   struct nouveau_vma *vma)
1628 {
1629 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1630 	int ret;
1631 
1632 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1633 			     NV_MEM_ACCESS_RW, vma);
1634 	if (ret)
1635 		return ret;
1636 
1637 	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
1638 	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
1639 	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
1640 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1641 
1642 	list_add_tail(&vma->head, &nvbo->vma_list);
1643 	vma->refcount = 1;
1644 	return 0;
1645 }
1646 
1647 void
1648 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1649 {
1650 	if (vma->node) {
1651 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1652 			nouveau_vm_unmap(vma);
1653 		nouveau_vm_put(vma);
1654 		list_del(&vma->head);
1655 	}
1656 }
1657