/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32s. The first CARD32 contains
   the handle, the second one is a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0  instance_addr >> 4
   17:16  engine (here uses 1 = graphics)
   28:24  channel id (here uses 0)
   31     valid (use 1)

   NV40:

   15: 0  instance_addr >> 4   (maybe 19-0)
   21:20  engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
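/* A rough sketch of the key computation below, assuming a 4 KiB RAMHT
 * (512 8-byte entries, i.e. ramht_bits == 9): the handle is XOR-folded
 * ramht_bits bits at a time, the channel id is XORed in on pre-NV50
 * chips, and the result is multiplied by 8 to give a byte offset into
 * RAMHT.
 */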
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}
	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	DRM_DEBUG("hash=0x%08x\n", hash);
	return hash;
}

static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}

static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	struct nouveau_gpuobj *gpuobj = ref->gpuobj;
	uint32_t ctx, co, ho;

	if (!ramht) {
		DRM_ERROR("No hash table!\n");
		return -EINVAL;
	}

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (ref->channel   << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (ref->channel   << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		ctx = (ref->instance  >> 4) |
		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	}

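	/* Probe linearly from the hashed slot, wrapping at the end of
	 * RAMHT, until a free entry is found or we arrive back at the
	 * starting offset.
	 */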
	co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				  ref->channel, co, ref->handle, ctx);
			INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
			INSTANCE_WR(ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			return 0;
		}
		DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
			  ref->channel, co, INSTANCE_RD(ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);

	DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
	return -ENOMEM;
}

static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		DRM_ERROR("No hash table!\n");
		return;
	}

	co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
			DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				  ref->channel, co, ref->handle,
				  INSTANCE_RD(ramht, (co + 4)/4));
			INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
			INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			return;
		}

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);

	DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
		  ref->channel, ref->handle);
}

int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   int size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->Engine;
	struct nouveau_gpuobj *gpuobj;
	struct mem_block *pramin = NULL;
	int ret;

	DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
		  chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
	if (!gpuobj)
		return -ENOMEM;
	DRM_DEBUG("gpuobj %p\n", gpuobj);
	gpuobj->flags = flags;
	gpuobj->im_channel = chan ? chan->id : -1;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between global instmem heap, and per-channel private
	 * instmem heap.  On <NV50 allow requests for private instmem
	 * to be satisfied from global heap if no per-channel area
	 * available.
	 */
	if (chan) {
		if (chan->ramin_heap) {
			DRM_DEBUG("private heap\n");
			pramin = chan->ramin_heap;
		} else
		if (dev_priv->card_type < NV_50) {
			DRM_DEBUG("global heap fallback\n");
			pramin = dev_priv->ramin_heap;
		}
	} else {
		DRM_DEBUG("global heap\n");
		pramin = dev_priv->ramin_heap;
	}

	if (!pramin) {
		DRM_ERROR("No PRAMIN heap!\n");
		return -EINVAL;
	}

	if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
						    drm_order(align),
						    (struct drm_file *)-2, 0);
	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}
	gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;

	if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			INSTANCE_WR(gpuobj, i/4, 0);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("\n");

	if (dev_priv->card_type < NV_50) {
		if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
						   ~0, dev_priv->ramht_size,
						   NVOBJ_FLAG_ZERO_ALLOC |
						   NVOBJ_FLAG_ALLOW_NO_REFS,
						   &dev_priv->ramht, NULL)))
			return ret;
	}

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	DRM_DEBUG("\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
			  gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->Engine;
	struct nouveau_gpuobj *gpuobj;

	DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			drm_free(gpuobj->im_backing,
				 sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
		else
			engine->instmem.clear(dev, gpuobj);
	}

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
				 DRM_MEM_DRIVER);
		else
			nouveau_mem_free_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
	return 0;
}

static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	if (chan && gpuobj->im_channel != chan->id) {
		DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
			  gpuobj->im_channel, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (gpuobj->im_channel < 0) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			DRM_ERROR("AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing->start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
		*inst = cpramin->im_backing->start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}

	return -EINVAL;
}

int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
		  chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
	if (ret)
		return ret;

	ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
	if (!ref)
		return -ENOMEM;
	ref->gpuobj   = gpuobj;
	ref->channel  = chan ? chan->id : -1;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}

int
nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	DRM_DEBUG("ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
	return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, int size, int align, uint32_t flags,
		       struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
		return ret;

	if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}

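/* nouveau_gpuobj_new_fake() wraps an already-reserved range of PRAMIN (and,
 * optionally, its VRAM backing) in a gpuobj without going through the
 * allocator; nouveau_gpuobj_init() uses it for the global RAMHT, for example.
 */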
int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		  p_offset, b_offset, size, flags);

	gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
	if (!gpuobj)
		return -ENOMEM;
	DRM_DEBUG("gpuobj %p\n", gpuobj);
	gpuobj->im_channel = -1;
	gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
					       DRM_MEM_DRIVER);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size  = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
						DRM_MEM_DRIVER);
		if (!gpuobj->im_backing) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_backing->start = b_offset;
		gpuobj->im_backing->size  = size;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC)) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			INSTANCE_WR(gpuobj, i/4, 0);
	}

	if (pref) {
		if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}


static int
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia repeats the first pte here, while
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
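/* For illustration, nouveau_gpuobj_channel_init() below uses this to build a
 * read/write ctxdma spanning all of VRAM, roughly:
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     dev_priv->fb_available_size,
 *				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
 *				     &vram);
 */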
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;
	uint32_t is_scatter_gather = 0;

	/* Total number of pages covered by the request.
	 */
	const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		  chan->id, class, offset, size);
	DRM_DEBUG("access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	case NV_DMA_TARGET_PCI_NONLINEAR:
		/* assume the "offset" is a virtual memory address */
		is_scatter_gather = 1;
		/* put back the right value */
		target = NV_DMA_TARGET_PCI;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 is_scatter_gather ?
				 ((page_count << 2) + 12) :
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		DRM_ERROR("Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		adjust = offset & 0x00000fff;
		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);

		if (!is_scatter_gather) {
			frame = offset & ~0x00000fff;

			INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
						 (adjust << 20) |
						 (access << 14) |
						 (target << 16) |
						 class));
			INSTANCE_WR(*gpuobj, 1, size - 1);
			INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
			INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
		} else {
			/* Initial page entry in the scatter-gather area that
			 * corresponds to the base offset
			 */
			unsigned int idx = offset / PAGE_SIZE;

			uint32_t instance_offset;
			unsigned int i;

			if ((idx + page_count) > dev->sg->pages) {
				DRM_ERROR("Requested page range exceeds "
					  "allocated scatter-gather range!");
				return -E2BIG;
			}

			DRM_DEBUG("Creating PCI DMA object using virtual zone "
				  "starting at %#llx, size %d\n",
				  offset, (uint32_t)size);
			INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
						 (adjust << 20) |
						 (access << 14) |
						 (target << 16) |
						 class));
			INSTANCE_WR(*gpuobj, 1, (uint32_t)size - 1);

			/* write starting at the third dword */
			instance_offset = 2;

			/* for each page, get its bus address, fill in the
			 * page table entry, and advance
			 */
			for (i = 0; i < page_count; i++) {
				if (dev->sg->busaddr[idx] == 0) {
					dev->sg->busaddr[idx] =
						pci_map_page(dev->pdev,
							     dev->sg->pagelist[idx],
							     0,
							     PAGE_SIZE,
							     DMA_BIDIRECTIONAL);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
					/* Not 100% sure this is the right kdev in all cases. */
					if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) {
#else
					if (dma_mapping_error(dev->sg->busaddr[idx])) {
#endif
						return -ENOMEM;
					}
				}

				frame = (uint32_t) dev->sg->busaddr[idx];
				INSTANCE_WR(*gpuobj, instance_offset,
					    frame | pte_flags);

				idx++;
				instance_offset++;
			}
		}
	} else {
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		INSTANCE_WR(*gpuobj, 0, flags0 | class);
		INSTANCE_WR(*gpuobj, 1, offset + size - 1);
		INSTANCE_WR(*gpuobj, 2, offset);
		INSTANCE_WR(*gpuobj, 5, flags5);
	}

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}

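/* Build (or reuse) a ctxdma covering the GART aperture: AGP, and SGDMA on
 * NV50, get a fresh DMA object; pre-NV50 SGDMA reuses the shared sg_ctxdma
 * and returns the caller's offset into it via o_ret.
 */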
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset, size, access,
					     NV_DMA_TARGET_AGP, gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			DRM_ERROR("obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class  (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier  (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
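/* Userspace reaches this path through the GROBJ_ALLOC ioctl; see
 * nouveau_ioctl_grobj_alloc() at the end of this file, which roughly does:
 *
 *	ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
 */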
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		DRM_ERROR("Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		INSTANCE_WR(*gpuobj, 0, class);
		INSTANCE_WR(*gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			INSTANCE_WR(*gpuobj, 0, 0x00001030);
			INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				INSTANCE_WR(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				INSTANCE_WR(*gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
#else
				INSTANCE_WR(*gpuobj, 0, class);
#endif
			}
		}
	}

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	int size, base, ret;

	DRM_DEBUG("ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
		/* PGRAPH context */
		size += 0x70000;
	}

	DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
		  chan->id, size, base);
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = nouveau_mem_init_heap(&chan->ramin_heap,
				    pramin->im_pramin->start + base, size);
	if (ret) {
		DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Reserve a block of PRAMIN for the channel
	 *XXX: maybe on <NV50 too at some point
	 */
	if (0 || dev_priv->card_type == NV_50) {
		ret = nouveau_gpuobj_channel_init_pramin(chan);
		if (ret)
			return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Point offset 0-512MiB at shared PCIEGART table
	 *  - Point offset 512-1024MiB at shared VRAM table
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset;

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;
		if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
						   0, &chan->vm_pd, NULL)))
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
			INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						  dev_priv->gart_info.sg_ctxdma,
						  &chan->vm_gart_pt)))
			return ret;
		INSTANCE_WR(chan->vm_pd, (0+0)/4,
			    chan->vm_gart_pt->instance | 0x03);
		INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);

		if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						  dev_priv->vm_vram_pt,
						  &chan->vm_vram_pt)))
			return ret;
		INSTANCE_WR(chan->vm_pd, (8+0)/4,
			    chan->vm_vram_pt->instance | 0x61);
		INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, 0x100000000ULL,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else
	if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					  0, dev_priv->fb_available_size,
					  NV_DMA_ACCESS_RW,
					  NV_DMA_TARGET_VIDMEM, &vram))) {
		DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
		return ret;
	}

	if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
		DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else
	if (dev_priv->pci_heap) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev->sg->pages * PAGE_SIZE,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_PCI_NONLINEAR, &tt);
	} else {
		DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;

	DRM_DEBUG("ch%d\n", chan->id);

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt);

	if (chan->ramin_heap)
		nouveau_mem_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct nouveau_channel *chan;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_gpuobj *gr = NULL;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	//FIXME: check args, only allow trusted objects to be created

	if (init->handle == ~0)
		return -EINVAL;

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
	if (ret) {
		DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
			  ret, init->channel, init->handle);
		return ret;
	}

	if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
		DRM_ERROR("Error referencing gr object: %d (%d/0x%08x)\n",
			  ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}