/*	$NetBSD: nouveau_abi16.c,v 1.2 2014/08/06 13:54:20 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_abi16.c,v 1.2 2014/08/06 13:54:20 riastradh Exp $");

#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/instmem.h>
#include <engine/graph.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"

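/*
 * Return the client's abi16 state, allocating it on first use, with
 * cli->mutex held.  On allocation or device-object creation failure the
 * mutex is dropped and NULL is returned; on success the caller must
 * balance this call with nouveau_abi16_put().
 */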
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			INIT_LIST_HEAD(&abi16->channels);
			abi16->client = nv_object(cli);

			/* allocate device object targeting client's default
			 * device (i.e. the one that belongs to the fd it
			 * opened)
			 */
			if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
					       NVDRM_DEVICE, 0x0080,
					       &(struct nv_device_class) {
						.device = ~0ULL,
					       },
					       sizeof(struct nv_device_class),
					       &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}

		mutex_unlock(&cli->mutex);
	}
	return cli->abi16;
}

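/*
 * Drop the cli->mutex taken by nouveau_abi16_get() and pass the given
 * return code through, so callers can "return nouveau_abi16_put(abi16, ret)".
 */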
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)abi16->client;
	mutex_unlock(&cli->mutex);
	return ret;
}

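/*
 * Map the board's card type to the software object class exposed to
 * userspace; returns 0x0000 for unknown card types.
 */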
u16
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (nv_device(drm->device)->card_type) {
	case NV_04:
		return 0x006e;
	case NV_10:
	case NV_11:
	case NV_20:
	case NV_30:
	case NV_40:
		return 0x016e;
	case NV_50:
		return 0x506e;
	case NV_C0:
	case NV_D0:
	case NV_E0:
	case GM100:
		return 0x906e;
	}

	return 0x0000;
}

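/*
 * Release a notifier: return its slice of the channel's notifier heap,
 * unlink it from the channel's list and free it.
 */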
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nouveau_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

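/*
 * Tear down an abi16 channel: idle the hardware channel, release all
 * notifiers and the notifier buffer object, destroy the channel object
 * itself, and return its handle bit to the allocator.
 */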
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* wait for all activity to stop before releasing notify object, which
	 * may still be in use */
	if (chan->chan && chan->ntfy)
		nouveau_channel_idle(chan->chan);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
	}

	if (chan->heap.block_size)
		nouveau_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff));
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

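/*
 * Destroy all abi16 state for a client: tear down every remaining channel,
 * delete the device object created in nouveau_abi16_get(), and free the
 * abi16 structure.
 */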
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)abi16->client;
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

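/*
 * DRM_NOUVEAU_GETPARAM ioctl: report chipset, PCI IDs, bus type, memory
 * sizes, PTIMER time and feature flags to userspace.
 */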
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_timer *ptimer = nouveau_timer(device);
	struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
	struct drm_nouveau_getparam *getparam = data;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (nv_device_is_pci(device))
			getparam->value = dev->pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (nv_device_is_pci(device))
			getparam->value = dev->pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (!nv_device_is_pci(device))
			getparam->value = 3;
		else
		if (drm_pci_device_is_agp(dev))
			getparam->value = 0;
		else
		if (!pci_is_pcie(dev->pdev))
			getparam->value = 1;
		else
			getparam->value = 2;
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = ptimer->read(ptimer);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = graph->units ? graph->units(graph) : 0;
		break;
	default:
		nv_debug(device, "unknown parameter %"PRId64"\n",
		    getparam->param);
		return -EINVAL;
	}

	return 0;
}

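/*
 * DRM_NOUVEAU_SETPARAM ioctl: no parameters may be set through this
 * interface any more, so always fail with -EINVAL.
 */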
int
nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
{
	return -EINVAL;
}

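/*
 * DRM_NOUVEAU_CHANNEL_ALLOC ioctl: pick a free channel handle, create the
 * channel object (with DMA and fence state), allocate and map the notifier
 * buffer, and report push buffer domains and subchannel info to userspace.
 */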
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;
	struct nouveau_client *client;
	struct nouveau_device *device;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	client = nv_client(abi16->client);
	device = nv_device(abi16->device);

	/* hack to allow channel engine type specification on kepler */
	if (device->card_type >= NV_E0) {
		if (init->fb_ctxdma_handle != ~0)
			init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
		else
			init->fb_ctxdma_handle = init->tt_ctxdma_handle;

		/* allow flips to be executed if this is a graphics channel */
		init->tt_ctxdma_handle = 0;
		if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
			init->tt_ctxdma_handle = 1;
	}

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	init->channel = __ffs64(~abi16->handles);
	if (~abi16->handles == 0)
		return nouveau_abi16_put(abi16, -ENOSPC);

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);
	abi16->handles |= (1ULL << init->channel);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
				  init->channel, init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	if (device->card_type >= NV_50)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->card_type < NV_10) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = NvSw;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
	if (ret)
		goto done;

	if (device->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
					&chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

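/*
 * DRM_NOUVEAU_CHANNEL_FREE ioctl: look up the caller's channel by handle
 * and tear it down; -ENOENT if no such channel exists for this client.
 */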
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;
	int ret = -ENOENT;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
			nouveau_abi16_chan_fini(abi16, chan);
			return nouveau_abi16_put(abi16, 0);
		}
	}

	return nouveau_abi16_put(abi16, ret);
}

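/*
 * DRM_NOUVEAU_GROBJ_ALLOC ioctl: create a graphics object of the requested
 * class on one of the caller's channels, translating the legacy 0x506e
 * software class to the chipset-appropriate one.
 */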
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_object *object;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* compatibility with userspace that assumes 506e for all chipsets */
	if (init->class == 0x506e) {
		init->class = nouveau_abi16_swclass(drm);
		if (init->class == 0x906e)
			return nouveau_abi16_put(abi16, 0);
	}

	ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
				  init->handle, init->class, NULL, 0, &object);
	return nouveau_abi16_put(abi16, ret);
}

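/*
 * DRM_NOUVEAU_NOTIFIEROBJ_ALLOC ioctl: carve a slice out of the channel's
 * notifier buffer and wrap it in a DMA object (class 0x003d) for pre-Fermi
 * userspace; the offset within the buffer is returned to the caller.
 */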
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan = NULL, *temp;
	struct nouveau_abi16_ntfy *ntfy;
	struct nouveau_object *object;
	static const struct nv_dma_class zero_args;
	struct nv_dma_class args = zero_args;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* completely unnecessary for these chipsets... */
	if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
		return nouveau_abi16_put(abi16, -EINVAL);

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
			chan = temp;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);
	ntfy->handle = info->handle;

	ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
			      &ntfy->node);
	if (ret)
		goto done;

	args.start = ntfy->node->offset;
	args.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->card_type >= NV_50) {
		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
		args.start += chan->ntfy_vma.offset;
		args.limit += chan->ntfy_vma.offset;
	} else
	if (drm->agp.stat == ENABLED) {
		args.flags  = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
		args.start += drm->agp.base + chan->ntfy->bo.offset;
		args.limit += drm->agp.base + chan->ntfy->bo.offset;
	} else {
		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
		args.start += chan->ntfy->bo.offset;
		args.limit += chan->ntfy->bo.offset;
	}

	ret = nouveau_object_new(abi16->client, chan->chan->handle,
				 ntfy->handle, 0x003d, &args,
				 sizeof(args), &object);
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;

done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

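/*
 * DRM_NOUVEAU_GPUOBJ_FREE ioctl: idle the channel, delete the named object
 * and, if it was a notifier, release its slice of the notifier heap.
 */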
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan = NULL, *temp;
	struct nouveau_abi16_ntfy *ntfy;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
			chan = temp;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
	if (ret)
		return nouveau_abi16_put(abi16, ret);

	/* cleanup extra state if this object was a notifier */
	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->handle == fini->handle) {
			nouveau_mm_free(&chan->heap, &ntfy->node);
			list_del(&ntfy->head);
			break;
		}
	}

	return nouveau_abi16_put(abi16, 0);
}