xref: /linux/drivers/gpu/drm/nouveau/nouveau_prime.c (revision a9bf3efc)
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <linux/dma-buf.h>
#include <drm/ttm/ttm_tt.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"

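/*
 * Export helper: build a scatter/gather table covering the TTM pages that
 * back this GEM object, for use by the generic PRIME export machinery.
 */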
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
				     nvbo->bo.ttm->num_pages);
}

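/*
 * Import helper: wrap an imported dma-buf's scatter/gather table in a new
 * GART-domain nouveau_bo and hand back its embedded GEM object. The BO
 * shares the exporter's reservation object, so allocation and init are
 * done with that reservation locked.
 */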
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
							 struct dma_buf_attachment *attach,
							 struct sg_table *sg)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_gem_object *obj;
	struct nouveau_bo *nvbo;
	struct dma_resv *robj = attach->dmabuf->resv;
	u64 size = attach->dmabuf->size;
	int align = 0;
	int ret;

	dma_resv_lock(robj, NULL);
	nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
				NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
	if (IS_ERR(nvbo)) {
		obj = ERR_CAST(nvbo);
		goto unlock;
	}

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		obj = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
			      sg, robj);
	if (ret) {
		obj = ERR_PTR(ret);
		goto unlock;
	}

	obj = &nvbo->bo.base;

unlock:
	dma_resv_unlock(robj);
	return obj;
}

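/*
 * drm_gem_object_funcs.pin callback: pin the buffer into GART while it is
 * exported. The caller holds the object's reservation, hence the _locked
 * variant below.
 */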
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
	if (ret)
		ret = -EINVAL;

	return ret;
}

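/*
 * drm_gem_object_funcs.unpin callback: drop the pin taken in
 * nouveau_gem_prime_pin(), again with the reservation held by the caller.
 */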
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	nouveau_bo_unpin_locked(nvbo);
}

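/*
 * Export entry point: buffers marked no_share must not leave this device,
 * so exporting them is refused; everything else goes through the common
 * GEM PRIME export path.
 */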
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
					 int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gobj);

	if (nvbo->no_share)
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(gobj, flags);
}
117