/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
22
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <errno.h>
26 #include <assert.h>
27
28 #include "nouveau_private.h"
29 #include "nouveau_dma.h"
30
31 #define PB_BUFMGR_DWORDS (4096 / 2)
32 #define PB_MIN_USER_DWORDS 2048
33
34 static uint32_t
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo * pbbo,struct drm_nouveau_gem_pushbuf_reloc * r,int mm_enabled)35 nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
36 struct drm_nouveau_gem_pushbuf_reloc *r,
37 int mm_enabled)
38 {
39 uint32_t push = 0;
40 const unsigned is_vram = mm_enabled ? NOUVEAU_GEM_DOMAIN_VRAM :
41 NOUVEAU_BO_VRAM;
42
43 if (r->flags & NOUVEAU_GEM_RELOC_LOW)
44 push = (pbbo->presumed_offset + r->data);
45 else
46 if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
47 push = (pbbo->presumed_offset + r->data) >> 32;
48 else
49 push = r->data;
50
51 if (r->flags & NOUVEAU_GEM_RELOC_OR) {
52 if (pbbo->presumed_domain & is_vram)
53 push |= r->vor;
54 else
55 push |= r->tor;
56 }
57
58 return push;
59 }
60
/* Record a relocation for *ptr against bo, and write a provisional value
 * based on the buffer's presumed placement.  Returns 0 on success,
 * -ENOMEM if the reloc/buffer tables are full, -EINVAL on a write
 * reloc against a user buffer.
 */
int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
			   struct nouveau_bo *bo, uint32_t data, uint32_t flags,
			   uint32_t vor, uint32_t tor)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct drm_nouveau_gem_pushbuf_reloc *r;
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	uint32_t domains = 0;

	if (nvpb->nr_relocs >= NOUVEAU_PUSHBUF_MAX_RELOCS)
		return -ENOMEM;

	/* User-provided buffers may not be written by the GPU. */
	if (nouveau_bo(bo)->user && (flags & NOUVEAU_BO_WR)) {
		fprintf(stderr, "write to user buffer!!\n");
		return -EINVAL;
	}

	/* Get (or add) the buffer's entry on this pushbuf's validate list. */
	pbbo = nouveau_bo_emit_buffer(chan, bo);
	if (!pbbo)
		return -ENOMEM;

	/* Narrow the buffer's allowed placement by what this reloc accepts. */
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	pbbo->valid_domains &= domains;
	assert(pbbo->valid_domains);

	/* Legacy (no kernel mm) path: tie the bo to this pushbuf's fence so
	 * we know when the GPU is done with it; track writes separately.
	 */
	if (!nvdev->mm_enabled) {
		struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

		nouveau_fence_ref(nvpb->fence, &nvbo->fence);
		if (flags & NOUVEAU_BO_WR)
			nouveau_fence_ref(nvpb->fence, &nvbo->wr_fence);
	}

	/* Caller must specify at least one of RD/WR. */
	assert(flags & NOUVEAU_BO_RDWR);
	if (flags & NOUVEAU_BO_RD) {
		pbbo->read_domains |= domains;
	}
	if (flags & NOUVEAU_BO_WR) {
		pbbo->write_domains |= domains;
		nouveau_bo(bo)->write_marker = 1;
	}

	/* Append the reloc entry; indices are relative to the pushbuf's
	 * buffer table and dword stream respectively.
	 */
	r = nvpb->relocs + nvpb->nr_relocs++;
	r->bo_index = pbbo - nvpb->buffers;
	r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
	r->flags = 0;
	if (flags & NOUVEAU_BO_LOW)
		r->flags |= NOUVEAU_GEM_RELOC_LOW;
	if (flags & NOUVEAU_BO_HIGH)
		r->flags |= NOUVEAU_GEM_RELOC_HIGH;
	if (flags & NOUVEAU_BO_OR)
		r->flags |= NOUVEAU_GEM_RELOC_OR;
	r->data = data;
	r->vor = vor;
	r->tor = tor;

	/* Patch in a provisional value now; it gets corrected at flush time
	 * if the buffer did not end up where we presumed.
	 */
	*(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
		nouveau_pushbuf_calc_reloc(pbbo, r, nvdev->mm_enabled);
	return 0;
}
126
127 static int
nouveau_pushbuf_space(struct nouveau_channel * chan,unsigned min)128 nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
129 {
130 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
131 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
132
133 if (nvpb->pushbuf) {
134 free(nvpb->pushbuf);
135 nvpb->pushbuf = NULL;
136 }
137
138 nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
139 nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
140
141 nvpb->base.channel = chan;
142 nvpb->base.remaining = nvpb->size;
143 nvpb->base.cur = nvpb->pushbuf;
144
145 if (!nouveau_device(chan->device)->mm_enabled) {
146 nouveau_fence_ref(NULL, &nvpb->fence);
147 nouveau_fence_new(chan, &nvpb->fence);
148 }
149
150 return 0;
151 }
152
153 int
nouveau_pushbuf_init(struct nouveau_channel * chan)154 nouveau_pushbuf_init(struct nouveau_channel *chan)
155 {
156 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
157 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
158
159 nouveau_pushbuf_space(chan, 0);
160
161 nvpb->buffers = calloc(NOUVEAU_PUSHBUF_MAX_BUFFERS,
162 sizeof(struct drm_nouveau_gem_pushbuf_bo));
163 nvpb->relocs = calloc(NOUVEAU_PUSHBUF_MAX_RELOCS,
164 sizeof(struct drm_nouveau_gem_pushbuf_reloc));
165
166 chan->pushbuf = &nvpb->base;
167 return 0;
168 }
169
/* Flush path for kernels without memory management: validate each buffer
 * in userspace, fix up any relocations whose presumed placement was
 * wrong, then copy the client pushbuf into the channel's DMA ring.
 * Returns 0 on success or the error from buffer validation.
 */
static int
nouveau_pushbuf_flush_nomm(struct nouveau_channel_priv *nvchan)
{
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct drm_nouveau_gem_pushbuf_bo *bo = nvpb->buffers;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = nvpb->relocs;
	unsigned b, r;
	int ret;

	for (b = 0; b < nvpb->nr_buffers; b++) {
		/* user_priv round-trips the bo pointer through the ABI's
		 * 64-bit field.
		 */
		struct nouveau_bo_priv *nvbo =
			(void *)(unsigned long)bo[b].user_priv;
		uint32_t flags = 0;

		if (bo[b].valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			flags |= NOUVEAU_BO_VRAM;
		if (bo[b].valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			flags |= NOUVEAU_BO_GART;

		ret = nouveau_bo_validate_nomm(nvbo, flags);
		if (ret)
			return ret;

		/* NOTE: the "1 ||" deliberately forces every buffer onto the
		 * repatch path below, not just those that moved.  Left as-is;
		 * removing it would change which relocs get rewritten.
		 */
		if (1 || bo[b].presumed_domain != nvbo->domain ||
		    bo[b].presumed_offset != nvbo->offset) {
			bo[b].presumed_ok = 0;
			bo[b].presumed_domain = nvbo->domain;
			bo[b].presumed_offset = nvbo->offset;
		}
	}

	/* Rewrite relocations against buffers whose presumed placement was
	 * invalidated above (mm_enabled=0: use legacy domain bits).
	 */
	for (r = 0; r < nvpb->nr_relocs; r++, reloc++) {
		uint32_t push;

		if (bo[reloc->bo_index].presumed_ok)
			continue;

		push = nouveau_pushbuf_calc_reloc(&bo[reloc->bo_index], reloc, 0);
		nvpb->pushbuf[reloc->reloc_index] = push;
	}

	/* Copy the patched pushbuf into the channel DMA ring and emit the
	 * fence that buffers on this pushbuf were tied to.
	 */
	nouveau_dma_space(&nvchan->base, nvpb->size);
	nouveau_dma_outp (&nvchan->base, nvpb->pushbuf, nvpb->size);
	nouveau_fence_emit(nvpb->fence);

	return 0;
}
217
218 int
nouveau_pushbuf_flush(struct nouveau_channel * chan,unsigned min)219 nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
220 {
221 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
222 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
223 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
224 struct drm_nouveau_gem_pushbuf req;
225 unsigned i;
226 int ret;
227
228 if (nvpb->base.remaining == nvpb->size)
229 return 0;
230 nvpb->size -= nvpb->base.remaining;
231
232 if (nvdev->mm_enabled) {
233 req.channel = chan->id;
234 req.nr_dwords = nvpb->size;
235 req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
236 req.nr_buffers = nvpb->nr_buffers;
237 req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
238 req.nr_relocs = nvpb->nr_relocs;
239 req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
240 ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
241 &req, sizeof(req));
242 } else {
243 nouveau_fence_flush(chan);
244 ret = nouveau_pushbuf_flush_nomm(nvchan);
245 }
246 assert(ret == 0);
247
248
249 /* Update presumed offset/domain for any buffers that moved.
250 * Dereference all buffers on validate list
251 */
252 for (i = 0; i < nvpb->nr_buffers; i++) {
253 struct drm_nouveau_gem_pushbuf_bo *pbbo = &nvpb->buffers[i];
254 struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
255
256 if (pbbo->presumed_ok == 0) {
257 nouveau_bo(bo)->domain = pbbo->presumed_domain;
258 nouveau_bo(bo)->offset = pbbo->presumed_offset;
259 }
260
261 nouveau_bo(bo)->pending = NULL;
262 nouveau_bo_ref(NULL, &bo);
263 }
264 nvpb->nr_buffers = 0;
265 nvpb->nr_relocs = 0;
266
267 /* Allocate space for next push buffer */
268 ret = nouveau_pushbuf_space(chan, min);
269 assert(!ret);
270
271 if (chan->flush_notify)
272 chan->flush_notify(chan);
273
274 return 0;
275 }
276
277