/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <stdio.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"

#include "r300_screen_buffer.h"

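/* Upload 'count' indices of 'index_size' bytes each, starting at index
 * '*start' of the user pointer 'ptr', into a GPU-visible upload buffer.
 * Returns the upload buffer in '*index_buffer' and rewrites '*start' so it
 * indexes into that buffer. */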
void r300_upload_index_buffer(struct r300_context *r300,
                              struct pipe_resource **index_buffer,
                              unsigned index_size, unsigned *start,
                              unsigned count, const uint8_t *ptr)
{
    unsigned index_offset;

    *index_buffer = NULL;

    u_upload_data(r300->uploader,
                  0, count * index_size, 4,
                  ptr + (*start * index_size),
                  &index_offset,
                  index_buffer);

    *start = index_offset / index_size;
}

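/* Destroy a buffer or texture resource: free any CPU-side storage, drop the
 * winsys buffer reference, and release the screen's CMASK ownership if this
 * texture holds it. */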
void r300_resource_destroy(struct pipe_screen *screen,
                           struct pipe_resource *buf)
{
   if (buf->target == PIPE_BUFFER) {
      struct r300_resource *rbuf = r300_resource(buf);

      align_free(rbuf->malloced_buffer);

      if (rbuf->buf)
         pb_reference(&rbuf->buf, NULL);

      FREE(rbuf);
   } else {
      struct r300_screen *rscreen = r300_screen(screen);
      struct r300_resource* tex = (struct r300_resource*)buf;

      if (tex->tex.cmask_dwords) {
          mtx_lock(&rscreen->cmask_mutex);
          if (buf == rscreen->cmask_resource) {
              rscreen->cmask_resource = NULL;
          }
          mtx_unlock(&rscreen->cmask_mutex);
      }
      pb_reference(&tex->buf, NULL);
      FREE(tex);
   }
}

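/* Map a buffer for CPU access. Malloc'd (CPU-only) buffers are returned
 * directly; winsys buffers are mapped through the winsys, reallocating the
 * storage first when a whole-resource discard would otherwise stall. */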
void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

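    /* CPU-only buffers need no winsys mapping. */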
    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

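    /* On a whole-resource discard, replace the storage instead of stalling
     * if the current buffer is still busy on the GPU. */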
    if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
        assert(usage & PIPE_MAP_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(&r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(r300->rws, rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.width0,
                                               R300_BUFFER_ALIGNMENT,
                                               rbuf->domain,
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;

                /* We changed the buffer, now we need to bind it where the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer.resource == &rbuf->b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* Buffers are never written by the GPU, therefore mapping for read can
     * be unsynchronized. */
    if (!(usage & PIPE_MAP_WRITE)) {
       usage |= PIPE_MAP_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rws, rbuf->buf, &r300->cs, usage);

    if (!map) {
        slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}

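/* Nothing to flush for buffer mappings; just release the transfer object. */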
void r300_buffer_transfer_unmap( struct pipe_context *pipe,
                                 struct pipe_transfer *transfer )
{
    struct r300_context *r300 = r300_context(pipe);

    slab_free(&r300->pool_transfers, transfer);
}

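/* Create a buffer resource. Constant buffers and SWTCL vertex/index data are
 * kept in malloc'd RAM; everything else gets a winsys buffer in GTT. */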
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf;

    rbuf = MALLOC_STRUCT(r300_resource);
    if (!rbuf)
        return NULL;

    rbuf->b = *templ;
    pipe_reference_init(&rbuf->b.reference, 1);
    rbuf->b.screen = screen;
    rbuf->domain = RADEON_DOMAIN_GTT;
    rbuf->buf = NULL;
    rbuf->malloced_buffer = NULL;

    /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
     * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
     * we can distinguish them from user-created buffers.
     */
    if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
        (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
        rbuf->malloced_buffer = align_malloc(templ->width0, 64);
        if (!rbuf->malloced_buffer) {
            FREE(rbuf);
            return NULL;
        }
        return &rbuf->b;
    }

    rbuf->buf =
        r300screen->rws->buffer_create(r300screen->rws, rbuf->b.width0,
                                       R300_BUFFER_ALIGNMENT,
                                       rbuf->domain,
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING);
    if (!rbuf->buf) {
        FREE(rbuf);
        return NULL;
    }
    return &rbuf->b;
}