1 /*
2  * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3  * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4  * Copyright 2018 Advanced Micro Devices, Inc.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * on the rights to use, copy, modify, merge, publish, distribute, sub
11  * license, and/or sell copies of the Software, and to permit persons to whom
12  * the Software is furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the next
15  * paragraph) shall be included in all copies or substantial portions of the
16  * Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE. */
25 
26 #ifndef RADEON_WINSYS_H
27 #define RADEON_WINSYS_H
28 
29 /* The public winsys interface header for the radeon driver. */
30 
31 /* Whether the next IB can start immediately and not wait for draws and
32  * dispatches from the current IB to finish. */
33 #define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)
34 
35 #define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW                                                   \
36    (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
37 
38 #include "amd/common/ac_gpu_info.h"
39 #include "amd/common/ac_surface.h"
40 #include "pipebuffer/pb_buffer.h"
41 
42 /* Tiling flags. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0,  /* not tiled */
   RADEON_LAYOUT_TILED,       /* tiled; the exact scheme is chip-specific */
   RADEON_LAYOUT_SQUARETILED, /* NOTE(review): presumably square micro-tiles — verify in winsys impls */

   RADEON_LAYOUT_UNKNOWN      /* the layout could not be determined */
};
51 
/* Memory domains (placements) a buffer object can live in. */
enum radeon_bo_domain
{ /* bitfield */
  RADEON_DOMAIN_GTT = 2,  /* GPU-accessible system memory (GART/GTT) */
  RADEON_DOMAIN_VRAM = 4, /* dedicated video memory */
  RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT, /* either placement is acceptable */
  RADEON_DOMAIN_GDS = 8,  /* on-chip global data share */
  RADEON_DOMAIN_OA = 16,  /* ordered-append storage — TODO confirm exact semantics */
};
60 
/* Buffer allocation flags (see buffer_create and radeon_flags_from_heap). */
enum radeon_bo_flag
{ /* bitfield */
  RADEON_FLAG_GTT_WC = (1 << 0),        /* use a write-combined CPU mapping */
  RADEON_FLAG_NO_CPU_ACCESS = (1 << 1), /* the buffer is never mapped by the CPU */
  RADEON_FLAG_NO_SUBALLOC = (1 << 2),   /* don't suballocate from a larger BO */
  RADEON_FLAG_SPARSE = (1 << 3),        /* sparse buffer; pages committed via buffer_commit */
  RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4), /* not exportable; required for winsys allocators
                                                   * (see radeon_get_heap_index) */
  RADEON_FLAG_READ_ONLY = (1 << 5),     /* the buffer is read-only */
  RADEON_FLAG_32BIT = (1 << 6),         /* place in a 32-bit address range — TODO confirm */
  RADEON_FLAG_ENCRYPTED = (1 << 7),     /* secure (encrypted) buffer — NOTE(review): verify */
  RADEON_FLAG_UNCACHED = (1 << 8), /* only gfx9 and newer */
};
73 
/* Flags for cs_add_fence_dependency. */
enum radeon_dependency_flag
{
   /* Add the dependency to the parallel compute IB only. */
   RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY = 1 << 0,

   /* Instead of waiting for a job to finish execution, the dependency will
    * be signaled when the job starts execution.
    */
   RADEON_DEPENDENCY_START_FENCE = 1 << 1,
};
84 
/* How a buffer is used by a CS (see cs_add_buffer, buffer_wait). */
enum radeon_bo_usage
{ /* bitfield */
  RADEON_USAGE_READ = 2,
  RADEON_USAGE_WRITE = 4,
  RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

  /* The winsys ensures that the CS submission will be scheduled after
   * previously flushed CSs referencing this BO in a conflicting way.
   */
  RADEON_USAGE_SYNCHRONIZED = 8
};
96 
/* Winsys-private flags ORed into the PIPE_TRANSFER_* usage of buffer_map. */
enum radeon_transfer_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
};
106 
107 #define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
108 
/* Values queryable through radeon_winsys::query_value. */
enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE, /* CPU-visible VRAM usage */
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};
133 
/* Buffer placement priorities passed to cs_add_buffer. A higher value means
 * a greater chance of being placed in the requested domain; values are also
 * recorded as a (1 << priority) mask in radeon_bo_list_item::priority_usage.
 */
enum radeon_bo_priority
{
   /* Each group of two has the same priority. */
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2,     /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30,
   /* 31 is the maximum value */
};
181 
182 struct winsys_handle;
183 struct radeon_winsys_ctx;
184 
/* One chunk (IB buffer) of a possibly-chained command stream. */
struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};
190 
/* A command stream: the chunk currently being written plus all previously
 * filled chunks (for chained IBs). Packets are appended with radeon_emit().
 */
struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current; /* The chunk packets are written into. */
   struct radeon_cmdbuf_chunk *prev;   /* Array of completed chunks. */
   unsigned num_prev; /* Number of previous chunks. */
   unsigned max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint64_t used_vram;
   uint64_t used_gart;
   uint64_t gpu_address; /* NOTE(review): presumably the VA of the current IB — confirm */
};
203 
204 /* Tiling info for display code, DRI sharing, and other data. */
/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy; /* pre-gfx9 tiling parameters */
   } u;

   enum radeon_surf_mode mode;   /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};
233 
/* Legacy hardware features requested through cs_request_feature. */
enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};
239 
/* One entry of the buffer list returned by cs_get_buffer_list. */
struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};
245 
struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    *
    * \param cache     Index of the L3 cache to pin to — TODO confirm units
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags     A bitmask of the RADEON_FLAG_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
                                      enum radeon_bo_domain domain, enum radeon_bo_flag flags);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,
                       enum pipe_transfer_usage usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct pb_buffer *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout, enum radeon_bo_usage usage);

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
                               struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
                               struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    * \param vm_alignment  Alignment of the virtual address mapping — TODO confirm
    */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws, struct winsys_handle *whandle,
                                           unsigned vm_alignment);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param Size      Size in bytes for the new buffer.
    */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct pb_buffer *buf, uint64_t offset, uint64_t size, bool commit);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffer this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

   /**
    * Create a command stream.
    *
    * \param ctx       The submission context
    * \param ring_type The ring type (GFX, DMA, UVD)
    * \param flush     Flush callback function associated with the command stream.
    * \param user      User pointer that will be passed to the flush callback.
    */
   struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx, enum ring_type ring_type,
                                      void (*flush)(void *ctx, unsigned flags,
                                                    struct pipe_fence_handle **fence),
                                      void *flush_ctx, bool stop_exec_on_failure);

   /**
    * Add a parallel compute IB to a gfx IB. It will share the buffer list
    * and fence dependencies with the gfx IB. The gfx flush call will submit
    * both IBs at the same time.
    *
    * The compute IB doesn't have an output fence, so the primary IB has
    * to use a wait packet for synchronization.
    *
    * The returned IB is only a stream for writing packets to the new
    * IB. Calling other winsys functions with it is not allowed, not even
    * "cs_destroy". Use the gfx IB instead.
    *
    * \param cs              Gfx IB
    */
   struct radeon_cmdbuf *(*cs_add_parallel_compute_ib)(struct radeon_cmdbuf *cs,
                                                       bool uses_gds_ordered_append);

   /**
    * Set up and enable mid command buffer preemption for the command stream.
    *
    * \param cs               Command stream
    * \param preamble_ib      Non-preemptible preamble IB for the context.
    * \param preamble_num_dw  Number of dwords in the preamble IB.
    */
   bool (*cs_setup_preemption)(struct radeon_cmdbuf *cs, const uint32_t *preamble_ib,
                               unsigned preamble_num_dw);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Whether the buffer is used for read and/or write.
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \param priority  A higher number means a greater chance of being
    *                  placed in the requested domain. 15 is the maximum.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                             enum radeon_bo_usage usage, enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \param force_chaining  Chain the IB into a new buffer now to discard
    *                        the CP prefetch cache (to emulate PKT3_REWIND)
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw, bool force_chaining);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       PIPE_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    * \return          presumably whether the request succeeded — verify impls
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);
   /**
    * Make sure all asynchronous flush of the cs have completed
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    *
    * \param dependency_flags  Bitmask of RADEON_DEPENDENCY_*
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence,
                                   unsigned dependency_flags);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct pipe_fence_handle **dst, struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize surface
    *
    * \param ws        The winsys this function is called from.
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel, it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct pipe_resource *tex, unsigned flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   /**
    * Query a 64-bit winsys/driver value or counter.
    *
    * \param ws        The winsys this function is called from.
    * \param value     Which value to query, one of radeon_value_id.
    */
   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   /**
    * Read consecutive 32-bit GPU registers.
    *
    * \param ws            The winsys this function is called from.
    * \param reg_offset    Offset of the first register to read.
    * \param num_registers Number of registers to read into \p out.
    * \param out           Destination array for the register values.
    * \return              presumably true on success — verify impls
    */
   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    * (whether the winsys/CS operates on encrypted (TMZ) resources)
    */
   bool (*ws_is_secure)(struct radeon_winsys *ws);
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
   void (*cs_set_secure)(struct radeon_cmdbuf *cs, bool secure);
};
696 
radeon_emitted(struct radeon_cmdbuf * cs,unsigned num_dw)697 static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
698 {
699    return cs && (cs->prev_dw + cs->current.cdw > num_dw);
700 }
701 
radeon_emit(struct radeon_cmdbuf * cs,uint32_t value)702 static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
703 {
704    cs->current.buf[cs->current.cdw++] = value;
705 }
706 
radeon_emit_array(struct radeon_cmdbuf * cs,const uint32_t * values,unsigned count)707 static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
708                                      unsigned count)
709 {
710    memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
711    cs->current.cdw += count;
712 }
713 
/* Winsys allocator heaps: each heap is one supported combination of memory
 * domain and allocation flags. See radeon_domain_from_heap,
 * radeon_flags_from_heap, and radeon_get_heap_index for the mapping.
 */
enum radeon_heap
{
   RADEON_HEAP_VRAM_NO_CPU_ACCESS,
   RADEON_HEAP_VRAM_READ_ONLY,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT,
   RADEON_HEAP_VRAM_32BIT,
   RADEON_HEAP_VRAM,
   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT_WC_READ_ONLY,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_WC_32BIT,
   RADEON_HEAP_GTT,
   RADEON_HEAP_GTT_UNCACHED_WC,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_UNCACHED_WC_32BIT,
   RADEON_HEAP_GTT_UNCACHED,
   RADEON_MAX_SLAB_HEAPS,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
734 
radeon_domain_from_heap(enum radeon_heap heap)735 static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
736 {
737    switch (heap) {
738    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
739    case RADEON_HEAP_VRAM_READ_ONLY:
740    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
741    case RADEON_HEAP_VRAM_32BIT:
742    case RADEON_HEAP_VRAM:
743       return RADEON_DOMAIN_VRAM;
744    case RADEON_HEAP_GTT_WC:
745    case RADEON_HEAP_GTT_WC_READ_ONLY:
746    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
747    case RADEON_HEAP_GTT_WC_32BIT:
748    case RADEON_HEAP_GTT:
749    case RADEON_HEAP_GTT_UNCACHED_WC:
750    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
751    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
752    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
753    case RADEON_HEAP_GTT_UNCACHED:
754       return RADEON_DOMAIN_GTT;
755    default:
756       assert(0);
757       return (enum radeon_bo_domain)0;
758    }
759 }
760 
radeon_flags_from_heap(enum radeon_heap heap)761 static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
762 {
763    unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;
764 
765    switch (heap) {
766    case RADEON_HEAP_GTT:
767    case RADEON_HEAP_GTT_UNCACHED:
768       break;
769    default:
770       flags |= RADEON_FLAG_GTT_WC;
771    }
772 
773    switch (heap) {
774    case RADEON_HEAP_GTT_UNCACHED_WC:
775    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
776    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
777    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
778    case RADEON_HEAP_GTT_UNCACHED:
779       flags |= RADEON_FLAG_UNCACHED;
780       break;
781    default:
782       break;
783    }
784 
785    switch (heap) {
786    case RADEON_HEAP_VRAM_READ_ONLY:
787    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
788    case RADEON_HEAP_GTT_WC_READ_ONLY:
789    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
790    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
791    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
792       flags |= RADEON_FLAG_READ_ONLY;
793       break;
794    default:
795       break;
796    }
797 
798    switch (heap) {
799    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
800    case RADEON_HEAP_VRAM_32BIT:
801    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
802    case RADEON_HEAP_GTT_WC_32BIT:
803    case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
804    case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
805       flags |= RADEON_FLAG_32BIT;
806    default:
807       break;
808    }
809 
810    switch (heap) {
811    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
812       flags |= RADEON_FLAG_NO_CPU_ACCESS;
813       break;
814    default:
815       break;
816    }
817 
818    return flags;
819 }
820 
/* Return the heap index for winsys allocators, or -1 on failure.
 *
 * Only non-shareable buffers (RADEON_FLAG_NO_INTERPROCESS_SHARING) without
 * unsupported flags (NO_SUBALLOC, SPARSE, ...) are served by the slab/cache
 * allocators; everything else returns -1 and goes through the kernel.
 *
 * \param domain  Requested placement, RADEON_DOMAIN_*.
 * \param flags   Allocation flags, RADEON_FLAG_*.
 * \return        A RADEON_HEAP_* index, or -1 if no heap matches.
 */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   bool uncached;

   /* VRAM implies WC (write combining) */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   /* NO_CPU_ACCESS implies VRAM only. */
   assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* Unsupported flags: NO_SUBALLOC, SPARSE. */
   if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_UNCACHED |
                 RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT))
      return -1;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      /* Dispatch on the exact combination of VRAM-relevant flags. */
      switch (flags & (RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
         assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
         assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS:
         return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
      case RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_VRAM_READ_ONLY;
      case RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_32BIT;
      case 0:
         return RADEON_HEAP_VRAM;
      }
      break;
   case RADEON_DOMAIN_GTT:
      /* UNCACHED only selects between the WC/plain heap variants below. */
      uncached = flags & RADEON_FLAG_UNCACHED;

      switch (flags & (RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT
                         : RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY
                         : RADEON_HEAP_GTT_WC_READ_ONLY;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_32BIT
                         : RADEON_HEAP_GTT_WC_32BIT;
      case RADEON_FLAG_GTT_WC:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC : RADEON_HEAP_GTT_WC;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_READ_ONLY:
         assert(!"READ_ONLY without WC is disallowed");
         return -1;
      case RADEON_FLAG_32BIT:
         assert(!"32BIT without WC is disallowed");
         return -1;
      case 0:
         return uncached ? RADEON_HEAP_GTT_UNCACHED : RADEON_HEAP_GTT;
      }
      break;
   default:
      break;
   }
   return -1;
}
893 
894 #endif
895