/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrConfig.h"

class GrBackendSemaphore;
class SkImage;
class SkSurface;

////////////////////////////////////////////////////////////////////////////////

/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    } \

#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b); \

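
/**
 * Usage sketch (illustrative only; GrExampleFlags and its values are hypothetical, not part of
 * this header). GR_MAKE_BITFIELD_OPS is applied to a plain enum whose values are distinct bits;
 * GR_DECL_BITFIELD_OPS_FRIENDS supplies the matching friend declarations when such an enum is
 * nested inside a class.
 *
 *   enum GrExampleFlags {
 *       kNone_GrExampleFlag = 0,
 *       kRed_GrExampleFlag  = 1 << 0,
 *       kBlue_GrExampleFlag = 1 << 1,
 *   };
 *   GR_MAKE_BITFIELD_OPS(GrExampleFlags)
 *
 *   GrExampleFlags f = kRed_GrExampleFlag | kBlue_GrExampleFlag;  // operator| keeps the enum type
 *   f &= kRed_GrExampleFlag;                                      // operator&= masks in place
 *   if (f & kBlue_GrExampleFlag) { }                              // false: kBlue was masked out
 */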
/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. Instantiated with the ~ operator.
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
    constexpr int value() const { return fValue; }
private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    } \

#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X)

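
/**
 * Usage sketch (illustrative only; GrExampleClassFlags is a hypothetical enum class, not part of
 * this header). Applying GR_MAKE_BITFIELD_CLASS_OPS to a scoped enum enables |, |=, &, and ~,
 * where operator~ produces a GrTFlagsMask and and-ing a mask with flags yields flags again, so a
 * bit can be cleared with type safety.
 *
 *   enum class GrExampleClassFlags {
 *       kNone = 0,
 *       kRed  = 1 << 0,
 *       kBlue = 1 << 1,
 *   };
 *   GR_MAKE_BITFIELD_CLASS_OPS(GrExampleClassFlags)
 *
 *   GrExampleClassFlags f = GrExampleClassFlags::kRed | GrExampleClassFlags::kBlue;
 *   f &= ~GrExampleClassFlags::kBlue;              // GrTFlagsMask overloads clear the kBlue bit
 *   bool hasRed = f & GrExampleClassFlags::kRed;   // operator&(X, X) returns bool
 */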
///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum class GrBackendApi : unsigned {
    kOpenGL,
    kVulkan,
    kMetal,
    kDirect3D,
    kDawn,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock,

    /**
     * Added here to support the legacy GrBackend enum value and clients who referenced it using
     * GrBackend::kOpenGL_GrBackend.
     */
    kOpenGL_GrBackend = kOpenGL,
};

/**
 * Previously the above enum was not an enum class but a normal enum. To support legacy use of
 * the enum values, we define them below so that no clients break.
 */
typedef GrBackendApi GrBackend;

static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipmapped : bool {
    kNo = false,
    kYes = true
};
/** Deprecated legacy alias of GrMipmapped. */
using GrMipMapped = GrMipmapped;

/*
 * Can a GrBackendObject be rendered to?
 */
enum class GrRenderable : bool {
    kNo = false,
    kYes = true
};

/*
 * Used to say whether a texture is backed by protected memory.
 */
enum class GrProtected : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * GPU SkImages and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
 * either the top-left or bottom-left content pixel.
 */
enum GrSurfaceOrigin : int {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};

/**
 * A GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState     = 1 << 0,
    // Also includes samplers bound to texture units.
    kTextureBinding_GrGLBackendState   = 1 << 1,
    // View state stands for scissor and viewport
    kView_GrGLBackendState             = 1 << 2,
    kBlend_GrGLBackendState            = 1 << 3,
    kMSAAEnable_GrGLBackendState       = 1 << 4,
    kVertex_GrGLBackendState           = 1 << 5,
    kStencil_GrGLBackendState          = 1 << 6,
    kPixelStore_GrGLBackendState       = 1 << 7,
    kProgram_GrGLBackendState          = 1 << 8,
    kFixedFunction_GrGLBackendState    = 1 << 9,
    kMisc_GrGLBackendState             = 1 << 10,
    kPathRendering_GrGLBackendState    = 1 << 11,
    kALL_GrGLBackendState              = 0xffff
};

/**
 * This value translates to resetting all the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;

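/**
 * Illustrative sketch (assumes a GrContext* named "context" and the resetContext(uint32_t) entry
 * point declared in GrContext.h): after a client modifies GL state behind Skia's back, it can
 * invalidate just the affected parts of the cached state, or all of it.
 *
 *   context->resetContext(kTextureBinding_GrGLBackendState | kBlend_GrGLBackendState);
 *   context->resetContext(kAll_GrBackendState);  // or drop the entire cached state
 */
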
typedef void* GrGpuFinishedContext;
typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);

typedef void* GrGpuSubmittedContext;
typedef void (*GrGpuSubmittedProc)(GrGpuSubmittedContext submittedContext, bool success);

/**
 * Struct to supply options to flush calls.
 *
 * After issuing all commands, fNumSemaphores semaphores will be signaled by the GPU. The client
 * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
 * can be either initialized or not. If a semaphore is initialized, the backend uses the passed-in
 * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
 * object is initialized with that semaphore. The semaphores are not sent to the GPU until the next
 * GrContext::submit call is made. See GrContext::submit for more information.
 *
 * The client will own and be responsible for deleting the underlying semaphores that are stored
 * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
 * themselves can be deleted as soon as this function returns.
 *
 * If a finishedProc is provided, it will be called when all work submitted to the GPU from this
 * flush call and all previous flush calls has finished on the GPU. If the flush call fails due to
 * an error and nothing ends up getting sent to the GPU, the finished proc is called immediately.
 *
 * If a submittedProc is provided, it will be called when all work from this flush call has been
 * submitted to the GPU. If the flush call fails due to an error and nothing will get sent to the
 * GPU, the submitted proc is called immediately. It is possible that when the work is finally
 * submitted, the submission actually fails. In that case we will not reattempt the submission;
 * Skia notifies the client of the outcome via the success bool passed into the submittedProc. The
 * submittedProc lets the client know when semaphores that were sent with the flush have actually
 * been submitted to the GPU, so that they can be waited on (or deleted if the submit fails).
 *
 * Note about GL: In GL, work gets sent to the driver immediately during the flush call, but we
 * don't really know when the driver sends the work to the GPU. Therefore, we treat the submitted
 * proc as we do in other backends: it will be called when the next GrContext::submit is called
 * after the flush (or possibly during the flush if there is no work to be done for the flush).
 * The main use case for the submittedProc is to know when semaphores have been sent to the GPU,
 * and even in GL it is required to call GrContext::submit to flush them. So a client should be
 * able to treat all backend APIs the same in terms of how the submitted procs are treated.
 */
struct GrFlushInfo {
    int fNumSemaphores = 0;
    GrBackendSemaphore* fSignalSemaphores = nullptr;
    GrGpuFinishedProc fFinishedProc = nullptr;
    GrGpuFinishedContext fFinishedContext = nullptr;
    GrGpuSubmittedProc fSubmittedProc = nullptr;
    GrGpuSubmittedContext fSubmittedContext = nullptr;
};
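
/**
 * Usage sketch (illustrative only; MyState, myState, onGpuFinished, and the "context" pointer are
 * hypothetical client code; the flush/submit entry points shown are GrContext::flush(const
 * GrFlushInfo&) and GrContext::submit()). Here the client asks to be notified once the flushed
 * work has finished executing on the GPU.
 *
 *   static void onGpuFinished(GrGpuFinishedContext finishedContext) {
 *       static_cast<MyState*>(finishedContext)->gpuWorkDone = true;
 *   }
 *
 *   MyState myState;
 *   GrFlushInfo info;
 *   info.fFinishedProc = onGpuFinished;
 *   info.fFinishedContext = &myState;
 *   context->flush(info);
 *   context->submit();  // semaphores and procs are not sent to the GPU until submit
 */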

/**
 * Enum used as the return value when flushing with semaphores, so the client knows whether the
 * valid semaphores will be submitted on the next GrContext::submit call.
 */
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};

#endif