/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "SkMath.h"
#include "SkTypes.h"
#include "GrConfig.h"

////////////////////////////////////////////////////////////////////////////////

/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    }

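// Usage sketch (the enum and flag values below are hypothetical, shown only to
// illustrate how the generated operators behave):
//
//     enum GrExampleFlags {
//         kNone_GrExampleFlag   = 0x0,
//         kFirst_GrExampleFlag  = 0x1,
//         kSecond_GrExampleFlag = 0x2,
//     };
//     GR_MAKE_BITFIELD_OPS(GrExampleFlags)
//
//     GrExampleFlags flags = kFirst_GrExampleFlag | kSecond_GrExampleFlag;
//     flags = flags & ~kSecond_GrExampleFlag;   // templated operator& accepts the int produced by ~
//     if (flags & kFirst_GrExampleFlag) { /* ... */ }
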
#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b);

/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. Instantiated with the ~ operator.
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
    constexpr int value() const { return fValue; }
private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    }

#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X);

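// Usage sketch for an enum class (the enum below is hypothetical, shown only to
// illustrate the operators and the GrTFlagsMask produced by operator~):
//
//     enum class GrExampleClassFlags : int {
//         kNone   = 0,
//         kFirst  = 1 << 0,
//         kSecond = 1 << 1,
//     };
//     GR_MAKE_BITFIELD_CLASS_OPS(GrExampleClassFlags)
//
//     GrExampleClassFlags flags = GrExampleClassFlags::kFirst | GrExampleClassFlags::kSecond;
//     flags &= ~GrExampleClassFlags::kSecond;   // ~ yields a mask; &= strips the bit with type safety
//     if (flags & GrExampleClassFlags::kFirst) { /* ... */ }
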
////////////////////////////////////////////////////////////////////////////////

// compile time versions of min/max
#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))

/**
 * divide, rounding up
 */
static inline int32_t GrIDivRoundUp(int x, int y) {
    SkASSERT(y > 0);
    return (x + (y-1)) / y;
}
static inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
    return (x + (y-1)) / y;
}
static inline size_t GrSizeDivRoundUp(size_t x, size_t y) {
    return (x + (y-1)) / y;
}

// compile time, evaluates Y multiple times
#define GR_CT_DIV_ROUND_UP(X, Y) (((X) + ((Y)-1)) / (Y))

/**
 * align up
 */
static inline uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
    return GrUIDivRoundUp(x, alignment) * alignment;
}
static inline size_t GrSizeAlignUp(size_t x, size_t alignment) {
    return GrSizeDivRoundUp(x, alignment) * alignment;
}

// compile time, evaluates A multiple times
#define GR_CT_ALIGN_UP(X, A) (GR_CT_DIV_ROUND_UP((X),(A)) * (A))

/**
 * amount of pad needed to align up
 */
static inline uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
    return (alignment - x % alignment) % alignment;
}
static inline size_t GrSizeAlignUpPad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

/**
 * align down
 */
static inline uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
static inline size_t GrSizeAlignDown(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
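
// Worked example of the rounding/alignment helpers above (values chosen purely
// for illustration): with x = 13 and an alignment of 8,
//     GrUIDivRoundUp(13, 8)  == 2    // (13 + 7) / 8
//     GrUIAlignUp(13, 8)     == 16   // 2 * 8
//     GrUIAlignUpPad(13, 8)  == 3    // 16 - 13
//     GrUIAlignDown(13, 8)   == 8    // (13 / 8) * 8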

///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum GrBackend {
    kMetal_GrBackend,
    kOpenGL_GrBackend,
    kVulkan_GrBackend,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock_GrBackend,
};

/**
 * Backend-specific 3D context handle
 * OpenGL: const GrGLInterface*. If null, the result of GrGLMakeNativeInterface() will be used.
 * Vulkan: GrVkBackendContext*.
 * Mock: const GrMockOptions* or null for default constructed GrMockContextOptions.
 */
typedef intptr_t GrBackendContext;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to control antialiasing in draw calls.
 */
enum class GrAA : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipMapped : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * Geometric primitives used for drawing.
 */
enum class GrPrimitiveType {
    kTriangles,
    kTriangleStrip,
    kTriangleFan,
    kPoints,
    kLines,          // 1 pix wide only
    kLineStrip,      // 1 pix wide only
    kLinesAdjacency  // requires geometry shader support.
};
static constexpr int kNumGrPrimitiveTypes = (int) GrPrimitiveType::kLinesAdjacency + 1;

static constexpr bool GrIsPrimTypeLines(GrPrimitiveType type) {
    return GrPrimitiveType::kLines == type ||
           GrPrimitiveType::kLineStrip == type ||
           GrPrimitiveType::kLinesAdjacency == type;
}

static constexpr bool GrIsPrimTypeTris(GrPrimitiveType type) {
    return GrPrimitiveType::kTriangles == type ||
           GrPrimitiveType::kTriangleStrip == type ||
           GrPrimitiveType::kTriangleFan == type;
}

static constexpr bool GrPrimTypeRequiresGeometryShaderSupport(GrPrimitiveType type) {
    return GrPrimitiveType::kLinesAdjacency == type;
}

/**
 * Formats for masks, used by the font cache.
 * Important that these are 0-based.
 */
enum GrMaskFormat {
    kA8_GrMaskFormat,    //!< 1-byte per pixel
    kA565_GrMaskFormat,  //!< 2-bytes per pixel, RGB represent 3-channel LCD coverage
    kARGB_GrMaskFormat,  //!< 4-bytes per pixel, color format

    kLast_GrMaskFormat = kARGB_GrMaskFormat
};
static const int kMaskFormatCount = kLast_GrMaskFormat + 1;

/**
 * Return the number of bytes-per-pixel for the specified mask format.
 */
static inline int GrMaskFormatBytesPerPixel(GrMaskFormat format) {
    SkASSERT(format < kMaskFormatCount);
    // kA8   (0) -> 1
    // kA565 (1) -> 2
    // kARGB (2) -> 4
    static const int sBytesPerPixel[] = { 1, 2, 4 };
    static_assert(SK_ARRAY_COUNT(sBytesPerPixel) == kMaskFormatCount, "array_size_mismatch");
    static_assert(kA8_GrMaskFormat == 0, "enum_order_dependency");
    static_assert(kA565_GrMaskFormat == 1, "enum_order_dependency");
    static_assert(kARGB_GrMaskFormat == 2, "enum_order_dependency");

    return sBytesPerPixel[(int) format];
}
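
// For example (illustrative): GrMaskFormatBytesPerPixel(kA565_GrMaskFormat) returns 2,
// so a 100-pixel-wide row of A565 mask data occupies 200 bytes.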

/**
 * Pixel configurations.
 */
enum GrPixelConfig {
    kUnknown_GrPixelConfig,
    kAlpha_8_GrPixelConfig,
    kGray_8_GrPixelConfig,
    kRGB_565_GrPixelConfig,
    /**
     * Premultiplied
     */
    kRGBA_4444_GrPixelConfig,
    /**
     * Premultiplied. Byte order is r,g,b,a.
     */
    kRGBA_8888_GrPixelConfig,
    /**
     * Premultiplied. Byte order is b,g,r,a.
     */
    kBGRA_8888_GrPixelConfig,
    /**
     * Premultiplied and sRGB. Byte order is r,g,b,a.
     */
    kSRGBA_8888_GrPixelConfig,
    /**
     * Premultiplied and sRGB. Byte order is b,g,r,a.
     */
    kSBGRA_8888_GrPixelConfig,

    /**
     * Byte order is r, g, b, a. This color format is 32 bits per channel
     */
    kRGBA_float_GrPixelConfig,
    /**
     * Byte order is r, g. This color format is 32 bits per channel
     */
    kRG_float_GrPixelConfig,

    /**
     * This color format is a single 16 bit float channel
     */
    kAlpha_half_GrPixelConfig,

    /**
     * Byte order is r, g, b, a. This color format is 16 bits per channel
     */
    kRGBA_half_GrPixelConfig,

    kPrivateConfig1_GrPixelConfig,
    kPrivateConfig2_GrPixelConfig,
    kPrivateConfig3_GrPixelConfig,
    kPrivateConfig4_GrPixelConfig,
    kPrivateConfig5_GrPixelConfig,

    kLast_GrPixelConfig = kPrivateConfig5_GrPixelConfig
};
static const int kGrPixelConfigCnt = kLast_GrPixelConfig + 1;

// Aliases for pixel configs that match skia's byte order.
#ifndef SK_CPU_LENDIAN
    #error "Skia gpu currently assumes little endian"
#endif
#if SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
    static const GrPixelConfig kSkia8888_GrPixelConfig = kBGRA_8888_GrPixelConfig;
#elif SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
    static const GrPixelConfig kSkia8888_GrPixelConfig = kRGBA_8888_GrPixelConfig;
#else
    #error "SK_*32_SHIFT values must correspond to GL_BGRA or GL_RGBA format."
#endif

/**
 * Optional bitfield flags that can be set on GrSurfaceDesc (below).
 */
enum GrSurfaceFlags {
    kNone_GrSurfaceFlags = 0x0,
    /**
     * Creates a texture that can be rendered to as a GrRenderTarget. Use
     * GrTexture::asRenderTarget() to access.
     */
    kRenderTarget_GrSurfaceFlag = 0x1,
    /**
     * Clears to zero on creation. It will cause creation failure if initial data is supplied to the
     * texture. This only affects the base level if the texture is created with MIP levels.
     */
    kPerformInitialClear_GrSurfaceFlag = 0x2
};

GR_MAKE_BITFIELD_OPS(GrSurfaceFlags)

// opaque type for 3D API object handles
typedef intptr_t GrBackendObject;

/**
 * Some textures will be stored such that the upper and left edges of the content meet at the
 * origin (in texture coord space), and for other textures the lower and left edges meet at
 * the origin.
 */

enum GrSurfaceOrigin {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};

struct GrMipLevel {
    const void* fPixels;
    size_t      fRowBytes;
};
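
// Illustrative note: for a tightly packed level in a 4-byte-per-pixel config such as
// kRGBA_8888_GrPixelConfig, fRowBytes would typically be width * 4; larger values
// allow padding between rows.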

/**
 * Describes a surface to be created.
 */
struct GrSurfaceDesc {
    GrSurfaceDesc()
            : fFlags(kNone_GrSurfaceFlags)
            , fOrigin(kTopLeft_GrSurfaceOrigin)
            , fWidth(0)
            , fHeight(0)
            , fConfig(kUnknown_GrPixelConfig)
            , fSampleCnt(1) {}

    GrSurfaceFlags  fFlags;  //!< bitfield of GrSurfaceFlags
    GrSurfaceOrigin fOrigin; //!< origin of the texture
    int             fWidth;  //!< Width of the texture
    int             fHeight; //!< Height of the texture

    /**
     * Format of source data of the texture. Not guaranteed to be the same as
     * internal format used by 3D API.
     */
    GrPixelConfig   fConfig;

    /**
     * The number of samples per pixel. Zero is treated equivalently to 1. This only
     * applies if the kRenderTarget_GrSurfaceFlag is set. The actual number
     * of samples may not exactly match the request. The request will be rounded
     * up to the next supported sample count. A value larger than the largest
     * supported sample count will fail.
     */
    int             fSampleCnt;
};

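// Usage sketch (field values are illustrative only): describing a 256x256
// render-target texture in Skia's native 8888 config.
//
//     GrSurfaceDesc desc;
//     desc.fFlags     = kRenderTarget_GrSurfaceFlag;
//     desc.fOrigin    = kBottomLeft_GrSurfaceOrigin;
//     desc.fWidth     = 256;
//     desc.fHeight    = 256;
//     desc.fConfig    = kSkia8888_GrPixelConfig;
//     desc.fSampleCnt = 1;
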
/**
 * Clips are composed from these objects.
 */
enum GrClipType {
    kRect_ClipType,
    kPath_ClipType
};

///////////////////////////////////////////////////////////////////////////////

/** Ownership rules for external GPU resources imported into Skia. */
enum GrWrapOwnership {
    /** Skia will assume the client will keep the resource alive and Skia will not free it. */
    kBorrow_GrWrapOwnership,

    /** Skia will assume ownership of the resource and free it. */
    kAdopt_GrWrapOwnership,
};

///////////////////////////////////////////////////////////////////////////////

/**
 * The GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend and we'd add a new set for an alternative backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState    = 1 << 0,
    kTextureBinding_GrGLBackendState  = 1 << 1,
    // View state stands for scissor and viewport
    kView_GrGLBackendState            = 1 << 2,
    kBlend_GrGLBackendState           = 1 << 3,
    kMSAAEnable_GrGLBackendState      = 1 << 4,
    kVertex_GrGLBackendState          = 1 << 5,
    kStencil_GrGLBackendState         = 1 << 6,
    kPixelStore_GrGLBackendState      = 1 << 7,
    kProgram_GrGLBackendState         = 1 << 8,
    kFixedFunction_GrGLBackendState   = 1 << 9,
    kMisc_GrGLBackendState            = 1 << 10,
    kPathRendering_GrGLBackendState   = 1 << 11,
    kALL_GrGLBackendState             = 0xffff
};

/**
 * This value translates to resetting all the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;
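
// Illustrative sketch (assumes GrContext::resetContext accepting a state bitfield,
// as in Skia's GrContext.h of this era): after touching GL texture bindings and
// blend state outside of Skia, a client might invalidate just those caches:
//
//     context->resetContext(kTextureBinding_GrGLBackendState | kBlend_GrGLBackendState);
//
// or invalidate everything by passing kAll_GrBackendState.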

// Enum used as return value when flushing with semaphores so the client knows whether the
// semaphores were submitted to the GPU or not.
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};

#endif