/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2020 Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 */

#pragma once

#include "BLI_assert.h"

#include "GPU_vertex_buffer.h"

#include "gpu_framebuffer_private.hh"

namespace blender {
namespace gpu {

typedef enum eGPUTextureFormatFlag {
  GPU_FORMAT_DEPTH = (1 << 0),
  GPU_FORMAT_STENCIL = (1 << 1),
  GPU_FORMAT_INTEGER = (1 << 2),
  GPU_FORMAT_FLOAT = (1 << 3),
  GPU_FORMAT_COMPRESSED = (1 << 4),

  GPU_FORMAT_DEPTH_STENCIL = (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL),
} eGPUTextureFormatFlag;

ENUM_OPERATORS(eGPUTextureFormatFlag, GPU_FORMAT_DEPTH_STENCIL)

typedef enum eGPUTextureType {
  GPU_TEXTURE_1D = (1 << 0),
  GPU_TEXTURE_2D = (1 << 1),
  GPU_TEXTURE_3D = (1 << 2),
  GPU_TEXTURE_CUBE = (1 << 3),
  GPU_TEXTURE_ARRAY = (1 << 4),
  GPU_TEXTURE_BUFFER = (1 << 5),

  GPU_TEXTURE_1D_ARRAY = (GPU_TEXTURE_1D | GPU_TEXTURE_ARRAY),
  GPU_TEXTURE_2D_ARRAY = (GPU_TEXTURE_2D | GPU_TEXTURE_ARRAY),
  GPU_TEXTURE_CUBE_ARRAY = (GPU_TEXTURE_CUBE | GPU_TEXTURE_ARRAY),
} eGPUTextureType;

ENUM_OPERATORS(eGPUTextureType, GPU_TEXTURE_CUBE_ARRAY)

#ifdef DEBUG
#  define DEBUG_NAME_LEN 64
#else
#  define DEBUG_NAME_LEN 8
#endif

/* Maximum number of FBOs a texture can be attached to. */
#define GPU_TEX_MAX_FBO_ATTACHED 16

/**
 * Implementation of Textures.
 * Base class which is then specialized for each implementation (GL, VK, ...).
 **/
class Texture {
 public:
  /** Internal Sampler state. */
  eGPUSamplerState sampler_state = GPU_SAMPLER_DEFAULT;
  /** Reference counter. */
  int refcount = 1;
  /** Width & Height (of source data), optional. */
  int src_w = 0, src_h = 0;

 protected:
  /* ---- Texture format (immutable after init). ---- */
  /** Width & Height & Depth. For cube-map arrays, `d_` is the number of face layers. */
  int w_, h_, d_;
  /** Internal data format. */
  eGPUTextureFormat format_;
  /** Format characteristics. */
  eGPUTextureFormatFlag format_flag_;
  /** Texture type. */
  eGPUTextureType type_;

  /** Number of mip-maps this texture has (max mip level). */
  /* TODO(fclem): Should become immutable and the need for mipmaps should be specified upfront. */
  int mipmaps_ = -1;
  /** For error checking. */
  int mip_min_ = 0, mip_max_ = 0;

  /** For debugging. */
  char name_[DEBUG_NAME_LEN];
  /** Framebuffer references to update on deletion. */
  GPUAttachmentType fb_attachment_[GPU_TEX_MAX_FBO_ATTACHED];
  FrameBuffer *fb_[GPU_TEX_MAX_FBO_ATTACHED];

 public:
  Texture(const char *name);
  virtual ~Texture();

  /* Return true on success. */
  bool init_1D(int w, int layers, eGPUTextureFormat format);
  bool init_2D(int w, int h, int layers, eGPUTextureFormat format);
  bool init_3D(int w, int h, int d, eGPUTextureFormat format);
  bool init_cubemap(int w, int layers, eGPUTextureFormat format);
  bool init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format);

  virtual void generate_mipmap(void) = 0;
  virtual void copy_to(Texture *tex) = 0;
  virtual void clear(eGPUDataFormat format, const void *data) = 0;
  virtual void swizzle_set(const char swizzle_mask[4]) = 0;
  virtual void mip_range_set(int min, int max) = 0;
  virtual void *read(int mip, eGPUDataFormat format) = 0;

  void attach_to(FrameBuffer *fb, GPUAttachmentType type);
  void detach_from(FrameBuffer *fb);
  void update(eGPUDataFormat format, const void *data);

  virtual void update_sub(
      int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) = 0;

  /* TODO(fclem): Legacy. Should be removed at some point. */
  virtual uint gl_bindcode_get(void) const = 0;

  int width_get(void) const
  {
    return w_;
  }
  int height_get(void) const
  {
    return h_;
  }
  int depth_get(void) const
  {
    return d_;
  }

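  /** Return the size of the given mip level into `r_size`. Array layer counts are not divided. */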
  void mip_size_get(int mip, int r_size[3]) const
  {
    /* TODO: assert if the mip level would drop any dimension below 1px. */
    int div = 1 << mip;
    r_size[0] = max_ii(1, w_ / div);

    if (type_ == GPU_TEXTURE_1D_ARRAY) {
      r_size[1] = h_;
    }
    else if (h_ > 0) {
      r_size[1] = max_ii(1, h_ / div);
    }

    if (type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE)) {
      r_size[2] = d_;
    }
    else if (d_ > 0) {
      r_size[2] = max_ii(1, d_ / div);
    }
  }

  int mip_width_get(int mip) const
  {
    return max_ii(1, w_ / (1 << mip));
  }
  int mip_height_get(int mip) const
  {
    return (type_ == GPU_TEXTURE_1D_ARRAY) ? h_ : max_ii(1, h_ / (1 << mip));
  }
  int mip_depth_get(int mip) const
  {
    return (type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE)) ? d_ : max_ii(1, d_ / (1 << mip));
  }

  /* Return the number of dimensions, taking the array type into account. */
  int dimensions_count(void) const
  {
    const int array = (type_ & GPU_TEXTURE_ARRAY) ? 1 : 0;
    switch (type_ & ~GPU_TEXTURE_ARRAY) {
      case GPU_TEXTURE_BUFFER:
        return 1;
      case GPU_TEXTURE_1D:
        return 1 + array;
      case GPU_TEXTURE_2D:
        return 2 + array;
      case GPU_TEXTURE_CUBE:
      case GPU_TEXTURE_3D:
      default:
        return 3;
    }
  }
  /* Return the number of array layers (or face layers), or 1 for non-array textures. */
  int layer_count(void) const
  {
    switch (type_) {
      case GPU_TEXTURE_1D_ARRAY:
        return h_;
      case GPU_TEXTURE_2D_ARRAY:
      case GPU_TEXTURE_CUBE_ARRAY:
        return d_;
      default:
        return 1;
    }
  }

  eGPUTextureFormat format_get(void) const
  {
    return format_;
  }
  eGPUTextureFormatFlag format_flag_get(void) const
  {
    return format_flag_;
  }
  eGPUTextureType type_get(void) const
  {
    return type_;
  }
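  /** Return the frame-buffer attachment type matching this texture format for the given slot. */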
  GPUAttachmentType attachment_type(int slot) const
  {
    switch (format_) {
      case GPU_DEPTH_COMPONENT32F:
      case GPU_DEPTH_COMPONENT24:
      case GPU_DEPTH_COMPONENT16:
        BLI_assert(slot == 0);
        return GPU_FB_DEPTH_ATTACHMENT;
      case GPU_DEPTH24_STENCIL8:
      case GPU_DEPTH32F_STENCIL8:
        BLI_assert(slot == 0);
        return GPU_FB_DEPTH_STENCIL_ATTACHMENT;
      default:
        return GPU_FB_COLOR_ATTACHMENT0 + slot;
    }
  }

 protected:
  virtual bool init_internal(void) = 0;
  virtual bool init_internal(GPUVertBuf *vbo) = 0;
};

/* Syntactic sugar. */
static inline GPUTexture *wrap(Texture *tex)
{
  return reinterpret_cast<GPUTexture *>(tex);
}
static inline Texture *unwrap(GPUTexture *tex)
{
  return reinterpret_cast<Texture *>(tex);
}
static inline const Texture *unwrap(const GPUTexture *tex)
{
  return reinterpret_cast<const Texture *>(tex);
}

#undef DEBUG_NAME_LEN

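/** Return the size in bytes of one pixel (texel) of the given texture format. */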
inline size_t to_bytesize(eGPUTextureFormat format)
{
  switch (format) {
    case GPU_RGBA32F:
      return 16;
    case GPU_RG32F:
    case GPU_RGBA16F:
    case GPU_RGBA16:
      return 8;
    case GPU_RGB16F:
      return 6;
    case GPU_DEPTH32F_STENCIL8: /* 32-bit depth, 8 bits stencil, and 24 unused bits. */
      return 8;
    case GPU_RG16F:
    case GPU_RG16I:
    case GPU_RG16UI:
    case GPU_RG16:
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH_COMPONENT32F:
    case GPU_RGBA8UI:
    case GPU_RGBA8:
    case GPU_SRGB8_A8:
    case GPU_R11F_G11F_B10F:
    case GPU_R32F:
    case GPU_R32UI:
    case GPU_R32I:
      return 4;
    case GPU_DEPTH_COMPONENT24:
      return 3;
    case GPU_DEPTH_COMPONENT16:
    case GPU_R16F:
    case GPU_R16UI:
    case GPU_R16I:
    case GPU_RG8:
    case GPU_R16:
      return 2;
    case GPU_R8:
    case GPU_R8UI:
      return 1;
    case GPU_SRGB8_A8_DXT1:
    case GPU_SRGB8_A8_DXT3:
    case GPU_SRGB8_A8_DXT5:
    case GPU_RGBA8_DXT1:
    case GPU_RGBA8_DXT3:
    case GPU_RGBA8_DXT5:
      return 1; /* Incorrect but actual size is fractional. */
    default:
      BLI_assert(!"Texture format incorrect or unsupported\n");
      return 0;
  }
}

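/** Return the size in bytes of one compression block for compressed texture formats. */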
inline size_t to_block_size(eGPUTextureFormat data_type)
{
  switch (data_type) {
    case GPU_SRGB8_A8_DXT1:
    case GPU_RGBA8_DXT1:
      return 8;
    case GPU_SRGB8_A8_DXT3:
    case GPU_SRGB8_A8_DXT5:
    case GPU_RGBA8_DXT3:
    case GPU_RGBA8_DXT5:
      return 16;
    default:
      BLI_assert(!"Texture format is not a compressed format\n");
      return 0;
  }
}

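/** Return the format flag (depth, stencil, integer, compressed ...) matching the texture format. */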
inline eGPUTextureFormatFlag to_format_flag(eGPUTextureFormat format)
{
  switch (format) {
    case GPU_DEPTH_COMPONENT24:
    case GPU_DEPTH_COMPONENT16:
    case GPU_DEPTH_COMPONENT32F:
      return GPU_FORMAT_DEPTH;
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH32F_STENCIL8:
      return GPU_FORMAT_DEPTH_STENCIL;
    case GPU_R8UI:
    case GPU_RG16I:
    case GPU_R16I:
    case GPU_RG16UI:
    case GPU_R16UI:
    case GPU_R32UI:
    case GPU_R32I:
    case GPU_RGBA8UI:
      return GPU_FORMAT_INTEGER;
    case GPU_SRGB8_A8_DXT1:
    case GPU_SRGB8_A8_DXT3:
    case GPU_SRGB8_A8_DXT5:
    case GPU_RGBA8_DXT1:
    case GPU_RGBA8_DXT3:
    case GPU_RGBA8_DXT5:
      return GPU_FORMAT_COMPRESSED;
    default:
      return GPU_FORMAT_FLOAT;
  }
}

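/** Return the number of components (channels) of the texture format. */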
inline int to_component_len(eGPUTextureFormat format)
{
  switch (format) {
    case GPU_RGBA8:
    case GPU_RGBA8UI:
    case GPU_RGBA16F:
    case GPU_RGBA16:
    case GPU_RGBA32F:
    case GPU_SRGB8_A8:
      return 4;
    case GPU_RGB16F:
    case GPU_R11F_G11F_B10F:
      return 3;
    case GPU_RG8:
    case GPU_RG16:
    case GPU_RG16F:
    case GPU_RG16I:
    case GPU_RG16UI:
    case GPU_RG32F:
      return 2;
    default:
      return 1;
  }
}

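/** Return the size in bytes of one pixel of `data_format` data for a texture of `tex_format`. */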
inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
  switch (data_format) {
    case GPU_DATA_UNSIGNED_BYTE:
      return 1 * to_component_len(tex_format);
    case GPU_DATA_FLOAT:
    case GPU_DATA_INT:
    case GPU_DATA_UNSIGNED_INT:
      return 4 * to_component_len(tex_format);
    case GPU_DATA_UNSIGNED_INT_24_8:
    case GPU_DATA_10_11_11_REV:
      return 4;
    default:
      BLI_assert(!"Data format incorrect or unsupported\n");
      return 0;
  }
}

/* Definitely not complete; edit according to the GL specification. */
inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
  switch (tex_format) {
    case GPU_DEPTH_COMPONENT24:
    case GPU_DEPTH_COMPONENT16:
    case GPU_DEPTH_COMPONENT32F:
      return data_format == GPU_DATA_FLOAT;
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH32F_STENCIL8:
      return data_format == GPU_DATA_UNSIGNED_INT_24_8;
    case GPU_R8UI:
    case GPU_R16UI:
    case GPU_RG16UI:
    case GPU_R32UI:
      return data_format == GPU_DATA_UNSIGNED_INT;
    case GPU_RG16I:
    case GPU_R16I:
      return data_format == GPU_DATA_INT;
    case GPU_R8:
    case GPU_RG8:
    case GPU_RGBA8:
    case GPU_RGBA8UI:
    case GPU_SRGB8_A8:
      return ELEM(data_format, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT);
    case GPU_R11F_G11F_B10F:
      return ELEM(data_format, GPU_DATA_10_11_11_REV, GPU_DATA_FLOAT);
    default:
      return data_format == GPU_DATA_FLOAT;
  }
}

/* Definitely not complete; edit according to the GL specification. */
inline eGPUDataFormat to_data_format(eGPUTextureFormat tex_format)
{
  switch (tex_format) {
    case GPU_DEPTH_COMPONENT24:
    case GPU_DEPTH_COMPONENT16:
    case GPU_DEPTH_COMPONENT32F:
      return GPU_DATA_FLOAT;
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH32F_STENCIL8:
      return GPU_DATA_UNSIGNED_INT_24_8;
    case GPU_R8UI:
    case GPU_R16UI:
    case GPU_RG16UI:
    case GPU_R32UI:
      return GPU_DATA_UNSIGNED_INT;
    case GPU_RG16I:
    case GPU_R16I:
      return GPU_DATA_INT;
    case GPU_R8:
    case GPU_RG8:
    case GPU_RGBA8:
    case GPU_RGBA8UI:
    case GPU_SRGB8_A8:
      return GPU_DATA_UNSIGNED_BYTE;
    case GPU_R11F_G11F_B10F:
      return GPU_DATA_10_11_11_REV;
    default:
      return GPU_DATA_FLOAT;
  }
}

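/** Return the frame-buffer bits (depth, stencil or color) covered by the texture format. */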
inline eGPUFrameBufferBits to_framebuffer_bits(eGPUTextureFormat tex_format)
{
  switch (tex_format) {
    case GPU_DEPTH_COMPONENT24:
    case GPU_DEPTH_COMPONENT16:
    case GPU_DEPTH_COMPONENT32F:
      return GPU_DEPTH_BIT;
    case GPU_DEPTH24_STENCIL8:
    case GPU_DEPTH32F_STENCIL8:
      return GPU_DEPTH_BIT | GPU_STENCIL_BIT;
    default:
      return GPU_COLOR_BIT;
  }
}

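/** Return the texture format matching a single-attribute vertex format, for buffer textures. */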
static inline eGPUTextureFormat to_texture_format(const GPUVertFormat *format)
{
  if (format->attr_len > 1 || format->attr_len == 0) {
    BLI_assert(!"Incorrect vertex format for buffer texture");
    return GPU_DEPTH_COMPONENT24;
  }
  switch (format->attrs[0].comp_len) {
    case 1:
      switch (format->attrs[0].comp_type) {
        case GPU_COMP_I8:
          return GPU_R8I;
        case GPU_COMP_U8:
          return GPU_R8UI;
        case GPU_COMP_I16:
          return GPU_R16I;
        case GPU_COMP_U16:
          return GPU_R16UI;
        case GPU_COMP_I32:
          return GPU_R32I;
        case GPU_COMP_U32:
          return GPU_R32UI;
        case GPU_COMP_F32:
          return GPU_R32F;
        default:
          break;
      }
      break;
    case 2:
      switch (format->attrs[0].comp_type) {
        case GPU_COMP_I8:
          return GPU_RG8I;
        case GPU_COMP_U8:
          return GPU_RG8UI;
        case GPU_COMP_I16:
          return GPU_RG16I;
        case GPU_COMP_U16:
          return GPU_RG16UI;
        case GPU_COMP_I32:
          return GPU_RG32I;
        case GPU_COMP_U32:
          return GPU_RG32UI;
        case GPU_COMP_F32:
          return GPU_RG32F;
        default:
          break;
      }
      break;
    case 3:
      /* Not supported until GL 4.0 */
      break;
    case 4:
      switch (format->attrs[0].comp_type) {
        case GPU_COMP_I8:
          return GPU_RGBA8I;
        case GPU_COMP_U8:
          return GPU_RGBA8UI;
        case GPU_COMP_I16:
          return GPU_RGBA16I;
        case GPU_COMP_U16:
          return GPU_RGBA16UI;
        case GPU_COMP_I32:
          return GPU_RGBA32I;
        case GPU_COMP_U32:
          return GPU_RGBA32UI;
        case GPU_COMP_F32:
          return GPU_RGBA32F;
        default:
          break;
      }
      break;
    default:
      break;
  }
  BLI_assert(!"Unsupported vertex format for buffer texture");
  return GPU_DEPTH_COMPONENT24;
}

}  // namespace gpu
}  // namespace blender