1 // https://github.com/floooh/sokol/blob/master/sokol_gfx.h
2 // LICENSE: zlib/libpng, https://github.com/floooh/sokol/blob/master/LICENSE
3 
4 #ifndef SOKOL_GFX_INCLUDED
5 /*
6     sokol_gfx.h -- simple 3D API wrapper
7 
8     Project URL: https://github.com/floooh/sokol
9 
10     Do this:
11         #define SOKOL_IMPL
12     before you include this file in *one* C or C++ file to create the
13     implementation.
14 
15     In the same place define one of the following to select the rendering
16     backend:
17         #define SOKOL_GLCORE33
18         #define SOKOL_GLES2
19         #define SOKOL_GLES3
20         #define SOKOL_D3D11
21         #define SOKOL_METAL
22         #define SOKOL_WGPU
23         #define SOKOL_DUMMY_BACKEND
24 
25     I.e. for the GL 3.3 Core Profile it should look like this:
26 
27     #include ...
28     #include ...
29     #define SOKOL_IMPL
30     #define SOKOL_GLCORE33
31     #include "sokol_gfx.h"
32 
33     The dummy backend replaces the platform-specific backend code with empty
34     stub functions. This is useful for writing tests that need to run on the
35     command line.
36 
37     Optionally provide the following defines with your own implementations:
38 
39     SOKOL_ASSERT(c)     - your own assert macro (default: assert(c))
40     SOKOL_MALLOC(s)     - your own malloc function (default: malloc(s))
41     SOKOL_FREE(p)       - your own free function (default: free(p))
42     SOKOL_LOG(msg)      - your own logging function (default: puts(msg))
43     SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false))
44     SOKOL_API_DECL      - public function declaration prefix (default: extern)
45     SOKOL_API_IMPL      - public function implementation prefix (default: -)
46     SOKOL_TRACE_HOOKS   - enable trace hook callbacks (search below for TRACE HOOKS)
47 
48     If sokol_gfx.h is compiled as a DLL, define the following before
49     including the declaration or implementation:
50 
51     SOKOL_DLL
52 
53     On Windows, SOKOL_DLL will define SOKOL_API_DECL as __declspec(dllexport)
54     or __declspec(dllimport) as needed.
55 
56     If you want to compile without deprecated structs and functions,
57     define:
58 
59     SOKOL_NO_DEPRECATED
60 
61     API usage validation macros:
62 
63     SOKOL_VALIDATE_BEGIN()      - begin a validation block (default:_sg_validate_begin())
64     SOKOL_VALIDATE(cond, err)   - like assert but for API validation (default: _sg_validate(cond, err))
65     SOKOL_VALIDATE_END()        - end a validation block, return true if all checks in block passed (default: bool _sg_validate())
66 
67     If you don't want validation errors to be fatal, define SOKOL_VALIDATE_NON_FATAL,
68     be aware though that this may spam SOKOL_LOG messages.
69 
70     Optionally define the following to force debug checks and validations
71     even in release mode:
72 
73     SOKOL_DEBUG         - by default this is defined if _DEBUG is defined
74 
75 
76     sokol_gfx DOES NOT:
77     ===================
78     - create a window or the 3D-API context/device, you must do this
79       before sokol_gfx is initialized, and pass any required information
80       (like 3D device pointers) to the sokol_gfx initialization call
81 
82     - present the rendered frame, how this is done exactly usually depends
83       on how the window and 3D-API context/device was created
84 
85     - provide a unified shader language, instead 3D-API-specific shader
86       source-code or shader-bytecode must be provided
87 
88     For complete code examples using the various backend 3D-APIs, see:
89 
90         https://github.com/floooh/sokol-samples
91 
92     For an optional shader-cross-compile solution, see:
93 
94         https://github.com/floooh/sokol-tools/blob/master/docs/sokol-shdc.md
95 
96 
97     STEP BY STEP
98     ============
99     --- to initialize sokol_gfx, after creating a window and a 3D-API
100         context/device, call:
101 
102             sg_setup(const sg_desc*)
103 
104     --- create resource objects (at least buffers, shaders and pipelines,
105         and optionally images and passes):
106 
107             sg_buffer sg_make_buffer(const sg_buffer_desc*)
108             sg_image sg_make_image(const sg_image_desc*)
109             sg_shader sg_make_shader(const sg_shader_desc*)
110             sg_pipeline sg_make_pipeline(const sg_pipeline_desc*)
111             sg_pass sg_make_pass(const sg_pass_desc*)
112 
113     --- start rendering to the default frame buffer with:
114 
115             sg_begin_default_pass(const sg_pass_action* actions, int width, int height)
116 
117     --- or start rendering to an offscreen framebuffer with:
118 
119             sg_begin_pass(sg_pass pass, const sg_pass_action* actions)
120 
121     --- set the pipeline state for the next draw call with:
122 
123             sg_apply_pipeline(sg_pipeline pip)
124 
125     --- fill an sg_bindings struct with the resource bindings for the next
126         draw call (1..N vertex buffers, 0 or 1 index buffer, 0..N image objects
127         to use as textures each on the vertex-shader- and fragment-shader-stage)
128         and then call
129 
130             sg_apply_bindings(const sg_bindings* bindings)
131 
132         to update the resource bindings
133 
134     --- optionally update shader uniform data with:
135 
136             sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes)
137 
138     --- kick off a draw call with:
139 
140             sg_draw(int base_element, int num_elements, int num_instances)
141 
142     --- finish the current rendering pass with:
143 
144             sg_end_pass()
145 
146     --- when done with the current frame, call
147 
148             sg_commit()
149 
150     --- at the end of your program, shutdown sokol_gfx with:
151 
152             sg_shutdown()
153 
154     --- if you need to destroy resources before sg_shutdown(), call:
155 
156             sg_destroy_buffer(sg_buffer buf)
157             sg_destroy_image(sg_image img)
158             sg_destroy_shader(sg_shader shd)
159             sg_destroy_pipeline(sg_pipeline pip)
160             sg_destroy_pass(sg_pass pass)
161 
162     --- to set a new viewport rectangle, call
163 
164             sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left)
165 
166     --- to set a new scissor rect, call:
167 
168             sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left)
169 
170         both sg_apply_viewport() and sg_apply_scissor_rect() must be called
171         inside a rendering pass
172 
173         beginning a pass will reset the viewport to the size of the framebuffer used
174         in the new pass,
175 
176     --- to update (overwrite) the content of buffer and image resources, call:
177 
178             sg_update_buffer(sg_buffer buf, const void* ptr, int num_bytes)
179             sg_update_image(sg_image img, const sg_image_content* content)
180 
181         Buffers and images to be updated must have been created with
182         SG_USAGE_DYNAMIC or SG_USAGE_STREAM
183 
184         Only one update per frame is allowed for buffer and image resources.
185         The rationale is to have a simple countermeasure to avoid the CPU
186         scribbling over data the GPU is currently using, or the CPU having to
187         wait for the GPU
188 
189         Buffer and image updates can be partial, as long as a rendering
190         operation only references the valid (updated) data in the
191         buffer or image.
192 
193     --- to append a chunk of data to a buffer resource, call:
194 
195             int sg_append_buffer(sg_buffer buf, const void* ptr, int num_bytes)
196 
197         The difference to sg_update_buffer() is that sg_append_buffer()
198         can be called multiple times per frame to append new data to the
199         buffer piece by piece, optionally interleaved with draw calls referencing
200         the previously written data.
201 
202         sg_append_buffer() returns a byte offset to the start of the
203         written data, this offset can be assigned to
204         sg_bindings.vertex_buffer_offsets[n] or
205         sg_bindings.index_buffer_offset
206 
207         Code example:
208 
209         for (...) {
210             const void* data = ...;
211             const int num_bytes = ...;
212             int offset = sg_append_buffer(buf, data, num_bytes);
213             bindings.vertex_buffer_offsets[0] = offset;
214             sg_apply_pipeline(pip);
215             sg_apply_bindings(&bindings);
216             sg_apply_uniforms(...);
217             sg_draw(...);
218         }
219 
220         A buffer to be used with sg_append_buffer() must have been created
221         with SG_USAGE_DYNAMIC or SG_USAGE_STREAM.
222 
223         If the application appends more data to the buffer then fits into
224         the buffer, the buffer will go into the "overflow" state for the
225         rest of the frame.
226 
227         Any draw calls attempting to render an overflown buffer will be
228         silently dropped (in debug mode this will also result in a
229         validation error).
230 
231         You can also check manually if a buffer is in overflow-state by calling
232 
233             bool sg_query_buffer_overflow(sg_buffer buf)
234 
235         NOTE: Due to restrictions in underlying 3D-APIs, appended chunks of
236         data will be 4-byte aligned in the destination buffer. This means
237         that there will be gaps in index buffers containing 16-bit indices
238         when the number of indices in a call to sg_append_buffer() is
239         odd. This isn't a problem when each call to sg_append_buffer()
240         is associated with one draw call, but will be problematic when
241         a single indexed draw call spans several appended chunks of indices.
242 
243     --- to check at runtime for optional features, limits and pixelformat support,
244         call:
245 
246             sg_features sg_query_features()
247             sg_limits sg_query_limits()
248             sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt)
249 
250     --- if you need to call into the underlying 3D-API directly, you must call:
251 
252             sg_reset_state_cache()
253 
254         ...before calling sokol_gfx functions again
255 
256     --- you can inspect the original sg_desc structure handed to sg_setup()
257         by calling sg_query_desc(). This will return an sg_desc struct with
258         the default values patched in instead of any zero-initialized values
259 
260     --- you can inspect various internal resource attributes via:
261 
262             sg_buffer_info sg_query_buffer_info(sg_buffer buf)
263             sg_image_info sg_query_image_info(sg_image img)
264             sg_shader_info sg_query_shader_info(sg_shader shd)
265             sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip)
266             sg_pass_info sg_query_pass_info(sg_pass pass)
267 
268         ...please note that the returned info-structs are tied quite closely
269         to sokol_gfx.h internals, and may change more often than other
270         public API functions and structs.
271 
272     --- you can ask at runtime what backend sokol_gfx.h has been compiled
273         for, or whether the GLES3 backend had to fall back to GLES2 with:
274 
275             sg_backend sg_query_backend(void)
276 
277     --- you can query the default resource creation parameters through the functions
278 
279             sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc)
280             sg_image_desc sg_query_image_defaults(const sg_image_desc* desc)
281             sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc)
282             sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc)
283             sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc)
284 
285         These functions take a pointer to a desc structure which may contain
286         zero-initialized items for default values. These zero-init values
287         will be replaced with their concrete values in the returned desc
288         struct.
289 
290     ON INITIALIZATION:
291     ==================
292     When calling sg_setup(), a pointer to an sg_desc struct must be provided
293     which contains initialization options. These options provide two types
294     of information to sokol-gfx:
295 
296         (1) upper bounds and limits needed to allocate various internal
297             data structures:
298                 - the max number of resources of each type that can
299                   be alive at the same time, this is used for allocating
300                   internal pools
301                 - the max overall size of uniform data that can be
302                   updated per frame, including a worst-case alignment
303                   per uniform update (this worst-case alignment is 256 bytes)
304                 - the max size of all dynamic resource updates (sg_update_buffer,
305                   sg_append_buffer and sg_update_image) per frame
306                 - the max number of entries in the texture sampler cache
307                   (how many unique texture samplers can exist at the same time)
308             Not all of those limit values are used by all backends, but it is
309             good practice to provide them none-the-less.
310 
311         (2) 3D-API "context information" (sometimes also called "bindings"):
312             sokol_gfx.h doesn't create or initialize 3D API objects which are
313             closely related to the presentation layer (this includes the "rendering
314             device", the swapchain, and any objects which depend on the
315             swapchain). These API objects (or callback functions to obtain
316             them, if those objects might change between frames), must
317             be provided in a nested sg_context_desc struct inside the
318             sg_desc struct. If sokol_gfx.h is used together with
319             sokol_app.h, have a look at the sokol_glue.h header which provides
320             a convenience function to get a sg_context_desc struct filled out
321             with context information provided by sokol_app.h
322 
323     See the documentation block of the sg_desc struct below for more information.
324 
325     BACKEND-SPECIFIC TOPICS:
326     ========================
327     --- the GL backends need to know about the internal structure of uniform
328         blocks, and the texture sampler-name and -type:
329 
330             typedef struct {
331                 float mvp[16];      // model-view-projection matrix
332                 float offset0[2];   // some 2D vectors
333                 float offset1[2];
334                 float offset2[2];
335             } params_t;
336 
337             // uniform block structure and texture image definition in sg_shader_desc:
338             sg_shader_desc desc = {
339                 // uniform block description (size and internal structure)
340                 .vs.uniform_blocks[0] = {
341                     .size = sizeof(params_t),
342                     .uniforms = {
343                         [0] = { .name="mvp", .type=SG_UNIFORMTYPE_MAT4 },
344                         [1] = { .name="offset0", .type=SG_UNIFORMTYPE_VEC2 },
345                         ...
346                     }
347                 },
348                 // one texture on the fragment-shader-stage, GLES2/WebGL needs name and image type
349                 .fs.images[0] = { .name="tex", .type=SG_IMAGETYPE_ARRAY }
350                 ...
351             };
352 
353     --- the Metal and D3D11 backends only need to know the size of uniform blocks,
354         not their internal member structure, and they only need to know
355         the type of a texture sampler, not its name:
356 
357             sg_shader_desc desc = {
358                 .vs.uniform_blocks[0].size = sizeof(params_t),
359                 .fs.images[0].type = SG_IMAGETYPE_ARRAY,
360                 ...
361             };
362 
363     --- when creating a shader object, GLES2/WebGL need to know the vertex
364         attribute names as used in the vertex shader:
365 
366             sg_shader_desc desc = {
367                 .attrs = {
368                     [0] = { .name="position" },
369                     [1] = { .name="color1" }
370                 }
371             };
372 
373         The vertex attribute names provided when creating a shader will be
374         used later in sg_create_pipeline() for matching the vertex layout
375         to vertex shader inputs.
376 
377     --- on D3D11 you need to provide a semantic name and semantic index in the
378         shader description struct instead (see the D3D11 documentation on
379         D3D11_INPUT_ELEMENT_DESC for details):
380 
381             sg_shader_desc desc = {
382                 .attrs = {
383                     [0] = { .sem_name="POSITION", .sem_index=0 }
384                     [1] = { .sem_name="COLOR", .sem_index=1 }
385                 }
386             };
387 
388         The provided semantic information will be used later in sg_create_pipeline()
389         to match the vertex layout to vertex shader inputs.
390 
391     --- on D3D11, and when passing HLSL source code (instead of byte code) to shader
392         creation, you can optionally define the shader model targets on the vertex
393         stage:
394 
395             sg_shader_desc desc = {
396                 .vs = {
397                     ...
398                     .d3d11_target = "vs_5_0"
399                 },
400                 .fs = {
401                     ...
402                     .d3d11_target = "ps_5_0"
403                 }
404             };
405 
406         The default targets are "vs_4_0" and "ps_4_0". Note that those target names
407         are only used when compiling shaders from source. They are ignored when
408         creating a shader from bytecode.
409 
410     --- on Metal, GL 3.3 or GLES3/WebGL2, you don't need to provide an attribute
411         name or semantic name, since vertex attributes can be bound by their slot index
412         (this is mandatory in Metal, and optional in GL):
413 
414             sg_pipeline_desc desc = {
415                 .layout = {
416                     .attrs = {
417                         [0] = { .format=SG_VERTEXFORMAT_FLOAT3 },
418                         [1] = { .format=SG_VERTEXFORMAT_FLOAT4 }
419                     }
420                 }
421             };
422 
423     WORKING WITH CONTEXTS
424     =====================
425     sokol-gfx allows to switch between different rendering contexts and
426     associate resource objects with contexts. This is useful to
427     create GL applications that render into multiple windows.
428 
429     A rendering context keeps track of all resources created while
430     the context is active. When the context is destroyed, all resources
431     "belonging to the context" are destroyed as well.
432 
433     A default context will be created and activated implicitly in
434     sg_setup(), and destroyed in sg_shutdown(). So for a typical application
435     which *doesn't* use multiple contexts, nothing changes, and calling
436     the context functions isn't necessary.
437 
438     Three functions have been added to work with contexts:
439 
440     --- sg_context sg_setup_context():
441         This must be called once after a GL context has been created and
442         made active.
443 
444     --- void sg_activate_context(sg_context ctx)
445         This must be called after making a different GL context active.
446         Apart from 3D-API-specific actions, the call to sg_activate_context()
447         will internally call sg_reset_state_cache().
448 
449     --- void sg_discard_context(sg_context ctx)
450         This must be called right before a GL context is destroyed and
451         will destroy all resources associated with the context (that
452         have been created while the context was active) The GL context must be
453         active at the time sg_discard_context(sg_context ctx) is called.
454 
455     Also note that resources (buffers, images, shaders and pipelines) must
456     only be used or destroyed while the same GL context is active that
457     was also active while the resource was created (an exception is
458     resource sharing on GL, such resources can be used while
459     another context is active, but must still be destroyed under
460     the same context that was active during creation).
461 
462     For more information, check out the multiwindow-glfw sample:
463 
464     https://github.com/floooh/sokol-samples/blob/master/glfw/multiwindow-glfw.c
465 
466     TRACE HOOKS:
467     ============
468     sokol_gfx.h optionally allows to install "trace hook" callbacks for
469     each public API functions. When a public API function is called, and
470     a trace hook callback has been installed for this function, the
471     callback will be invoked with the parameters and result of the function.
472     This is useful for things like debugging- and profiling-tools, or
473     keeping track of resource creation and destruction.
474 
475     To use the trace hook feature:
476 
477     --- Define SOKOL_TRACE_HOOKS before including the implementation.
478 
479     --- Setup an sg_trace_hooks structure with your callback function
480         pointers (keep all function pointers you're not interested
481         in zero-initialized), optionally set the user_data member
482         in the sg_trace_hooks struct.
483 
484     --- Install the trace hooks by calling sg_install_trace_hooks(),
485         the return value of this function is another sg_trace_hooks
486         struct which contains the previous set of trace hooks.
487         You should keep this struct around, and call those previous
488         functions pointers from your own trace callbacks for proper
489         chaining.
490 
491     As an example of how trace hooks are used, have a look at the
492     imgui/sokol_gfx_imgui.h header which implements a realtime
493     debugging UI for sokol_gfx.h on top of Dear ImGui.
494 
495     A NOTE ON PORTABLE PACKED VERTEX FORMATS:
496     =========================================
497     There are two things to consider when using packed
498     vertex formats like UBYTE4, SHORT2, etc which need to work
499     across all backends:
500 
501     - D3D11 can only convert *normalized* vertex formats to
502       floating point during vertex fetch, normalized formats
503       have a trailing 'N', and are "normalized" to a range
504       -1.0..+1.0 (for the signed formats) or 0.0..1.0 (for the
505       unsigned formats):
506 
507         - SG_VERTEXFORMAT_BYTE4N
508         - SG_VERTEXFORMAT_UBYTE4N
509         - SG_VERTEXFORMAT_SHORT2N
510         - SG_VERTEXFORMAT_USHORT2N
511         - SG_VERTEXFORMAT_SHORT4N
512         - SG_VERTEXFORMAT_USHORT4N
513 
514       D3D11 will not convert *non-normalized* vertex formats to floating point
515       vertex shader inputs, those can only be used with the *ivecn* vertex shader
516       input types when D3D11 is used as backend (GL and Metal can use both formats)
517 
518         - SG_VERTEXFORMAT_BYTE4,
519         - SG_VERTEXFORMAT_UBYTE4
520         - SG_VERTEXFORMAT_SHORT2
521         - SG_VERTEXFORMAT_SHORT4
522 
523     - WebGL/GLES2 cannot use integer vertex shader inputs (int or ivecn)
524 
525     - SG_VERTEXFORMAT_UINT10_N2 is not supported on WebGL/GLES2
526 
527     So for a vertex input layout which works on all platforms, only use the following
528     vertex formats, and if needed "expand" the normalized vertex shader
529     inputs in the vertex shader by multiplying with 127.0, 255.0, 32767.0 or
530     65535.0:
531 
532         - SG_VERTEXFORMAT_FLOAT,
533         - SG_VERTEXFORMAT_FLOAT2,
534         - SG_VERTEXFORMAT_FLOAT3,
535         - SG_VERTEXFORMAT_FLOAT4,
536         - SG_VERTEXFORMAT_BYTE4N,
537         - SG_VERTEXFORMAT_UBYTE4N,
538         - SG_VERTEXFORMAT_SHORT2N,
539         - SG_VERTEXFORMAT_USHORT2N
540         - SG_VERTEXFORMAT_SHORT4N,
541         - SG_VERTEXFORMAT_USHORT4N
542 
543     TODO:
544     ====
545     - talk about asynchronous resource creation
546 
547     zlib/libpng license
548 
549     Copyright (c) 2018 Andre Weissflog
550 
551     This software is provided 'as-is', without any express or implied warranty.
552     In no event will the authors be held liable for any damages arising from the
553     use of this software.
554 
555     Permission is granted to anyone to use this software for any purpose,
556     including commercial applications, and to alter it and redistribute it
557     freely, subject to the following restrictions:
558 
559         1. The origin of this software must not be misrepresented; you must not
560         claim that you wrote the original software. If you use this software in a
561         product, an acknowledgment in the product documentation would be
562         appreciated but is not required.
563 
564         2. Altered source versions must be plainly marked as such, and must not
565         be misrepresented as being the original software.
566 
567         3. This notice may not be removed or altered from any source
568         distribution.
569 */
570 #define SOKOL_GFX_INCLUDED (1)
571 #include <stdint.h>
572 #include <stdbool.h>
573 
574 #ifndef SOKOL_API_DECL
575 #if defined(_WIN32) && defined(SOKOL_DLL) && defined(SOKOL_IMPL)
576 #define SOKOL_API_DECL __declspec(dllexport)
577 #elif defined(_WIN32) && defined(SOKOL_DLL)
578 #define SOKOL_API_DECL __declspec(dllimport)
579 #else
580 #define SOKOL_API_DECL extern
581 #endif
582 #endif
583 
584 #ifdef __cplusplus
585 extern "C" {
586 #endif
587 
588 #ifdef _MSC_VER
589 #pragma warning(push)
590 #pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union */
591 #endif
592 
593 /*
594     Resource id typedefs:
595 
596     sg_buffer:      vertex- and index-buffers
597     sg_image:       textures and render targets
598     sg_shader:      vertex- and fragment-shaders, uniform blocks
599     sg_pipeline:    associated shader and vertex-layouts, and render states
600     sg_pass:        a bundle of render targets and actions on them
601     sg_context:     a 'context handle' for switching between 3D-API contexts
602 
603     Instead of pointers, resource creation functions return a 32-bit
604     number which uniquely identifies the resource object.
605 
606     The 32-bit resource id is split into a 16-bit pool index in the lower bits,
607     and a 16-bit 'unique counter' in the upper bits. The index allows fast
608     pool lookups, and combined with the unique-mask it allows to detect
609     'dangling accesses' (trying to use an object which no longer exists, and
610     its pool slot has been reused for a new object)
611 
612     The resource ids are wrapped into a struct so that the compiler
613     can complain when the wrong resource type is used.
614 */
615 typedef struct sg_buffer   { uint32_t id; } sg_buffer;
616 typedef struct sg_image    { uint32_t id; } sg_image;
617 typedef struct sg_shader   { uint32_t id; } sg_shader;
618 typedef struct sg_pipeline { uint32_t id; } sg_pipeline;
619 typedef struct sg_pass     { uint32_t id; } sg_pass;
620 typedef struct sg_context  { uint32_t id; } sg_context;
621 
622 /*
623     various compile-time constants
624 
625     FIXME: it may make sense to convert some of those into defines so
626     that the user code can override them.
627 */
628 enum {
629     SG_INVALID_ID = 0,
630     SG_NUM_SHADER_STAGES = 2,
631     SG_NUM_INFLIGHT_FRAMES = 2,
632     SG_MAX_COLOR_ATTACHMENTS = 4,
633     SG_MAX_SHADERSTAGE_BUFFERS = 8,
634     SG_MAX_SHADERSTAGE_IMAGES = 12,
635     SG_MAX_SHADERSTAGE_UBS = 4,
636     SG_MAX_UB_MEMBERS = 16,
637     SG_MAX_VERTEX_ATTRIBUTES = 16,      /* NOTE: actual max vertex attrs can be less on GLES2, see sg_limits! */
638     SG_MAX_MIPMAPS = 16,
639     SG_MAX_TEXTUREARRAY_LAYERS = 128
640 };
641 
642 /*
643     sg_backend
644 
645     The active 3D-API backend, use the function sg_query_backend()
646     to get the currently active backend.
647 
648     The returned value corresponds with the compile-time define to select
649     a backend, with the only exception of SOKOL_GLES3: this may
650     return SG_BACKEND_GLES2 if the backend has to fallback to GLES2 mode
651     because GLES3 isn't supported.
652 */
653 typedef enum sg_backend {
654     SG_BACKEND_GLCORE33,
655     SG_BACKEND_GLES2,
656     SG_BACKEND_GLES3,
657     SG_BACKEND_D3D11,
658     SG_BACKEND_METAL_IOS,
659     SG_BACKEND_METAL_MACOS,
660     SG_BACKEND_METAL_SIMULATOR,
661     SG_BACKEND_WGPU,
662     SG_BACKEND_DUMMY,
663 } sg_backend;
664 
665 /*
666     sg_pixel_format
667 
668     sokol_gfx.h basically uses the same pixel formats as WebGPU, since these
669     are supported on most newer GPUs. GLES2 and WebGL have a much smaller
670     subset of available pixel formats. Call sg_query_pixelformat() to check
671     at runtime if a pixel format supports the desired features.
672 
673     A pixelformat name consists of three parts:
674 
675         - components (R, RG, RGB or RGBA)
676         - bit width per component (8, 16 or 32)
677         - component data type:
678             - unsigned normalized (no postfix)
679             - signed normalized (SN postfix)
680             - unsigned integer (UI postfix)
681             - signed integer (SI postfix)
682             - float (F postfix)
683 
684     Not all pixel formats can be used for everything, call sg_query_pixelformat()
685     to inspect the capabilities of a given pixelformat. The function returns
686     an sg_pixelformat_info struct with the following bool members:
687 
688         - sample: the pixelformat can be sampled as texture at least with
689                   nearest filtering
690         - filter: the pixelformat can be sampled as texture with linear
691                   filtering
692         - render: the pixelformat can be used for render targets
693         - blend:  blending is supported when using the pixelformat for
694                   render targets
695         - msaa:   multisample-antialiasing is supported when using the
696                   pixelformat for render targets
697         - depth:  the pixelformat can be used for depth-stencil attachments
698 
699     When targeting GLES2/WebGL, the only safe formats to use
700     as texture are SG_PIXELFORMAT_R8 and SG_PIXELFORMAT_RGBA8. For rendering
701     in GLES2/WebGL, only SG_PIXELFORMAT_RGBA8 is safe. All other formats
702     must be checked via sg_query_pixelformat().
703 
704     The default pixel format for texture images is SG_PIXELFORMAT_RGBA8.
705 
706     The default pixel format for render target images is platform-dependent:
707         - for Metal and D3D11 it is SG_PIXELFORMAT_BGRA8
708         - for GL backends it is SG_PIXELFORMAT_RGBA8
709 
710     This is mainly because of the default framebuffer which is setup outside
711     of sokol_gfx.h. On some backends, using BGRA for the default frame buffer
712     allows more efficient frame flips. For your own offscreen-render-targets,
713     use whatever renderable pixel format is convenient for you.
714 */
/* Suffix conventions: UI = unsigned integer, SI = signed integer,
   SN = signed-normalized, F = float; no suffix = unsigned-normalized. */
typedef enum sg_pixel_format {
    _SG_PIXELFORMAT_DEFAULT,    /* value 0 reserved for default-init */
    SG_PIXELFORMAT_NONE,

    /* 8-bit per pixel formats */
    SG_PIXELFORMAT_R8,
    SG_PIXELFORMAT_R8SN,
    SG_PIXELFORMAT_R8UI,
    SG_PIXELFORMAT_R8SI,

    /* 16-bit per pixel formats */
    SG_PIXELFORMAT_R16,
    SG_PIXELFORMAT_R16SN,
    SG_PIXELFORMAT_R16UI,
    SG_PIXELFORMAT_R16SI,
    SG_PIXELFORMAT_R16F,
    SG_PIXELFORMAT_RG8,
    SG_PIXELFORMAT_RG8SN,
    SG_PIXELFORMAT_RG8UI,
    SG_PIXELFORMAT_RG8SI,

    /* 32-bit per pixel formats */
    SG_PIXELFORMAT_R32UI,
    SG_PIXELFORMAT_R32SI,
    SG_PIXELFORMAT_R32F,
    SG_PIXELFORMAT_RG16,
    SG_PIXELFORMAT_RG16SN,
    SG_PIXELFORMAT_RG16UI,
    SG_PIXELFORMAT_RG16SI,
    SG_PIXELFORMAT_RG16F,
    SG_PIXELFORMAT_RGBA8,       /* default format for texture images */
    SG_PIXELFORMAT_RGBA8SN,
    SG_PIXELFORMAT_RGBA8UI,
    SG_PIXELFORMAT_RGBA8SI,
    SG_PIXELFORMAT_BGRA8,       /* default render target format on Metal/D3D11 */
    SG_PIXELFORMAT_RGB10A2,
    SG_PIXELFORMAT_RG11B10F,

    /* 64-bit per pixel formats */
    SG_PIXELFORMAT_RG32UI,
    SG_PIXELFORMAT_RG32SI,
    SG_PIXELFORMAT_RG32F,
    SG_PIXELFORMAT_RGBA16,
    SG_PIXELFORMAT_RGBA16SN,
    SG_PIXELFORMAT_RGBA16UI,
    SG_PIXELFORMAT_RGBA16SI,
    SG_PIXELFORMAT_RGBA16F,

    /* 128-bit per pixel formats */
    SG_PIXELFORMAT_RGBA32UI,
    SG_PIXELFORMAT_RGBA32SI,
    SG_PIXELFORMAT_RGBA32F,

    /* depth / depth-stencil attachment formats */
    SG_PIXELFORMAT_DEPTH,
    SG_PIXELFORMAT_DEPTH_STENCIL,

    /* block-compressed formats (BC = desktop, PVRTC/ETC2 = mobile) */
    SG_PIXELFORMAT_BC1_RGBA,
    SG_PIXELFORMAT_BC2_RGBA,
    SG_PIXELFORMAT_BC3_RGBA,
    SG_PIXELFORMAT_BC4_R,
    SG_PIXELFORMAT_BC4_RSN,
    SG_PIXELFORMAT_BC5_RG,
    SG_PIXELFORMAT_BC5_RGSN,
    SG_PIXELFORMAT_BC6H_RGBF,
    SG_PIXELFORMAT_BC6H_RGBUF,
    SG_PIXELFORMAT_BC7_RGBA,
    SG_PIXELFORMAT_PVRTC_RGB_2BPP,
    SG_PIXELFORMAT_PVRTC_RGB_4BPP,
    SG_PIXELFORMAT_PVRTC_RGBA_2BPP,
    SG_PIXELFORMAT_PVRTC_RGBA_4BPP,
    SG_PIXELFORMAT_ETC2_RGB8,
    SG_PIXELFORMAT_ETC2_RGB8A1,
    SG_PIXELFORMAT_ETC2_RGBA8,
    SG_PIXELFORMAT_ETC2_RG11,
    SG_PIXELFORMAT_ETC2_RG11SN,

    _SG_PIXELFORMAT_NUM,                        /* number of valid enum values (internal) */
    _SG_PIXELFORMAT_FORCE_U32 = 0x7FFFFFFF      /* force 32-bit underlying enum type */
} sg_pixel_format;
789 
790 /*
791     Runtime information about a pixel format, returned
792     by sg_query_pixelformat().
793 */
/* Per-pixel-format capability flags, filled by sg_query_pixelformat().
   All flags are backend/platform dependent and must be checked at runtime. */
typedef struct sg_pixelformat_info {
    bool sample;        /* pixel format can be sampled in shaders (at least nearest filtering) */
    bool filter;        /* pixel format can be sampled with linear filtering */
    bool render;        /* pixel format can be used as render target */
    bool blend;         /* alpha-blending is supported when used as render target */
    bool msaa;          /* pixel format can be used as MSAA render target */
    bool depth;         /* pixel format is a depth(-stencil) format */
} sg_pixelformat_info;
802 
803 /*
804     Runtime information about available optional features,
805     returned by sg_query_features()
806 */
/* Optional-feature flags for the active backend, filled by sg_query_features().
   Check these at runtime instead of hardcoding per-backend assumptions. */
typedef struct sg_features {
    bool instancing;                /* hardware instancing supported */
    bool origin_top_left;           /* framebuffer and texture origin is in top left corner */
    bool multiple_render_targets;   /* offscreen render passes can have multiple render targets attached */
    bool msaa_render_targets;       /* offscreen render passes support MSAA antialiasing */
    bool imagetype_3d;              /* creation of SG_IMAGETYPE_3D images is supported */
    bool imagetype_array;           /* creation of SG_IMAGETYPE_ARRAY images is supported */
    bool image_clamp_to_border;     /* border color and clamp-to-border UV-wrap mode is supported */
} sg_features;
816 
817 /*
    Runtime information about resource limits, returned by sg_query_limits()
819 */
/* Backend/device-dependent resource limits, filled at runtime. */
typedef struct sg_limits {
    uint32_t max_image_size_2d;         /* max width/height of SG_IMAGETYPE_2D images */
    uint32_t max_image_size_cube;       /* max width/height of SG_IMAGETYPE_CUBE images */
    uint32_t max_image_size_3d;         /* max width/height/depth of SG_IMAGETYPE_3D images */
    uint32_t max_image_size_array;      /* max width/height of SG_IMAGETYPE_ARRAY images */
    uint32_t max_image_array_layers;    /* max number of layers in SG_IMAGETYPE_ARRAY images */
    uint32_t max_vertex_attrs;          /* <= SG_MAX_VERTEX_ATTRIBUTES (only on some GLES2 impls) */
} sg_limits;
828 
829 /*
830     sg_resource_state
831 
832     The current state of a resource in its resource pool.
833     Resources start in the INITIAL state, which means the
834     pool slot is unoccupied and can be allocated. When a resource is
835     created, first an id is allocated, and the resource pool slot
836     is set to state ALLOC. After allocation, the resource is
837     initialized, which may result in the VALID or FAILED state. The
838     reason why allocation and initialization are separate is because
839     some resource types (e.g. buffers and images) might be asynchronously
840     initialized by the user application. If a resource which is not
841     in the VALID state is attempted to be used for rendering, rendering
842     operations will silently be dropped.
843 
844     The special INVALID state is returned in sg_query_xxx_state() if no
845     resource object exists for the provided resource id.
846 */
typedef enum sg_resource_state {
    SG_RESOURCESTATE_INITIAL,   /* pool slot unoccupied, can be allocated */
    SG_RESOURCESTATE_ALLOC,     /* id allocated, resource not yet initialized */
    SG_RESOURCESTATE_VALID,     /* initialization succeeded, resource usable */
    SG_RESOURCESTATE_FAILED,    /* initialization failed */
    SG_RESOURCESTATE_INVALID,   /* returned by sg_query_xxx_state() for unknown resource ids */
    _SG_RESOURCESTATE_FORCE_U32 = 0x7FFFFFFF    /* force 32-bit underlying enum type */
} sg_resource_state;
855 
856 /*
857     sg_usage
858 
859     A resource usage hint describing the update strategy of
860     buffers and images. This is used in the sg_buffer_desc.usage
861     and sg_image_desc.usage members when creating buffers
862     and images:
863 
864     SG_USAGE_IMMUTABLE:     the resource will never be updated with
865                             new data, instead the content of the
866                             resource must be provided on creation
867     SG_USAGE_DYNAMIC:       the resource will be updated infrequently
868                             with new data (this could range from "once
869                             after creation", to "quite often but not
870                             every frame")
871     SG_USAGE_STREAM:        the resource will be updated each frame
872                             with new content
873 
874     The rendering backends use this hint to prevent that the
875     CPU needs to wait for the GPU when attempting to update
876     a resource that might be currently accessed by the GPU.
877 
878     Resource content is updated with the function sg_update_buffer() for
879     buffer objects, and sg_update_image() for image objects. Only
880     one update is allowed per frame and resource object. The
881     application must update all data required for rendering (this
882     means that the update data can be smaller than the resource size,
883     if only a part of the overall resource size is used for rendering,
884     you only need to make sure that the data that *is* used is valid).
885 
886     The default usage is SG_USAGE_IMMUTABLE.
887 */
typedef enum sg_usage {
    _SG_USAGE_DEFAULT,      /* value 0 reserved for default-init */
    SG_USAGE_IMMUTABLE,     /* never updated; content provided at creation (the default) */
    SG_USAGE_DYNAMIC,       /* updated infrequently with new data */
    SG_USAGE_STREAM,        /* updated each frame with new content */
    _SG_USAGE_NUM,
    _SG_USAGE_FORCE_U32 = 0x7FFFFFFF
} sg_usage;
896 
897 /*
898     sg_buffer_type
899 
900     This indicates whether a buffer contains vertex- or index-data,
901     used in the sg_buffer_desc.type member when creating a buffer.
902 
903     The default value is SG_BUFFERTYPE_VERTEXBUFFER.
904 */
typedef enum sg_buffer_type {
    _SG_BUFFERTYPE_DEFAULT,         /* value 0 reserved for default-init */
    SG_BUFFERTYPE_VERTEXBUFFER,     /* buffer holds vertex data (the default) */
    SG_BUFFERTYPE_INDEXBUFFER,      /* buffer holds index data */
    _SG_BUFFERTYPE_NUM,
    _SG_BUFFERTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_buffer_type;
912 
913 /*
914     sg_index_type
915 
916     Indicates whether indexed rendering (fetching vertex-indices from an
917     index buffer) is used, and if yes, the index data type (16- or 32-bits).
918     This is used in the sg_pipeline_desc.index_type member when creating a
919     pipeline object.
920 
921     The default index type is SG_INDEXTYPE_NONE.
922 */
typedef enum sg_index_type {
    _SG_INDEXTYPE_DEFAULT,   /* value 0 reserved for default-init */
    SG_INDEXTYPE_NONE,       /* non-indexed rendering (the default) */
    SG_INDEXTYPE_UINT16,     /* 16-bit indices */
    SG_INDEXTYPE_UINT32,     /* 32-bit indices */
    _SG_INDEXTYPE_NUM,
    _SG_INDEXTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_index_type;
931 
932 /*
933     sg_image_type
934 
935     Indicates the basic type of an image object (2D-texture, cubemap,
936     3D-texture or 2D-array-texture). 3D- and array-textures are not supported
937     on the GLES2/WebGL backend (use sg_query_features().imagetype_3d and
938     sg_query_features().imagetype_array to check for support). The image type
939     is used in the sg_image_desc.type member when creating an image, and
940     in sg_shader_image_desc when describing a shader's texture sampler binding.
941 
942     The default image type when creating an image is SG_IMAGETYPE_2D.
943 */
typedef enum sg_image_type {
    _SG_IMAGETYPE_DEFAULT,  /* value 0 reserved for default-init */
    SG_IMAGETYPE_2D,        /* 2D texture (the default) */
    SG_IMAGETYPE_CUBE,      /* cubemap texture */
    SG_IMAGETYPE_3D,        /* 3D texture (not available on GLES2/WebGL, check sg_query_features()) */
    SG_IMAGETYPE_ARRAY,     /* 2D-array texture (not available on GLES2/WebGL, check sg_query_features()) */
    _SG_IMAGETYPE_NUM,
    _SG_IMAGETYPE_FORCE_U32 = 0x7FFFFFFF
} sg_image_type;
953 
954 /*
955     sg_sampler_type
956 
957     Indicates the basic data type of a shader's texture sampler which
    can be float, unsigned integer or signed integer. The sampler
959     type is used in the sg_shader_image_desc to describe the
960     sampler type of a shader's texture sampler binding.
961 
962     The default sampler type is SG_SAMPLERTYPE_FLOAT.
963 */
/* Basic data type of a shader's texture sampler, used in sg_shader_image_desc. */
typedef enum sg_sampler_type {
    _SG_SAMPLERTYPE_DEFAULT,  /* value 0 reserved for default-init */
    SG_SAMPLERTYPE_FLOAT,     /* float sampler (the default) */
    SG_SAMPLERTYPE_SINT,      /* signed-integer sampler */
    SG_SAMPLERTYPE_UINT,      /* unsigned-integer sampler */
    _SG_SAMPLERTYPE_NUM,
    /* every other enum in this header carries this sentinel to pin the
       implementation-defined underlying enum type to 32 bits; without it
       this enum's size could differ from its siblings across compilers */
    _SG_SAMPLERTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_sampler_type;
970 
971 /*
972     sg_cube_face
973 
974     The cubemap faces. Use these as indices in the sg_image_desc.content
975     array.
976 */
/* Cubemap face indices, used as first index into sg_image_content.subimage.
   No _DEFAULT member: these are array indices starting at 0, not a config value. */
typedef enum sg_cube_face {
    SG_CUBEFACE_POS_X,
    SG_CUBEFACE_NEG_X,
    SG_CUBEFACE_POS_Y,
    SG_CUBEFACE_NEG_Y,
    SG_CUBEFACE_POS_Z,
    SG_CUBEFACE_NEG_Z,
    SG_CUBEFACE_NUM,        /* number of faces (6), used as array dimension */
    _SG_CUBEFACE_FORCE_U32 = 0x7FFFFFFF
} sg_cube_face;
987 
988 /*
989     sg_shader_stage
990 
991     There are 2 shader stages: vertex- and fragment-shader-stage.
992     Each shader stage consists of:
993 
994     - one slot for a shader function (provided as source- or byte-code)
995     - SG_MAX_SHADERSTAGE_UBS slots for uniform blocks
996     - SG_MAX_SHADERSTAGE_IMAGES slots for images used as textures by
997       the shader function
998 */
/* Shader stage indices; also used to index per-stage resource arrays. */
typedef enum sg_shader_stage {
    SG_SHADERSTAGE_VS,      /* vertex shader stage */
    SG_SHADERSTAGE_FS,      /* fragment shader stage */
    _SG_SHADERSTAGE_FORCE_U32 = 0x7FFFFFFF
} sg_shader_stage;
1004 
1005 /*
1006     sg_primitive_type
1007 
1008     This is the common subset of 3D primitive types supported across all 3D
1009     APIs. This is used in the sg_pipeline_desc.primitive_type member when
1010     creating a pipeline object.
1011 
1012     The default primitive type is SG_PRIMITIVETYPE_TRIANGLES.
1013 */
/* Common subset of primitive types supported across all backend 3D APIs. */
typedef enum sg_primitive_type {
    _SG_PRIMITIVETYPE_DEFAULT,  /* value 0 reserved for default-init */
    SG_PRIMITIVETYPE_POINTS,
    SG_PRIMITIVETYPE_LINES,
    SG_PRIMITIVETYPE_LINE_STRIP,
    SG_PRIMITIVETYPE_TRIANGLES,         /* the default */
    SG_PRIMITIVETYPE_TRIANGLE_STRIP,
    _SG_PRIMITIVETYPE_NUM,
    _SG_PRIMITIVETYPE_FORCE_U32 = 0x7FFFFFFF
} sg_primitive_type;
1024 
1025 /*
1026     sg_filter
1027 
1028     The filtering mode when sampling a texture image. This is
1029     used in the sg_image_desc.min_filter and sg_image_desc.mag_filter
1030     members when creating an image object.
1031 
1032     The default filter mode is SG_FILTER_NEAREST.
1033 */
/* Texture sampling filter modes for sg_image_desc.min_filter/.mag_filter.
   The _MIPMAP_ variants select between mipmap levels and only make sense
   as minification filters. */
typedef enum sg_filter {
    _SG_FILTER_DEFAULT, /* value 0 reserved for default-init */
    SG_FILTER_NEAREST,  /* the default */
    SG_FILTER_LINEAR,
    SG_FILTER_NEAREST_MIPMAP_NEAREST,
    SG_FILTER_NEAREST_MIPMAP_LINEAR,
    SG_FILTER_LINEAR_MIPMAP_NEAREST,
    SG_FILTER_LINEAR_MIPMAP_LINEAR,
    _SG_FILTER_NUM,
    _SG_FILTER_FORCE_U32 = 0x7FFFFFFF
} sg_filter;
1045 
1046 /*
1047     sg_wrap
1048 
1049     The texture coordinates wrapping mode when sampling a texture
1050     image. This is used in the sg_image_desc.wrap_u, .wrap_v
1051     and .wrap_w members when creating an image.
1052 
1053     The default wrap mode is SG_WRAP_REPEAT.
1054 
1055     NOTE: SG_WRAP_CLAMP_TO_BORDER is not supported on all backends
1056     and platforms. To check for support, call sg_query_features()
1057     and check the "clamp_to_border" boolean in the returned
1058     sg_features struct.
1059 
1060     Platforms which don't support SG_WRAP_CLAMP_TO_BORDER will silently fall back
1061     to SG_WRAP_CLAMP_TO_EDGE without a validation error.
1062 
1063     Platforms which support clamp-to-border are:
1064 
1065         - all desktop GL platforms
1066         - Metal on macOS
1067         - D3D11
1068 
1069     Platforms which do not support clamp-to-border:
1070 
1071         - GLES2/3 and WebGL/WebGL2
1072         - Metal on iOS
1073 */
typedef enum sg_wrap {
    _SG_WRAP_DEFAULT,   /* value 0 reserved for default-init */
    SG_WRAP_REPEAT,     /* the default */
    SG_WRAP_CLAMP_TO_EDGE,
    SG_WRAP_CLAMP_TO_BORDER,    /* not supported everywhere, check sg_query_features(); falls back to CLAMP_TO_EDGE */
    SG_WRAP_MIRRORED_REPEAT,
    _SG_WRAP_NUM,
    _SG_WRAP_FORCE_U32 = 0x7FFFFFFF
} sg_wrap;
1083 
1084 /*
1085     sg_border_color
1086 
1087     The border color to use when sampling a texture, and the UV wrap
1088     mode is SG_WRAP_CLAMP_TO_BORDER.
1089 
1090     The default border color is SG_BORDERCOLOR_OPAQUE_BLACK
1091 */
/* Border color used with SG_WRAP_CLAMP_TO_BORDER. */
typedef enum sg_border_color {
    _SG_BORDERCOLOR_DEFAULT,    /* value 0 reserved for default-init */
    SG_BORDERCOLOR_TRANSPARENT_BLACK,
    SG_BORDERCOLOR_OPAQUE_BLACK,    /* the default */
    SG_BORDERCOLOR_OPAQUE_WHITE,
    _SG_BORDERCOLOR_NUM,
    _SG_BORDERCOLOR_FORCE_U32 = 0x7FFFFFFF
} sg_border_color;
1100 
1101 /*
1102     sg_vertex_format
1103 
1104     The data type of a vertex component. This is used to describe
1105     the layout of vertex data when creating a pipeline object.
1106 */
/* Data type of a vertex component in a pipeline's vertex layout.
   The 'N' suffix marks normalized integer formats (mapped to float range in the shader). */
typedef enum sg_vertex_format {
    SG_VERTEXFORMAT_INVALID,    /* value 0: vertex attribute slot unused */
    SG_VERTEXFORMAT_FLOAT,
    SG_VERTEXFORMAT_FLOAT2,
    SG_VERTEXFORMAT_FLOAT3,
    SG_VERTEXFORMAT_FLOAT4,
    SG_VERTEXFORMAT_BYTE4,
    SG_VERTEXFORMAT_BYTE4N,
    SG_VERTEXFORMAT_UBYTE4,
    SG_VERTEXFORMAT_UBYTE4N,
    SG_VERTEXFORMAT_SHORT2,
    SG_VERTEXFORMAT_SHORT2N,
    SG_VERTEXFORMAT_USHORT2N,
    SG_VERTEXFORMAT_SHORT4,
    SG_VERTEXFORMAT_SHORT4N,
    SG_VERTEXFORMAT_USHORT4N,
    SG_VERTEXFORMAT_UINT10_N2,  /* packed: 3x 10-bit normalized + 2-bit component */
    _SG_VERTEXFORMAT_NUM,
    _SG_VERTEXFORMAT_FORCE_U32 = 0x7FFFFFFF
} sg_vertex_format;
1127 
1128 /*
1129     sg_vertex_step
1130 
1131     Defines whether the input pointer of a vertex input stream is advanced
1132     'per vertex' or 'per instance'. The default step-func is
1133     SG_VERTEXSTEP_PER_VERTEX. SG_VERTEXSTEP_PER_INSTANCE is used with
1134     instanced-rendering.
1135 
1136     The vertex-step is part of the vertex-layout definition
1137     when creating pipeline objects.
1138 */
typedef enum sg_vertex_step {
    _SG_VERTEXSTEP_DEFAULT,     /* value 0 reserved for default-init */
    SG_VERTEXSTEP_PER_VERTEX,   /* advance input pointer per vertex (the default) */
    SG_VERTEXSTEP_PER_INSTANCE, /* advance input pointer per instance (instanced rendering) */
    _SG_VERTEXSTEP_NUM,
    _SG_VERTEXSTEP_FORCE_U32 = 0x7FFFFFFF
} sg_vertex_step;
1146 
1147 /*
1148     sg_uniform_type
1149 
1150     The data type of a uniform block member. This is used to
1151     describe the internal layout of uniform blocks when creating
1152     a shader object.
1153 */
/* Data type of a uniform block member, describing uniform block layout
   in sg_shader_desc. */
typedef enum sg_uniform_type {
    SG_UNIFORMTYPE_INVALID,     /* value 0: uniform slot unused */
    SG_UNIFORMTYPE_FLOAT,
    SG_UNIFORMTYPE_FLOAT2,
    SG_UNIFORMTYPE_FLOAT3,
    SG_UNIFORMTYPE_FLOAT4,
    SG_UNIFORMTYPE_MAT4,        /* 4x4 float matrix */
    _SG_UNIFORMTYPE_NUM,
    _SG_UNIFORMTYPE_FORCE_U32 = 0x7FFFFFFF
} sg_uniform_type;
1164 
1165 /*
1166     sg_cull_mode
1167 
1168     The face-culling mode, this is used in the
1169     sg_pipeline_desc.rasterizer.cull_mode member when creating a
1170     pipeline object.
1171 
1172     The default cull mode is SG_CULLMODE_NONE
1173 */
typedef enum sg_cull_mode {
    _SG_CULLMODE_DEFAULT,   /* value 0 reserved for default-init */
    SG_CULLMODE_NONE,       /* no face culling (the default) */
    SG_CULLMODE_FRONT,      /* cull front-facing primitives */
    SG_CULLMODE_BACK,       /* cull back-facing primitives */
    _SG_CULLMODE_NUM,
    _SG_CULLMODE_FORCE_U32 = 0x7FFFFFFF
} sg_cull_mode;
1182 
1183 /*
1184     sg_face_winding
1185 
1186     The vertex-winding rule that determines a front-facing primitive. This
1187     is used in the member sg_pipeline_desc.rasterizer.face_winding
1188     when creating a pipeline object.
1189 
1190     The default winding is SG_FACEWINDING_CW (clockwise)
1191 */
typedef enum sg_face_winding {
    _SG_FACEWINDING_DEFAULT,    /* value 0 reserved for default-init */
    SG_FACEWINDING_CCW,         /* counter-clockwise winding is front-facing */
    SG_FACEWINDING_CW,          /* clockwise winding is front-facing (the default) */
    _SG_FACEWINDING_NUM,
    _SG_FACEWINDING_FORCE_U32 = 0x7FFFFFFF
} sg_face_winding;
1199 
1200 /*
1201     sg_compare_func
1202 
1203     The compare-function for depth- and stencil-ref tests.
1204     This is used when creating pipeline objects in the members:
1205 
1206     sg_pipeline_desc
1207         .depth_stencil
1208             .depth_compare_func
1209             .stencil_front.compare_func
1210             .stencil_back.compare_func
1211 
1212     The default compare func for depth- and stencil-tests is
1213     SG_COMPAREFUNC_ALWAYS.
1214 */
/* Compare functions for depth- and stencil-ref tests. */
typedef enum sg_compare_func {
    _SG_COMPAREFUNC_DEFAULT,    /* value 0 reserved for default-init */
    SG_COMPAREFUNC_NEVER,
    SG_COMPAREFUNC_LESS,
    SG_COMPAREFUNC_EQUAL,
    SG_COMPAREFUNC_LESS_EQUAL,
    SG_COMPAREFUNC_GREATER,
    SG_COMPAREFUNC_NOT_EQUAL,
    SG_COMPAREFUNC_GREATER_EQUAL,
    SG_COMPAREFUNC_ALWAYS,      /* the default for depth- and stencil-tests */
    _SG_COMPAREFUNC_NUM,
    _SG_COMPAREFUNC_FORCE_U32 = 0x7FFFFFFF
} sg_compare_func;
1228 
1229 /*
1230     sg_stencil_op
1231 
1232     The operation performed on a currently stored stencil-value when a
1233     comparison test passes or fails. This is used when creating a pipeline
1234     object in the members:
1235 
1236     sg_pipeline_desc
1237         .depth_stencil
1238             .stencil_front
1239                 .fail_op
1240                 .depth_fail_op
1241                 .pass_op
1242             .stencil_back
1243                 .fail_op
1244                 .depth_fail_op
1245                 .pass_op
1246 
1247     The default value is SG_STENCILOP_KEEP.
1248 */
/* Operation applied to the stored stencil value when a test passes or fails. */
typedef enum sg_stencil_op {
    _SG_STENCILOP_DEFAULT,      /* value 0 reserved for default-init */
    SG_STENCILOP_KEEP,          /* keep current value (the default) */
    SG_STENCILOP_ZERO,
    SG_STENCILOP_REPLACE,
    SG_STENCILOP_INCR_CLAMP,    /* increment, clamp at max */
    SG_STENCILOP_DECR_CLAMP,    /* decrement, clamp at 0 */
    SG_STENCILOP_INVERT,
    SG_STENCILOP_INCR_WRAP,     /* increment with wrap-around */
    SG_STENCILOP_DECR_WRAP,     /* decrement with wrap-around */
    _SG_STENCILOP_NUM,
    _SG_STENCILOP_FORCE_U32 = 0x7FFFFFFF
} sg_stencil_op;
1262 
1263 /*
1264     sg_blend_factor
1265 
1266     The source and destination factors in blending operations.
1267     This is used in the following members when creating a pipeline object:
1268 
1269     sg_pipeline_desc
1270         .blend
1271             .src_factor_rgb
1272             .dst_factor_rgb
1273             .src_factor_alpha
1274             .dst_factor_alpha
1275 
1276     The default value is SG_BLENDFACTOR_ONE for source
1277     factors, and SG_BLENDFACTOR_ZERO for destination factors.
1278 */
/* Source/destination blend factors; defaults are ONE for source and
   ZERO for destination factors. */
typedef enum sg_blend_factor {
    _SG_BLENDFACTOR_DEFAULT,    /* value 0 reserved for default-init */
    SG_BLENDFACTOR_ZERO,
    SG_BLENDFACTOR_ONE,
    SG_BLENDFACTOR_SRC_COLOR,
    SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR,
    SG_BLENDFACTOR_SRC_ALPHA,
    SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA,
    SG_BLENDFACTOR_DST_COLOR,
    SG_BLENDFACTOR_ONE_MINUS_DST_COLOR,
    SG_BLENDFACTOR_DST_ALPHA,
    SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA,
    SG_BLENDFACTOR_SRC_ALPHA_SATURATED,
    SG_BLENDFACTOR_BLEND_COLOR,             /* constant blend color */
    SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR,
    SG_BLENDFACTOR_BLEND_ALPHA,             /* constant blend alpha */
    SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA,
    _SG_BLENDFACTOR_NUM,
    _SG_BLENDFACTOR_FORCE_U32 = 0x7FFFFFFF
} sg_blend_factor;
1299 
1300 /*
1301     sg_blend_op
1302 
1303     Describes how the source and destination values are combined in the
1304     fragment blending operation. It is used in the following members when
1305     creating a pipeline object:
1306 
1307     sg_pipeline_desc
1308         .blend
1309             .op_rgb
1310             .op_alpha
1311 
1312     The default value is SG_BLENDOP_ADD.
1313 */
/* How source and destination values are combined when blending. */
typedef enum sg_blend_op {
    _SG_BLENDOP_DEFAULT,    /* value 0 reserved for default-init */
    SG_BLENDOP_ADD,         /* the default */
    SG_BLENDOP_SUBTRACT,
    SG_BLENDOP_REVERSE_SUBTRACT,
    _SG_BLENDOP_NUM,
    _SG_BLENDOP_FORCE_U32 = 0x7FFFFFFF
} sg_blend_op;
1322 
1323 /*
1324     sg_color_mask
1325 
1326     Selects the color channels when writing a fragment color to the
1327     framebuffer. This is used in the members
1328     sg_pipeline_desc.blend.color_write_mask when creating a pipeline object.
1329 
1330     The default colormask is SG_COLORMASK_RGBA (write all colors channels)
1331 
1332     NOTE: since the color mask value 0 is reserved for the default value
1333     (SG_COLORMASK_RGBA), use SG_COLORMASK_NONE if all color channels
1334     should be disabled.
1335 */
/* Bitmask of color channels written to the framebuffer. Since 0 means
   "default" (RGBA), SG_COLORMASK_NONE is a dedicated out-of-band value. */
typedef enum sg_color_mask {
    _SG_COLORMASK_DEFAULT = 0,      /* value 0 reserved for default-init */
    SG_COLORMASK_NONE = (0x10),     /* special value for 'all channels disabled' */
    SG_COLORMASK_R = (1<<0),
    SG_COLORMASK_G = (1<<1),
    SG_COLORMASK_B = (1<<2),
    SG_COLORMASK_A = (1<<3),
    SG_COLORMASK_RGB = 0x7,         /* R|G|B */
    SG_COLORMASK_RGBA = 0xF,        /* R|G|B|A (the default) */
    _SG_COLORMASK_FORCE_U32 = 0x7FFFFFFF
} sg_color_mask;
1347 
1348 /*
1349     sg_action
1350 
1351     Defines what action should be performed at the start of a render pass:
1352 
1353     SG_ACTION_CLEAR:    clear the render target image
1354     SG_ACTION_LOAD:     load the previous content of the render target image
1355     SG_ACTION_DONTCARE: leave the render target image content undefined
1356 
1357     This is used in the sg_pass_action structure.
1358 
1359     The default action for all pass attachments is SG_ACTION_CLEAR, with the
    clear color rgba = {0.5f, 0.5f, 0.5f, 1.0f}, depth=1.0 and stencil=0.
1361 
1362     If you want to override the default behaviour, it is important to not
1363     only set the clear color, but the 'action' field as well (as long as this
1364     is in its _SG_ACTION_DEFAULT, the value fields will be ignored).
1365 */
typedef enum sg_action {
    _SG_ACTION_DEFAULT,     /* value 0 reserved for default-init (resolves to CLEAR) */
    SG_ACTION_CLEAR,        /* clear the render target image (the default) */
    SG_ACTION_LOAD,         /* load the previous content of the render target image */
    SG_ACTION_DONTCARE,     /* leave the render target image content undefined */
    _SG_ACTION_NUM,
    _SG_ACTION_FORCE_U32 = 0x7FFFFFFF
} sg_action;
1374 
1375 /*
1376     sg_pass_action
1377 
1378     The sg_pass_action struct defines the actions to be performed
1379     at the start of a rendering pass in the functions sg_begin_pass()
1380     and sg_begin_default_pass().
1381 
1382     A separate action and clear values can be defined for each
1383     color attachment, and for the depth-stencil attachment.
1384 
1385     The default clear values are defined by the macros:
1386 
1387     - SG_DEFAULT_CLEAR_RED:     0.5f
1388     - SG_DEFAULT_CLEAR_GREEN:   0.5f
1389     - SG_DEFAULT_CLEAR_BLUE:    0.5f
1390     - SG_DEFAULT_CLEAR_ALPHA:   1.0f
1391     - SG_DEFAULT_CLEAR_DEPTH:   1.0f
1392     - SG_DEFAULT_CLEAR_STENCIL: 0
1393 */
/* Pass action and clear color for one color attachment. */
typedef struct sg_color_attachment_action {
    sg_action action;   /* CLEAR/LOAD/DONTCARE; val is only used for CLEAR */
    float val[4];       /* clear color rgba, default {0.5f, 0.5f, 0.5f, 1.0f} */
} sg_color_attachment_action;
1398 
/* Pass action and clear value for the depth attachment. */
typedef struct sg_depth_attachment_action {
    sg_action action;   /* CLEAR/LOAD/DONTCARE; val is only used for CLEAR */
    float val;          /* clear depth value, default 1.0f */
} sg_depth_attachment_action;
1403 
/* Pass action and clear value for the stencil attachment. */
typedef struct sg_stencil_attachment_action {
    sg_action action;   /* CLEAR/LOAD/DONTCARE; val is only used for CLEAR */
    uint8_t val;        /* clear stencil value, default 0 */
} sg_stencil_attachment_action;
1408 
/* Actions performed at the start of a render pass, passed to
   sg_begin_pass() / sg_begin_default_pass(). Zero-initialize and set
   only the fields you want to override. */
typedef struct sg_pass_action {
    uint32_t _start_canary;     /* internal guard value, leave zero-initialized */
    sg_color_attachment_action colors[SG_MAX_COLOR_ATTACHMENTS];    /* one action per color attachment */
    sg_depth_attachment_action depth;
    sg_stencil_attachment_action stencil;
    uint32_t _end_canary;       /* internal guard value, leave zero-initialized */
} sg_pass_action;
1416 
1417 /*
1418     sg_bindings
1419 
1420     The sg_bindings structure defines the resource binding slots
1421     of the sokol_gfx render pipeline, used as argument to the
1422     sg_apply_bindings() function.
1423 
1424     A resource binding struct contains:
1425 
1426     - 1..N vertex buffers
1427     - 0..N vertex buffer offsets
1428     - 0..1 index buffers
1429     - 0..1 index buffer offsets
1430     - 0..N vertex shader stage images
1431     - 0..N fragment shader stage images
1432 
1433     The max number of vertex buffer and shader stage images
1434     are defined by the SG_MAX_SHADERSTAGE_BUFFERS and
1435     SG_MAX_SHADERSTAGE_IMAGES configuration constants.
1436 
1437     The optional buffer offsets can be used to put different unrelated
1438     chunks of vertex- and/or index-data into the same buffer objects.
1439 */
/* Resource binding slots for sg_apply_bindings(). Offsets allow packing
   multiple unrelated chunks of vertex/index data into one buffer object. */
typedef struct sg_bindings {
    uint32_t _start_canary;     /* internal guard value, leave zero-initialized */
    sg_buffer vertex_buffers[SG_MAX_SHADERSTAGE_BUFFERS];   /* 1..N vertex buffers */
    int vertex_buffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS];  /* optional byte offsets into vertex buffers */
    sg_buffer index_buffer;         /* optional (indexed rendering only) */
    int index_buffer_offset;        /* optional byte offset into index buffer */
    sg_image vs_images[SG_MAX_SHADERSTAGE_IMAGES];  /* vertex shader stage images */
    sg_image fs_images[SG_MAX_SHADERSTAGE_IMAGES];  /* fragment shader stage images */
    uint32_t _end_canary;       /* internal guard value, leave zero-initialized */
} sg_bindings;
1450 
1451 /*
1452     sg_buffer_desc
1453 
1454     Creation parameters for sg_buffer objects, used in the
1455     sg_make_buffer() call.
1456 
1457     The default configuration is:
1458 
1459     .size:      0       (this *must* be set to a valid size in bytes)
1460     .type:      SG_BUFFERTYPE_VERTEXBUFFER
1461     .usage:     SG_USAGE_IMMUTABLE
    .content:   0
    .label:     0       (optional string label for trace hooks)
1464 
1465     The label will be ignored by sokol_gfx.h, it is only useful
1466     when hooking into sg_make_buffer() or sg_init_buffer() via
1467     the sg_install_trace_hooks() function.
1468 
1469     ADVANCED TOPIC: Injecting native 3D-API buffers:
1470 
1471     The following struct members allow to inject your own GL, Metal
1472     or D3D11 buffers into sokol_gfx:
1473 
1474     .gl_buffers[SG_NUM_INFLIGHT_FRAMES]
1475     .mtl_buffers[SG_NUM_INFLIGHT_FRAMES]
1476     .d3d11_buffer
1477 
1478     You must still provide all other members except the .content member, and
1479     these must match the creation parameters of the native buffers you
1480     provide. For SG_USAGE_IMMUTABLE, only provide a single native 3D-API
1481     buffer, otherwise you need to provide SG_NUM_INFLIGHT_FRAMES buffers
1482     (only for GL and Metal, not D3D11). Providing multiple buffers for GL and
1483     Metal is necessary because sokol_gfx will rotate through them when
1484     calling sg_update_buffer() to prevent lock-stalls.
1485 
    Note that it is expected that immutable injected buffers have already been
1487     initialized with content, and the .content member must be 0!
1488 
1489     Also you need to call sg_reset_state_cache() after calling native 3D-API
1490     functions, and before calling any sokol_gfx function.
1491 */
/* Creation parameters for sg_make_buffer(). Zero-initialize, then set at
   least .size; the native-handle members are only for injecting externally
   created 3D-API buffers (see comment block above). */
typedef struct sg_buffer_desc {
    uint32_t _start_canary;     /* internal guard value, leave zero-initialized */
    int size;                   /* buffer size in bytes, must be > 0 */
    sg_buffer_type type;        /* default: SG_BUFFERTYPE_VERTEXBUFFER */
    sg_usage usage;             /* default: SG_USAGE_IMMUTABLE */
    const void* content;        /* initial data, required for SG_USAGE_IMMUTABLE (except injected buffers) */
    const char* label;          /* optional debug label for trace hooks */
    /* GL specific: injected GL buffer names, one per in-flight frame */
    uint32_t gl_buffers[SG_NUM_INFLIGHT_FRAMES];
    /* Metal specific: injected MTLBuffer pointers, one per in-flight frame */
    const void* mtl_buffers[SG_NUM_INFLIGHT_FRAMES];
    /* D3D11 specific: injected ID3D11Buffer pointer */
    const void* d3d11_buffer;
    /* WebGPU specific: injected WGPUBuffer handle */
    const void* wgpu_buffer;
    uint32_t _end_canary;       /* internal guard value, leave zero-initialized */
} sg_buffer_desc;
1509 
1510 /*
1511     sg_subimage_content
1512 
1513     Pointer to and size of a subimage-surface data, this is
1514     used to describe the initial content of immutable-usage images,
1515     or for updating a dynamic- or stream-usage images.
1516 
1517     For 3D- or array-textures, one sg_subimage_content item
1518     describes an entire mipmap level consisting of all array- or
1519     3D-slices of the mipmap level. It is only possible to update
1520     an entire mipmap level, not parts of it.
1521 */
/* Pointer/size pair describing the data of one subimage surface (an entire
   mipmap level; for 3D/array images this includes all slices of that level). */
typedef struct sg_subimage_content {
    const void* ptr;    /* pointer to subimage data */
    int size;           /* size in bytes of pointed-to subimage data */
} sg_subimage_content;
1526 
1527 /*
1528     sg_image_content
1529 
1530     Defines the content of an image through a 2D array
1531     of sg_subimage_content structs. The first array dimension
1532     is the cubemap face, and the second array dimension the
1533     mipmap level.
1534 */
/* Full image content: first index is the cubemap face (use SG_CUBEFACE_*,
   only face 0 for non-cube images), second index is the mipmap level. */
typedef struct sg_image_content {
    sg_subimage_content subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS];
} sg_image_content;
1538 
1539 /*
1540     sg_image_desc
1541 
1542     Creation parameters for sg_image objects, used in the
1543     sg_make_image() call.
1544 
1545     The default configuration is:
1546 
1547     .type:              SG_IMAGETYPE_2D
1548     .render_target:     false
1549     .width              0 (must be set to >0)
1550     .height             0 (must be set to >0)
1551     .depth/.layers:     1
1552     .num_mipmaps:       1
1553     .usage:             SG_USAGE_IMMUTABLE
1554     .pixel_format:      SG_PIXELFORMAT_RGBA8 for textures, or sg_desc.context.color_format for render targets
1555     .sample_count:      1 for textures, or sg_desc.context.sample_count for render target
1556     .min_filter:        SG_FILTER_NEAREST
1557     .mag_filter:        SG_FILTER_NEAREST
1558     .wrap_u:            SG_WRAP_REPEAT
1559     .wrap_v:            SG_WRAP_REPEAT
1560     .wrap_w:            SG_WRAP_REPEAT (only SG_IMAGETYPE_3D)
1561     .border_color       SG_BORDERCOLOR_OPAQUE_BLACK
1562     .max_anisotropy     1 (must be 1..16)
1563     .min_lod            0.0f
1564     .max_lod            FLT_MAX
1565     .content            an sg_image_content struct to define the initial content
1566     .label              0       (optional string label for trace hooks)
1567 
1568     Q: Why is the default sample_count for render targets identical with the
1569     "default sample count" from sg_desc.context.sample_count?
1570 
1571     A: So that it matches the default sample count in pipeline objects. Even
1572     though it is a bit strange/confusing that offscreen render targets by default
1573     get the same sample count as the default framebuffer, but it's better that
1574     an offscreen render target created with default parameters matches
1575     a pipeline object created with default parameters.
1576 
1577     NOTE:
1578 
1579     SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on
1580     WebGL/GLES2, use sg_query_features().imagetype_array and
1581     sg_query_features().imagetype_3d at runtime to check
1582     if array- and 3D-textures are supported.
1583 
1584     Images with usage SG_USAGE_IMMUTABLE must be fully initialized by
1585     providing a valid .content member which points to
1586     initialization data.
1587 
1588     ADVANCED TOPIC: Injecting native 3D-API textures:
1589 
1590     The following struct members allow to inject your own GL, Metal
1591     or D3D11 textures into sokol_gfx:
1592 
1593     .gl_textures[SG_NUM_INFLIGHT_FRAMES]
1594     .mtl_textures[SG_NUM_INFLIGHT_FRAMES]
1595     .d3d11_texture
1596 
1597     The same rules apply as for injecting native buffers
1598     (see sg_buffer_desc documentation for more details).
1599 */
/* Creation parameters for sg_make_image(), see the documentation block above for defaults. */
typedef struct sg_image_desc {
    uint32_t _start_canary;         /* canary to detect structs that were not zero-initialized */
    sg_image_type type;             /* SG_IMAGETYPE_xxx (2D, cube, 3D, array) */
    bool render_target;             /* true if the image will be used as a pass attachment */
    int width;                      /* width in pixels */
    int height;                     /* height in pixels */
    union {
        int depth;                  /* depth for SG_IMAGETYPE_3D */
        int layers;                 /* number of layers for SG_IMAGETYPE_ARRAY */
    };
    int num_mipmaps;                /* number of mipmap levels */
    sg_usage usage;                 /* immutable images must provide .content (see NOTE above) */
    sg_pixel_format pixel_format;   /* default: RGBA8 (textures) or sg_desc.context.color_format (render targets) */
    int sample_count;               /* default: 1 (textures) or sg_desc.context.sample_count (render targets) */
    sg_filter min_filter;           /* default: SG_FILTER_NEAREST */
    sg_filter mag_filter;           /* default: SG_FILTER_NEAREST */
    sg_wrap wrap_u;                 /* default: SG_WRAP_REPEAT */
    sg_wrap wrap_v;                 /* default: SG_WRAP_REPEAT */
    sg_wrap wrap_w;                 /* default: SG_WRAP_REPEAT, only used for SG_IMAGETYPE_3D */
    sg_border_color border_color;   /* default: SG_BORDERCOLOR_OPAQUE_BLACK */
    uint32_t max_anisotropy;        /* default: 1, must be 1..16 */
    float min_lod;                  /* default: 0.0f */
    float max_lod;                  /* default: FLT_MAX */
    sg_image_content content;       /* initial content, required for SG_USAGE_IMMUTABLE */
    const char* label;              /* optional string label for trace hooks */
    /* GL specific: optionally inject existing GL texture handles (see ADVANCED TOPIC above) */
    uint32_t gl_textures[SG_NUM_INFLIGHT_FRAMES];
    /* Metal specific: optionally inject existing native texture objects */
    const void* mtl_textures[SG_NUM_INFLIGHT_FRAMES];
    /* D3D11 specific: optionally inject an existing native texture object */
    const void* d3d11_texture;
    /* WebGPU specific: optionally inject an existing native texture object */
    const void* wgpu_texture;
    uint32_t _end_canary;           /* canary to detect structs that were not zero-initialized */
} sg_image_desc;
1635 
1636 /*
1637     sg_shader_desc
1638 
1639     The structure sg_shader_desc defines all creation parameters
1640     for shader programs, used as input to the sg_make_shader() function:
1641 
1642     - reflection information for vertex attributes (vertex shader inputs):
1643         - vertex attribute name (required for GLES2, optional for GLES3 and GL)
1644         - a semantic name and index (required for D3D11)
1645     - for each vertex- and fragment-shader-stage:
1646         - the shader source or bytecode
1647         - an optional entry function name
1648         - an optional compile target (only for D3D11 when source is provided, defaults are "vs_4_0" and "ps_4_0")
1649         - reflection info for each uniform block used by the shader stage:
1650             - the size of the uniform block in bytes
1651             - reflection info for each uniform block member (only required for GL backends):
1652                 - member name
1653                 - member type (SG_UNIFORMTYPE_xxx)
1654                 - if the member is an array, the number of array items
1655         - reflection info for the texture images used by the shader stage:
1656             - the image type (SG_IMAGETYPE_xxx)
1657             - the sampler type (SG_SAMPLERTYPE_xxx, default is SG_SAMPLERTYPE_FLOAT)
1658             - the name of the texture sampler (required for GLES2, optional everywhere else)
1659 
1660     For all GL backends, shader source-code must be provided. For D3D11 and Metal,
1661     either shader source-code or byte-code can be provided.
1662 
1663     For D3D11, if source code is provided, the d3dcompiler_47.dll will be loaded
1664     on demand. If this fails, shader creation will fail. When compiling HLSL
1665     source code, you can provide an optional target string via
1666     sg_shader_stage_desc.d3d11_target, the default target is "vs_4_0" for the
1667     vertex shader stage and "ps_4_0" for the pixel shader stage.
1668 */
/* Reflection info for one vertex attribute (vertex shader input), see sg_shader_desc. */
typedef struct sg_shader_attr_desc {
    const char* name;           /* GLSL vertex attribute name (only required for GLES2) */
    const char* sem_name;       /* HLSL semantic name */
    int sem_index;              /* HLSL semantic index */
} sg_shader_attr_desc;
1674 
/* Reflection info for a single uniform block member (only required for GL backends). */
typedef struct sg_shader_uniform_desc {
    const char* name;           /* uniform member name */
    sg_uniform_type type;       /* SG_UNIFORMTYPE_xxx */
    int array_count;            /* if the member is an array, the number of array items */
} sg_shader_uniform_desc;
1680 
/* Reflection info for one uniform block used by a shader stage. */
typedef struct sg_shader_uniform_block_desc {
    int size;                                               /* size of the uniform block in bytes */
    sg_shader_uniform_desc uniforms[SG_MAX_UB_MEMBERS];     /* member reflection (only required for GL backends) */
} sg_shader_uniform_block_desc;
1685 
/* Reflection info for one texture image used by a shader stage. */
typedef struct sg_shader_image_desc {
    const char* name;           /* texture sampler name (required for GLES2, optional everywhere else) */
    sg_image_type type;         /* FIXME: should this be renamed to 'image_type'? */
    sg_sampler_type sampler_type;   /* SG_SAMPLERTYPE_xxx, default: SG_SAMPLERTYPE_FLOAT */
} sg_shader_image_desc;
1691 
/* Creation parameters for one shader stage: source or bytecode plus reflection info. */
typedef struct sg_shader_stage_desc {
    const char* source;         /* shader source code (required for all GL backends) */
    const uint8_t* byte_code;   /* precompiled bytecode (alternative to source on D3D11 and Metal) */
    int byte_code_size;         /* size of byte_code in bytes */
    const char* entry;          /* optional entry function name */
    const char* d3d11_target;   /* optional HLSL compile target, defaults: "vs_4_0" / "ps_4_0" */
    sg_shader_uniform_block_desc uniform_blocks[SG_MAX_SHADERSTAGE_UBS];
    sg_shader_image_desc images[SG_MAX_SHADERSTAGE_IMAGES];
} sg_shader_stage_desc;
1701 
/* Creation parameters for sg_make_shader(), see the documentation block above. */
typedef struct sg_shader_desc {
    uint32_t _start_canary;     /* canary to detect structs that were not zero-initialized */
    sg_shader_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES];    /* vertex attribute reflection */
    sg_shader_stage_desc vs;    /* vertex shader stage */
    sg_shader_stage_desc fs;    /* fragment shader stage */
    const char* label;          /* optional string label for trace hooks */
    uint32_t _end_canary;       /* canary to detect structs that were not zero-initialized */
} sg_shader_desc;
1710 
1711 /*
1712     sg_pipeline_desc
1713 
1714     The sg_pipeline_desc struct defines all creation parameters
1715     for an sg_pipeline object, used as argument to the
1716     sg_make_pipeline() function:
1717 
1718     - the vertex layout for all input vertex buffers
1719     - a shader object
1720     - the 3D primitive type (points, lines, triangles, ...)
1721     - the index type (none, 16- or 32-bit)
1722     - depth-stencil state
1723     - alpha-blending state
1724     - rasterizer state
1725 
1726     If the vertex data has no gaps between vertex components, you can omit
1727     the .layout.buffers[].stride and layout.attrs[].offset items (leave them
1728     default-initialized to 0), sokol-gfx will then compute the offsets and strides
1729     from the vertex component formats (.layout.attrs[].format). Please note
1730     that ALL vertex attribute offsets must be 0 in order for the
1731     automatic offset computation to kick in.
1732 
1733     The default configuration is as follows:
1734 
1735     .layout:
1736         .buffers[]:         vertex buffer layouts
1737             .stride:        0 (if no stride is given it will be computed)
1738             .step_func      SG_VERTEXSTEP_PER_VERTEX
1739             .step_rate      1
1740         .attrs[]:           vertex attribute declarations
1741             .buffer_index   0 the vertex buffer bind slot
1742             .offset         0 (offsets can be omitted if the vertex layout has no gaps)
1743             .format         SG_VERTEXFORMAT_INVALID (must be initialized!)
1744     .shader:            0 (must be initialized with a valid sg_shader id!)
1745     .primitive_type:    SG_PRIMITIVETYPE_TRIANGLES
1746     .index_type:        SG_INDEXTYPE_NONE
1747     .depth_stencil:
1748         .stencil_front, .stencil_back:
1749             .fail_op:               SG_STENCILOP_KEEP
1750             .depth_fail_op:         SG_STENCILOP_KEEP
1751             .pass_op:               SG_STENCILOP_KEEP
1752             .compare_func           SG_COMPAREFUNC_ALWAYS
1753         .depth_compare_func:    SG_COMPAREFUNC_ALWAYS
1754         .depth_write_enabled:   false
1755         .stencil_enabled:       false
1756         .stencil_read_mask:     0
1757         .stencil_write_mask:    0
1758         .stencil_ref:           0
1759     .blend:
1760         .enabled:               false
1761         .src_factor_rgb:        SG_BLENDFACTOR_ONE
1762         .dst_factor_rgb:        SG_BLENDFACTOR_ZERO
1763         .op_rgb:                SG_BLENDOP_ADD
1764         .src_factor_alpha:      SG_BLENDFACTOR_ONE
1765         .dst_factor_alpha:      SG_BLENDFACTOR_ZERO
1766         .op_alpha:              SG_BLENDOP_ADD
1767         .color_write_mask:      SG_COLORMASK_RGBA
1768         .color_attachment_count 1
1769         .color_format           SG_PIXELFORMAT_RGBA8
        .depth_format           SG_PIXELFORMAT_DEPTH_STENCIL
1771         .blend_color:           { 0.0f, 0.0f, 0.0f, 0.0f }
1772     .rasterizer:
1773         .alpha_to_coverage_enabled:     false
1774         .cull_mode:                     SG_CULLMODE_NONE
1775         .face_winding:                  SG_FACEWINDING_CW
1776         .sample_count:                  sg_desc.context.sample_count
1777         .depth_bias:                    0.0f
1778         .depth_bias_slope_scale:        0.0f
1779         .depth_bias_clamp:              0.0f
1780     .label  0       (optional string label for trace hooks)
1781 */
/* Per-vertex-buffer layout description (see sg_pipeline_desc defaults above). */
typedef struct sg_buffer_layout_desc {
    int stride;                 /* byte stride; 0 means: computed from attribute formats */
    sg_vertex_step step_func;   /* default: SG_VERTEXSTEP_PER_VERTEX */
    int step_rate;              /* default: 1 */
} sg_buffer_layout_desc;
1787 
/* Declaration of a single vertex attribute in the pipeline's vertex layout. */
typedef struct sg_vertex_attr_desc {
    int buffer_index;           /* vertex buffer bind slot, default: 0 */
    int offset;                 /* byte offset; may be omitted (0) if the layout has no gaps */
    sg_vertex_format format;    /* default: SG_VERTEXFORMAT_INVALID — must be initialized! */
} sg_vertex_attr_desc;
1793 
/* Complete vertex layout: per-buffer layouts plus vertex attribute declarations. */
typedef struct sg_layout_desc {
    sg_buffer_layout_desc buffers[SG_MAX_SHADERSTAGE_BUFFERS];
    sg_vertex_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES];
} sg_layout_desc;
1798 
/* Per-face stencil state (defaults: all ops SG_STENCILOP_KEEP, compare SG_COMPAREFUNC_ALWAYS). */
typedef struct sg_stencil_state {
    sg_stencil_op fail_op;          /* op when the stencil test fails */
    sg_stencil_op depth_fail_op;    /* op when stencil passes but the depth test fails */
    sg_stencil_op pass_op;          /* op when both stencil and depth tests pass */
    sg_compare_func compare_func;   /* stencil compare function */
} sg_stencil_state;
1805 
/* Depth- and stencil-test state of a pipeline (see sg_pipeline_desc defaults above). */
typedef struct sg_depth_stencil_state {
    sg_stencil_state stencil_front;     /* stencil state for front-facing primitives */
    sg_stencil_state stencil_back;      /* stencil state for back-facing primitives */
    sg_compare_func depth_compare_func; /* default: SG_COMPAREFUNC_ALWAYS */
    bool depth_write_enabled;           /* default: false */
    bool stencil_enabled;               /* default: false */
    uint8_t stencil_read_mask;          /* default: 0 */
    uint8_t stencil_write_mask;         /* default: 0 */
    uint8_t stencil_ref;                /* default: 0 */
} sg_depth_stencil_state;
1816 
/* Alpha-blending state plus attachment count/format info (see sg_pipeline_desc defaults above). */
typedef struct sg_blend_state {
    bool enabled;                       /* default: false */
    sg_blend_factor src_factor_rgb;     /* default: SG_BLENDFACTOR_ONE */
    sg_blend_factor dst_factor_rgb;     /* default: SG_BLENDFACTOR_ZERO */
    sg_blend_op op_rgb;                 /* default: SG_BLENDOP_ADD */
    sg_blend_factor src_factor_alpha;   /* default: SG_BLENDFACTOR_ONE */
    sg_blend_factor dst_factor_alpha;   /* default: SG_BLENDFACTOR_ZERO */
    sg_blend_op op_alpha;               /* default: SG_BLENDOP_ADD */
    uint8_t color_write_mask;           /* default: SG_COLORMASK_RGBA */
    int color_attachment_count;         /* default: 1 */
    sg_pixel_format color_format;       /* default: SG_PIXELFORMAT_RGBA8 */
    sg_pixel_format depth_format;       /* default: the depth-stencil pixel format (see docs above) */
    float blend_color[4];               /* default: { 0.0f, 0.0f, 0.0f, 0.0f } */
} sg_blend_state;
1831 
/* Rasterizer state of a pipeline (see sg_pipeline_desc defaults above). */
typedef struct sg_rasterizer_state {
    bool alpha_to_coverage_enabled;     /* default: false */
    sg_cull_mode cull_mode;             /* default: SG_CULLMODE_NONE */
    sg_face_winding face_winding;       /* default: SG_FACEWINDING_CW */
    int sample_count;                   /* default: sg_desc.context.sample_count */
    float depth_bias;                   /* default: 0.0f */
    float depth_bias_slope_scale;       /* default: 0.0f */
    float depth_bias_clamp;             /* default: 0.0f */
} sg_rasterizer_state;
1841 
/* Creation parameters for sg_make_pipeline(), see the documentation block above for defaults. */
typedef struct sg_pipeline_desc {
    uint32_t _start_canary;             /* canary to detect structs that were not zero-initialized */
    sg_layout_desc layout;              /* vertex layout for all input vertex buffers */
    sg_shader shader;                   /* must be initialized with a valid sg_shader id! */
    sg_primitive_type primitive_type;   /* default: SG_PRIMITIVETYPE_TRIANGLES */
    sg_index_type index_type;           /* default: SG_INDEXTYPE_NONE */
    sg_depth_stencil_state depth_stencil;
    sg_blend_state blend;
    sg_rasterizer_state rasterizer;
    const char* label;                  /* optional string label for trace hooks */
    uint32_t _end_canary;               /* canary to detect structs that were not zero-initialized */
} sg_pipeline_desc;
1854 
1855 /*
1856     sg_pass_desc
1857 
1858     Creation parameters for an sg_pass object, used as argument
1859     to the sg_make_pass() function.
1860 
1861     A pass object contains 1..4 color-attachments and none, or one,
1862     depth-stencil-attachment. Each attachment consists of
1863     an image, and two additional indices describing
1864     which subimage the pass will render to: one mipmap index, and
1865     if the image is a cubemap, array-texture or 3D-texture, the
1866     face-index, array-layer or depth-slice.
1867 
1868     Pass images must fulfill the following requirements:
1869 
1870     All images must have:
1871     - been created as render target (sg_image_desc.render_target = true)
1872     - the same size
1873     - the same sample count
1874 
1875     In addition, all color-attachment images must have the same pixel format.
1876 */
/* One pass attachment: an image plus the subimage (mip level and face/layer/slice) to render to. */
typedef struct sg_attachment_desc {
    sg_image image;             /* must have been created with sg_image_desc.render_target = true */
    int mip_level;              /* mipmap level to render to */
    union {
        int face;               /* cubemap face index */
        int layer;              /* array-texture layer */
        int slice;              /* 3D-texture depth slice */
    };
} sg_attachment_desc;
1886 
/* Creation parameters for sg_make_pass(), see the documentation block above. */
typedef struct sg_pass_desc {
    uint32_t _start_canary;     /* canary to detect structs that were not zero-initialized */
    sg_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS];     /* 1..4 color attachments */
    sg_attachment_desc depth_stencil_attachment;                        /* optional depth-stencil attachment */
    const char* label;          /* optional string label for trace hooks */
    uint32_t _end_canary;       /* canary to detect structs that were not zero-initialized */
} sg_pass_desc;
1894 
1895 /*
1896     sg_trace_hooks
1897 
1898     Installable callback functions to keep track of the sokol-gfx calls,
1899     this is useful for debugging, or keeping track of resource creation
1900     and destruction.
1901 
1902     Trace hooks are installed with sg_install_trace_hooks(), this returns
1903     another sg_trace_hooks struct with the previous set of
1904     trace hook function pointers. These should be invoked by the
1905     new trace hooks to form a proper call chain.
1906 */
/* Optional per-call trace callbacks, installed with sg_install_trace_hooks()
   (see documentation block above). The user_data pointer stored here is
   passed unchanged as the last argument into every callback. */
typedef struct sg_trace_hooks {
    void* user_data;
    void (*reset_state_cache)(void* user_data);
    /* resource creation and destruction */
    void (*make_buffer)(const sg_buffer_desc* desc, sg_buffer result, void* user_data);
    void (*make_image)(const sg_image_desc* desc, sg_image result, void* user_data);
    void (*make_shader)(const sg_shader_desc* desc, sg_shader result, void* user_data);
    void (*make_pipeline)(const sg_pipeline_desc* desc, sg_pipeline result, void* user_data);
    void (*make_pass)(const sg_pass_desc* desc, sg_pass result, void* user_data);
    void (*destroy_buffer)(sg_buffer buf, void* user_data);
    void (*destroy_image)(sg_image img, void* user_data);
    void (*destroy_shader)(sg_shader shd, void* user_data);
    void (*destroy_pipeline)(sg_pipeline pip, void* user_data);
    void (*destroy_pass)(sg_pass pass, void* user_data);
    /* resource updates */
    void (*update_buffer)(sg_buffer buf, const void* data_ptr, int data_size, void* user_data);
    void (*update_image)(sg_image img, const sg_image_content* data, void* user_data);
    void (*append_buffer)(sg_buffer buf, const void* data_ptr, int data_size, int result, void* user_data);
    /* rendering */
    void (*begin_default_pass)(const sg_pass_action* pass_action, int width, int height, void* user_data);
    void (*begin_pass)(sg_pass pass, const sg_pass_action* pass_action, void* user_data);
    void (*apply_viewport)(int x, int y, int width, int height, bool origin_top_left, void* user_data);
    void (*apply_scissor_rect)(int x, int y, int width, int height, bool origin_top_left, void* user_data);
    void (*apply_pipeline)(sg_pipeline pip, void* user_data);
    void (*apply_bindings)(const sg_bindings* bindings, void* user_data);
    void (*apply_uniforms)(sg_shader_stage stage, int ub_index, const void* data, int num_bytes, void* user_data);
    void (*draw)(int base_element, int num_elements, int num_instances, void* user_data);
    void (*end_pass)(void* user_data);
    void (*commit)(void* user_data);
    /* separate resource allocation and initialization (async setup) */
    void (*alloc_buffer)(sg_buffer result, void* user_data);
    void (*alloc_image)(sg_image result, void* user_data);
    void (*alloc_shader)(sg_shader result, void* user_data);
    void (*alloc_pipeline)(sg_pipeline result, void* user_data);
    void (*alloc_pass)(sg_pass result, void* user_data);
    void (*init_buffer)(sg_buffer buf_id, const sg_buffer_desc* desc, void* user_data);
    void (*init_image)(sg_image img_id, const sg_image_desc* desc, void* user_data);
    void (*init_shader)(sg_shader shd_id, const sg_shader_desc* desc, void* user_data);
    void (*init_pipeline)(sg_pipeline pip_id, const sg_pipeline_desc* desc, void* user_data);
    void (*init_pass)(sg_pass pass_id, const sg_pass_desc* desc, void* user_data);
    void (*fail_buffer)(sg_buffer buf_id, void* user_data);
    void (*fail_image)(sg_image img_id, void* user_data);
    void (*fail_shader)(sg_shader shd_id, void* user_data);
    void (*fail_pipeline)(sg_pipeline pip_id, void* user_data);
    void (*fail_pass)(sg_pass pass_id, void* user_data);
    /* debug groups */
    void (*push_debug_group)(const char* name, void* user_data);
    void (*pop_debug_group)(void* user_data);
    /* error notifications */
    void (*err_buffer_pool_exhausted)(void* user_data);
    void (*err_image_pool_exhausted)(void* user_data);
    void (*err_shader_pool_exhausted)(void* user_data);
    void (*err_pipeline_pool_exhausted)(void* user_data);
    void (*err_pass_pool_exhausted)(void* user_data);
    void (*err_context_mismatch)(void* user_data);
    void (*err_pass_invalid)(void* user_data);
    void (*err_draw_invalid)(void* user_data);
    void (*err_bindings_invalid)(void* user_data);
} sg_trace_hooks;
1960 
1961 /*
1962     sg_buffer_info
1963     sg_image_info
1964     sg_shader_info
1965     sg_pipeline_info
1966     sg_pass_info
1967 
1968     These structs contain various internal resource attributes which
1969     might be useful for debug-inspection. Please don't rely on the
1970     actual content of those structs too much, as they are quite closely
1971     tied to sokol_gfx.h internals and may change more frequently than
1972     the other public API elements.
1973 
1974     The *_info structs are used as the return values of the following functions:
1975 
1976     sg_query_buffer_info()
1977     sg_query_image_info()
1978     sg_query_shader_info()
1979     sg_query_pipeline_info()
1980     sg_query_pass_info()
1981 */
/* Internal resource-pool slot attributes, embedded in all sg_*_info structs (debug inspection only). */
typedef struct sg_slot_info {
    sg_resource_state state;    /* the current state of this resource slot */
    uint32_t res_id;        /* type-neutral resource id (e.g. sg_buffer.id) */
    uint32_t ctx_id;        /* the context this resource belongs to */
} sg_slot_info;
1987 
/* Runtime attributes of a buffer, returned by sg_query_buffer_info() (debug inspection only). */
typedef struct sg_buffer_info {
    sg_slot_info slot;              /* resource pool slot info */
    uint32_t update_frame_index;    /* frame index of last sg_update_buffer() */
    uint32_t append_frame_index;    /* frame index of last sg_append_buffer() */
    int append_pos;                 /* current position in buffer for sg_append_buffer() */
    bool append_overflow;           /* is buffer in overflow state (due to sg_append_buffer) */
    int num_slots;                  /* number of renaming-slots for dynamically updated buffers */
    int active_slot;                /* currently active write-slot for dynamically updated buffers */
} sg_buffer_info;
1997 
/* Runtime attributes of an image, returned by sg_query_image_info() (debug inspection only). */
typedef struct sg_image_info {
    sg_slot_info slot;              /* resource pool slot info */
    uint32_t upd_frame_index;       /* frame index of last sg_update_image() */
    int num_slots;                  /* number of renaming-slots for dynamically updated images */
    int active_slot;                /* currently active write-slot for dynamically updated images */
    int width;                      /* image width */
    int height;                     /* image height */
} sg_image_info;
2006 
/* Runtime attributes of a shader, returned by sg_query_shader_info() (debug inspection only). */
typedef struct sg_shader_info {
    sg_slot_info slot;              /* resource pool slot info */
} sg_shader_info;
2010 
/* Runtime attributes of a pipeline, returned by sg_query_pipeline_info() (debug inspection only). */
typedef struct sg_pipeline_info {
    sg_slot_info slot;              /* resource pool slot info */
} sg_pipeline_info;
2014 
/* Runtime attributes of a pass, returned by sg_query_pass_info() (debug inspection only). */
typedef struct sg_pass_info {
    sg_slot_info slot;              /* resource pool slot info */
} sg_pass_info;
2018 
2019 /*
2020     sg_desc
2021 
2022     The sg_desc struct contains configuration values for sokol_gfx,
2023     it is used as parameter to the sg_setup() call.
2024 
2025     FIXME: explain the various configuration options
2026 
2027     The default configuration is:
2028 
2029     .buffer_pool_size       128
2030     .image_pool_size        128
2031     .shader_pool_size       32
2032     .pipeline_pool_size     64
2033     .pass_pool_size         16
2034     .context_pool_size      16
2035     .sampler_cache_size     64
2036     .uniform_buffer_size    4 MB (4*1024*1024)
2037     .staging_buffer_size    8 MB (8*1024*1024)
2038 
2039     .context.color_format: default value depends on selected backend:
2040         all GL backends:    SG_PIXELFORMAT_RGBA8
2041         Metal and D3D11:    SG_PIXELFORMAT_BGRA8
2042         WGPU:               *no default* (must be queried from WGPU swapchain)
2043     .context.depth_format   SG_PIXELFORMAT_DEPTH_STENCIL
2044     .context.sample_count   1
2045 
2046     GL specific:
2047         .context.gl.force_gles2
2048             if this is true the GL backend will act in "GLES2 fallback mode" even
2049             when compiled with SOKOL_GLES3, this is useful to fall back
2050             to traditional WebGL if a browser doesn't support a WebGL2 context
2051 
2052     Metal specific:
        (NOTE: All Objective-C object references are transferred through
        a bridged (const void*) to sokol_gfx, which will use an unretained
        bridged cast (__bridge id<xxx>) to retrieve the Objective-C
        references back. Since the bridge cast is unretained, the caller
        must hold a strong reference to the Objective-C object for the
        duration of the sokol_gfx call!)
2059 
2060         .context.metal.device
2061             a pointer to the MTLDevice object
2062         .context.metal.renderpass_descriptor_cb
2063             a C callback function to obtain the MTLRenderPassDescriptor for the
2064             current frame when rendering to the default framebuffer, will be called
2065             in sg_begin_default_pass()
2066         .context.metal.drawable_cb
2067             a C callback function to obtain a MTLDrawable for the current
2068             frame when rendering to the default framebuffer, will be called in
2069             sg_end_pass() of the default pass
2070 
2071     D3D11 specific:
2072         .context.d3d11.device
2073             a pointer to the ID3D11Device object, this must have been created
2074             before sg_setup() is called
        .context.d3d11.device_context
            a pointer to the ID3D11DeviceContext object
        .context.d3d11.render_target_view_cb
2078             a C callback function to obtain a pointer to the current
2079             ID3D11RenderTargetView object of the default framebuffer,
2080             this function will be called in sg_begin_pass() when rendering
2081             to the default framebuffer
2082         .context.d3d11.depth_stencil_view_cb
2083             a C callback function to obtain a pointer to the current
2084             ID3D11DepthStencilView object of the default framebuffer,
2085             this function will be called in sg_begin_pass() when rendering
2086             to the default framebuffer
2087 
2088     WebGPU specific:
2089         .context.wgpu.device
2090             a WGPUDevice handle
2091         .context.wgpu.render_format
2092             WGPUTextureFormat of the swap chain surface
2093         .context.wgpu.render_view_cb
2094             callback to get the current WGPUTextureView of the swapchain's
2095             rendering attachment (may be an MSAA surface)
2096         .context.wgpu.resolve_view_cb
2097             callback to get the current WGPUTextureView of the swapchain's
2098             MSAA-resolve-target surface, must return 0 if not MSAA rendering
2099         .context.wgpu.depth_stencil_view_cb
2100             callback to get current default-pass depth-stencil-surface WGPUTextureView
            the pixel format of the default WGPUTextureView must be WGPUTextureFormat_Depth24PlusStencil8
2102 
2103     When using sokol_gfx.h and sokol_app.h together, consider using the
2104     helper function sapp_sgcontext() in the sokol_glue.h header to
2105     initialize the sg_desc.context nested struct. sapp_sgcontext() returns
2106     a completely initialized sg_context_desc struct with information
2107     provided by sokol_app.h.
2108 */
/* GL-backend-specific context parameters (see sg_desc documentation above). */
typedef struct sg_gl_context_desc {
    bool force_gles2;   /* if true, act in "GLES2 fallback mode" even when compiled with SOKOL_GLES3 */
} sg_gl_context_desc;
2112 
/* Metal-backend-specific context parameters. All Objective-C references are
   passed as bridged (const void*); the caller must keep a strong reference
   for the duration of the sokol_gfx call (see sg_desc documentation above).
   NOTE: the struct tag is sg_mtl_context_desc, the typedef name is
   sg_metal_context_desc. */
typedef struct sg_mtl_context_desc {
    const void* device;                             /* pointer to the MTLDevice object */
    const void* (*renderpass_descriptor_cb)(void);  /* returns MTLRenderPassDescriptor for the current default-pass frame */
    const void* (*drawable_cb)(void);               /* returns MTLDrawable for the current default-pass frame */
} sg_metal_context_desc;
2118 
/* D3D11-backend-specific context parameters (see sg_desc documentation above). */
typedef struct sg_d3d11_context_desc {
    const void* device;                             /* ID3D11Device, must be created before sg_setup() */
    const void* device_context;                     /* ID3D11DeviceContext */
    const void* (*render_target_view_cb)(void);     /* returns current default-framebuffer ID3D11RenderTargetView */
    const void* (*depth_stencil_view_cb)(void);     /* returns current default-framebuffer ID3D11DepthStencilView */
} sg_d3d11_context_desc;
2125 
/* WebGPU-backend-specific context parameters (see sg_desc documentation above). */
typedef struct sg_wgpu_context_desc {
    const void* device;                    /* WGPUDevice */
    const void* (*render_view_cb)(void);   /* returns WGPUTextureView of the swapchain's rendering attachment */
    const void* (*resolve_view_cb)(void);  /* returns WGPUTextureView of the MSAA-resolve target, 0 if not MSAA */
    const void* (*depth_stencil_view_cb)(void);    /* returns WGPUTextureView, must be WGPUTextureFormat_Depth24PlusStencil8 */
} sg_wgpu_context_desc;
2132 
/* Default-framebuffer and 3D-API device parameters, nested inside sg_desc
   (see sg_desc documentation above; sapp_sgcontext() in sokol_glue.h can
   fill this struct when using sokol_app.h). */
typedef struct sg_context_desc {
    sg_pixel_format color_format;   /* default is backend-dependent, see sg_desc docs */
    sg_pixel_format depth_format;   /* default: the depth-stencil pixel format */
    int sample_count;               /* default: 1 */
    sg_gl_context_desc gl;          /* GL-specific parameters */
    sg_metal_context_desc metal;    /* Metal-specific parameters */
    sg_d3d11_context_desc d3d11;    /* D3D11-specific parameters */
    sg_wgpu_context_desc wgpu;      /* WebGPU-specific parameters */
} sg_context_desc;
2142 
/* Configuration parameters for sg_setup(), see the documentation block above for defaults. */
typedef struct sg_desc {
    uint32_t _start_canary;     /* canary to detect structs that were not zero-initialized */
    int buffer_pool_size;       /* default: 128 */
    int image_pool_size;        /* default: 128 */
    int shader_pool_size;       /* default: 32 */
    int pipeline_pool_size;     /* default: 64 */
    int pass_pool_size;         /* default: 16 */
    int context_pool_size;      /* default: 16 */
    int uniform_buffer_size;    /* default: 4 MB (4*1024*1024) */
    int staging_buffer_size;    /* default: 8 MB (8*1024*1024) */
    int sampler_cache_size;     /* default: 64 */
    sg_context_desc context;    /* default-framebuffer and 3D-API device info */
    uint32_t _end_canary;       /* canary to detect structs that were not zero-initialized */
} sg_desc;
2157 
/* setup and misc functions */
SOKOL_API_DECL void sg_setup(const sg_desc* desc);
SOKOL_API_DECL void sg_shutdown(void);
SOKOL_API_DECL bool sg_isvalid(void);
SOKOL_API_DECL void sg_reset_state_cache(void);
/* returns the previously installed trace hooks so the new hooks can chain-call them (see sg_trace_hooks docs) */
SOKOL_API_DECL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks);
SOKOL_API_DECL void sg_push_debug_group(const char* name);
SOKOL_API_DECL void sg_pop_debug_group(void);

/* resource creation, destruction and updating */
SOKOL_API_DECL sg_buffer sg_make_buffer(const sg_buffer_desc* desc);
SOKOL_API_DECL sg_image sg_make_image(const sg_image_desc* desc);
SOKOL_API_DECL sg_shader sg_make_shader(const sg_shader_desc* desc);
SOKOL_API_DECL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc);
SOKOL_API_DECL sg_pass sg_make_pass(const sg_pass_desc* desc);
SOKOL_API_DECL void sg_destroy_buffer(sg_buffer buf);
SOKOL_API_DECL void sg_destroy_image(sg_image img);
SOKOL_API_DECL void sg_destroy_shader(sg_shader shd);
SOKOL_API_DECL void sg_destroy_pipeline(sg_pipeline pip);
SOKOL_API_DECL void sg_destroy_pass(sg_pass pass);
SOKOL_API_DECL void sg_update_buffer(sg_buffer buf, const void* data_ptr, int data_size);
SOKOL_API_DECL void sg_update_image(sg_image img, const sg_image_content* data);
SOKOL_API_DECL int sg_append_buffer(sg_buffer buf, const void* data_ptr, int data_size);
/* true if the buffer is in append-overflow state (see sg_buffer_info.append_overflow) */
SOKOL_API_DECL bool sg_query_buffer_overflow(sg_buffer buf);

/* rendering functions */
SOKOL_API_DECL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height);
SOKOL_API_DECL void sg_begin_pass(sg_pass pass, const sg_pass_action* pass_action);
SOKOL_API_DECL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left);
SOKOL_API_DECL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left);
SOKOL_API_DECL void sg_apply_pipeline(sg_pipeline pip);
SOKOL_API_DECL void sg_apply_bindings(const sg_bindings* bindings);
SOKOL_API_DECL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes);
SOKOL_API_DECL void sg_draw(int base_element, int num_elements, int num_instances);
SOKOL_API_DECL void sg_end_pass(void);
SOKOL_API_DECL void sg_commit(void);

/* getting information */
SOKOL_API_DECL sg_desc sg_query_desc(void);
SOKOL_API_DECL sg_backend sg_query_backend(void);
SOKOL_API_DECL sg_features sg_query_features(void);
SOKOL_API_DECL sg_limits sg_query_limits(void);
SOKOL_API_DECL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt);
/* get current state of a resource (INITIAL, ALLOC, VALID, FAILED, INVALID) */
SOKOL_API_DECL sg_resource_state sg_query_buffer_state(sg_buffer buf);
SOKOL_API_DECL sg_resource_state sg_query_image_state(sg_image img);
SOKOL_API_DECL sg_resource_state sg_query_shader_state(sg_shader shd);
SOKOL_API_DECL sg_resource_state sg_query_pipeline_state(sg_pipeline pip);
SOKOL_API_DECL sg_resource_state sg_query_pass_state(sg_pass pass);
/* get runtime information about a resource */
SOKOL_API_DECL sg_buffer_info sg_query_buffer_info(sg_buffer buf);
SOKOL_API_DECL sg_image_info sg_query_image_info(sg_image img);
SOKOL_API_DECL sg_shader_info sg_query_shader_info(sg_shader shd);
SOKOL_API_DECL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip);
SOKOL_API_DECL sg_pass_info sg_query_pass_info(sg_pass pass);
/* get resource creation desc structs with their default values replaced */
SOKOL_API_DECL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc);
SOKOL_API_DECL sg_image_desc sg_query_image_defaults(const sg_image_desc* desc);
SOKOL_API_DECL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc);
SOKOL_API_DECL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc);
SOKOL_API_DECL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc);

/* separate resource allocation and initialization (for async setup) */
SOKOL_API_DECL sg_buffer sg_alloc_buffer(void);
SOKOL_API_DECL sg_image sg_alloc_image(void);
SOKOL_API_DECL sg_shader sg_alloc_shader(void);
SOKOL_API_DECL sg_pipeline sg_alloc_pipeline(void);
SOKOL_API_DECL sg_pass sg_alloc_pass(void);
SOKOL_API_DECL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc);
SOKOL_API_DECL void sg_init_image(sg_image img_id, const sg_image_desc* desc);
SOKOL_API_DECL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc);
SOKOL_API_DECL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc);
SOKOL_API_DECL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc);
SOKOL_API_DECL void sg_fail_buffer(sg_buffer buf_id);
SOKOL_API_DECL void sg_fail_image(sg_image img_id);
SOKOL_API_DECL void sg_fail_shader(sg_shader shd_id);
SOKOL_API_DECL void sg_fail_pipeline(sg_pipeline pip_id);
SOKOL_API_DECL void sg_fail_pass(sg_pass pass_id);

/* rendering contexts (optional) */
SOKOL_API_DECL sg_context sg_setup_context(void);
SOKOL_API_DECL void sg_activate_context(sg_context ctx_id);
SOKOL_API_DECL void sg_discard_context(sg_context ctx_id);

/* Backend-specific helper functions, these may come in handy for mixing
   sokol-gfx rendering with 'native backend' rendering functions.

   This group of functions will be expanded as needed.
*/

/* Metal: return __bridge-casted MTLRenderCommandEncoder in current pass (or zero if outside pass) */
SOKOL_API_DECL const void* sg_mtl_render_command_encoder(void);
2250 
2251 #ifdef _MSC_VER
2252 #pragma warning(pop)
2253 #endif
2254 #ifdef __cplusplus
2255 } /* extern "C" */
2256 
/* reference-based equivalents for c++ */
/* NOTE: these overloads simply forward to the pointer-taking C functions above,
   so C++ callers can pass desc structs by reference instead of by address */
inline void sg_setup(const sg_desc& desc) { return sg_setup(&desc); }

inline sg_buffer sg_make_buffer(const sg_buffer_desc& desc) { return sg_make_buffer(&desc); }
inline sg_image sg_make_image(const sg_image_desc& desc) { return sg_make_image(&desc); }
inline sg_shader sg_make_shader(const sg_shader_desc& desc) { return sg_make_shader(&desc); }
inline sg_pipeline sg_make_pipeline(const sg_pipeline_desc& desc) { return sg_make_pipeline(&desc); }
inline sg_pass sg_make_pass(const sg_pass_desc& desc) { return sg_make_pass(&desc); }
inline void sg_update_image(sg_image img, const sg_image_content& data) { return sg_update_image(img, &data); }

inline void sg_begin_default_pass(const sg_pass_action& pass_action, int width, int height) { return sg_begin_default_pass(&pass_action, width, height); }
inline void sg_begin_pass(sg_pass pass, const sg_pass_action& pass_action) { return sg_begin_pass(pass, &pass_action); }
inline void sg_apply_bindings(const sg_bindings& bindings) { return sg_apply_bindings(&bindings); }

inline sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc& desc) { return sg_query_buffer_defaults(&desc); }
inline sg_image_desc sg_query_image_defaults(const sg_image_desc& desc) { return sg_query_image_defaults(&desc); }
inline sg_shader_desc sg_query_shader_defaults(const sg_shader_desc& desc) { return sg_query_shader_defaults(&desc); }
inline sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc& desc) { return sg_query_pipeline_defaults(&desc); }
inline sg_pass_desc sg_query_pass_defaults(const sg_pass_desc& desc) { return sg_query_pass_defaults(&desc); }

inline void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc& desc) { return sg_init_buffer(buf_id, &desc); }
inline void sg_init_image(sg_image img_id, const sg_image_desc& desc) { return sg_init_image(img_id, &desc); }
inline void sg_init_shader(sg_shader shd_id, const sg_shader_desc& desc) { return sg_init_shader(shd_id, &desc); }
inline void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc& desc) { return sg_init_pipeline(pip_id, &desc); }
inline void sg_init_pass(sg_pass pass_id, const sg_pass_desc& desc) { return sg_init_pass(pass_id, &desc); }
2282 
2283 #endif
2284 #endif // SOKOL_GFX_INCLUDED
2285 
2286 /*--- IMPLEMENTATION ---------------------------------------------------------*/
2287 #ifdef SOKOL_IMPL
2288 #define SOKOL_GFX_IMPL_INCLUDED (1)
2289 
2290 #if !(defined(SOKOL_GLCORE33)||defined(SOKOL_GLES2)||defined(SOKOL_GLES3)||defined(SOKOL_D3D11)||defined(SOKOL_METAL)||defined(SOKOL_WGPU)||defined(SOKOL_DUMMY_BACKEND))
2291 #error "Please select a backend with SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11, SOKOL_METAL, SOKOL_WGPU or SOKOL_DUMMY_BACKEND"
2292 #endif
2293 #include <string.h> /* memset */
2294 #include <float.h> /* FLT_MAX */
2295 
2296 #ifndef SOKOL_API_IMPL
2297     #define SOKOL_API_IMPL
2298 #endif
2299 #ifndef SOKOL_DEBUG
2300     #ifndef NDEBUG
2301         #define SOKOL_DEBUG (1)
2302     #endif
2303 #endif
2304 #ifndef SOKOL_ASSERT
2305     #include <assert.h>
2306     #define SOKOL_ASSERT(c) assert(c)
2307 #endif
2308 #ifndef SOKOL_VALIDATE_BEGIN
2309     #define SOKOL_VALIDATE_BEGIN() _sg_validate_begin()
2310 #endif
2311 #ifndef SOKOL_VALIDATE
2312     #define SOKOL_VALIDATE(cond, err) _sg_validate((cond), err)
2313 #endif
2314 #ifndef SOKOL_VALIDATE_END
2315     #define SOKOL_VALIDATE_END() _sg_validate_end()
2316 #endif
2317 #ifndef SOKOL_UNREACHABLE
2318     #define SOKOL_UNREACHABLE SOKOL_ASSERT(false)
2319 #endif
2320 #ifndef SOKOL_MALLOC
2321     #include <stdlib.h>
2322     #define SOKOL_MALLOC(s) malloc(s)
2323     #define SOKOL_FREE(p) free(p)
2324 #endif
2325 #ifndef SOKOL_LOG
2326     #ifdef SOKOL_DEBUG
2327         #include <stdio.h>
2328         #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); }
2329     #else
2330         #define SOKOL_LOG(s)
2331     #endif
2332 #endif
2333 
2334 #ifndef _SOKOL_PRIVATE
2335     #if defined(__GNUC__) || defined(__clang__)
2336         #define _SOKOL_PRIVATE __attribute__((unused)) static
2337     #else
2338         #define _SOKOL_PRIVATE static
2339     #endif
2340 #endif
2341 
2342 #ifndef _SOKOL_UNUSED
2343     #define _SOKOL_UNUSED(x) (void)(x)
2344 #endif
2345 
2346 #if defined(SOKOL_TRACE_HOOKS)
2347 #define _SG_TRACE_ARGS(fn, ...) if (_sg.hooks.fn) { _sg.hooks.fn(__VA_ARGS__, _sg.hooks.user_data); }
2348 #define _SG_TRACE_NOARGS(fn) if (_sg.hooks.fn) { _sg.hooks.fn(_sg.hooks.user_data); }
2349 #else
2350 #define _SG_TRACE_ARGS(fn, ...)
2351 #define _SG_TRACE_NOARGS(fn)
2352 #endif
2353 
2354 /* default clear values */
2355 #ifndef SG_DEFAULT_CLEAR_RED
2356 #define SG_DEFAULT_CLEAR_RED (0.5f)
2357 #endif
2358 #ifndef SG_DEFAULT_CLEAR_GREEN
2359 #define SG_DEFAULT_CLEAR_GREEN (0.5f)
2360 #endif
2361 #ifndef SG_DEFAULT_CLEAR_BLUE
2362 #define SG_DEFAULT_CLEAR_BLUE (0.5f)
2363 #endif
2364 #ifndef SG_DEFAULT_CLEAR_ALPHA
2365 #define SG_DEFAULT_CLEAR_ALPHA (1.0f)
2366 #endif
2367 #ifndef SG_DEFAULT_CLEAR_DEPTH
2368 #define SG_DEFAULT_CLEAR_DEPTH (1.0f)
2369 #endif
2370 #ifndef SG_DEFAULT_CLEAR_STENCIL
2371 #define SG_DEFAULT_CLEAR_STENCIL (0)
2372 #endif
2373 
2374 #ifdef _MSC_VER
2375 #pragma warning(push)
2376 #pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union */
2377 #pragma warning(disable:4115)   /* named type definition in parentheses */
2378 #pragma warning(disable:4505)   /* unreferenced local function has been removed */
2379 #endif
2380 
2381 #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
2382     #define _SOKOL_ANY_GL (1)
2383 
2384     #ifndef GL_UNSIGNED_INT_2_10_10_10_REV
2385     #define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
2386     #endif
2387     #ifndef GL_UNSIGNED_INT_24_8
2388     #define GL_UNSIGNED_INT_24_8 0x84FA
2389     #endif
2390     #ifndef GL_TEXTURE_MAX_ANISOTROPY_EXT
2391     #define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
2392     #endif
2393     #ifndef GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT
2394     #define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
2395     #endif
2396     #ifndef GL_COMPRESSED_RGBA_S3TC_DXT1_EXT
2397     #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
2398     #endif
2399     #ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT
2400     #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
2401     #endif
2402     #ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT
2403     #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
2404     #endif
2405     #ifndef GL_COMPRESSED_RED_RGTC1
2406     #define GL_COMPRESSED_RED_RGTC1 0x8DBB
2407     #endif
2408     #ifndef GL_COMPRESSED_SIGNED_RED_RGTC1
2409     #define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC
2410     #endif
2411     #ifndef GL_COMPRESSED_RED_GREEN_RGTC2
2412     #define GL_COMPRESSED_RED_GREEN_RGTC2 0x8DBD
2413     #endif
2414     #ifndef GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2
2415     #define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2 0x8DBE
2416     #endif
2417     #ifndef GL_COMPRESSED_RGBA_BPTC_UNORM_ARB
2418     #define GL_COMPRESSED_RGBA_BPTC_UNORM_ARB 0x8E8C
2419     #endif
2420     #ifndef GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB
2421     #define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB 0x8E8D
2422     #endif
2423     #ifndef GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB
2424     #define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB 0x8E8E
2425     #endif
2426     #ifndef GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB
2427     #define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB 0x8E8F
2428     #endif
2429     #ifndef GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG
2430     #define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
2431     #endif
2432     #ifndef GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG
2433     #define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
2434     #endif
2435     #ifndef GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG
2436     #define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
2437     #endif
2438     #ifndef GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG
2439     #define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
2440     #endif
2441     #ifndef GL_COMPRESSED_RGB8_ETC2
2442     #define GL_COMPRESSED_RGB8_ETC2 0x9274
2443     #endif
2444     #ifndef GL_COMPRESSED_RGBA8_ETC2_EAC
2445     #define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278
2446     #endif
2447     #ifndef GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
2448     #define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276
2449     #endif
2450     #ifndef GL_COMPRESSED_RG11_EAC
2451     #define GL_COMPRESSED_RG11_EAC 0x9272
2452     #endif
2453     #ifndef GL_COMPRESSED_SIGNED_RG11_EAC
2454     #define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
2455     #endif
2456     #ifndef GL_DEPTH24_STENCIL8
2457     #define GL_DEPTH24_STENCIL8 0x88F0
2458     #endif
2459     #ifndef GL_HALF_FLOAT
2460     #define GL_HALF_FLOAT 0x140B
2461     #endif
2462     #ifndef GL_DEPTH_STENCIL
2463     #define GL_DEPTH_STENCIL 0x84F9
2464     #endif
2465     #ifndef GL_LUMINANCE
2466     #define GL_LUMINANCE 0x1909
2467     #endif
2468 
2469     #ifdef SOKOL_GLES2
2470     #   ifdef GL_ANGLE_instanced_arrays
2471     #       define SOKOL_INSTANCING_ENABLED
2472     #       define glDrawArraysInstanced(mode, first, count, instancecount)  glDrawArraysInstancedANGLE(mode, first, count, instancecount)
2473     #       define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedANGLE(mode, count, type, indices, instancecount)
2474     #       define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorANGLE(index, divisor)
2475     #   elif defined(GL_EXT_draw_instanced) && defined(GL_EXT_instanced_arrays)
2476     #       define SOKOL_INSTANCING_ENABLED
2477     #       define glDrawArraysInstanced(mode, first, count, instancecount)  glDrawArraysInstancedEXT(mode, first, count, instancecount)
2478     #       define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedEXT(mode, count, type, indices, instancecount)
2479     #       define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorEXT(index, divisor)
2480     #   else
2481     #       define SOKOL_GLES2_INSTANCING_ERROR "Select GL_ANGLE_instanced_arrays or (GL_EXT_draw_instanced & GL_EXT_instanced_arrays) to enable instancing in GLES2"
2482     #       define glDrawArraysInstanced(mode, first, count, instancecount) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR)
2483     #       define glDrawElementsInstanced(mode, count, type, indices, instancecount) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR)
2484     #       define glVertexAttribDivisor(index, divisor) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR)
2485     #   endif
2486     #else
2487     #   define SOKOL_INSTANCING_ENABLED
2488     #endif
2489     #define _SG_GL_CHECK_ERROR() { SOKOL_ASSERT(glGetError() == GL_NO_ERROR); }
2490 
2491 #elif defined(SOKOL_D3D11)
2492     #ifndef D3D11_NO_HELPERS
2493     #define D3D11_NO_HELPERS
2494     #endif
2495     #ifndef CINTERFACE
2496     #define CINTERFACE
2497     #endif
2498     #ifndef COBJMACROS
2499     #define COBJMACROS
2500     #endif
2501     #ifndef WIN32_LEAN_AND_MEAN
2502     #define WIN32_LEAN_AND_MEAN
2503     #endif
2504     #ifndef NOMINMAX
2505     #define NOMINMAX
2506     #endif
2507     #include <d3d11.h>
2508     #include <d3dcompiler.h>
2509     #ifdef _MSC_VER
2510     #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP))
2511     #pragma comment (lib, "WindowsApp.lib")
2512     #else
2513     #pragma comment (lib, "user32.lib")
2514     #pragma comment (lib, "dxgi.lib")
2515     #pragma comment (lib, "d3d11.lib")
2516     #pragma comment (lib, "dxguid.lib")
2517     #endif
2518     #endif
2519 #elif defined(SOKOL_METAL)
2520     // see https://clang.llvm.org/docs/LanguageExtensions.html#automatic-reference-counting
2521     #if !defined(__cplusplus)
2522         #if __has_feature(objc_arc) && !__has_feature(objc_arc_fields)
2523             #error "sokol_app.h requires __has_feature(objc_arc_field) if ARC is enabled (use a more recent compiler version)"
2524         #endif
2525     #endif
2526     #include <TargetConditionals.h>
2527     #import <Metal/Metal.h>
2528     #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE
2529         #define _SG_TARGET_MACOS (1)
2530     #else
2531         #define _SG_TARGET_IOS (1)
2532         #if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
2533             #define _SG_TARGET_IOS_SIMULATOR (1)
2534         #endif
2535     #endif
2536 #elif defined(SOKOL_WGPU)
2537     #if defined(__EMSCRIPTEN__)
2538         #include <webgpu/webgpu.h>
2539     #else
2540         #include <dawn/webgpu.h>
2541     #endif
2542 #endif
2543 
2544 /*=== COMMON BACKEND STUFF ===================================================*/
2545 
/* resource pool slots */
/* per-resource bookkeeping stored in every resource pool entry */
typedef struct {
    uint32_t id;                /* resource id handle value (slots with an invalid image id are skipped in pass setup below) */
    uint32_t ctx_id;            /* NOTE(review): presumably the id of the sg_context the resource was created in -- confirm against pool code */
    sg_resource_state state;    /* current lifecycle state of the resource */
} _sg_slot_t;

/* constants */
enum {
    _SG_STRING_SIZE = 16,                       /* capacity of _sg_str_t.buf */
    _SG_SLOT_SHIFT = 16,                        /* NOTE(review): looks like the number of slot-index bits in a resource id -- confirm */
    _SG_SLOT_MASK = (1<<_SG_SLOT_SHIFT)-1,      /* mask to extract the slot index from a resource id */
    _SG_MAX_POOL_SIZE = (1<<_SG_SLOT_SHIFT),    /* upper bound on any pool size */
    /* default pool sizes, overridable via sg_desc */
    _SG_DEFAULT_BUFFER_POOL_SIZE = 128,
    _SG_DEFAULT_IMAGE_POOL_SIZE = 128,
    _SG_DEFAULT_SHADER_POOL_SIZE = 32,
    _SG_DEFAULT_PIPELINE_POOL_SIZE = 64,
    _SG_DEFAULT_PASS_POOL_SIZE = 16,
    _SG_DEFAULT_CONTEXT_POOL_SIZE = 16,
    _SG_DEFAULT_SAMPLER_CACHE_CAPACITY = 64,    /* default capacity for the Metal/WGPU sampler cache below */
    _SG_DEFAULT_UB_SIZE = 4 * 1024 * 1024,      /* default uniform buffer size in bytes */
    _SG_DEFAULT_STAGING_SIZE = 8 * 1024 * 1024, /* default staging buffer size in bytes */
};

/* fixed-size string */
typedef struct {
    char buf[_SG_STRING_SIZE];
} _sg_str_t;
2574 
/* helper macros
   NOTE: all arguments are fully parenthesized to avoid operator-precedence
   surprises (e.g. _sg_min(a|b, c)); arguments may still be evaluated more
   than once, so don't pass expressions with side effects */
#define _sg_def(val, def) (((val) == 0) ? (def) : (val))
#define _sg_def_flt(val, def) (((val) == 0.0f) ? (def) : (val))
#define _sg_min(a,b) (((a)<(b))?(a):(b))
#define _sg_max(a,b) (((a)>(b))?(a):(b))
#define _sg_clamp(v,v0,v1) (((v)<(v0))?(v0):(((v)>(v1))?(v1):(v)))
#define _sg_fequal(val,cmp,delta) ((((val)-(cmp))> -(delta))&&(((val)-(cmp))<(delta)))
2582 
/* backend-agnostic state shared by all buffer backend implementations */
typedef struct {
    int size;                       /* buffer size in bytes */
    int append_pos;                 /* NOTE(review): presumably the current append write offset -- confirm against sg_append_buffer */
    bool append_overflow;           /* NOTE(review): presumably set when an append exceeded the buffer size -- confirm */
    sg_buffer_type type;            /* vertex- or index-buffer */
    sg_usage usage;                 /* immutable / dynamic / stream */
    uint32_t update_frame_index;    /* frame index of last update (0 == never updated) */
    uint32_t append_frame_index;    /* frame index of last append (0 == never appended) */
    int num_slots;                  /* 1 for immutable buffers, SG_NUM_INFLIGHT_FRAMES otherwise (see _sg_buffer_common_init) */
    int active_slot;                /* index of the currently active in-flight slot */
} _sg_buffer_common_t;
2594 
_sg_buffer_common_init(_sg_buffer_common_t * cmn,const sg_buffer_desc * desc)2595 _SOKOL_PRIVATE void _sg_buffer_common_init(_sg_buffer_common_t* cmn, const sg_buffer_desc* desc) {
2596     cmn->size = desc->size;
2597     cmn->append_pos = 0;
2598     cmn->append_overflow = false;
2599     cmn->type = desc->type;
2600     cmn->usage = desc->usage;
2601     cmn->update_frame_index = 0;
2602     cmn->append_frame_index = 0;
2603     cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES;
2604     cmn->active_slot = 0;
2605 }
2606 
/* backend-agnostic state shared by all image backend implementations */
typedef struct {
    sg_image_type type;             /* 2D, cube, 3D or array image */
    bool render_target;             /* true if usable as a pass attachment */
    int width;                      /* base mip level dimensions in pixels */
    int height;
    int depth;                      /* depth or number of array layers */
    int num_mipmaps;
    sg_usage usage;                 /* immutable / dynamic / stream */
    sg_pixel_format pixel_format;
    int sample_count;               /* MSAA sample count */
    /* sampler state (same fields as the sampler-cache key below) */
    sg_filter min_filter;
    sg_filter mag_filter;
    sg_wrap wrap_u;
    sg_wrap wrap_v;
    sg_wrap wrap_w;
    sg_border_color border_color;
    uint32_t max_anisotropy;
    uint32_t upd_frame_index;       /* frame index of last update (0 == never updated) */
    int num_slots;                  /* 1 for immutable images, SG_NUM_INFLIGHT_FRAMES otherwise */
    int active_slot;                /* index of the currently active in-flight slot */
} _sg_image_common_t;
2628 
_sg_image_common_init(_sg_image_common_t * cmn,const sg_image_desc * desc)2629 _SOKOL_PRIVATE void _sg_image_common_init(_sg_image_common_t* cmn, const sg_image_desc* desc) {
2630     cmn->type = desc->type;
2631     cmn->render_target = desc->render_target;
2632     cmn->width = desc->width;
2633     cmn->height = desc->height;
2634     cmn->depth = desc->depth;
2635     cmn->num_mipmaps = desc->num_mipmaps;
2636     cmn->usage = desc->usage;
2637     cmn->pixel_format = desc->pixel_format;
2638     cmn->sample_count = desc->sample_count;
2639     cmn->min_filter = desc->min_filter;
2640     cmn->mag_filter = desc->mag_filter;
2641     cmn->wrap_u = desc->wrap_u;
2642     cmn->wrap_v = desc->wrap_v;
2643     cmn->wrap_w = desc->wrap_w;
2644     cmn->border_color = desc->border_color;
2645     cmn->max_anisotropy = desc->max_anisotropy;
2646     cmn->upd_frame_index = 0;
2647     cmn->num_slots = (cmn->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES;
2648     cmn->active_slot = 0;
2649 }
2650 
/* byte size of one uniform block as declared in the shader desc */
typedef struct {
    int size;
} _sg_uniform_block_t;

/* image/sampler type expected by one shader image slot */
typedef struct {
    sg_image_type type;
    sg_sampler_type sampler_type;
} _sg_shader_image_t;

/* reflection info for a single shader stage (vertex or fragment) */
typedef struct {
    int num_uniform_blocks;     /* number of used entries in uniform_blocks[] */
    int num_images;             /* number of used entries in images[] */
    _sg_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS];
    _sg_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_shader_stage_t;

/* backend-agnostic state shared by all shader backend implementations */
typedef struct {
    _sg_shader_stage_t stage[SG_NUM_SHADER_STAGES];
} _sg_shader_common_t;
2670 
_sg_shader_common_init(_sg_shader_common_t * cmn,const sg_shader_desc * desc)2671 _SOKOL_PRIVATE void _sg_shader_common_init(_sg_shader_common_t* cmn, const sg_shader_desc* desc) {
2672     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
2673         const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs;
2674         _sg_shader_stage_t* stage = &cmn->stage[stage_index];
2675         SOKOL_ASSERT(stage->num_uniform_blocks == 0);
2676         for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
2677             const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
2678             if (0 == ub_desc->size) {
2679                 break;
2680             }
2681             stage->uniform_blocks[ub_index].size = ub_desc->size;
2682             stage->num_uniform_blocks++;
2683         }
2684         SOKOL_ASSERT(stage->num_images == 0);
2685         for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) {
2686             const sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
2687             if (img_desc->type == _SG_IMAGETYPE_DEFAULT) {
2688                 break;
2689             }
2690             stage->images[img_index].type = img_desc->type;
2691             stage->images[img_index].sampler_type = img_desc->sampler_type;
2692             stage->num_images++;
2693         }
2694     }
2695 }
2696 
/* backend-agnostic state shared by all pipeline backend implementations */
typedef struct {
    sg_shader shader_id;            /* the shader this pipeline was created with */
    sg_index_type index_type;       /* none / uint16 / uint32 */
    bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS];   /* which vertex buffer bind slots the layout uses */
    int color_attachment_count;
    sg_pixel_format color_format;
    sg_pixel_format depth_format;
    int sample_count;               /* MSAA sample count */
    float depth_bias;
    float depth_bias_slope_scale;
    float depth_bias_clamp;
    float blend_color[4];           /* RGBA constant blend color */
} _sg_pipeline_common_t;
2710 
_sg_pipeline_common_init(_sg_pipeline_common_t * cmn,const sg_pipeline_desc * desc)2711 _SOKOL_PRIVATE void _sg_pipeline_common_init(_sg_pipeline_common_t* cmn, const sg_pipeline_desc* desc) {
2712     cmn->shader_id = desc->shader;
2713     cmn->index_type = desc->index_type;
2714     for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) {
2715         cmn->vertex_layout_valid[i] = false;
2716     }
2717     cmn->color_attachment_count = desc->blend.color_attachment_count;
2718     cmn->color_format = desc->blend.color_format;
2719     cmn->depth_format = desc->blend.depth_format;
2720     cmn->sample_count = desc->rasterizer.sample_count;
2721     cmn->depth_bias = desc->rasterizer.depth_bias;
2722     cmn->depth_bias_slope_scale = desc->rasterizer.depth_bias_slope_scale;
2723     cmn->depth_bias_clamp = desc->rasterizer.depth_bias_clamp;
2724     for (int i = 0; i < 4; i++) {
2725         cmn->blend_color[i] = desc->blend.blend_color[i];
2726     }
2727 }
2728 
/* backend-agnostic state for one pass attachment */
typedef struct {
    sg_image image_id;      /* the image to render into */
    int mip_level;          /* mip level of the image to render into */
    int slice;              /* array layer / cube face / 3D slice */
} _sg_attachment_common_t;

/* backend-agnostic state shared by all pass backend implementations */
typedef struct {
    int num_color_atts;     /* number of valid entries in color_atts[] */
    _sg_attachment_common_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
    _sg_attachment_common_t ds_att;     /* optional depth-stencil attachment */
} _sg_pass_common_t;
2740 
_sg_pass_common_init(_sg_pass_common_t * cmn,const sg_pass_desc * desc)2741 _SOKOL_PRIVATE void _sg_pass_common_init(_sg_pass_common_t* cmn, const sg_pass_desc* desc) {
2742     const sg_attachment_desc* att_desc;
2743     _sg_attachment_common_t* att;
2744     for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
2745         att_desc = &desc->color_attachments[i];
2746         if (att_desc->image.id != SG_INVALID_ID) {
2747             cmn->num_color_atts++;
2748             att = &cmn->color_atts[i];
2749             att->image_id = att_desc->image;
2750             att->mip_level = att_desc->mip_level;
2751             att->slice = att_desc->slice;
2752         }
2753     }
2754     att_desc = &desc->depth_stencil_attachment;
2755     if (att_desc->image.id != SG_INVALID_ID) {
2756         att = &cmn->ds_att;
2757         att->image_id = att_desc->image;
2758         att->mip_level = att_desc->mip_level;
2759         att->slice = att_desc->slice;
2760     }
2761 }
2762 
/*=== GENERIC SAMPLER CACHE ==================================================*/

/*
    this is used by the Metal and WGPU backends to reduce the
    number of sampler state objects created through the backend API
*/
/* one cache entry: the sampler-state key fields plus the backend handle */
typedef struct {
    sg_filter min_filter;
    sg_filter mag_filter;
    sg_wrap wrap_u;
    sg_wrap wrap_v;
    sg_wrap wrap_w;
    sg_border_color border_color;
    uint32_t max_anisotropy;
    int min_lod;    /* orig min/max_lod is float, this is int(min/max_lod*1000.0) */
    int max_lod;
    uintptr_t sampler_handle;   /* opaque backend sampler object, stored as an integer */
} _sg_sampler_cache_item_t;

/* growable-capacity-at-init, linear-scan sampler cache */
typedef struct {
    int capacity;       /* allocated size of items[], fixed at init time */
    int num_items;      /* number of used entries in items[] */
    _sg_sampler_cache_item_t* items;    /* heap-allocated via SOKOL_MALLOC, freed in _sg_smpcache_discard */
} _sg_sampler_cache_t;
2787 
_sg_smpcache_init(_sg_sampler_cache_t * cache,int capacity)2788 _SOKOL_PRIVATE void _sg_smpcache_init(_sg_sampler_cache_t* cache, int capacity) {
2789     SOKOL_ASSERT(cache && (capacity > 0));
2790     memset(cache, 0, sizeof(_sg_sampler_cache_t));
2791     cache->capacity = capacity;
2792     const int size = cache->capacity * sizeof(_sg_sampler_cache_item_t);
2793     cache->items = (_sg_sampler_cache_item_t*) SOKOL_MALLOC(size);
2794     memset(cache->items, 0, size);
2795 }
2796 
_sg_smpcache_discard(_sg_sampler_cache_t * cache)2797 _SOKOL_PRIVATE void _sg_smpcache_discard(_sg_sampler_cache_t* cache) {
2798     SOKOL_ASSERT(cache && cache->items);
2799     SOKOL_FREE(cache->items);
2800     cache->items = 0;
2801     cache->num_items = 0;
2802     cache->capacity = 0;
2803 }
2804 
_sg_smpcache_minlod_int(float min_lod)2805 _SOKOL_PRIVATE int _sg_smpcache_minlod_int(float min_lod) {
2806     return (int) (min_lod * 1000.0f);
2807 }
2808 
_sg_smpcache_maxlod_int(float max_lod)2809 _SOKOL_PRIVATE int _sg_smpcache_maxlod_int(float max_lod) {
2810     return (int) (_sg_clamp(max_lod, 0.0f, 1000.0f) * 1000.0f);
2811 }
2812 
_sg_smpcache_find_item(const _sg_sampler_cache_t * cache,const sg_image_desc * img_desc)2813 _SOKOL_PRIVATE int _sg_smpcache_find_item(const _sg_sampler_cache_t* cache, const sg_image_desc* img_desc) {
2814     /* return matching sampler cache item index or -1 */
2815     SOKOL_ASSERT(cache && cache->items);
2816     SOKOL_ASSERT(img_desc);
2817     const int min_lod = _sg_smpcache_minlod_int(img_desc->min_lod);
2818     const int max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod);
2819     for (int i = 0; i < cache->num_items; i++) {
2820         const _sg_sampler_cache_item_t* item = &cache->items[i];
2821         if ((img_desc->min_filter == item->min_filter) &&
2822             (img_desc->mag_filter == item->mag_filter) &&
2823             (img_desc->wrap_u == item->wrap_u) &&
2824             (img_desc->wrap_v == item->wrap_v) &&
2825             (img_desc->wrap_w == item->wrap_w) &&
2826             (img_desc->max_anisotropy == item->max_anisotropy) &&
2827             (img_desc->border_color == item->border_color) &&
2828             (min_lod == item->min_lod) &&
2829             (max_lod == item->max_lod))
2830         {
2831             return i;
2832         }
2833     }
2834     /* fallthrough: no matching cache item found */
2835     return -1;
2836 }
2837 
_sg_smpcache_add_item(_sg_sampler_cache_t * cache,const sg_image_desc * img_desc,uintptr_t sampler_handle)2838 _SOKOL_PRIVATE void _sg_smpcache_add_item(_sg_sampler_cache_t* cache, const sg_image_desc* img_desc, uintptr_t sampler_handle) {
2839     SOKOL_ASSERT(cache && cache->items);
2840     SOKOL_ASSERT(img_desc);
2841     SOKOL_ASSERT(cache->num_items < cache->capacity);
2842     const int item_index = cache->num_items++;
2843     _sg_sampler_cache_item_t* item = &cache->items[item_index];
2844     item->min_filter = img_desc->min_filter;
2845     item->mag_filter = img_desc->mag_filter;
2846     item->wrap_u = img_desc->wrap_u;
2847     item->wrap_v = img_desc->wrap_v;
2848     item->wrap_w = img_desc->wrap_w;
2849     item->border_color = img_desc->border_color;
2850     item->max_anisotropy = img_desc->max_anisotropy;
2851     item->min_lod = _sg_smpcache_minlod_int(img_desc->min_lod);
2852     item->max_lod = _sg_smpcache_maxlod_int(img_desc->max_lod);
2853     item->sampler_handle = sampler_handle;
2854 }
2855 
_sg_smpcache_sampler(_sg_sampler_cache_t * cache,int item_index)2856 _SOKOL_PRIVATE uintptr_t _sg_smpcache_sampler(_sg_sampler_cache_t* cache, int item_index) {
2857     SOKOL_ASSERT(cache && cache->items);
2858     SOKOL_ASSERT((item_index >= 0) && (item_index < cache->num_items));
2859     return cache->items[item_index].sampler_handle;
2860 }
2861 
/*=== DUMMY BACKEND DECLARATIONS =============================================*/
/* dummy-backend resources carry only the backend-agnostic common state,
   no actual GPU objects -- useful for command-line tests */
#if defined(SOKOL_DUMMY_BACKEND)
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
} _sg_dummy_buffer_t;
typedef _sg_dummy_buffer_t _sg_buffer_t;

typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
} _sg_dummy_image_t;
typedef _sg_dummy_image_t _sg_image_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
} _sg_dummy_shader_t;
typedef _sg_dummy_shader_t _sg_shader_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_t* shader;       /* resolved pointer to the pipeline's shader */
    _sg_pipeline_common_t cmn;
} _sg_dummy_pipeline_t;
typedef _sg_dummy_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;         /* resolved pointer to the attachment image */
} _sg_dummy_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_dummy_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_dummy_attachment_t ds_att;
    } dmy;
} _sg_dummy_pass_t;
typedef _sg_dummy_pass_t _sg_pass_t;
typedef _sg_attachment_common_t _sg_attachment_t;

typedef struct {
    _sg_slot_t slot;
} _sg_dummy_context_t;
typedef _sg_dummy_context_t _sg_context_t;
2908 
/*== GL BACKEND DECLARATIONS =================================================*/
#elif defined(_SOKOL_ANY_GL)
/* GL buffer: one GL buffer object per in-flight frame */
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        GLuint buf[SG_NUM_INFLIGHT_FRAMES];
        bool ext_buffers;   /* if true, external buffers were injected with sg_buffer_desc.gl_buffers */
    } gl;
} _sg_gl_buffer_t;
typedef _sg_gl_buffer_t _sg_buffer_t;

/* GL image: texture objects plus optional render-target helper renderbuffers */
typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        GLenum target;                  /* GL texture target (e.g. GL_TEXTURE_2D) */
        GLuint depth_render_buffer;     /* only for render targets with a depth buffer */
        GLuint msaa_render_buffer;      /* only for MSAA render targets */
        GLuint tex[SG_NUM_INFLIGHT_FRAMES];
        bool ext_textures;  /* if true, external textures were injected with sg_image_desc.gl_textures */
    } gl;
} _sg_gl_image_t;
typedef _sg_gl_image_t _sg_image_t;

/* reflection info for one uniform within a uniform block */
typedef struct {
    GLint gl_loc;           /* GL uniform location */
    sg_uniform_type type;
    uint8_t count;          /* array count */
    uint16_t offset;        /* byte offset into the uniform block data */
} _sg_gl_uniform_t;

typedef struct {
    int num_uniforms;
    _sg_gl_uniform_t uniforms[SG_MAX_UB_MEMBERS];
} _sg_gl_uniform_block_t;

typedef struct {
    int gl_tex_slot;        /* texture unit assigned to this image slot */
} _sg_gl_shader_image_t;

/* vertex attribute name, needed to look up attribute locations */
typedef struct {
    _sg_str_t name;
} _sg_gl_shader_attr_t;

typedef struct {
    _sg_gl_uniform_block_t uniform_blocks[SG_MAX_SHADERSTAGE_UBS];
    _sg_gl_shader_image_t images[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_gl_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        GLuint prog;        /* the linked GL shader program */
        _sg_gl_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        _sg_gl_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } gl;
} _sg_gl_shader_t;
typedef _sg_gl_shader_t _sg_shader_t;

/* resolved per-attribute vertex layout state for glVertexAttribPointer */
typedef struct {
    int8_t vb_index;        /* -1 if attr is not enabled */
    int8_t divisor;         /* -1 if not initialized */
    uint8_t stride;
    uint8_t size;
    uint8_t normalized;
    int offset;
    GLenum type;
} _sg_gl_attr_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;       /* resolved pointer to the pipeline's shader */
    struct {
        _sg_gl_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        sg_depth_stencil_state depth_stencil;
        sg_primitive_type primitive_type;
        sg_blend_state blend;
        sg_rasterizer_state rast;
    } gl;
} _sg_gl_pipeline_t;
typedef _sg_gl_pipeline_t _sg_pipeline_t;

typedef struct {
    _sg_image_t* image;                 /* resolved pointer to the attachment image */
    GLuint gl_msaa_resolve_buffer;      /* helper FBO for MSAA resolve, if needed */
} _sg_gl_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        GLuint fb;                      /* the GL framebuffer object */
        _sg_gl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_gl_attachment_t ds_att;
    } gl;
} _sg_gl_pass_t;
typedef _sg_gl_pass_t _sg_pass_t;
typedef _sg_attachment_common_t _sg_attachment_t;

typedef struct {
    _sg_slot_t slot;
    #if !defined(SOKOL_GLES2)
    GLuint vao;                         /* one vertex array object per context (not available on GLES2) */
    #endif
    GLuint default_framebuffer;
} _sg_gl_context_t;
typedef _sg_gl_context_t _sg_context_t;

typedef struct {
    _sg_gl_attr_t gl_attr;
    GLuint gl_vbuf;
} _sg_gl_cache_attr_t;

typedef struct {
    GLenum target;
    GLuint texture;
} _sg_gl_texture_bind_slot;

/* shadow copy of GL state to avoid redundant GL calls */
typedef struct {
    sg_depth_stencil_state ds;
    sg_blend_state blend;
    sg_rasterizer_state rast;
    bool polygon_offset_enabled;
    _sg_gl_cache_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
    GLuint vertex_buffer;
    GLuint index_buffer;
    GLuint stored_vertex_buffer;        /* saved binding for push/pop-style restore */
    GLuint stored_index_buffer;
    GLuint prog;
    _sg_gl_texture_bind_slot textures[SG_MAX_SHADERSTAGE_IMAGES];
    _sg_gl_texture_bind_slot stored_texture;
    int cur_ib_offset;
    GLenum cur_primitive_type;
    GLenum cur_index_type;
    GLenum cur_active_texture;
    _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
} _sg_gl_state_cache_t;

/* top-level GL backend state */
typedef struct {
    bool valid;
    bool gles2;                         /* true when running on a GLES2 (or WebGL1) context */
    bool in_pass;
    int cur_pass_width;
    int cur_pass_height;
    _sg_context_t* cur_context;
    _sg_pass_t* cur_pass;
    sg_pass cur_pass_id;
    _sg_gl_state_cache_t cache;
    bool ext_anisotropic;               /* true if the anisotropic-filtering extension is available */
    GLint max_anisotropy;
    GLint max_combined_texture_image_units;
} _sg_gl_backend_t;
3065 
3066 /*== D3D11 BACKEND DECLARATIONS ==============================================*/
3067 #elif defined(SOKOL_D3D11)
3068 
/* D3D11 buffer resource: common state plus the native buffer object */
typedef struct {
    _sg_slot_t slot;            /* resource-pool slot header */
    _sg_buffer_common_t cmn;    /* backend-independent buffer state */
    struct {
        ID3D11Buffer* buf;
    } d3d11;
} _sg_d3d11_buffer_t;
typedef _sg_d3d11_buffer_t _sg_buffer_t;

/* D3D11 image resource: 2D/3D texture objects, plus (per their names)
   optional depth-stencil and MSAA textures, the shader-resource view
   and sampler state */
typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        DXGI_FORMAT format;
        ID3D11Texture2D* tex2d;
        ID3D11Texture3D* tex3d;
        ID3D11Texture2D* texds;
        ID3D11Texture2D* texmsaa;
        ID3D11ShaderResourceView* srv;
        ID3D11SamplerState* smp;
    } d3d11;
} _sg_d3d11_image_t;
typedef _sg_d3d11_image_t _sg_image_t;

/* HLSL semantic name and index of one vertex attribute */
typedef struct {
    _sg_str_t sem_name;
    int sem_index;
} _sg_d3d11_shader_attr_t;
3097 
/* per-shader-stage D3D11 state: one constant buffer per uniform block */
typedef struct {
    ID3D11Buffer* cbufs[SG_MAX_SHADERSTAGE_UBS];
} _sg_d3d11_shader_stage_t;

/* D3D11 shader resource: vertex/pixel shader objects plus a copy of the
   vertex-shader bytecode (presumably retained for input-layout creation at
   pipeline-creation time -- confirm in the pipeline creation code) */
typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_d3d11_shader_attr_t attrs[SG_MAX_VERTEX_ATTRIBUTES];
        _sg_d3d11_shader_stage_t stage[SG_NUM_SHADER_STAGES];
        ID3D11VertexShader* vs;
        ID3D11PixelShader* fs;
        void* vs_blob;          /* copy of vertex-shader bytecode */
        int vs_blob_length;     /* size of vs_blob in bytes */
    } d3d11;
} _sg_d3d11_shader_t;
typedef _sg_d3d11_shader_t _sg_shader_t;

/* D3D11 pipeline resource: pre-baked render-state objects and input layout */
typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        UINT stencil_ref;
        UINT vb_strides[SG_MAX_SHADERSTAGE_BUFFERS];
        D3D_PRIMITIVE_TOPOLOGY topology;
        DXGI_FORMAT index_format;
        ID3D11InputLayout* il;
        ID3D11RasterizerState* rs;
        ID3D11DepthStencilState* dss;
        ID3D11BlendState* bs;
    } d3d11;
} _sg_d3d11_pipeline_t;
typedef _sg_d3d11_pipeline_t _sg_pipeline_t;
3132 
/* D3D11 color attachment: attached image plus its render-target view */
typedef struct {
    _sg_image_t* image;
    ID3D11RenderTargetView* rtv;
} _sg_d3d11_color_attachment_t;

/* D3D11 depth-stencil attachment: attached image plus its depth-stencil view */
typedef struct {
    _sg_image_t* image;
    ID3D11DepthStencilView* dsv;
} _sg_d3d11_ds_attachment_t;

/* D3D11 render-pass resource */
typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_d3d11_color_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_d3d11_ds_attachment_t ds_att;
    } d3d11;
} _sg_d3d11_pass_t;
typedef _sg_d3d11_pass_t _sg_pass_t;
typedef _sg_attachment_common_t _sg_attachment_t;

/* D3D11 needs no per-context state beyond the pool slot */
typedef struct {
    _sg_slot_t slot;
} _sg_d3d11_context_t;
typedef _sg_d3d11_context_t _sg_context_t;
3158 
/* overall D3D11 backend state */
typedef struct {
    bool valid;
    ID3D11Device* dev;
    ID3D11DeviceContext* ctx;
    const void* (*rtv_cb)(void);    /* user callbacks, per their names returning the default render-target / depth-stencil views */
    const void* (*dsv_cb)(void);
    bool in_pass;
    bool use_indexed_draw;
    int cur_width;
    int cur_height;
    int num_rtvs;       /* number of render-target views in the current pass */
    _sg_pass_t* cur_pass;
    sg_pass cur_pass_id;
    _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    ID3D11RenderTargetView* cur_rtvs[SG_MAX_COLOR_ATTACHMENTS];
    ID3D11DepthStencilView* cur_dsv;
    /* on-demand loaded d3dcompiler_47.dll handles */
    HINSTANCE d3dcompiler_dll;
    bool d3dcompiler_dll_load_failed;
    pD3DCompile D3DCompile_func;
    /* the following arrays are used for unbinding resources, they will always contain zeroes */
    ID3D11RenderTargetView* zero_rtvs[SG_MAX_COLOR_ATTACHMENTS];
    ID3D11Buffer* zero_vbs[SG_MAX_SHADERSTAGE_BUFFERS];
    UINT zero_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS];
    UINT zero_vb_strides[SG_MAX_SHADERSTAGE_BUFFERS];
    ID3D11Buffer* zero_cbs[SG_MAX_SHADERSTAGE_UBS];
    ID3D11ShaderResourceView* zero_srvs[SG_MAX_SHADERSTAGE_IMAGES];
    ID3D11SamplerState* zero_smps[SG_MAX_SHADERSTAGE_IMAGES];
    /* global subresourcedata array for texture updates */
    D3D11_SUBRESOURCE_DATA subres_data[SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS];
} _sg_d3d11_backend_t;
3191 
3192 /*=== METAL BACKEND DECLARATIONS =============================================*/
3193 #elif defined(SOKOL_METAL)
3194 
/* uniform-buffer offset alignment (256 on macOS / iOS simulator, 16 elsewhere) */
#if defined(_SG_TARGET_MACOS) || defined(_SG_TARGET_IOS_SIMULATOR)
#define _SG_MTL_UB_ALIGN (256)
#else
#define _SG_MTL_UB_ALIGN (16)
#endif
#define _SG_MTL_INVALID_SLOT_INDEX (0)

typedef struct {
    uint32_t frame_index;   /* frame index at which it is safe to release this resource */
    uint32_t slot_index;
} _sg_mtl_release_item_t;

/* pool of ObjC object references indexed by slot, with a free queue and a
   deferred-release queue keyed by frame index */
typedef struct {
    NSMutableArray* pool;
    uint32_t num_slots;
    uint32_t free_queue_top;
    uint32_t* free_queue;
    uint32_t release_queue_front;
    uint32_t release_queue_back;
    _sg_mtl_release_item_t* release_queue;
} _sg_mtl_idpool_t;
3216 
/* Metal buffer resource: one idpool slot index per in-flight frame */
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        uint32_t buf[SG_NUM_INFLIGHT_FRAMES];  /* index into _sg_mtl_pool */
    } mtl;
} _sg_mtl_buffer_t;
typedef _sg_mtl_buffer_t _sg_buffer_t;

/* Metal image resource: idpool indices for the per-frame textures, plus
   (per their names) optional depth and MSAA textures and a sampler state */
typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        uint32_t tex[SG_NUM_INFLIGHT_FRAMES];
        uint32_t depth_tex;
        uint32_t msaa_tex;
        uint32_t sampler_state;
    } mtl;
} _sg_mtl_image_t;
typedef _sg_mtl_image_t _sg_image_t;

/* per-stage shader state: idpool indices of MTLLibrary and MTLFunction */
typedef struct {
    uint32_t mtl_lib;
    uint32_t mtl_func;
} _sg_mtl_shader_stage_t;

typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_mtl_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } mtl;
} _sg_mtl_shader_t;
typedef _sg_mtl_shader_t _sg_shader_t;
3251 
/* Metal pipeline resource; rps/dss are idpool indices of the render-pipeline
   and depth-stencil state objects */
typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        MTLPrimitiveType prim_type;
        NSUInteger index_size;      /* index size in bytes */
        MTLIndexType index_type;
        MTLCullMode cull_mode;
        MTLWinding winding;
        uint32_t stencil_ref;
        uint32_t rps;
        uint32_t dss;
    } mtl;
} _sg_mtl_pipeline_t;
typedef _sg_mtl_pipeline_t _sg_pipeline_t;

/* Metal attachments only need the attached image */
typedef struct {
    _sg_image_t* image;
} _sg_mtl_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_mtl_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_mtl_attachment_t ds_att;
    } mtl;
} _sg_mtl_pass_t;
typedef _sg_mtl_pass_t _sg_pass_t;
typedef _sg_attachment_common_t _sg_attachment_t;

/* Metal needs no per-context state beyond the pool slot */
typedef struct {
    _sg_slot_t slot;
} _sg_mtl_context_t;
typedef _sg_mtl_context_t _sg_context_t;
3288 
/* resource binding state cache: remembers the currently applied bindings so
   redundant encoder calls can be skipped */
typedef struct {
    const _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    const _sg_buffer_t* cur_indexbuffer;
    int cur_indexbuffer_offset;
    sg_buffer cur_indexbuffer_id;
    const _sg_buffer_t* cur_vertexbuffers[SG_MAX_SHADERSTAGE_BUFFERS];
    int cur_vertexbuffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS];
    sg_buffer cur_vertexbuffer_ids[SG_MAX_SHADERSTAGE_BUFFERS];
    const _sg_image_t* cur_vs_images[SG_MAX_SHADERSTAGE_IMAGES];
    sg_image cur_vs_image_ids[SG_MAX_SHADERSTAGE_IMAGES];
    const _sg_image_t* cur_fs_images[SG_MAX_SHADERSTAGE_IMAGES];
    sg_image cur_fs_image_ids[SG_MAX_SHADERSTAGE_IMAGES];
} _sg_mtl_state_cache_t;

/* overall Metal backend state */
typedef struct {
    bool valid;
    const void*(*renderpass_descriptor_cb)(void);   /* user callbacks for default-pass objects */
    const void*(*drawable_cb)(void);
    uint32_t frame_index;
    uint32_t cur_frame_rotate_index;    /* rotates through the in-flight uniform buffers */
    uint32_t ub_size;
    uint32_t cur_ub_offset;
    uint8_t* cur_ub_base_ptr;
    bool in_pass;
    bool pass_valid;
    int cur_width;
    int cur_height;
    _sg_mtl_state_cache_t state_cache;
    _sg_sampler_cache_t sampler_cache;
    _sg_mtl_idpool_t idpool;
    dispatch_semaphore_t sem;       /* presumably throttles CPU ahead-of-GPU frames -- confirm in frame code */
    id<MTLDevice> device;
    id<MTLCommandQueue> cmd_queue;
    id<MTLCommandBuffer> cmd_buffer;
    id<MTLRenderCommandEncoder> cmd_encoder;
    id<MTLBuffer> uniform_buffers[SG_NUM_INFLIGHT_FRAMES];
} _sg_mtl_backend_t;
3328 
3329 /*=== WGPU BACKEND DECLARATIONS ==============================================*/
3330 #elif defined(SOKOL_WGPU)
3331 
/* WGPU backend tuning constants */
#define _SG_WGPU_STAGING_ALIGN (256)
#define _SG_WGPU_STAGING_PIPELINE_SIZE (8)
#define _SG_WGPU_ROWPITCH_ALIGN (256)
#define _SG_WGPU_MAX_SHADERSTAGE_IMAGES (8)
#define _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE (1<<16)

/* WGPU buffer resource */
typedef struct {
    _sg_slot_t slot;
    _sg_buffer_common_t cmn;
    struct {
        WGPUBuffer buf;
    } wgpu;
} _sg_wgpu_buffer_t;
typedef _sg_wgpu_buffer_t _sg_buffer_t;

/* WGPU image resource: texture and its view, plus (per their names) an
   optional MSAA texture and a sampler */
typedef struct {
    _sg_slot_t slot;
    _sg_image_common_t cmn;
    struct {
        WGPUTexture tex;
        WGPUTextureView tex_view;
        WGPUTexture msaa_tex;
        WGPUSampler sampler;
    } wgpu;
} _sg_wgpu_image_t;
typedef _sg_wgpu_image_t _sg_image_t;

/* per-stage shader state: shader module, bind-group layout and entry point name */
typedef struct {
    WGPUShaderModule module;
    WGPUBindGroupLayout bind_group_layout;
    _sg_str_t entry;
} _sg_wgpu_shader_stage_t;
3364 
/* WGPU shader resource */
typedef struct {
    _sg_slot_t slot;
    _sg_shader_common_t cmn;
    struct {
        _sg_wgpu_shader_stage_t stage[SG_NUM_SHADER_STAGES];
    } wgpu;
} _sg_wgpu_shader_t;
typedef _sg_wgpu_shader_t _sg_shader_t;

/* WGPU pipeline resource */
typedef struct {
    _sg_slot_t slot;
    _sg_pipeline_common_t cmn;
    _sg_shader_t* shader;
    struct {
        WGPURenderPipeline pip;
        uint32_t stencil_ref;
    } wgpu;
} _sg_wgpu_pipeline_t;
typedef _sg_wgpu_pipeline_t _sg_pipeline_t;

/* WGPU attachment: image plus render and resolve texture views */
typedef struct {
    _sg_image_t* image;
    WGPUTextureView render_tex_view;
    WGPUTextureView resolve_tex_view;
} _sg_wgpu_attachment_t;

typedef struct {
    _sg_slot_t slot;
    _sg_pass_common_t cmn;
    struct {
        _sg_wgpu_attachment_t color_atts[SG_MAX_COLOR_ATTACHMENTS];
        _sg_wgpu_attachment_t ds_att;
    } wgpu;
} _sg_wgpu_pass_t;
typedef _sg_wgpu_pass_t _sg_pass_t;
typedef _sg_attachment_common_t _sg_attachment_t;

/* WGPU needs no per-context state beyond the pool slot */
typedef struct {
    _sg_slot_t slot;
} _sg_wgpu_context_t;
typedef _sg_wgpu_context_t _sg_context_t;
3406 
/* a pool of per-frame uniform buffers */
typedef struct {
    WGPUBindGroupLayout bindgroup_layout;
    uint32_t num_bytes;
    uint32_t offset;    /* current offset into current frame's mapped uniform buffer */
    uint32_t bind_offsets[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    WGPUBuffer buf;     /* the GPU-side uniform buffer */
    WGPUBindGroup bindgroup;
    struct {
        int num;
        int cur;
        WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */
        uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE];   /* if != 0, staging buffer currently mapped */
    } stage;
} _sg_wgpu_ubpool_t;

/* ...a similar pool (like uniform buffer pool) of dynamic-resource staging buffers */
typedef struct {
    uint32_t num_bytes;
    uint32_t offset;    /* current offset into current frame's staging buffer */
    int num;            /* number of staging buffers */
    int cur;            /* this frame's staging buffer */
    WGPUBuffer buf[_SG_WGPU_STAGING_PIPELINE_SIZE]; /* CPU-side staging buffers */
    uint8_t* ptr[_SG_WGPU_STAGING_PIPELINE_SIZE];   /* if != 0, staging buffer currently mapped */
} _sg_wgpu_stagingpool_t;

/* the WGPU backend state */
typedef struct {
    bool valid;
    bool in_pass;
    bool draw_indexed;
    int cur_width;
    int cur_height;
    WGPUDevice dev;
    WGPUTextureView (*render_view_cb)(void);    /* user callbacks for default-framebuffer views */
    WGPUTextureView (*resolve_view_cb)(void);
    WGPUTextureView (*depth_stencil_view_cb)(void);
    WGPUQueue queue;
    WGPUCommandEncoder render_cmd_enc;
    WGPUCommandEncoder staging_cmd_enc;
    WGPURenderPassEncoder pass_enc;
    WGPUBindGroup empty_bind_group;
    const _sg_pipeline_t* cur_pipeline;
    sg_pipeline cur_pipeline_id;
    _sg_sampler_cache_t sampler_cache;
    _sg_wgpu_ubpool_t ub;
    _sg_wgpu_stagingpool_t staging;
} _sg_wgpu_backend_t;
3455 #endif
3456 
3457 /*=== RESOURCE POOL DECLARATIONS =============================================*/
3458 
/* this *MUST* remain 0 */
#define _SG_INVALID_SLOT_INDEX (0)

/* a free-list resource pool; gen_ctrs holds one generation counter per slot
   (NOTE(review): presumably baked into resource ids to detect dangling
   handles -- confirm in the pool implementation) */
typedef struct {
    int size;           /* number of slots */
    int queue_top;      /* top of the free_queue stack */
    uint32_t* gen_ctrs;
    int* free_queue;    /* stack of free slot indices */
} _sg_pool_t;

/* all resource pools plus the backing resource arrays they index into */
typedef struct {
    _sg_pool_t buffer_pool;
    _sg_pool_t image_pool;
    _sg_pool_t shader_pool;
    _sg_pool_t pipeline_pool;
    _sg_pool_t pass_pool;
    _sg_pool_t context_pool;
    _sg_buffer_t* buffers;
    _sg_image_t* images;
    _sg_shader_t* shaders;
    _sg_pipeline_t* pipelines;
    _sg_pass_t* passes;
    _sg_context_t* contexts;
} _sg_pools_t;
3483 
3484 /*=== VALIDATION LAYER DECLARATIONS ==========================================*/
/* all error codes reported by the validation layer (active in SOKOL_DEBUG builds) */
typedef enum {
    /* special case 'validation was successful' */
    _SG_VALIDATE_SUCCESS,

    /* buffer creation */
    _SG_VALIDATE_BUFFERDESC_CANARY,
    _SG_VALIDATE_BUFFERDESC_SIZE,
    _SG_VALIDATE_BUFFERDESC_CONTENT,
    _SG_VALIDATE_BUFFERDESC_NO_CONTENT,

    /* image creation */
    _SG_VALIDATE_IMAGEDESC_CANARY,
    _SG_VALIDATE_IMAGEDESC_WIDTH,
    _SG_VALIDATE_IMAGEDESC_HEIGHT,
    _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT,
    _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT,
    _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT,
    _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT,
    _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE,
    _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT,
    _SG_VALIDATE_IMAGEDESC_CONTENT,
    _SG_VALIDATE_IMAGEDESC_NO_CONTENT,

    /* shader creation */
    _SG_VALIDATE_SHADERDESC_CANARY,
    _SG_VALIDATE_SHADERDESC_SOURCE,
    _SG_VALIDATE_SHADERDESC_BYTECODE,
    _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE,
    _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE,
    _SG_VALIDATE_SHADERDESC_NO_CONT_UBS,
    _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS,
    _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS,
    _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS,
    _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME,
    _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH,
    _SG_VALIDATE_SHADERDESC_IMG_NAME,
    _SG_VALIDATE_SHADERDESC_ATTR_NAMES,
    _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS,
    _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG,

    /* pipeline creation */
    _SG_VALIDATE_PIPELINEDESC_CANARY,
    _SG_VALIDATE_PIPELINEDESC_SHADER,
    _SG_VALIDATE_PIPELINEDESC_NO_ATTRS,
    _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4,
    _SG_VALIDATE_PIPELINEDESC_ATTR_NAME,
    _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS,

    /* pass creation */
    _SG_VALIDATE_PASSDESC_CANARY,
    _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS,
    _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS,
    _SG_VALIDATE_PASSDESC_IMAGE,
    _SG_VALIDATE_PASSDESC_MIPLEVEL,
    _SG_VALIDATE_PASSDESC_FACE,
    _SG_VALIDATE_PASSDESC_LAYER,
    _SG_VALIDATE_PASSDESC_SLICE,
    _SG_VALIDATE_PASSDESC_IMAGE_NO_RT,
    _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS,
    _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT,
    _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT,
    _SG_VALIDATE_PASSDESC_IMAGE_SIZES,
    _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS,

    /* sg_begin_pass validation */
    _SG_VALIDATE_BEGINPASS_PASS,
    _SG_VALIDATE_BEGINPASS_IMAGE,

    /* sg_apply_pipeline validation */
    _SG_VALIDATE_APIP_PIPELINE_VALID_ID,
    _SG_VALIDATE_APIP_PIPELINE_EXISTS,
    _SG_VALIDATE_APIP_PIPELINE_VALID,
    _SG_VALIDATE_APIP_SHADER_EXISTS,
    _SG_VALIDATE_APIP_SHADER_VALID,
    _SG_VALIDATE_APIP_ATT_COUNT,
    _SG_VALIDATE_APIP_COLOR_FORMAT,
    _SG_VALIDATE_APIP_DEPTH_FORMAT,
    _SG_VALIDATE_APIP_SAMPLE_COUNT,

    /* sg_apply_bindings validation */
    _SG_VALIDATE_ABND_PIPELINE,
    _SG_VALIDATE_ABND_PIPELINE_EXISTS,
    _SG_VALIDATE_ABND_PIPELINE_VALID,
    _SG_VALIDATE_ABND_VBS,
    _SG_VALIDATE_ABND_VB_EXISTS,
    _SG_VALIDATE_ABND_VB_TYPE,
    _SG_VALIDATE_ABND_VB_OVERFLOW,
    _SG_VALIDATE_ABND_NO_IB,
    _SG_VALIDATE_ABND_IB,
    _SG_VALIDATE_ABND_IB_EXISTS,
    _SG_VALIDATE_ABND_IB_TYPE,
    _SG_VALIDATE_ABND_IB_OVERFLOW,
    _SG_VALIDATE_ABND_VS_IMGS,
    _SG_VALIDATE_ABND_VS_IMG_EXISTS,
    _SG_VALIDATE_ABND_VS_IMG_TYPES,
    _SG_VALIDATE_ABND_FS_IMGS,
    _SG_VALIDATE_ABND_FS_IMG_EXISTS,
    _SG_VALIDATE_ABND_FS_IMG_TYPES,

    /* sg_apply_uniforms validation */
    _SG_VALIDATE_AUB_NO_PIPELINE,
    _SG_VALIDATE_AUB_NO_UB_AT_SLOT,
    _SG_VALIDATE_AUB_SIZE,

    /* sg_update_buffer validation */
    _SG_VALIDATE_UPDATEBUF_USAGE,
    _SG_VALIDATE_UPDATEBUF_SIZE,
    _SG_VALIDATE_UPDATEBUF_ONCE,
    _SG_VALIDATE_UPDATEBUF_APPEND,

    /* sg_append_buffer validation */
    _SG_VALIDATE_APPENDBUF_USAGE,
    _SG_VALIDATE_APPENDBUF_SIZE,
    _SG_VALIDATE_APPENDBUF_UPDATE,

    /* sg_update_image validation */
    _SG_VALIDATE_UPDIMG_USAGE,
    _SG_VALIDATE_UPDIMG_NOTENOUGHDATA,
    _SG_VALIDATE_UPDIMG_SIZE,
    _SG_VALIDATE_UPDIMG_COMPRESSED,
    _SG_VALIDATE_UPDIMG_ONCE
} _sg_validate_error_t;
3607 
3608 /*=== GENERIC BACKEND STATE ==================================================*/
3609 
/* the global sokol-gfx state, combining the generic state with exactly one
   backend-specific state struct selected at compile time */
typedef struct {
    bool valid;
    sg_desc desc;       /* original desc with default values patched in */
    uint32_t frame_index;
    sg_context active_context;
    sg_pass cur_pass;
    sg_pipeline cur_pipeline;
    bool pass_valid;
    bool bindings_valid;
    bool next_draw_valid;
    #if defined(SOKOL_DEBUG)
    _sg_validate_error_t validate_error;    /* last validation-layer error */
    #endif
    _sg_pools_t pools;
    sg_backend backend;
    sg_features features;
    sg_limits limits;
    sg_pixelformat_info formats[_SG_PIXELFORMAT_NUM];   /* per-format capability table */
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_backend_t gl;
    #elif defined(SOKOL_METAL)
    _sg_mtl_backend_t mtl;
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_backend_t d3d11;
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_backend_t wgpu;
    #endif
    #if defined(SOKOL_TRACE_HOOKS)
    sg_trace_hooks hooks;
    #endif
} _sg_state_t;
static _sg_state_t _sg;
3642 
3643 /*-- helper functions --------------------------------------------------------*/
3644 
_sg_strempty(const _sg_str_t * str)3645 _SOKOL_PRIVATE bool _sg_strempty(const _sg_str_t* str) {
3646     return 0 == str->buf[0];
3647 }
3648 
_sg_strptr(const _sg_str_t * str)3649 _SOKOL_PRIVATE const char* _sg_strptr(const _sg_str_t* str) {
3650     return &str->buf[0];
3651 }
3652 
/* copy a C string into a fixed-size _sg_str_t; the destination is always
   zero-terminated, a NULL src clears it to all zeroes */
_SOKOL_PRIVATE void _sg_strcpy(_sg_str_t* dst, const char* src) {
    SOKOL_ASSERT(dst);
    if (src) {
        #if defined(_MSC_VER)
        strncpy_s(dst->buf, _SG_STRING_SIZE, src, (_SG_STRING_SIZE-1));
        #else
        strncpy(dst->buf, src, _SG_STRING_SIZE);
        #endif
        /* strncpy() does not terminate on truncation, enforce termination here */
        dst->buf[_SG_STRING_SIZE-1] = 0;
    }
    else {
        memset(dst->buf, 0, _SG_STRING_SIZE);
    }
}
3667 
3668 /* return byte size of a vertex format */
_sg_vertexformat_bytesize(sg_vertex_format fmt)3669 _SOKOL_PRIVATE int _sg_vertexformat_bytesize(sg_vertex_format fmt) {
3670     switch (fmt) {
3671         case SG_VERTEXFORMAT_FLOAT:     return 4;
3672         case SG_VERTEXFORMAT_FLOAT2:    return 8;
3673         case SG_VERTEXFORMAT_FLOAT3:    return 12;
3674         case SG_VERTEXFORMAT_FLOAT4:    return 16;
3675         case SG_VERTEXFORMAT_BYTE4:     return 4;
3676         case SG_VERTEXFORMAT_BYTE4N:    return 4;
3677         case SG_VERTEXFORMAT_UBYTE4:    return 4;
3678         case SG_VERTEXFORMAT_UBYTE4N:   return 4;
3679         case SG_VERTEXFORMAT_SHORT2:    return 4;
3680         case SG_VERTEXFORMAT_SHORT2N:   return 4;
3681         case SG_VERTEXFORMAT_USHORT2N:  return 4;
3682         case SG_VERTEXFORMAT_SHORT4:    return 8;
3683         case SG_VERTEXFORMAT_SHORT4N:   return 8;
3684         case SG_VERTEXFORMAT_USHORT4N:  return 8;
3685         case SG_VERTEXFORMAT_UINT10_N2: return 4;
3686         case SG_VERTEXFORMAT_INVALID:   return 0;
3687         default:
3688             SOKOL_UNREACHABLE;
3689             return -1;
3690     }
3691 }
3692 
3693 /* return the byte size of a shader uniform */
_sg_uniform_size(sg_uniform_type type,int count)3694 _SOKOL_PRIVATE int _sg_uniform_size(sg_uniform_type type, int count) {
3695     switch (type) {
3696         case SG_UNIFORMTYPE_INVALID:    return 0;
3697         case SG_UNIFORMTYPE_FLOAT:      return 4 * count;
3698         case SG_UNIFORMTYPE_FLOAT2:     return 8 * count;
3699         case SG_UNIFORMTYPE_FLOAT3:     return 12 * count; /* FIXME: std140??? */
3700         case SG_UNIFORMTYPE_FLOAT4:     return 16 * count;
3701         case SG_UNIFORMTYPE_MAT4:       return 64 * count;
3702         default:
3703             SOKOL_UNREACHABLE;
3704             return -1;
3705     }
3706 }
3707 
/* return true if pixel format is a compressed (block) format */
_SOKOL_PRIVATE bool _sg_is_compressed_pixel_format(sg_pixel_format fmt) {
    switch (fmt) {
        /* BC1..BC7 block-compressed formats */
        case SG_PIXELFORMAT_BC1_RGBA:
        case SG_PIXELFORMAT_BC2_RGBA:
        case SG_PIXELFORMAT_BC3_RGBA:
        case SG_PIXELFORMAT_BC4_R:
        case SG_PIXELFORMAT_BC4_RSN:
        case SG_PIXELFORMAT_BC5_RG:
        case SG_PIXELFORMAT_BC5_RGSN:
        case SG_PIXELFORMAT_BC6H_RGBF:
        case SG_PIXELFORMAT_BC6H_RGBUF:
        case SG_PIXELFORMAT_BC7_RGBA:
        /* PVRTC formats */
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
        /* ETC2 formats */
        case SG_PIXELFORMAT_ETC2_RGB8:
        case SG_PIXELFORMAT_ETC2_RGB8A1:
        case SG_PIXELFORMAT_ETC2_RGBA8:
        case SG_PIXELFORMAT_ETC2_RG11:
        case SG_PIXELFORMAT_ETC2_RG11SN:
            return true;
        default:
            return false;
    }
}
3735 
3736 /* return true if pixel format is a valid render target format */
_sg_is_valid_rendertarget_color_format(sg_pixel_format fmt)3737 _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_color_format(sg_pixel_format fmt) {
3738     const int fmt_index = (int) fmt;
3739     SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM));
3740     return _sg.formats[fmt_index].render && !_sg.formats[fmt_index].depth;
3741 }
3742 
3743 /* return true if pixel format is a valid depth format */
_sg_is_valid_rendertarget_depth_format(sg_pixel_format fmt)3744 _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_depth_format(sg_pixel_format fmt) {
3745     const int fmt_index = (int) fmt;
3746     SOKOL_ASSERT((fmt_index >= 0) && (fmt_index < _SG_PIXELFORMAT_NUM));
3747     return _sg.formats[fmt_index].render && _sg.formats[fmt_index].depth;
3748 }
3749 
3750 /* return true if pixel format is a depth-stencil format */
_sg_is_depth_stencil_format(sg_pixel_format fmt)3751 _SOKOL_PRIVATE bool _sg_is_depth_stencil_format(sg_pixel_format fmt) {
3752     return (SG_PIXELFORMAT_DEPTH_STENCIL == fmt);
3753 }
3754 
/* return the bytes-per-pixel for a pixel format;
   only valid for uncompressed formats (compressed formats hit the
   SOKOL_UNREACHABLE default and return 0) */
_SOKOL_PRIVATE int _sg_pixelformat_bytesize(sg_pixel_format fmt) {
    switch (fmt) {
        /* 8-bit formats */
        case SG_PIXELFORMAT_R8:
        case SG_PIXELFORMAT_R8SN:
        case SG_PIXELFORMAT_R8UI:
        case SG_PIXELFORMAT_R8SI:
            return 1;

        /* 16-bit formats */
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_R16UI:
        case SG_PIXELFORMAT_R16SI:
        case SG_PIXELFORMAT_R16F:
        case SG_PIXELFORMAT_RG8:
        case SG_PIXELFORMAT_RG8SN:
        case SG_PIXELFORMAT_RG8UI:
        case SG_PIXELFORMAT_RG8SI:
            return 2;

        /* 32-bit formats */
        case SG_PIXELFORMAT_R32UI:
        case SG_PIXELFORMAT_R32SI:
        case SG_PIXELFORMAT_R32F:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RG16UI:
        case SG_PIXELFORMAT_RG16SI:
        case SG_PIXELFORMAT_RG16F:
        case SG_PIXELFORMAT_RGBA8:
        case SG_PIXELFORMAT_RGBA8SN:
        case SG_PIXELFORMAT_RGBA8UI:
        case SG_PIXELFORMAT_RGBA8SI:
        case SG_PIXELFORMAT_BGRA8:
        case SG_PIXELFORMAT_RGB10A2:
        case SG_PIXELFORMAT_RG11B10F:
            return 4;

        /* 64-bit formats */
        case SG_PIXELFORMAT_RG32UI:
        case SG_PIXELFORMAT_RG32SI:
        case SG_PIXELFORMAT_RG32F:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGBA16UI:
        case SG_PIXELFORMAT_RGBA16SI:
        case SG_PIXELFORMAT_RGBA16F:
            return 8;

        /* 128-bit formats */
        case SG_PIXELFORMAT_RGBA32UI:
        case SG_PIXELFORMAT_RGBA32SI:
        case SG_PIXELFORMAT_RGBA32F:
            return 16;

        default:
            SOKOL_UNREACHABLE;
            return 0;
    }
}
3812 
/* round val up to the next multiple of round_to (round_to must be a power of 2) */
#define _sg_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1))
3814 
/* return row pitch for an image: byte size of one row (one block row for
    compressed formats), rounded up to row_align (must be a power of 2)
    see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp
*/
_SOKOL_PRIVATE uint32_t _sg_row_pitch(sg_pixel_format fmt, uint32_t width, uint32_t row_align) {
    uint32_t pitch;
    switch (fmt) {
        /* 4x4-pixel blocks, 8 bytes per block, at least one block per row */
        case SG_PIXELFORMAT_BC1_RGBA:
        case SG_PIXELFORMAT_BC4_R:
        case SG_PIXELFORMAT_BC4_RSN:
        case SG_PIXELFORMAT_ETC2_RGB8:
        case SG_PIXELFORMAT_ETC2_RGB8A1:
            pitch = ((width + 3) / 4) * 8;
            pitch = pitch < 8 ? 8 : pitch;
            break;
        /* 4x4-pixel blocks, 16 bytes per block, at least one block per row */
        case SG_PIXELFORMAT_BC2_RGBA:
        case SG_PIXELFORMAT_BC3_RGBA:
        case SG_PIXELFORMAT_BC5_RG:
        case SG_PIXELFORMAT_BC5_RGSN:
        case SG_PIXELFORMAT_BC6H_RGBF:
        case SG_PIXELFORMAT_BC6H_RGBUF:
        case SG_PIXELFORMAT_BC7_RGBA:
        case SG_PIXELFORMAT_ETC2_RGBA8:
        case SG_PIXELFORMAT_ETC2_RG11:
        case SG_PIXELFORMAT_ETC2_RG11SN:
            pitch = ((width + 3) / 4) * 16;
            pitch = pitch < 16 ? 16 : pitch;
            break;
        /* PVRTC 4bpp, at least 2 blocks per row */
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
            {
                const int block_size = 4*4;
                const int bpp = 4;
                int width_blocks = width / 4;
                width_blocks = width_blocks < 2 ? 2 : width_blocks;
                pitch = width_blocks * ((block_size * bpp) / 8);
            }
            break;
        /* PVRTC 2bpp, at least 2 blocks per row */
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
            {
                const int block_size = 8*4;
                const int bpp = 2;
                int width_blocks = width / 4;
                width_blocks = width_blocks < 2 ? 2 : width_blocks;
                pitch = width_blocks * ((block_size * bpp) / 8);
            }
            break;
        /* uncompressed formats: width times bytes-per-pixel */
        default:
            pitch = width * _sg_pixelformat_bytesize(fmt);
            break;
    }
    pitch = _sg_roundup(pitch, row_align);
    return pitch;
}
3869 
/* compute the number of rows in a surface depending on pixel format:
   pixel rows for uncompressed formats, 4-pixel-high block rows for
   compressed formats; always at least 1 */
_SOKOL_PRIVATE int _sg_num_rows(sg_pixel_format fmt, int height) {
    int num_rows;
    switch (fmt) {
        /* all compressed formats use 4-pixel-high blocks */
        case SG_PIXELFORMAT_BC1_RGBA:
        case SG_PIXELFORMAT_BC4_R:
        case SG_PIXELFORMAT_BC4_RSN:
        case SG_PIXELFORMAT_ETC2_RGB8:
        case SG_PIXELFORMAT_ETC2_RGB8A1:
        case SG_PIXELFORMAT_ETC2_RGBA8:
        case SG_PIXELFORMAT_ETC2_RG11:
        case SG_PIXELFORMAT_ETC2_RG11SN:
        case SG_PIXELFORMAT_BC2_RGBA:
        case SG_PIXELFORMAT_BC3_RGBA:
        case SG_PIXELFORMAT_BC5_RG:
        case SG_PIXELFORMAT_BC5_RGSN:
        case SG_PIXELFORMAT_BC6H_RGBF:
        case SG_PIXELFORMAT_BC6H_RGBUF:
        case SG_PIXELFORMAT_BC7_RGBA:
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
            num_rows = ((height + 3) / 4);
            break;
        default:
            num_rows = height;
            break;
    }
    if (num_rows < 1) {
        num_rows = 1;
    }
    return num_rows;
}
3904 
3905 /* return pitch of a 2D subimage / texture slice
3906     see ComputePitch in https://github.com/microsoft/DirectXTex/blob/master/DirectXTex/DirectXTexUtil.cpp
3907 */
_sg_surface_pitch(sg_pixel_format fmt,uint32_t width,uint32_t height,uint32_t row_align)3908 _SOKOL_PRIVATE uint32_t _sg_surface_pitch(sg_pixel_format fmt, uint32_t width, uint32_t height, uint32_t row_align) {
3909     int num_rows = _sg_num_rows(fmt, height);
3910     return num_rows * _sg_row_pitch(fmt, width, row_align);
3911 }
3912 
3913 /* capability table pixel format helper functions */
_sg_pixelformat_all(sg_pixelformat_info * pfi)3914 _SOKOL_PRIVATE void _sg_pixelformat_all(sg_pixelformat_info* pfi) {
3915     pfi->sample = true;
3916     pfi->filter = true;
3917     pfi->blend = true;
3918     pfi->render = true;
3919     pfi->msaa = true;
3920 }
3921 
_sg_pixelformat_s(sg_pixelformat_info * pfi)3922 _SOKOL_PRIVATE void _sg_pixelformat_s(sg_pixelformat_info* pfi) {
3923     pfi->sample = true;
3924 }
3925 
_sg_pixelformat_sf(sg_pixelformat_info * pfi)3926 _SOKOL_PRIVATE void _sg_pixelformat_sf(sg_pixelformat_info* pfi) {
3927     pfi->sample = true;
3928     pfi->filter = true;
3929 }
3930 
_sg_pixelformat_sr(sg_pixelformat_info * pfi)3931 _SOKOL_PRIVATE void _sg_pixelformat_sr(sg_pixelformat_info* pfi) {
3932     pfi->sample = true;
3933     pfi->render = true;
3934 }
3935 
_sg_pixelformat_srmd(sg_pixelformat_info * pfi)3936 _SOKOL_PRIVATE void _sg_pixelformat_srmd(sg_pixelformat_info* pfi) {
3937     pfi->sample = true;
3938     pfi->render = true;
3939     pfi->msaa = true;
3940     pfi->depth = true;
3941 }
3942 
_sg_pixelformat_srm(sg_pixelformat_info * pfi)3943 _SOKOL_PRIVATE void _sg_pixelformat_srm(sg_pixelformat_info* pfi) {
3944     pfi->sample = true;
3945     pfi->render = true;
3946     pfi->msaa = true;
3947 }
3948 
_sg_pixelformat_sfrm(sg_pixelformat_info * pfi)3949 _SOKOL_PRIVATE void _sg_pixelformat_sfrm(sg_pixelformat_info* pfi) {
3950     pfi->sample = true;
3951     pfi->filter = true;
3952     pfi->render = true;
3953     pfi->msaa = true;
3954 }
_sg_pixelformat_sbrm(sg_pixelformat_info * pfi)3955 _SOKOL_PRIVATE void _sg_pixelformat_sbrm(sg_pixelformat_info* pfi) {
3956     pfi->sample = true;
3957     pfi->blend = true;
3958     pfi->render = true;
3959     pfi->msaa = true;
3960 }
3961 
_sg_pixelformat_sbr(sg_pixelformat_info * pfi)3962 _SOKOL_PRIVATE void _sg_pixelformat_sbr(sg_pixelformat_info* pfi) {
3963     pfi->sample = true;
3964     pfi->blend = true;
3965     pfi->render = true;
3966 }
3967 
_sg_pixelformat_sfbr(sg_pixelformat_info * pfi)3968 _SOKOL_PRIVATE void _sg_pixelformat_sfbr(sg_pixelformat_info* pfi) {
3969     pfi->sample = true;
3970     pfi->filter = true;
3971     pfi->blend = true;
3972     pfi->render = true;
3973 }
3974 
3975 /* resolve pass action defaults into a new pass action struct */
_sg_resolve_default_pass_action(const sg_pass_action * from,sg_pass_action * to)3976 _SOKOL_PRIVATE void _sg_resolve_default_pass_action(const sg_pass_action* from, sg_pass_action* to) {
3977     SOKOL_ASSERT(from && to);
3978     *to = *from;
3979     for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
3980         if (to->colors[i].action  == _SG_ACTION_DEFAULT) {
3981             to->colors[i].action = SG_ACTION_CLEAR;
3982             to->colors[i].val[0] = SG_DEFAULT_CLEAR_RED;
3983             to->colors[i].val[1] = SG_DEFAULT_CLEAR_GREEN;
3984             to->colors[i].val[2] = SG_DEFAULT_CLEAR_BLUE;
3985             to->colors[i].val[3] = SG_DEFAULT_CLEAR_ALPHA;
3986         }
3987     }
3988     if (to->depth.action == _SG_ACTION_DEFAULT) {
3989         to->depth.action = SG_ACTION_CLEAR;
3990         to->depth.val = SG_DEFAULT_CLEAR_DEPTH;
3991     }
3992     if (to->stencil.action == _SG_ACTION_DEFAULT) {
3993         to->stencil.action = SG_ACTION_CLEAR;
3994         to->stencil.val = SG_DEFAULT_CLEAR_STENCIL;
3995     }
3996 }
3997 
3998 /*== DUMMY BACKEND IMPL ======================================================*/
3999 #if defined(SOKOL_DUMMY_BACKEND)
4000 
/* setup the dummy backend: just mark the backend type and pretend that all
   non-compressed pixel formats support every capability */
_SOKOL_PRIVATE void _sg_dummy_setup_backend(const sg_desc* desc) {
    SOKOL_ASSERT(desc);
    _SOKOL_UNUSED(desc);
    _sg.backend = SG_BACKEND_DUMMY;
    /* NOTE: relies on SG_PIXELFORMAT_R8 .. SG_PIXELFORMAT_BC1_RGBA being the
       contiguous enum range of non-compressed formats */
    for (int i = SG_PIXELFORMAT_R8; i < SG_PIXELFORMAT_BC1_RGBA; i++) {
        _sg.formats[i].sample = true;
        _sg.formats[i].filter = true;
        _sg.formats[i].render = true;
        _sg.formats[i].blend = true;
        _sg.formats[i].msaa = true;
    }
    _sg.formats[SG_PIXELFORMAT_DEPTH].depth = true;
    _sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL].depth = true;
}
4015 
_sg_dummy_discard_backend(void)4016 _SOKOL_PRIVATE void _sg_dummy_discard_backend(void) {
4017     /* empty */
4018 }
4019 
_sg_dummy_reset_state_cache(void)4020 _SOKOL_PRIVATE void _sg_dummy_reset_state_cache(void) {
4021     /* empty*/
4022 }
4023 
_sg_dummy_create_context(_sg_context_t * ctx)4024 _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_context(_sg_context_t* ctx) {
4025     SOKOL_ASSERT(ctx);
4026     _SOKOL_UNUSED(ctx);
4027     return SG_RESOURCESTATE_VALID;
4028 }
4029 
_sg_dummy_destroy_context(_sg_context_t * ctx)4030 _SOKOL_PRIVATE void _sg_dummy_destroy_context(_sg_context_t* ctx) {
4031     SOKOL_ASSERT(ctx);
4032     _SOKOL_UNUSED(ctx);
4033 }
4034 
_sg_dummy_activate_context(_sg_context_t * ctx)4035 _SOKOL_PRIVATE void _sg_dummy_activate_context(_sg_context_t* ctx) {
4036     SOKOL_ASSERT(ctx);
4037     _SOKOL_UNUSED(ctx);
4038 }
4039 
_sg_dummy_create_buffer(_sg_buffer_t * buf,const sg_buffer_desc * desc)4040 _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
4041     SOKOL_ASSERT(buf && desc);
4042     _sg_buffer_common_init(&buf->cmn, desc);
4043     return SG_RESOURCESTATE_VALID;
4044 }
4045 
_sg_dummy_destroy_buffer(_sg_buffer_t * buf)4046 _SOKOL_PRIVATE void _sg_dummy_destroy_buffer(_sg_buffer_t* buf) {
4047     SOKOL_ASSERT(buf);
4048     _SOKOL_UNUSED(buf);
4049 }
4050 
_sg_dummy_create_image(_sg_image_t * img,const sg_image_desc * desc)4051 _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_image(_sg_image_t* img, const sg_image_desc* desc) {
4052     SOKOL_ASSERT(img && desc);
4053     _sg_image_common_init(&img->cmn, desc);
4054     return SG_RESOURCESTATE_VALID;
4055 }
4056 
_sg_dummy_destroy_image(_sg_image_t * img)4057 _SOKOL_PRIVATE void _sg_dummy_destroy_image(_sg_image_t* img) {
4058     SOKOL_ASSERT(img);
4059     _SOKOL_UNUSED(img);
4060 }
4061 
_sg_dummy_create_shader(_sg_shader_t * shd,const sg_shader_desc * desc)4062 _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
4063     SOKOL_ASSERT(shd && desc);
4064     _sg_shader_common_init(&shd->cmn, desc);
4065     return SG_RESOURCESTATE_VALID;
4066 }
4067 
_sg_dummy_destroy_shader(_sg_shader_t * shd)4068 _SOKOL_PRIVATE void _sg_dummy_destroy_shader(_sg_shader_t* shd) {
4069     SOKOL_ASSERT(shd);
4070     _SOKOL_UNUSED(shd);
4071 }
4072 
_sg_dummy_create_pipeline(_sg_pipeline_t * pip,_sg_shader_t * shd,const sg_pipeline_desc * desc)4073 _SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
4074     SOKOL_ASSERT(pip && desc);
4075     pip->shader = shd;
4076     _sg_pipeline_common_init(&pip->cmn, desc);
4077     for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
4078         const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
4079         if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
4080             break;
4081         }
4082         SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
4083         pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
4084     }
4085     return SG_RESOURCESTATE_VALID;
4086 }
4087 
_sg_dummy_destroy_pipeline(_sg_pipeline_t * pip)4088 _SOKOL_PRIVATE void _sg_dummy_destroy_pipeline(_sg_pipeline_t* pip) {
4089     SOKOL_ASSERT(pip);
4090     _SOKOL_UNUSED(pip);
4091 }
4092 
/* create a pass object: resolves and stores the attachment image pointers,
   no GPU objects are created; att_images[] must be ordered like
   desc->color_attachments[], with the optional depth-stencil image at
   index SG_MAX_COLOR_ATTACHMENTS */
_SOKOL_PRIVATE sg_resource_state _sg_dummy_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);

    _sg_pass_common_init(&pass->cmn, desc);

    /* resolve color attachment image pointers (slot ids must match the desc) */
    const sg_attachment_desc* att_desc;
    for (int i = 0; i < pass->cmn.num_color_atts; i++) {
        att_desc = &desc->color_attachments[i];
        SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
        SOKOL_ASSERT(0 == pass->dmy.color_atts[i].image);
        SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format));
        pass->dmy.color_atts[i].image = att_images[i];
    }

    /* resolve the optional depth-stencil attachment */
    SOKOL_ASSERT(0 == pass->dmy.ds_att.image);
    att_desc = &desc->depth_stencil_attachment;
    if (att_desc->image.id != SG_INVALID_ID) {
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
        pass->dmy.ds_att.image = att_images[ds_img_index];
    }
    return SG_RESOURCESTATE_VALID;
}
4119 
_sg_dummy_destroy_pass(_sg_pass_t * pass)4120 _SOKOL_PRIVATE void _sg_dummy_destroy_pass(_sg_pass_t* pass) {
4121     SOKOL_ASSERT(pass);
4122     _SOKOL_UNUSED(pass);
4123 }
4124 
_sg_dummy_pass_color_image(const _sg_pass_t * pass,int index)4125 _SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_color_image(const _sg_pass_t* pass, int index) {
4126     SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
4127     /* NOTE: may return null */
4128     return pass->dmy.color_atts[index].image;
4129 }
4130 
_sg_dummy_pass_ds_image(const _sg_pass_t * pass)4131 _SOKOL_PRIVATE _sg_image_t* _sg_dummy_pass_ds_image(const _sg_pass_t* pass) {
4132     /* NOTE: may return null */
4133     SOKOL_ASSERT(pass);
4134     return pass->dmy.ds_att.image;
4135 }
4136 
_sg_dummy_begin_pass(_sg_pass_t * pass,const sg_pass_action * action,int w,int h)4137 _SOKOL_PRIVATE void _sg_dummy_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
4138     SOKOL_ASSERT(action);
4139     _SOKOL_UNUSED(pass);
4140     _SOKOL_UNUSED(action);
4141     _SOKOL_UNUSED(w);
4142     _SOKOL_UNUSED(h);
4143 }
4144 
_sg_dummy_end_pass(void)4145 _SOKOL_PRIVATE void _sg_dummy_end_pass(void) {
4146     /* empty */
4147 }
4148 
_sg_dummy_commit(void)4149 _SOKOL_PRIVATE void _sg_dummy_commit(void) {
4150     /* empty */
4151 }
4152 
_sg_dummy_apply_viewport(int x,int y,int w,int h,bool origin_top_left)4153 _SOKOL_PRIVATE void _sg_dummy_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
4154     _SOKOL_UNUSED(x);
4155     _SOKOL_UNUSED(y);
4156     _SOKOL_UNUSED(w);
4157     _SOKOL_UNUSED(h);
4158     _SOKOL_UNUSED(origin_top_left);
4159 }
4160 
_sg_dummy_apply_scissor_rect(int x,int y,int w,int h,bool origin_top_left)4161 _SOKOL_PRIVATE void _sg_dummy_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
4162     _SOKOL_UNUSED(x);
4163     _SOKOL_UNUSED(y);
4164     _SOKOL_UNUSED(w);
4165     _SOKOL_UNUSED(h);
4166     _SOKOL_UNUSED(origin_top_left);
4167 }
4168 
_sg_dummy_apply_pipeline(_sg_pipeline_t * pip)4169 _SOKOL_PRIVATE void _sg_dummy_apply_pipeline(_sg_pipeline_t* pip) {
4170     SOKOL_ASSERT(pip);
4171     _SOKOL_UNUSED(pip);
4172 }
4173 
_sg_dummy_apply_bindings(_sg_pipeline_t * pip,_sg_buffer_t ** vbs,const int * vb_offsets,int num_vbs,_sg_buffer_t * ib,int ib_offset,_sg_image_t ** vs_imgs,int num_vs_imgs,_sg_image_t ** fs_imgs,int num_fs_imgs)4174 _SOKOL_PRIVATE void _sg_dummy_apply_bindings(
4175     _sg_pipeline_t* pip,
4176     _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
4177     _sg_buffer_t* ib, int ib_offset,
4178     _sg_image_t** vs_imgs, int num_vs_imgs,
4179     _sg_image_t** fs_imgs, int num_fs_imgs)
4180 {
4181     SOKOL_ASSERT(pip);
4182     SOKOL_ASSERT(vbs && vb_offsets);
4183     SOKOL_ASSERT(vs_imgs);
4184     SOKOL_ASSERT(fs_imgs);
4185     _SOKOL_UNUSED(pip);
4186     _SOKOL_UNUSED(vbs); _SOKOL_UNUSED(vb_offsets); _SOKOL_UNUSED(num_vbs);
4187     _SOKOL_UNUSED(ib); _SOKOL_UNUSED(ib_offset);
4188     _SOKOL_UNUSED(vs_imgs); _SOKOL_UNUSED(num_vs_imgs);
4189     _SOKOL_UNUSED(fs_imgs); _SOKOL_UNUSED(num_fs_imgs);
4190 }
4191 
_sg_dummy_apply_uniforms(sg_shader_stage stage_index,int ub_index,const void * data,int num_bytes)4192 _SOKOL_PRIVATE void _sg_dummy_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
4193     SOKOL_ASSERT(data && (num_bytes > 0));
4194     SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
4195     SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
4196     _SOKOL_UNUSED(stage_index);
4197     _SOKOL_UNUSED(ub_index);
4198     _SOKOL_UNUSED(data);
4199     _SOKOL_UNUSED(num_bytes);
4200 }
4201 
_sg_dummy_draw(int base_element,int num_elements,int num_instances)4202 _SOKOL_PRIVATE void _sg_dummy_draw(int base_element, int num_elements, int num_instances) {
4203     _SOKOL_UNUSED(base_element);
4204     _SOKOL_UNUSED(num_elements);
4205     _SOKOL_UNUSED(num_instances);
4206 }
4207 
_sg_dummy_update_buffer(_sg_buffer_t * buf,const void * data,uint32_t data_size)4208 _SOKOL_PRIVATE void _sg_dummy_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size) {
4209     SOKOL_ASSERT(buf && data && (data_size > 0));
4210     _SOKOL_UNUSED(data);
4211     _SOKOL_UNUSED(data_size);
4212     if (++buf->cmn.active_slot >= buf->cmn.num_slots) {
4213         buf->cmn.active_slot = 0;
4214     }
4215 }
4216 
_sg_dummy_append_buffer(_sg_buffer_t * buf,const void * data,uint32_t data_size,bool new_frame)4217 _SOKOL_PRIVATE uint32_t _sg_dummy_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size, bool new_frame) {
4218     SOKOL_ASSERT(buf && data && (data_size > 0));
4219     _SOKOL_UNUSED(data);
4220     _SOKOL_UNUSED(data_size);
4221     if (new_frame) {
4222         if (++buf->cmn.active_slot >= buf->cmn.num_slots) {
4223             buf->cmn.active_slot = 0;
4224         }
4225     }
4226     /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */
4227     return _sg_roundup(data_size, 4);
4228 }
4229 
_sg_dummy_update_image(_sg_image_t * img,const sg_image_content * data)4230 _SOKOL_PRIVATE void _sg_dummy_update_image(_sg_image_t* img, const sg_image_content* data) {
4231     SOKOL_ASSERT(img && data);
4232     _SOKOL_UNUSED(data);
4233     if (++img->cmn.active_slot >= img->cmn.num_slots) {
4234         img->cmn.active_slot = 0;
4235     }
4236 }
4237 
4238 /*== GL BACKEND ==============================================================*/
4239 #elif defined(_SOKOL_ANY_GL)
4240 
4241 /*-- type translation --------------------------------------------------------*/
/* translate sg_buffer_type to the GL buffer bind target */
_SOKOL_PRIVATE GLenum _sg_gl_buffer_target(sg_buffer_type t) {
    switch (t) {
        case SG_BUFFERTYPE_VERTEXBUFFER:    return GL_ARRAY_BUFFER;
        case SG_BUFFERTYPE_INDEXBUFFER:     return GL_ELEMENT_ARRAY_BUFFER;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4249 
/* translate sg_image_type to the GL texture target
   (3D and array textures are not available on GLES2) */
_SOKOL_PRIVATE GLenum _sg_gl_texture_target(sg_image_type t) {
    switch (t) {
        case SG_IMAGETYPE_2D:   return GL_TEXTURE_2D;
        case SG_IMAGETYPE_CUBE: return GL_TEXTURE_CUBE_MAP;
        #if !defined(SOKOL_GLES2)
        case SG_IMAGETYPE_3D:       return GL_TEXTURE_3D;
        case SG_IMAGETYPE_ARRAY:    return GL_TEXTURE_2D_ARRAY;
        #endif
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4261 
/* translate sg_usage to the GL buffer-data usage hint */
_SOKOL_PRIVATE GLenum _sg_gl_usage(sg_usage u) {
    switch (u) {
        case SG_USAGE_IMMUTABLE:    return GL_STATIC_DRAW;
        case SG_USAGE_DYNAMIC:      return GL_DYNAMIC_DRAW;
        case SG_USAGE_STREAM:       return GL_STREAM_DRAW;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4270 
/* translate sg_shader_stage to the GL shader object type */
_SOKOL_PRIVATE GLenum _sg_gl_shader_stage(sg_shader_stage stage) {
    switch (stage) {
        case SG_SHADERSTAGE_VS:     return GL_VERTEX_SHADER;
        case SG_SHADERSTAGE_FS:     return GL_FRAGMENT_SHADER;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4278 
/* return the number of components of a vertex format
   (the glVertexAttribPointer 'size' argument, NOT a byte size) */
_SOKOL_PRIVATE GLint _sg_gl_vertexformat_size(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_FLOAT:     return 1;
        case SG_VERTEXFORMAT_FLOAT2:    return 2;
        case SG_VERTEXFORMAT_FLOAT3:    return 3;
        case SG_VERTEXFORMAT_FLOAT4:    return 4;
        case SG_VERTEXFORMAT_BYTE4:     return 4;
        case SG_VERTEXFORMAT_BYTE4N:    return 4;
        case SG_VERTEXFORMAT_UBYTE4:    return 4;
        case SG_VERTEXFORMAT_UBYTE4N:   return 4;
        case SG_VERTEXFORMAT_SHORT2:    return 2;
        case SG_VERTEXFORMAT_SHORT2N:   return 2;
        case SG_VERTEXFORMAT_USHORT2N:  return 2;
        case SG_VERTEXFORMAT_SHORT4:    return 4;
        case SG_VERTEXFORMAT_SHORT4N:   return 4;
        case SG_VERTEXFORMAT_USHORT4N:  return 4;
        case SG_VERTEXFORMAT_UINT10_N2: return 4;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4299 
/* return the GL component type of a vertex format
   (the glVertexAttribPointer 'type' argument) */
_SOKOL_PRIVATE GLenum _sg_gl_vertexformat_type(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_FLOAT:
        case SG_VERTEXFORMAT_FLOAT2:
        case SG_VERTEXFORMAT_FLOAT3:
        case SG_VERTEXFORMAT_FLOAT4:
            return GL_FLOAT;
        case SG_VERTEXFORMAT_BYTE4:
        case SG_VERTEXFORMAT_BYTE4N:
            return GL_BYTE;
        case SG_VERTEXFORMAT_UBYTE4:
        case SG_VERTEXFORMAT_UBYTE4N:
            return GL_UNSIGNED_BYTE;
        case SG_VERTEXFORMAT_SHORT2:
        case SG_VERTEXFORMAT_SHORT2N:
        case SG_VERTEXFORMAT_SHORT4:
        case SG_VERTEXFORMAT_SHORT4N:
            return GL_SHORT;
        case SG_VERTEXFORMAT_USHORT2N:
        case SG_VERTEXFORMAT_USHORT4N:
            return GL_UNSIGNED_SHORT;
        case SG_VERTEXFORMAT_UINT10_N2:
            return GL_UNSIGNED_INT_2_10_10_10_REV;
        default:
            SOKOL_UNREACHABLE; return 0;
    }
}
4327 
/* return whether a vertex format is normalized
   (the glVertexAttribPointer 'normalized' argument) */
_SOKOL_PRIVATE GLboolean _sg_gl_vertexformat_normalized(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_BYTE4N:
        case SG_VERTEXFORMAT_UBYTE4N:
        case SG_VERTEXFORMAT_SHORT2N:
        case SG_VERTEXFORMAT_USHORT2N:
        case SG_VERTEXFORMAT_SHORT4N:
        case SG_VERTEXFORMAT_USHORT4N:
        case SG_VERTEXFORMAT_UINT10_N2:
            return GL_TRUE;
        default:
            return GL_FALSE;
    }
}
4342 
/* translate sg_primitive_type to the GL draw-call primitive mode */
_SOKOL_PRIVATE GLenum _sg_gl_primitive_type(sg_primitive_type t) {
    switch (t) {
        case SG_PRIMITIVETYPE_POINTS:           return GL_POINTS;
        case SG_PRIMITIVETYPE_LINES:            return GL_LINES;
        case SG_PRIMITIVETYPE_LINE_STRIP:       return GL_LINE_STRIP;
        case SG_PRIMITIVETYPE_TRIANGLES:        return GL_TRIANGLES;
        case SG_PRIMITIVETYPE_TRIANGLE_STRIP:   return GL_TRIANGLE_STRIP;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4353 
/* translate sg_index_type to the GL index element type
   (0 for non-indexed rendering) */
_SOKOL_PRIVATE GLenum _sg_gl_index_type(sg_index_type t) {
    switch (t) {
        case SG_INDEXTYPE_NONE:     return 0;
        case SG_INDEXTYPE_UINT16:   return GL_UNSIGNED_SHORT;
        case SG_INDEXTYPE_UINT32:   return GL_UNSIGNED_INT;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4362 
/* translate sg_compare_func to the GL depth/stencil compare function */
_SOKOL_PRIVATE GLenum _sg_gl_compare_func(sg_compare_func cmp) {
    switch (cmp) {
        case SG_COMPAREFUNC_NEVER:          return GL_NEVER;
        case SG_COMPAREFUNC_LESS:           return GL_LESS;
        case SG_COMPAREFUNC_EQUAL:          return GL_EQUAL;
        case SG_COMPAREFUNC_LESS_EQUAL:     return GL_LEQUAL;
        case SG_COMPAREFUNC_GREATER:        return GL_GREATER;
        case SG_COMPAREFUNC_NOT_EQUAL:      return GL_NOTEQUAL;
        case SG_COMPAREFUNC_GREATER_EQUAL:  return GL_GEQUAL;
        case SG_COMPAREFUNC_ALWAYS:         return GL_ALWAYS;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4376 
/* translate sg_stencil_op to the GL stencil operation */
_SOKOL_PRIVATE GLenum _sg_gl_stencil_op(sg_stencil_op op) {
    switch (op) {
        case SG_STENCILOP_KEEP:         return GL_KEEP;
        case SG_STENCILOP_ZERO:         return GL_ZERO;
        case SG_STENCILOP_REPLACE:      return GL_REPLACE;
        case SG_STENCILOP_INCR_CLAMP:   return GL_INCR;
        case SG_STENCILOP_DECR_CLAMP:   return GL_DECR;
        case SG_STENCILOP_INVERT:       return GL_INVERT;
        case SG_STENCILOP_INCR_WRAP:    return GL_INCR_WRAP;
        case SG_STENCILOP_DECR_WRAP:    return GL_DECR_WRAP;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4390 
/* translate sg_blend_factor to the GL blend factor (glBlendFuncSeparate args) */
_SOKOL_PRIVATE GLenum _sg_gl_blend_factor(sg_blend_factor f) {
    switch (f) {
        case SG_BLENDFACTOR_ZERO:                   return GL_ZERO;
        case SG_BLENDFACTOR_ONE:                    return GL_ONE;
        case SG_BLENDFACTOR_SRC_COLOR:              return GL_SRC_COLOR;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR:    return GL_ONE_MINUS_SRC_COLOR;
        case SG_BLENDFACTOR_SRC_ALPHA:              return GL_SRC_ALPHA;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA:    return GL_ONE_MINUS_SRC_ALPHA;
        case SG_BLENDFACTOR_DST_COLOR:              return GL_DST_COLOR;
        case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR:    return GL_ONE_MINUS_DST_COLOR;
        case SG_BLENDFACTOR_DST_ALPHA:              return GL_DST_ALPHA;
        case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA:    return GL_ONE_MINUS_DST_ALPHA;
        case SG_BLENDFACTOR_SRC_ALPHA_SATURATED:    return GL_SRC_ALPHA_SATURATE;
        case SG_BLENDFACTOR_BLEND_COLOR:            return GL_CONSTANT_COLOR;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR:  return GL_ONE_MINUS_CONSTANT_COLOR;
        case SG_BLENDFACTOR_BLEND_ALPHA:            return GL_CONSTANT_ALPHA;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA:  return GL_ONE_MINUS_CONSTANT_ALPHA;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4411 
/* translate sg_blend_op to the GL blend equation */
_SOKOL_PRIVATE GLenum _sg_gl_blend_op(sg_blend_op op) {
    switch (op) {
        case SG_BLENDOP_ADD:                return GL_FUNC_ADD;
        case SG_BLENDOP_SUBTRACT:           return GL_FUNC_SUBTRACT;
        case SG_BLENDOP_REVERSE_SUBTRACT:   return GL_FUNC_REVERSE_SUBTRACT;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4420 
/* translate sg_filter to the GL texture min/mag filter */
_SOKOL_PRIVATE GLenum _sg_gl_filter(sg_filter f) {
    switch (f) {
        case SG_FILTER_NEAREST:                 return GL_NEAREST;
        case SG_FILTER_LINEAR:                  return GL_LINEAR;
        case SG_FILTER_NEAREST_MIPMAP_NEAREST:  return GL_NEAREST_MIPMAP_NEAREST;
        case SG_FILTER_NEAREST_MIPMAP_LINEAR:   return GL_NEAREST_MIPMAP_LINEAR;
        case SG_FILTER_LINEAR_MIPMAP_NEAREST:   return GL_LINEAR_MIPMAP_NEAREST;
        case SG_FILTER_LINEAR_MIPMAP_LINEAR:    return GL_LINEAR_MIPMAP_LINEAR;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4432 
/* translate sg_wrap to the GL texture wrap mode
   (GL_CLAMP_TO_BORDER only exists on desktop GL, GLES falls
   back to GL_CLAMP_TO_EDGE) */
_SOKOL_PRIVATE GLenum _sg_gl_wrap(sg_wrap w) {
    switch (w) {
        case SG_WRAP_CLAMP_TO_EDGE:     return GL_CLAMP_TO_EDGE;
        #if defined(SOKOL_GLCORE33)
        case SG_WRAP_CLAMP_TO_BORDER:   return GL_CLAMP_TO_BORDER;
        #else
        case SG_WRAP_CLAMP_TO_BORDER:   return GL_CLAMP_TO_EDGE;
        #endif
        case SG_WRAP_REPEAT:            return GL_REPEAT;
        case SG_WRAP_MIRRORED_REPEAT:   return GL_MIRRORED_REPEAT;
        default: SOKOL_UNREACHABLE; return 0;
    }
}
4446 
/* translate sg_pixel_format to the glTexImage2D 'type' argument
   (the per-component data type of the client-side pixel data) */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_type(sg_pixel_format fmt) {
    switch (fmt) {
        /* 8-bit unsigned components */
        case SG_PIXELFORMAT_R8:
        case SG_PIXELFORMAT_R8UI:
        case SG_PIXELFORMAT_RG8:
        case SG_PIXELFORMAT_RG8UI:
        case SG_PIXELFORMAT_RGBA8:
        case SG_PIXELFORMAT_RGBA8UI:
        case SG_PIXELFORMAT_BGRA8:
            return GL_UNSIGNED_BYTE;
        /* 8-bit signed components */
        case SG_PIXELFORMAT_R8SN:
        case SG_PIXELFORMAT_R8SI:
        case SG_PIXELFORMAT_RG8SN:
        case SG_PIXELFORMAT_RG8SI:
        case SG_PIXELFORMAT_RGBA8SN:
        case SG_PIXELFORMAT_RGBA8SI:
            return GL_BYTE;
        /* 16-bit unsigned components */
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16UI:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16UI:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16UI:
            return GL_UNSIGNED_SHORT;
        /* 16-bit signed components */
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_R16SI:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RG16SI:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGBA16SI:
            return GL_SHORT;
        /* 16-bit float components */
        case SG_PIXELFORMAT_R16F:
        case SG_PIXELFORMAT_RG16F:
        case SG_PIXELFORMAT_RGBA16F:
            return GL_HALF_FLOAT;
        /* 32-bit integer components */
        case SG_PIXELFORMAT_R32UI:
        case SG_PIXELFORMAT_RG32UI:
        case SG_PIXELFORMAT_RGBA32UI:
            return GL_UNSIGNED_INT;
        case SG_PIXELFORMAT_R32SI:
        case SG_PIXELFORMAT_RG32SI:
        case SG_PIXELFORMAT_RGBA32SI:
            return GL_INT;
        /* 32-bit float components */
        case SG_PIXELFORMAT_R32F:
        case SG_PIXELFORMAT_RG32F:
        case SG_PIXELFORMAT_RGBA32F:
            return GL_FLOAT;
        /* packed formats (not available on GLES2) */
        #if !defined(SOKOL_GLES2)
        case SG_PIXELFORMAT_RGB10A2:
            return GL_UNSIGNED_INT_2_10_10_10_REV;
        case SG_PIXELFORMAT_RG11B10F:
            return GL_UNSIGNED_INT_10F_11F_11F_REV;
        #endif
        /* depth / depth-stencil formats */
        case SG_PIXELFORMAT_DEPTH:
            return GL_UNSIGNED_SHORT;
        case SG_PIXELFORMAT_DEPTH_STENCIL:
            return GL_UNSIGNED_INT_24_8;
        default:
            SOKOL_UNREACHABLE; return 0;
    }
}
4508 
/* translate sg_pixel_format to the glTexImage2D 'format' argument
   (the component layout of the client-side pixel data, or the compressed
   format enum for compressed textures); on GLES2 single-channel formats
   map to GL_LUMINANCE because GL_RED is unavailable */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_format(sg_pixel_format fmt) {
    switch (fmt) {
        case SG_PIXELFORMAT_R8:
        case SG_PIXELFORMAT_R8SN:
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_R16F:
        case SG_PIXELFORMAT_R32F:
            #if defined(SOKOL_GLES2)
                return GL_LUMINANCE;
            #else
            /* runtime GLES2 fallback (GLES3 context creation failed) */
            if (_sg.gl.gles2) {
                return GL_LUMINANCE;
            }
            else {
                return GL_RED;
            }
            #endif
        #if !defined(SOKOL_GLES2)
            case SG_PIXELFORMAT_R8UI:
            case SG_PIXELFORMAT_R8SI:
            case SG_PIXELFORMAT_R16UI:
            case SG_PIXELFORMAT_R16SI:
            case SG_PIXELFORMAT_R32UI:
            case SG_PIXELFORMAT_R32SI:
                return GL_RED_INTEGER;
            case SG_PIXELFORMAT_RG8:
            case SG_PIXELFORMAT_RG8SN:
            case SG_PIXELFORMAT_RG16:
            case SG_PIXELFORMAT_RG16SN:
            case SG_PIXELFORMAT_RG16F:
            case SG_PIXELFORMAT_RG32F:
                return GL_RG;
            case SG_PIXELFORMAT_RG8UI:
            case SG_PIXELFORMAT_RG8SI:
            case SG_PIXELFORMAT_RG16UI:
            case SG_PIXELFORMAT_RG16SI:
            case SG_PIXELFORMAT_RG32UI:
            case SG_PIXELFORMAT_RG32SI:
                return GL_RG_INTEGER;
        #endif
        case SG_PIXELFORMAT_RGBA8:
        case SG_PIXELFORMAT_RGBA8SN:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_RGBA16F:
        case SG_PIXELFORMAT_RGBA32F:
        case SG_PIXELFORMAT_RGB10A2:
            return GL_RGBA;
        #if !defined(SOKOL_GLES2)
            case SG_PIXELFORMAT_RGBA8UI:
            case SG_PIXELFORMAT_RGBA8SI:
            case SG_PIXELFORMAT_RGBA16UI:
            case SG_PIXELFORMAT_RGBA16SI:
            case SG_PIXELFORMAT_RGBA32UI:
            case SG_PIXELFORMAT_RGBA32SI:
                return GL_RGBA_INTEGER;
        #endif
        case SG_PIXELFORMAT_RG11B10F:
            return GL_RGB;
        case SG_PIXELFORMAT_DEPTH:
            return GL_DEPTH_COMPONENT;
        case SG_PIXELFORMAT_DEPTH_STENCIL:
            return GL_DEPTH_STENCIL;
        /* compressed formats: the 'format' is the compressed internal format enum */
        case SG_PIXELFORMAT_BC1_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
        case SG_PIXELFORMAT_BC2_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
        case SG_PIXELFORMAT_BC3_RGBA:
            return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
        case SG_PIXELFORMAT_BC4_R:
            return GL_COMPRESSED_RED_RGTC1;
        case SG_PIXELFORMAT_BC4_RSN:
            return GL_COMPRESSED_SIGNED_RED_RGTC1;
        case SG_PIXELFORMAT_BC5_RG:
            return GL_COMPRESSED_RED_GREEN_RGTC2;
        case SG_PIXELFORMAT_BC5_RGSN:
            return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2;
        case SG_PIXELFORMAT_BC6H_RGBF:
            return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB;
        case SG_PIXELFORMAT_BC6H_RGBUF:
            return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB;
        case SG_PIXELFORMAT_BC7_RGBA:
            return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
            return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
            return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
            return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
            return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
        case SG_PIXELFORMAT_ETC2_RGB8:
            return GL_COMPRESSED_RGB8_ETC2;
        case SG_PIXELFORMAT_ETC2_RGB8A1:
            return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
        case SG_PIXELFORMAT_ETC2_RGBA8:
            return GL_COMPRESSED_RGBA8_ETC2_EAC;
        case SG_PIXELFORMAT_ETC2_RG11:
            return GL_COMPRESSED_RG11_EAC;
        case SG_PIXELFORMAT_ETC2_RG11SN:
            return GL_COMPRESSED_SIGNED_RG11_EAC;
        default:
            SOKOL_UNREACHABLE; return 0;
    }
}
4615 
/* Returns the GL 'internalFormat' argument for glTexImage2D/3D calls for a
   sokol pixel format. On GLES2 (either compile-time via SOKOL_GLES2, or the
   runtime fallback flag _sg.gl.gles2) the internal format must be the same
   unsized enum as the client-side format, so this forwards to
   _sg_gl_teximage_format(); on all other GL flavors a sized internal
   format is returned. Unsupported formats hit SOKOL_UNREACHABLE and
   return 0. */
_SOKOL_PRIVATE GLenum _sg_gl_teximage_internal_format(sg_pixel_format fmt) {
    #if defined(SOKOL_GLES2)
    return _sg_gl_teximage_format(fmt);
    #else
    if (_sg.gl.gles2) {
        /* runtime GLES2 fallback: sized internal formats are not available */
        return _sg_gl_teximage_format(fmt);
    }
    else {
        switch (fmt) {
            case SG_PIXELFORMAT_R8:         return GL_R8;
            case SG_PIXELFORMAT_R8SN:       return GL_R8_SNORM;
            case SG_PIXELFORMAT_R8UI:       return GL_R8UI;
            case SG_PIXELFORMAT_R8SI:       return GL_R8I;
            #if !defined(SOKOL_GLES3)
                /* 16-bit normalized formats only exist on desktop GL */
                case SG_PIXELFORMAT_R16:        return GL_R16;
                case SG_PIXELFORMAT_R16SN:      return GL_R16_SNORM;
            #endif
            case SG_PIXELFORMAT_R16UI:      return GL_R16UI;
            case SG_PIXELFORMAT_R16SI:      return GL_R16I;
            case SG_PIXELFORMAT_R16F:       return GL_R16F;
            case SG_PIXELFORMAT_RG8:        return GL_RG8;
            case SG_PIXELFORMAT_RG8SN:      return GL_RG8_SNORM;
            case SG_PIXELFORMAT_RG8UI:      return GL_RG8UI;
            case SG_PIXELFORMAT_RG8SI:      return GL_RG8I;
            case SG_PIXELFORMAT_R32UI:      return GL_R32UI;
            case SG_PIXELFORMAT_R32SI:      return GL_R32I;
            case SG_PIXELFORMAT_R32F:       return GL_R32F;
            #if !defined(SOKOL_GLES3)
                /* 16-bit normalized formats only exist on desktop GL */
                case SG_PIXELFORMAT_RG16:       return GL_RG16;
                case SG_PIXELFORMAT_RG16SN:     return GL_RG16_SNORM;
            #endif
            case SG_PIXELFORMAT_RG16UI:     return GL_RG16UI;
            case SG_PIXELFORMAT_RG16SI:     return GL_RG16I;
            case SG_PIXELFORMAT_RG16F:      return GL_RG16F;
            case SG_PIXELFORMAT_RGBA8:      return GL_RGBA8;
            case SG_PIXELFORMAT_RGBA8SN:    return GL_RGBA8_SNORM;
            case SG_PIXELFORMAT_RGBA8UI:    return GL_RGBA8UI;
            case SG_PIXELFORMAT_RGBA8SI:    return GL_RGBA8I;
            case SG_PIXELFORMAT_RGB10A2:    return GL_RGB10_A2;
            case SG_PIXELFORMAT_RG11B10F:   return GL_R11F_G11F_B10F;
            case SG_PIXELFORMAT_RG32UI:     return GL_RG32UI;
            case SG_PIXELFORMAT_RG32SI:     return GL_RG32I;
            case SG_PIXELFORMAT_RG32F:      return GL_RG32F;
            #if !defined(SOKOL_GLES3)
                /* 16-bit normalized formats only exist on desktop GL */
                case SG_PIXELFORMAT_RGBA16:     return GL_RGBA16;
                case SG_PIXELFORMAT_RGBA16SN:   return GL_RGBA16_SNORM;
            #endif
            case SG_PIXELFORMAT_RGBA16UI:   return GL_RGBA16UI;
            case SG_PIXELFORMAT_RGBA16SI:   return GL_RGBA16I;
            case SG_PIXELFORMAT_RGBA16F:    return GL_RGBA16F;
            case SG_PIXELFORMAT_RGBA32UI:   return GL_RGBA32UI;
            case SG_PIXELFORMAT_RGBA32SI:   return GL_RGBA32I;
            case SG_PIXELFORMAT_RGBA32F:    return GL_RGBA32F;
            case SG_PIXELFORMAT_DEPTH:      return GL_DEPTH_COMPONENT16;
            case SG_PIXELFORMAT_DEPTH_STENCIL:      return GL_DEPTH24_STENCIL8;
            /* compressed formats: internal format == client format */
            case SG_PIXELFORMAT_BC1_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
            case SG_PIXELFORMAT_BC2_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
            case SG_PIXELFORMAT_BC3_RGBA:           return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
            case SG_PIXELFORMAT_BC4_R:              return GL_COMPRESSED_RED_RGTC1;
            case SG_PIXELFORMAT_BC4_RSN:            return GL_COMPRESSED_SIGNED_RED_RGTC1;
            case SG_PIXELFORMAT_BC5_RG:             return GL_COMPRESSED_RED_GREEN_RGTC2;
            case SG_PIXELFORMAT_BC5_RGSN:           return GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2;
            case SG_PIXELFORMAT_BC6H_RGBF:          return GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB;
            case SG_PIXELFORMAT_BC6H_RGBUF:         return GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB;
            case SG_PIXELFORMAT_BC7_RGBA:           return GL_COMPRESSED_RGBA_BPTC_UNORM_ARB;
            case SG_PIXELFORMAT_PVRTC_RGB_2BPP:     return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGB_4BPP:     return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:    return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
            case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:    return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
            case SG_PIXELFORMAT_ETC2_RGB8:          return GL_COMPRESSED_RGB8_ETC2;
            case SG_PIXELFORMAT_ETC2_RGB8A1:        return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
            case SG_PIXELFORMAT_ETC2_RGBA8:         return GL_COMPRESSED_RGBA8_ETC2_EAC;
            case SG_PIXELFORMAT_ETC2_RG11:          return GL_COMPRESSED_RG11_EAC;
            case SG_PIXELFORMAT_ETC2_RG11SN:        return GL_COMPRESSED_SIGNED_RG11_EAC;
            default: SOKOL_UNREACHABLE; return 0;
        }
    }
    #endif
}
4695 
_sg_gl_cubeface_target(int face_index)4696 _SOKOL_PRIVATE GLenum _sg_gl_cubeface_target(int face_index) {
4697     switch (face_index) {
4698         case 0: return GL_TEXTURE_CUBE_MAP_POSITIVE_X;
4699         case 1: return GL_TEXTURE_CUBE_MAP_NEGATIVE_X;
4700         case 2: return GL_TEXTURE_CUBE_MAP_POSITIVE_Y;
4701         case 3: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y;
4702         case 4: return GL_TEXTURE_CUBE_MAP_POSITIVE_Z;
4703         case 5: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z;
4704         default: SOKOL_UNREACHABLE; return 0;
4705     }
4706 }
4707 
_sg_gl_depth_attachment_format(sg_pixel_format fmt)4708 _SOKOL_PRIVATE GLenum _sg_gl_depth_attachment_format(sg_pixel_format fmt) {
4709     switch (fmt) {
4710         case SG_PIXELFORMAT_DEPTH:          return GL_DEPTH_COMPONENT16;
4711         case SG_PIXELFORMAT_DEPTH_STENCIL:  return GL_DEPTH24_STENCIL8;
4712         default: SOKOL_UNREACHABLE; return 0;
4713     }
4714 }
4715 
_sg_gl_init_attr(_sg_gl_attr_t * attr)4716 _SOKOL_PRIVATE void _sg_gl_init_attr(_sg_gl_attr_t* attr) {
4717     attr->vb_index = -1;
4718     attr->divisor = -1;
4719 }
4720 
_sg_gl_init_stencil_state(sg_stencil_state * s)4721 _SOKOL_PRIVATE void _sg_gl_init_stencil_state(sg_stencil_state* s) {
4722     SOKOL_ASSERT(s);
4723     s->fail_op = SG_STENCILOP_KEEP;
4724     s->depth_fail_op = SG_STENCILOP_KEEP;
4725     s->pass_op = SG_STENCILOP_KEEP;
4726     s->compare_func = SG_COMPAREFUNC_ALWAYS;
4727 }
4728 
_sg_gl_init_depth_stencil_state(sg_depth_stencil_state * s)4729 _SOKOL_PRIVATE void _sg_gl_init_depth_stencil_state(sg_depth_stencil_state* s) {
4730     SOKOL_ASSERT(s);
4731     _sg_gl_init_stencil_state(&s->stencil_front);
4732     _sg_gl_init_stencil_state(&s->stencil_back);
4733     s->depth_compare_func = SG_COMPAREFUNC_ALWAYS;
4734 }
4735 
_sg_gl_init_blend_state(sg_blend_state * s)4736 _SOKOL_PRIVATE void _sg_gl_init_blend_state(sg_blend_state* s) {
4737     SOKOL_ASSERT(s);
4738     s->src_factor_rgb = SG_BLENDFACTOR_ONE;
4739     s->dst_factor_rgb = SG_BLENDFACTOR_ZERO;
4740     s->op_rgb = SG_BLENDOP_ADD;
4741     s->src_factor_alpha = SG_BLENDFACTOR_ONE;
4742     s->dst_factor_alpha = SG_BLENDFACTOR_ZERO;
4743     s->op_alpha = SG_BLENDOP_ADD;
4744     s->color_write_mask = SG_COLORMASK_RGBA;
4745 }
4746 
_sg_gl_init_rasterizer_state(sg_rasterizer_state * s)4747 _SOKOL_PRIVATE void _sg_gl_init_rasterizer_state(sg_rasterizer_state* s) {
4748     SOKOL_ASSERT(s);
4749     s->cull_mode = SG_CULLMODE_NONE;
4750     s->face_winding = SG_FACEWINDING_CW;
4751     s->sample_count = 1;
4752 }
4753 
/* see: https://www.khronos.org/registry/OpenGL-Refpages/es3.0/html/glTexImage2D.xhtml */
/* Populate the _sg.formats[] capability table with the pixel formats that
   are available on the active GL flavor without requiring extensions.
   The _sg_pixelformat_*() helpers each set a different combination of
   capability flags (sample/filter/render/msaa/depth); see their
   definitions for the exact flag sets.
   has_bgra: whether BGRA8 textures are supported (caller-detected). */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats(bool has_bgra) {
    /* R8 is filterable everywhere, but only renderable on non-GLES2 */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    }
    else {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]);
    }
    #else
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8]);
    #endif
    /* formats that require GL3.3+/GLES3 (runtime GLES2 fallback excluded) */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
        #if !defined(SOKOL_GLES3)
            /* 16-bit normalized formats: desktop GL only */
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]);
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
        #if !defined(SOKOL_GLES3)
            /* 16-bit normalized formats: desktop GL only */
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]);
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    }
    #endif
    /* RGBA8 works everywhere */
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    }
    #endif
    if (has_bgra) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    }
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG11B10F]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
        #if !defined(SOKOL_GLES3)
            /* 16-bit normalized formats: desktop GL only */
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
            _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
        #endif
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
    }
    #endif
    // FIXME: WEBGL_depth_texture extension?
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);
}
4822 
/* FIXME: OES_half_float_blend */
/* Register capabilities for the 16-bit float pixel formats, based on
   whether half-float color attachments and/or linear filtering of
   half-float textures were detected by the caller. On GLES2 only RGBA16F
   is renderable and there is no RG format, so only RGBA16F and R16F get
   (reduced) capability flags there. */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats_half_float(bool has_colorbuffer_half_float, bool has_texture_half_float_linear) {
    /* NOTE: the #if below deliberately brackets an if/else whose closing
       brace is emitted by the second #if block at the end of the function */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        if (has_texture_half_float_linear) {
            if (has_colorbuffer_half_float) {
                /* sample + filter + render + msaa + blend */
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                /* sample + filter only */
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
        }
        else {
            if (has_colorbuffer_half_float) {
                /* sample + blend + render + msaa, but no linear filtering */
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                /* sample only */
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG16F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
        }
    }
    else {
    #endif
        /* GLES2 can only render to RGBA, and there's no RG format */
        if (has_texture_half_float_linear) {
            if (has_colorbuffer_half_float) {
                _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R16F]);
        }
        else {
            if (has_colorbuffer_half_float) {
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            else {
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
            }
            _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R16F]);
        }
    #if !defined(SOKOL_GLES2)
    }
    #endif
}
4877 
/* Register capabilities for the 32-bit float pixel formats, based on
   caller-detected support for float color attachments, linear filtering
   of float textures, and float blending. Same structure as the half-float
   variant: on GLES2 only RGBA32F is renderable and no RG format exists. */
_SOKOL_PRIVATE void _sg_gl_init_pixelformats_float(bool has_colorbuffer_float, bool has_texture_float_linear, bool has_float_blend) {
    /* NOTE: the #if below deliberately brackets an if/else whose closing
       brace is emitted by the second #if block at the end of the function */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        if (has_texture_float_linear) {
            if (has_colorbuffer_float) {
                if (has_float_blend) {
                    /* full capability set */
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]);
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
                else {
                    /* renderable + filterable, but no blending */
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_R32F]);
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
            }
            else {
                /* sample + filter only */
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
        }
        else {
            if (has_colorbuffer_float) {
                /* renderable but not linearly filterable */
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            else {
                /* sample only */
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RG32F]);
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
        }
    }
    else {
    #endif
        /* GLES2 can only render to RGBA, and there's no RG format */
        if (has_texture_float_linear) {
            if (has_colorbuffer_float) {
                if (has_float_blend) {
                    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
                else {
                    _sg_pixelformat_sfrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
                }
            }
            else {
                _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R32F]);
        }
        else {
            if (has_colorbuffer_float) {
                _sg_pixelformat_sbrm(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            else {
                _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
            }
            _sg_pixelformat_s(&_sg.formats[SG_PIXELFORMAT_R32F]);
        }
    #if !defined(SOKOL_GLES2)
    }
    #endif
}
4943 
_sg_gl_init_pixelformats_s3tc(void)4944 _SOKOL_PRIVATE void _sg_gl_init_pixelformats_s3tc(void) {
4945     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]);
4946     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]);
4947     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]);
4948 }
4949 
_sg_gl_init_pixelformats_rgtc(void)4950 _SOKOL_PRIVATE void _sg_gl_init_pixelformats_rgtc(void) {
4951     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]);
4952     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]);
4953     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]);
4954     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]);
4955 }
4956 
_sg_gl_init_pixelformats_bptc(void)4957 _SOKOL_PRIVATE void _sg_gl_init_pixelformats_bptc(void) {
4958     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]);
4959     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]);
4960     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]);
4961 }
4962 
_sg_gl_init_pixelformats_pvrtc(void)4963 _SOKOL_PRIVATE void _sg_gl_init_pixelformats_pvrtc(void) {
4964     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]);
4965     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]);
4966     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]);
4967     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]);
4968 }
4969 
_sg_gl_init_pixelformats_etc2(void)4970 _SOKOL_PRIVATE void _sg_gl_init_pixelformats_etc2(void) {
4971     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]);
4972     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]);
4973     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]);
4974     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]);
4975     _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]);
4976 }
4977 
/* Query GL implementation limits into _sg.limits / _sg.gl.
   Must be called with a valid GL context current; each query is followed
   by a GL error check. */
_SOKOL_PRIVATE void _sg_gl_init_limits(void) {
    _SG_GL_CHECK_ERROR();
    GLint gl_int;
    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_int);
    _SG_GL_CHECK_ERROR();
    /* GL has no separate limit for array textures' width/height */
    _sg.limits.max_image_size_2d = gl_int;
    _sg.limits.max_image_size_array = gl_int;
    glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &gl_int);
    _SG_GL_CHECK_ERROR();
    _sg.limits.max_image_size_cube = gl_int;
    glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &gl_int);
    _SG_GL_CHECK_ERROR();
    /* clamp to sokol-gfx's own compile-time vertex-attribute limit */
    if (gl_int > SG_MAX_VERTEX_ATTRIBUTES) {
        gl_int = SG_MAX_VERTEX_ATTRIBUTES;
    }
    _sg.limits.max_vertex_attrs = gl_int;
    /* 3D and array textures don't exist on GLES2 */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.limits.max_image_size_3d = gl_int;
        glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.limits.max_image_array_layers = gl_int;
    }
    #endif
    /* anisotropy limit only valid if the extension was detected earlier */
    if (_sg.gl.ext_anisotropic) {
        glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_int);
        _SG_GL_CHECK_ERROR();
        _sg.gl.max_anisotropy = gl_int;
    }
    else {
        _sg.gl.max_anisotropy = 1;
    }
    glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &gl_int);
    _SG_GL_CHECK_ERROR();
    _sg.gl.max_combined_texture_image_units = gl_int;
}
5016 
#if defined(SOKOL_GLCORE33)
/* Initialize backend features, limits and pixel-format capability table
   for the GL 3.3 Core Profile backend. Scans the extension list via
   glGetStringi() (the core-profile way), then registers the pixel formats
   that the detected extensions enable. */
_SOKOL_PRIVATE void _sg_gl_init_caps_glcore33(void) {
    _sg.backend = SG_BACKEND_GLCORE33;

    _sg.features.origin_top_left = false;
    _sg.features.instancing = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = true;

    /* scan extensions */
    bool has_s3tc = false;  /* BC1..BC3 */
    bool has_rgtc = false;  /* BC4 and BC5 */
    bool has_bptc = false;  /* BC6H and BC7 */
    bool has_pvrtc = false;
    bool has_etc2 = false;
    GLint num_ext = 0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext);
    for (int i = 0; i < num_ext; i++) {
        const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, i);
        if (ext) {
            /* substring match so both ARB_/EXT_/vendor variants are caught */
            if (strstr(ext, "_texture_compression_s3tc")) {
                has_s3tc = true;
            }
            else if (strstr(ext, "_texture_compression_rgtc")) {
                has_rgtc = true;
            }
            else if (strstr(ext, "_texture_compression_bptc")) {
                has_bptc = true;
            }
            else if (strstr(ext, "_texture_compression_pvrtc")) {
                has_pvrtc = true;
            }
            else if (strstr(ext, "_ES3_compatibility")) {
                /* ARB_ES3_compatibility implies ETC2/EAC support */
                has_etc2 = true;
            }
            else if (strstr(ext, "_texture_filter_anisotropic")) {
                _sg.gl.ext_anisotropic = true;
            }
        }
    }

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats */
    const bool has_bgra = false;    /* not a bug */
    const bool has_colorbuffer_float = true;
    const bool has_colorbuffer_half_float = true;
    const bool has_texture_float_linear = true; /* FIXME??? */
    const bool has_texture_half_float_linear = true;
    const bool has_float_blend = true;
    _sg_gl_init_pixelformats(has_bgra);
    _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
}
#endif
5091 
#if defined(SOKOL_GLES3)
/* Initialize backend features, limits and pixel-format capability table
   for the GLES3/WebGL2 backend. Extension scan uses glGetStringi();
   float/half-float renderability is extension-gated on GLES3 (unlike
   desktop GL where it's assumed available). */
_SOKOL_PRIVATE void _sg_gl_init_caps_gles3(void) {
    _sg.backend = SG_BACKEND_GLES3;

    _sg.features.origin_top_left = false;
    _sg.features.instancing = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = false;

    bool has_s3tc = false;  /* BC1..BC3 */
    bool has_rgtc = false;  /* BC4 and BC5 */
    bool has_bptc = false;  /* BC6H and BC7 */
    bool has_pvrtc = false;
    /* ETC2 is core in GLES3, but Emscripten/WebGL2 still requires the
       WEBGL_compressed_texture_etc extension */
    #if defined(__EMSCRIPTEN__)
        bool has_etc2 = false;
    #else
        bool has_etc2 = true;
    #endif
    bool has_colorbuffer_float = false;
    bool has_colorbuffer_half_float = false;
    bool has_texture_float_linear = false;
    bool has_float_blend = false;
    GLint num_ext = 0;
    glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext);
    for (int i = 0; i < num_ext; i++) {
        const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, i);
        if (ext) {
            /* substring match catches EXT_/WEBGL_/vendor-prefixed variants */
            if (strstr(ext, "_texture_compression_s3tc")) {
                has_s3tc = true;
            }
            else if (strstr(ext, "_compressed_texture_s3tc")) {
                has_s3tc = true;
            }
            else if (strstr(ext, "_texture_compression_rgtc")) {
                has_rgtc = true;
            }
            else if (strstr(ext, "_texture_compression_bptc")) {
                has_bptc = true;
            }
            else if (strstr(ext, "_texture_compression_pvrtc")) {
                has_pvrtc = true;
            }
            else if (strstr(ext, "_compressed_texture_pvrtc")) {
                has_pvrtc = true;
            }
            else if (strstr(ext, "_compressed_texture_etc")) {
                has_etc2 = true;
            }
            else if (strstr(ext, "_color_buffer_float")) {
                has_colorbuffer_float = true;
            }
            else if (strstr(ext, "_color_buffer_half_float")) {
                has_colorbuffer_half_float = true;
            }
            else if (strstr(ext, "_texture_float_linear")) {
                has_texture_float_linear = true;
            }
            else if (strstr(ext, "_float_blend")) {
                has_float_blend = true;
            }
            else if (strstr(ext, "_texture_filter_anisotropic")) {
                _sg.gl.ext_anisotropic = true;
            }
        }
    }

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats */
    /* half-float textures are always linearly filterable in GLES3 */
    const bool has_texture_half_float_linear = true;
    const bool has_bgra = false;    /* not a bug */
    _sg_gl_init_pixelformats(has_bgra);
    _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
}
#endif
5187 
#if defined(SOKOL_GLES3) || defined(SOKOL_GLES2)
/* Initialize backend features, limits and pixel-format capability table
   for GLES2/WebGL1 (also used as runtime fallback when a GLES3 context
   could not be created). Unlike the core-profile/GLES3 paths, the
   extension list is one space-separated string from
   glGetString(GL_EXTENSIONS). */
_SOKOL_PRIVATE void _sg_gl_init_caps_gles2(void) {
    _sg.backend = SG_BACKEND_GLES2;

    bool has_s3tc = false;  /* BC1..BC3 */
    bool has_rgtc = false;  /* BC4 and BC5 */
    bool has_bptc = false;  /* BC6H and BC7 */
    bool has_pvrtc = false;
    bool has_etc2 = false;
    bool has_texture_float = false;
    bool has_texture_float_linear = false;
    bool has_colorbuffer_float = false;
    bool has_float_blend = false;
    bool has_instancing = false;
    const char* ext = (const char*) glGetString(GL_EXTENSIONS);
    if (ext) {
        /* substring match catches EXT_/WEBGL_/OES_/vendor-prefixed variants */
        has_s3tc = strstr(ext, "_texture_compression_s3tc") || strstr(ext, "_compressed_texture_s3tc");
        has_rgtc = strstr(ext, "_texture_compression_rgtc");
        has_bptc = strstr(ext, "_texture_compression_bptc");
        has_pvrtc = strstr(ext, "_texture_compression_pvrtc") || strstr(ext, "_compressed_texture_pvrtc");
        has_etc2 = strstr(ext, "_compressed_texture_etc");
        has_texture_float = strstr(ext, "_texture_float");
        has_texture_float_linear = strstr(ext, "_texture_float_linear");
        has_colorbuffer_float = strstr(ext, "_color_buffer_float");
        has_float_blend = strstr(ext, "_float_blend");
        /* don't bother with half_float support on WebGL1
            has_texture_half_float = strstr(ext, "_texture_half_float");
            has_texture_half_float_linear = strstr(ext, "_texture_half_float_linear");
            has_colorbuffer_half_float = strstr(ext, "_color_buffer_half_float");
        */
        has_instancing = strstr(ext, "_instanced_arrays");
        /* BUGFIX: previously searched for "ext_anisotropic", which can never
           match the actual extension name (GL_)EXT_texture_filter_anisotropic
           because strstr() is case-sensitive, so anisotropic filtering was
           never detected here; use the same substring as the GL core / GLES3
           extension scanners */
        _sg.gl.ext_anisotropic = (0 != strstr(ext, "_texture_filter_anisotropic"));
    }

    _sg.features.origin_top_left = false;
    /* instancing on GLES2 requires the ANGLE/EXT instanced-arrays extension */
    #if defined(SOKOL_INSTANCING_ENABLED)
        _sg.features.instancing = has_instancing;
    #endif
    _sg.features.multiple_render_targets = false;
    _sg.features.msaa_render_targets = false;
    _sg.features.imagetype_3d = false;
    _sg.features.imagetype_array = false;
    _sg.features.image_clamp_to_border = false;

    /* limits */
    _sg_gl_init_limits();

    /* pixel formats */
    const bool has_bgra = false;    /* not a bug */
    const bool has_texture_half_float = false;
    const bool has_texture_half_float_linear = false;
    const bool has_colorbuffer_half_float = false;
    _sg_gl_init_pixelformats(has_bgra);
    if (has_texture_float) {
        _sg_gl_init_pixelformats_float(has_colorbuffer_float, has_texture_float_linear, has_float_blend);
    }
    if (has_texture_half_float) {
        _sg_gl_init_pixelformats_half_float(has_colorbuffer_half_float, has_texture_half_float_linear);
    }
    if (has_s3tc) {
        _sg_gl_init_pixelformats_s3tc();
    }
    if (has_rgtc) {
        _sg_gl_init_pixelformats_rgtc();
    }
    if (has_bptc) {
        _sg_gl_init_pixelformats_bptc();
    }
    if (has_pvrtc) {
        _sg_gl_init_pixelformats_pvrtc();
    }
    if (has_etc2) {
        _sg_gl_init_pixelformats_etc2();
    }
    /* GLES2 doesn't allow multi-sampled render targets at all */
    for (int i = 0; i < _SG_PIXELFORMAT_NUM; i++) {
        _sg.formats[i].msaa = false;
    }
}
#endif
5268 
5269 /*-- state cache implementation ----------------------------------------------*/
_sg_gl_clear_buffer_bindings(bool force)5270 _SOKOL_PRIVATE void _sg_gl_clear_buffer_bindings(bool force) {
5271     if (force || (_sg.gl.cache.vertex_buffer != 0)) {
5272         glBindBuffer(GL_ARRAY_BUFFER, 0);
5273         _sg.gl.cache.vertex_buffer = 0;
5274     }
5275     if (force || (_sg.gl.cache.index_buffer != 0)) {
5276         glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
5277         _sg.gl.cache.index_buffer = 0;
5278     }
5279 }
5280 
_sg_gl_bind_buffer(GLenum target,GLuint buffer)5281 _SOKOL_PRIVATE void _sg_gl_bind_buffer(GLenum target, GLuint buffer) {
5282     SOKOL_ASSERT((GL_ARRAY_BUFFER == target) || (GL_ELEMENT_ARRAY_BUFFER == target));
5283     if (target == GL_ARRAY_BUFFER) {
5284         if (_sg.gl.cache.vertex_buffer != buffer) {
5285             _sg.gl.cache.vertex_buffer = buffer;
5286             glBindBuffer(target, buffer);
5287         }
5288     }
5289     else {
5290         if (_sg.gl.cache.index_buffer != buffer) {
5291             _sg.gl.cache.index_buffer = buffer;
5292             glBindBuffer(target, buffer);
5293         }
5294     }
5295 }
5296 
_sg_gl_store_buffer_binding(GLenum target)5297 _SOKOL_PRIVATE void _sg_gl_store_buffer_binding(GLenum target) {
5298     if (target == GL_ARRAY_BUFFER) {
5299         _sg.gl.cache.stored_vertex_buffer = _sg.gl.cache.vertex_buffer;
5300     }
5301     else {
5302         _sg.gl.cache.stored_index_buffer = _sg.gl.cache.index_buffer;
5303     }
5304 }
5305 
_sg_gl_restore_buffer_binding(GLenum target)5306 _SOKOL_PRIVATE void _sg_gl_restore_buffer_binding(GLenum target) {
5307     if (target == GL_ARRAY_BUFFER) {
5308         if (_sg.gl.cache.stored_vertex_buffer != 0) {
5309             /* we only care restoring valid ids */
5310             _sg_gl_bind_buffer(target, _sg.gl.cache.stored_vertex_buffer);
5311         }
5312     }
5313     else {
5314         if (_sg.gl.cache.stored_index_buffer != 0) {
5315             /* we only care restoring valid ids */
5316             _sg_gl_bind_buffer(target, _sg.gl.cache.stored_index_buffer);
5317         }
5318     }
5319 }
5320 
_sg_gl_active_texture(GLenum texture)5321 _SOKOL_PRIVATE void _sg_gl_active_texture(GLenum texture) {
5322     if (_sg.gl.cache.cur_active_texture != texture) {
5323         _sg.gl.cache.cur_active_texture = texture;
5324         glActiveTexture(texture);
5325     }
5326 }
5327 
_sg_gl_clear_texture_bindings(bool force)5328 _SOKOL_PRIVATE void _sg_gl_clear_texture_bindings(bool force) {
5329     for (int i = 0; (i < SG_MAX_SHADERSTAGE_IMAGES) && (i < _sg.gl.max_combined_texture_image_units); i++) {
5330         if (force || (_sg.gl.cache.textures[i].texture != 0)) {
5331             GLenum gl_texture_slot = GL_TEXTURE0 + i;
5332             glActiveTexture(gl_texture_slot);
5333             glBindTexture(GL_TEXTURE_2D, 0);
5334             glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
5335             #if !defined(SOKOL_GLES2)
5336             if (!_sg.gl.gles2) {
5337                 glBindTexture(GL_TEXTURE_3D, 0);
5338                 glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
5339             }
5340             #endif
5341             _sg.gl.cache.textures[i].target = 0;
5342             _sg.gl.cache.textures[i].texture = 0;
5343             _sg.gl.cache.cur_active_texture = gl_texture_slot;
5344         }
5345     }
5346 }
5347 
_sg_gl_bind_texture(int slot_index,GLenum target,GLuint texture)5348 _SOKOL_PRIVATE void _sg_gl_bind_texture(int slot_index, GLenum target, GLuint texture) {
5349     /* it's valid to call this function with target=0 and/or texture=0
5350        target=0 will unbind the previous binding, texture=0 will clear
5351        the new binding
5352     */
5353     SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES);
5354     if (slot_index >= _sg.gl.max_combined_texture_image_units) {
5355         return;
5356     }
5357     _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.textures[slot_index];
5358     if ((slot->target != target) || (slot->texture != texture)) {
5359         _sg_gl_active_texture(GL_TEXTURE0 + slot_index);
5360         /* if the target has changed, clear the previous binding on that target */
5361         if ((target != slot->target) && (slot->target != 0)) {
5362             glBindTexture(slot->target, 0);
5363         }
5364         /* apply new binding (texture can be 0 to unbind) */
5365         if (target != 0) {
5366             glBindTexture(target, texture);
5367         }
5368         slot->target = target;
5369         slot->texture = texture;
5370     }
5371 }
5372 
/* save the cached binding of image slot 'slot_index' so it can later be
   re-established via _sg_gl_restore_texture_binding(); note that there
   is only a single storage slot (no nesting)
*/
_SOKOL_PRIVATE void _sg_gl_store_texture_binding(int slot_index) {
    SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES);
    _sg.gl.cache.stored_texture = _sg.gl.cache.textures[slot_index];
}
5377 
_sg_gl_restore_texture_binding(int slot_index)5378 _SOKOL_PRIVATE void _sg_gl_restore_texture_binding(int slot_index) {
5379     SOKOL_ASSERT(slot_index < SG_MAX_SHADERSTAGE_IMAGES);
5380     const _sg_gl_texture_bind_slot* slot = &_sg.gl.cache.stored_texture;
5381     if (slot->texture != 0) {
5382         /* we only care restoring valid ids */
5383         _sg_gl_bind_texture(slot_index, slot->target, slot->texture);
5384     }
5385 }
5386 
/* one-time GL backend setup: records the forced-GLES2 flag, drains any
   pre-existing GL error state (debug builds only), and initializes the
   capability/pixel-format tables for the compiled GL flavor
*/
_SOKOL_PRIVATE void _sg_gl_setup_backend(const sg_desc* desc) {
    /* assumes that _sg.gl is already zero-initialized */
    _sg.gl.valid = true;
    #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
    /* on a GLES3 build the application can force the GLES2 code path */
    _sg.gl.gles2 = desc->context.gl.force_gles2;
    #else
    _SOKOL_UNUSED(desc);
    _sg.gl.gles2 = false;
    #endif

    /* clear initial GL error state */
    #if defined(SOKOL_DEBUG)
        while (glGetError() != GL_NO_ERROR);
    #endif
    #if defined(SOKOL_GLCORE33)
        _sg_gl_init_caps_glcore33();
    #elif defined(SOKOL_GLES3)
        /* a GLES3 build may still run on the GLES2 fallback path */
        if (_sg.gl.gles2) {
            _sg_gl_init_caps_gles2();
        }
        else {
            _sg_gl_init_caps_gles3();
        }
    #else
        _sg_gl_init_caps_gles2();
    #endif
}
5414 
/* shut down the GL backend; no GL objects are owned at this level, so
   only the 'valid' flag needs to be cleared
*/
_SOKOL_PRIVATE void _sg_gl_discard_backend(void) {
    SOKOL_ASSERT(_sg.gl.valid);
    _sg.gl.valid = false;
}
5419 
_sg_gl_reset_state_cache(void)5420 _SOKOL_PRIVATE void _sg_gl_reset_state_cache(void) {
5421     if (_sg.gl.cur_context) {
5422         _SG_GL_CHECK_ERROR();
5423         #if !defined(SOKOL_GLES2)
5424         if (!_sg.gl.gles2) {
5425             glBindVertexArray(_sg.gl.cur_context->vao);
5426             _SG_GL_CHECK_ERROR();
5427         }
5428         #endif
5429         memset(&_sg.gl.cache, 0, sizeof(_sg.gl.cache));
5430         _sg_gl_clear_buffer_bindings(true);
5431         _SG_GL_CHECK_ERROR();
5432         _sg_gl_clear_texture_bindings(true);
5433         _SG_GL_CHECK_ERROR();
5434         for (uint32_t i = 0; i < _sg.limits.max_vertex_attrs; i++) {
5435             _sg_gl_init_attr(&_sg.gl.cache.attrs[i].gl_attr);
5436             glDisableVertexAttribArray(i);
5437             _SG_GL_CHECK_ERROR();
5438         }
5439         _sg.gl.cache.cur_primitive_type = GL_TRIANGLES;
5440 
5441         /* shader program */
5442         glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&_sg.gl.cache.prog);
5443         _SG_GL_CHECK_ERROR();
5444 
5445         /* depth-stencil state */
5446         _sg_gl_init_depth_stencil_state(&_sg.gl.cache.ds);
5447         glEnable(GL_DEPTH_TEST);
5448         glDepthFunc(GL_ALWAYS);
5449         glDepthMask(GL_FALSE);
5450         glDisable(GL_STENCIL_TEST);
5451         glStencilFunc(GL_ALWAYS, 0, 0);
5452         glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
5453         glStencilMask(0);
5454 
5455         /* blend state */
5456         _sg_gl_init_blend_state(&_sg.gl.cache.blend);
5457         glDisable(GL_BLEND);
5458         glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO);
5459         glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD);
5460         glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
5461         glBlendColor(0.0f, 0.0f, 0.0f, 0.0f);
5462 
5463         /* rasterizer state */
5464         _sg_gl_init_rasterizer_state(&_sg.gl.cache.rast);
5465         glPolygonOffset(0.0f, 0.0f);
5466         glDisable(GL_POLYGON_OFFSET_FILL);
5467         glDisable(GL_CULL_FACE);
5468         glFrontFace(GL_CW);
5469         glCullFace(GL_BACK);
5470         glEnable(GL_SCISSOR_TEST);
5471         glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
5472         glEnable(GL_DITHER);
5473         glDisable(GL_POLYGON_OFFSET_FILL);
5474         #if defined(SOKOL_GLCORE33)
5475             glEnable(GL_MULTISAMPLE);
5476             glEnable(GL_PROGRAM_POINT_SIZE);
5477         #endif
5478     }
5479 }
5480 
/* make 'ctx' the current GL context slot and resync the state cache */
_SOKOL_PRIVATE void _sg_gl_activate_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(_sg.gl.valid);
    /* NOTE: ctx can be 0 to unset the current context */
    _sg.gl.cur_context = ctx;
    _sg_gl_reset_state_cache();
}
5487 
5488 /*-- GL backend resource creation and destruction ----------------------------*/
/* initialize a context slot: capture the currently bound framebuffer as
   the context's default framebuffer, and (on non-GLES2 paths) create
   and bind the context's vertex array object
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    SOKOL_ASSERT(0 == ctx->default_framebuffer);
    _SG_GL_CHECK_ERROR();
    /* whatever framebuffer is bound right now becomes the "default" */
    glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&ctx->default_framebuffer);
    _SG_GL_CHECK_ERROR();
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        SOKOL_ASSERT(0 == ctx->vao);
        glGenVertexArrays(1, &ctx->vao);
        glBindVertexArray(ctx->vao);
        _SG_GL_CHECK_ERROR();
    }
    #endif
    return SG_RESOURCESTATE_VALID;
}
5505 
/* destroy a context slot's GL resources (currently only the VAO on
   non-GLES2 paths; the default framebuffer is not owned by sokol)
*/
_SOKOL_PRIVATE void _sg_gl_destroy_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2) {
        if (ctx->vao) {
            glDeleteVertexArrays(1, &ctx->vao);
        }
        _SG_GL_CHECK_ERROR();
    }
    #else
    _SOKOL_UNUSED(ctx);
    #endif
}
5519 
_sg_gl_create_buffer(_sg_buffer_t * buf,const sg_buffer_desc * desc)5520 _SOKOL_PRIVATE sg_resource_state _sg_gl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
5521     SOKOL_ASSERT(buf && desc);
5522     _SG_GL_CHECK_ERROR();
5523     _sg_buffer_common_init(&buf->cmn, desc);
5524     buf->gl.ext_buffers = (0 != desc->gl_buffers[0]);
5525     GLenum gl_target = _sg_gl_buffer_target(buf->cmn.type);
5526     GLenum gl_usage  = _sg_gl_usage(buf->cmn.usage);
5527     for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
5528         GLuint gl_buf = 0;
5529         if (buf->gl.ext_buffers) {
5530             SOKOL_ASSERT(desc->gl_buffers[slot]);
5531             gl_buf = desc->gl_buffers[slot];
5532         }
5533         else {
5534             glGenBuffers(1, &gl_buf);
5535             _sg_gl_store_buffer_binding(gl_target);
5536             _sg_gl_bind_buffer(gl_target, gl_buf);
5537             glBufferData(gl_target, buf->cmn.size, 0, gl_usage);
5538             if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
5539                 SOKOL_ASSERT(desc->content);
5540                 glBufferSubData(gl_target, 0, buf->cmn.size, desc->content);
5541             }
5542             _sg_gl_restore_buffer_binding(gl_target);
5543         }
5544         buf->gl.buf[slot] = gl_buf;
5545     }
5546     _SG_GL_CHECK_ERROR();
5547     return SG_RESOURCESTATE_VALID;
5548 }
5549 
_sg_gl_destroy_buffer(_sg_buffer_t * buf)5550 _SOKOL_PRIVATE void _sg_gl_destroy_buffer(_sg_buffer_t* buf) {
5551     SOKOL_ASSERT(buf);
5552     _SG_GL_CHECK_ERROR();
5553     if (!buf->gl.ext_buffers) {
5554         for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
5555             if (buf->gl.buf[slot]) {
5556                 glDeleteBuffers(1, &buf->gl.buf[slot]);
5557             }
5558         }
5559         _SG_GL_CHECK_ERROR();
5560     }
5561 }
5562 
_sg_gl_supported_texture_format(sg_pixel_format fmt)5563 _SOKOL_PRIVATE bool _sg_gl_supported_texture_format(sg_pixel_format fmt) {
5564     const int fmt_index = (int) fmt;
5565     SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM));
5566     return _sg.formats[fmt_index].sample;
5567 }
5568 
/* create an image resource; depending on pixel format and desc this
   creates a depth/stencil render buffer, an MSAA render buffer plus
   regular texture(s), or plain GL texture(s); externally provided
   textures (desc->gl_textures) are injected instead of created
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    _SG_GL_CHECK_ERROR();
    _sg_image_common_init(&img->cmn, desc);
    img->gl.ext_textures = (0 != desc->gl_textures[0]);

    /* check if texture format is supported */
    if (!_sg_gl_supported_texture_format(img->cmn.pixel_format)) {
        SOKOL_LOG("texture format not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }
    /* check for optional texture types */
    if ((img->cmn.type == SG_IMAGETYPE_3D) && !_sg.features.imagetype_3d) {
        SOKOL_LOG("3D textures not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }
    if ((img->cmn.type == SG_IMAGETYPE_ARRAY) && !_sg.features.imagetype_array) {
        SOKOL_LOG("array textures not supported by GL context\n");
        return SG_RESOURCESTATE_FAILED;
    }

    #if !defined(SOKOL_GLES2)
    /* MSAA render targets require the GLES3/GL3.3 path plus hw support */
    bool msaa = false;
    if (!_sg.gl.gles2) {
        msaa = (img->cmn.sample_count > 1) && (_sg.features.msaa_render_targets);
    }
    #endif

    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* special case depth-stencil-buffer: implemented as a GL render
           buffer object, not a texture */
        SOKOL_ASSERT((img->cmn.usage == SG_USAGE_IMMUTABLE) && (img->cmn.num_slots == 1));
        SOKOL_ASSERT(!img->gl.ext_textures);   /* cannot provide external texture for depth images */
        glGenRenderbuffers(1, &img->gl.depth_render_buffer);
        glBindRenderbuffer(GL_RENDERBUFFER, img->gl.depth_render_buffer);
        GLenum gl_depth_format = _sg_gl_depth_attachment_format(img->cmn.pixel_format);
        #if !defined(SOKOL_GLES2)
        if (!_sg.gl.gles2 && msaa) {
            glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_depth_format, img->cmn.width, img->cmn.height);
        }
        else
        #endif
        {
            glRenderbufferStorage(GL_RENDERBUFFER, gl_depth_format, img->cmn.width, img->cmn.height);
        }
    }
    else {
        /* regular color texture */
        img->gl.target = _sg_gl_texture_target(img->cmn.type);
        const GLenum gl_internal_format = _sg_gl_teximage_internal_format(img->cmn.pixel_format);

        /* if this is a MSAA render target, need to create a separate render buffer */
        #if !defined(SOKOL_GLES2)
        if (!_sg.gl.gles2 && img->cmn.render_target && msaa) {
            glGenRenderbuffers(1, &img->gl.msaa_render_buffer);
            glBindRenderbuffer(GL_RENDERBUFFER, img->gl.msaa_render_buffer);
            glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->cmn.sample_count, gl_internal_format, img->cmn.width, img->cmn.height);
        }
        #endif

        if (img->gl.ext_textures) {
            /* inject externally GL textures */
            for (int slot = 0; slot < img->cmn.num_slots; slot++) {
                SOKOL_ASSERT(desc->gl_textures[slot]);
                img->gl.tex[slot] = desc->gl_textures[slot];
            }
        }
        else {
            /* create our own GL texture(s) */
            const GLenum gl_format = _sg_gl_teximage_format(img->cmn.pixel_format);
            const bool is_compressed = _sg_is_compressed_pixel_format(img->cmn.pixel_format);
            for (int slot = 0; slot < img->cmn.num_slots; slot++) {
                glGenTextures(1, &img->gl.tex[slot]);
                /* texture slot 0 is temporarily used for setup, its
                   previous binding is saved and restored below */
                _sg_gl_store_texture_binding(0);
                _sg_gl_bind_texture(0, img->gl.target, img->gl.tex[slot]);
                GLenum gl_min_filter = _sg_gl_filter(img->cmn.min_filter);
                GLenum gl_mag_filter = _sg_gl_filter(img->cmn.mag_filter);
                glTexParameteri(img->gl.target, GL_TEXTURE_MIN_FILTER, gl_min_filter);
                glTexParameteri(img->gl.target, GL_TEXTURE_MAG_FILTER, gl_mag_filter);
                if (_sg.gl.ext_anisotropic && (img->cmn.max_anisotropy > 1)) {
                    /* clamp requested anisotropy to the hardware limit */
                    GLint max_aniso = (GLint) img->cmn.max_anisotropy;
                    if (max_aniso > _sg.gl.max_anisotropy) {
                        max_aniso = _sg.gl.max_anisotropy;
                    }
                    glTexParameteri(img->gl.target, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_aniso);
                }
                if (img->cmn.type == SG_IMAGETYPE_CUBE) {
                    /* cube maps always clamp (wrap modes are ignored) */
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
                }
                else {
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_S, _sg_gl_wrap(img->cmn.wrap_u));
                    glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_T, _sg_gl_wrap(img->cmn.wrap_v));
                    #if !defined(SOKOL_GLES2)
                    if (!_sg.gl.gles2 && (img->cmn.type == SG_IMAGETYPE_3D)) {
                        glTexParameteri(img->gl.target, GL_TEXTURE_WRAP_R, _sg_gl_wrap(img->cmn.wrap_w));
                    }
                    #endif
                    #if defined(SOKOL_GLCORE33)
                    /* border color is only configurable on desktop GL */
                    float border[4];
                    switch (img->cmn.border_color) {
                        case SG_BORDERCOLOR_TRANSPARENT_BLACK:
                            border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 0.0f;
                            break;
                        case SG_BORDERCOLOR_OPAQUE_WHITE:
                            border[0] = 1.0f; border[1] = 1.0f; border[2] = 1.0f; border[3] = 1.0f;
                            break;
                        default:
                            /* opaque black */
                            border[0] = 0.0f; border[1] = 0.0f; border[2] = 0.0f; border[3] = 1.0f;
                            break;
                    }
                    glTexParameterfv(img->gl.target, GL_TEXTURE_BORDER_COLOR, border);
                    #endif
                }
                #if !defined(SOKOL_GLES2)
                if (!_sg.gl.gles2) {
                    /* GL spec has strange defaults for mipmap min/max lod: -1000 to +1000 */
                    const float min_lod = _sg_clamp(desc->min_lod, 0.0f, 1000.0f);
                    const float max_lod = _sg_clamp(desc->max_lod, 0.0f, 1000.0f);
                    glTexParameterf(img->gl.target, GL_TEXTURE_MIN_LOD, min_lod);
                    glTexParameterf(img->gl.target, GL_TEXTURE_MAX_LOD, max_lod);
                }
                #endif
                /* upload the initial image content (all faces and mip levels) */
                const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 6 : 1;
                int data_index = 0;
                for (int face_index = 0; face_index < num_faces; face_index++) {
                    for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, data_index++) {
                        GLenum gl_img_target = img->gl.target;
                        if (SG_IMAGETYPE_CUBE == img->cmn.type) {
                            gl_img_target = _sg_gl_cubeface_target(face_index);
                        }
                        const GLvoid* data_ptr = desc->content.subimage[face_index][mip_index].ptr;
                        const int data_size = desc->content.subimage[face_index][mip_index].size;
                        /* mip dimensions are halved per level, clamped to 1 */
                        int mip_width = img->cmn.width >> mip_index;
                        if (mip_width == 0) {
                            mip_width = 1;
                        }
                        int mip_height = img->cmn.height >> mip_index;
                        if (mip_height == 0) {
                            mip_height = 1;
                        }
                        if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) {
                            if (is_compressed) {
                                glCompressedTexImage2D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, 0, data_size, data_ptr);
                            }
                            else {
                                const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format);
                                glTexImage2D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, 0, gl_format, gl_type, data_ptr);
                            }
                        }
                        #if !defined(SOKOL_GLES2)
                        else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) {
                            /* only 3D images mip-reduce the depth dimension,
                               array layers keep their count across mips */
                            int mip_depth = img->cmn.depth;
                            if (SG_IMAGETYPE_3D == img->cmn.type) {
                                mip_depth >>= mip_index;
                            }
                            if (mip_depth == 0) {
                                mip_depth = 1;
                            }
                            if (is_compressed) {
                                glCompressedTexImage3D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, mip_depth, 0, data_size, data_ptr);
                            }
                            else {
                                const GLenum gl_type = _sg_gl_teximage_type(img->cmn.pixel_format);
                                glTexImage3D(gl_img_target, mip_index, gl_internal_format,
                                    mip_width, mip_height, mip_depth, 0, gl_format, gl_type, data_ptr);
                            }
                        }
                        #endif
                    }
                }
                _sg_gl_restore_texture_binding(0);
            }
        }
    }
    _SG_GL_CHECK_ERROR();
    return SG_RESOURCESTATE_VALID;
}
5749 
_sg_gl_destroy_image(_sg_image_t * img)5750 _SOKOL_PRIVATE void _sg_gl_destroy_image(_sg_image_t* img) {
5751     SOKOL_ASSERT(img);
5752     _SG_GL_CHECK_ERROR();
5753     if (!img->gl.ext_textures) {
5754         for (int slot = 0; slot < img->cmn.num_slots; slot++) {
5755             if (img->gl.tex[slot]) {
5756                 glDeleteTextures(1, &img->gl.tex[slot]);
5757             }
5758         }
5759     }
5760     if (img->gl.depth_render_buffer) {
5761         glDeleteRenderbuffers(1, &img->gl.depth_render_buffer);
5762     }
5763     if (img->gl.msaa_render_buffer) {
5764         glDeleteRenderbuffers(1, &img->gl.msaa_render_buffer);
5765     }
5766     _SG_GL_CHECK_ERROR();
5767 }
5768 
_sg_gl_compile_shader(sg_shader_stage stage,const char * src)5769 _SOKOL_PRIVATE GLuint _sg_gl_compile_shader(sg_shader_stage stage, const char* src) {
5770     SOKOL_ASSERT(src);
5771     _SG_GL_CHECK_ERROR();
5772     GLuint gl_shd = glCreateShader(_sg_gl_shader_stage(stage));
5773     glShaderSource(gl_shd, 1, &src, 0);
5774     glCompileShader(gl_shd);
5775     GLint compile_status = 0;
5776     glGetShaderiv(gl_shd, GL_COMPILE_STATUS, &compile_status);
5777     if (!compile_status) {
5778         /* compilation failed, log error and delete shader */
5779         GLint log_len = 0;
5780         glGetShaderiv(gl_shd, GL_INFO_LOG_LENGTH, &log_len);
5781         if (log_len > 0) {
5782             GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len);
5783             glGetShaderInfoLog(gl_shd, log_len, &log_len, log_buf);
5784             SOKOL_LOG(log_buf);
5785             SOKOL_FREE(log_buf);
5786         }
5787         glDeleteShader(gl_shd);
5788         gl_shd = 0;
5789     }
5790     _SG_GL_CHECK_ERROR();
5791     return gl_shd;
5792 }
5793 
_sg_gl_create_shader(_sg_shader_t * shd,const sg_shader_desc * desc)5794 _SOKOL_PRIVATE sg_resource_state _sg_gl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
5795     SOKOL_ASSERT(shd && desc);
5796     SOKOL_ASSERT(!shd->gl.prog);
5797     _SG_GL_CHECK_ERROR();
5798 
5799     _sg_shader_common_init(&shd->cmn, desc);
5800 
5801     /* copy vertex attribute names over, these are required for GLES2, and optional for GLES3 and GL3.x */
5802     for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) {
5803         _sg_strcpy(&shd->gl.attrs[i].name, desc->attrs[i].name);
5804     }
5805 
5806     GLuint gl_vs = _sg_gl_compile_shader(SG_SHADERSTAGE_VS, desc->vs.source);
5807     GLuint gl_fs = _sg_gl_compile_shader(SG_SHADERSTAGE_FS, desc->fs.source);
5808     if (!(gl_vs && gl_fs)) {
5809         return SG_RESOURCESTATE_FAILED;
5810     }
5811     GLuint gl_prog = glCreateProgram();
5812     glAttachShader(gl_prog, gl_vs);
5813     glAttachShader(gl_prog, gl_fs);
5814     glLinkProgram(gl_prog);
5815     glDeleteShader(gl_vs);
5816     glDeleteShader(gl_fs);
5817     _SG_GL_CHECK_ERROR();
5818 
5819     GLint link_status;
5820     glGetProgramiv(gl_prog, GL_LINK_STATUS, &link_status);
5821     if (!link_status) {
5822         GLint log_len = 0;
5823         glGetProgramiv(gl_prog, GL_INFO_LOG_LENGTH, &log_len);
5824         if (log_len > 0) {
5825             GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len);
5826             glGetProgramInfoLog(gl_prog, log_len, &log_len, log_buf);
5827             SOKOL_LOG(log_buf);
5828             SOKOL_FREE(log_buf);
5829         }
5830         glDeleteProgram(gl_prog);
5831         return SG_RESOURCESTATE_FAILED;
5832     }
5833     shd->gl.prog = gl_prog;
5834 
5835     /* resolve uniforms */
5836     _SG_GL_CHECK_ERROR();
5837     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
5838         const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs;
5839         _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index];
5840         for (int ub_index = 0; ub_index < shd->cmn.stage[stage_index].num_uniform_blocks; ub_index++) {
5841             const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
5842             SOKOL_ASSERT(ub_desc->size > 0);
5843             _sg_gl_uniform_block_t* ub = &gl_stage->uniform_blocks[ub_index];
5844             SOKOL_ASSERT(ub->num_uniforms == 0);
5845             int cur_uniform_offset = 0;
5846             for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) {
5847                 const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index];
5848                 if (u_desc->type == SG_UNIFORMTYPE_INVALID) {
5849                     break;
5850                 }
5851                 _sg_gl_uniform_t* u = &ub->uniforms[u_index];
5852                 u->type = u_desc->type;
5853                 u->count = (uint8_t) u_desc->array_count;
5854                 u->offset = (uint16_t) cur_uniform_offset;
5855                 cur_uniform_offset += _sg_uniform_size(u->type, u->count);
5856                 if (u_desc->name) {
5857                     u->gl_loc = glGetUniformLocation(gl_prog, u_desc->name);
5858                 }
5859                 else {
5860                     u->gl_loc = u_index;
5861                 }
5862                 ub->num_uniforms++;
5863             }
5864             SOKOL_ASSERT(ub_desc->size == cur_uniform_offset);
5865         }
5866     }
5867 
5868     /* resolve image locations */
5869     _SG_GL_CHECK_ERROR();
5870     GLuint cur_prog = 0;
5871     glGetIntegerv(GL_CURRENT_PROGRAM, (GLint*)&cur_prog);
5872     glUseProgram(gl_prog);
5873     int gl_tex_slot = 0;
5874     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
5875         const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs;
5876         _sg_gl_shader_stage_t* gl_stage = &shd->gl.stage[stage_index];
5877         for (int img_index = 0; img_index < shd->cmn.stage[stage_index].num_images; img_index++) {
5878             const sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
5879             SOKOL_ASSERT(img_desc->type != _SG_IMAGETYPE_DEFAULT);
5880             _sg_gl_shader_image_t* gl_img = &gl_stage->images[img_index];
5881             GLint gl_loc = img_index;
5882             if (img_desc->name) {
5883                 gl_loc = glGetUniformLocation(gl_prog, img_desc->name);
5884             }
5885             if (gl_loc != -1) {
5886                 gl_img->gl_tex_slot = gl_tex_slot++;
5887                 glUniform1i(gl_loc, gl_img->gl_tex_slot);
5888             }
5889             else {
5890                 gl_img->gl_tex_slot = -1;
5891             }
5892         }
5893     }
5894     /* it's legal to call glUseProgram with 0 */
5895     glUseProgram(cur_prog);
5896     _SG_GL_CHECK_ERROR();
5897     return SG_RESOURCESTATE_VALID;
5898 }
5899 
_sg_gl_destroy_shader(_sg_shader_t * shd)5900 _SOKOL_PRIVATE void _sg_gl_destroy_shader(_sg_shader_t* shd) {
5901     SOKOL_ASSERT(shd);
5902     _SG_GL_CHECK_ERROR();
5903     if (shd->gl.prog == _sg.gl.cache.prog) {
5904         _sg.gl.cache.prog = 0;
5905         glUseProgram(0);
5906     }
5907     if (shd->gl.prog) {
5908         glDeleteProgram(shd->gl.prog);
5909     }
5910     _SG_GL_CHECK_ERROR();
5911 }
5912 
/* create a pipeline resource: copy the render-state blocks from 'desc'
   and resolve each vertex-layout attribute to its GL attribute location
   in the shader program (by name when provided, by index otherwise)
*/
_SOKOL_PRIVATE sg_resource_state _sg_gl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(!pip->shader && pip->cmn.shader_id.id == SG_INVALID_ID);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);
    SOKOL_ASSERT(shd->gl.prog);
    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);
    pip->gl.primitive_type = desc->primitive_type;
    pip->gl.depth_stencil = desc->depth_stencil;
    pip->gl.blend = desc->blend;
    pip->gl.rast = desc->rasterizer;

    /* resolve vertex attributes */
    /* mark all attribute slots as unused first (vb_index == -1) */
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        pip->gl.attrs[attr_index].vb_index = -1;
    }
    for (uint32_t attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        /* attrs are filled from the front, first invalid format ends the list */
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
        const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index];
        const sg_vertex_step step_func = l_desc->step_func;
        const int step_rate = l_desc->step_rate;
        /* by default the attribute's own index is the GL location, a
           shader-provided attribute name overrides that via GL lookup */
        GLint attr_loc = attr_index;
        if (!_sg_strempty(&shd->gl.attrs[attr_index].name)) {
            attr_loc = glGetAttribLocation(pip->shader->gl.prog, _sg_strptr(&shd->gl.attrs[attr_index].name));
        }
        SOKOL_ASSERT(attr_loc < (GLint)_sg.limits.max_vertex_attrs);
        if (attr_loc != -1) {
            _sg_gl_attr_t* gl_attr = &pip->gl.attrs[attr_loc];
            SOKOL_ASSERT(gl_attr->vb_index == -1);
            gl_attr->vb_index = (int8_t) a_desc->buffer_index;
            if (step_func == SG_VERTEXSTEP_PER_VERTEX) {
                /* divisor 0 == per-vertex stepping (GL convention) */
                gl_attr->divisor = 0;
            }
            else {
                gl_attr->divisor = (int8_t) step_rate;
            }
            SOKOL_ASSERT(l_desc->stride > 0);
            gl_attr->stride = (uint8_t) l_desc->stride;
            gl_attr->offset = a_desc->offset;
            gl_attr->size = (uint8_t) _sg_gl_vertexformat_size(a_desc->format);
            gl_attr->type = _sg_gl_vertexformat_type(a_desc->format);
            gl_attr->normalized = _sg_gl_vertexformat_normalized(a_desc->format);
            pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
        }
        else {
            /* attribute not found in program: warn, but don't fail */
            SOKOL_LOG("Vertex attribute not found in shader: ");
            SOKOL_LOG(_sg_strptr(&shd->gl.attrs[attr_index].name));
        }
    }
    return SG_RESOURCESTATE_VALID;
}
5968 
_sg_gl_destroy_pipeline(_sg_pipeline_t * pip)5969 _SOKOL_PRIVATE void _sg_gl_destroy_pipeline(_sg_pipeline_t* pip) {
5970     SOKOL_ASSERT(pip);
5971     _SOKOL_UNUSED(pip);
5972     /* empty */
5973 }
5974 
5975 /*
5976     _sg_create_pass
5977 
5978     att_imgs must point to a _sg_image* att_imgs[SG_MAX_COLOR_ATTACHMENTS+1] array,
5979     first entries are the color attachment images (or nullptr), last entry
5980     is the depth-stencil image (or nullptr).
5981 */
_sg_gl_create_pass(_sg_pass_t * pass,_sg_image_t ** att_images,const sg_pass_desc * desc)5982 _SOKOL_PRIVATE sg_resource_state _sg_gl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
5983     SOKOL_ASSERT(pass && att_images && desc);
5984     SOKOL_ASSERT(att_images && att_images[0]);
5985     _SG_GL_CHECK_ERROR();
5986 
5987     _sg_pass_common_init(&pass->cmn, desc);
5988 
5989     /* copy image pointers */
5990     const sg_attachment_desc* att_desc;
5991     for (int i = 0; i < pass->cmn.num_color_atts; i++) {
5992         att_desc = &desc->color_attachments[i];
5993         SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
5994         SOKOL_ASSERT(0 == pass->gl.color_atts[i].image);
5995         SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id));
5996         SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format));
5997         pass->gl.color_atts[i].image = att_images[i];
5998     }
5999     SOKOL_ASSERT(0 == pass->gl.ds_att.image);
6000     att_desc = &desc->depth_stencil_attachment;
6001     if (att_desc->image.id != SG_INVALID_ID) {
6002         const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
6003         SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
6004         SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
6005         pass->gl.ds_att.image = att_images[ds_img_index];
6006     }
6007 
6008     /* store current framebuffer binding (restored at end of function) */
6009     GLuint gl_orig_fb;
6010     glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&gl_orig_fb);
6011 
6012     /* create a framebuffer object */
6013     glGenFramebuffers(1, &pass->gl.fb);
6014     glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb);
6015 
6016     /* attach msaa render buffer or textures */
6017     const bool is_msaa = (0 != att_images[0]->gl.msaa_render_buffer);
6018     if (is_msaa) {
6019         for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
6020             const _sg_image_t* att_img = pass->gl.color_atts[i].image;
6021             if (att_img) {
6022                 const GLuint gl_render_buffer = att_img->gl.msaa_render_buffer;
6023                 SOKOL_ASSERT(gl_render_buffer);
6024                 glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+i, GL_RENDERBUFFER, gl_render_buffer);
6025             }
6026         }
6027     }
6028     else {
6029         for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
6030             const _sg_image_t* att_img = pass->gl.color_atts[i].image;
6031             const int mip_level = pass->cmn.color_atts[i].mip_level;
6032             const int slice = pass->cmn.color_atts[i].slice;
6033             if (att_img) {
6034                 const GLuint gl_tex = att_img->gl.tex[0];
6035                 SOKOL_ASSERT(gl_tex);
6036                 const GLenum gl_att = GL_COLOR_ATTACHMENT0 + i;
6037                 switch (att_img->cmn.type) {
6038                     case SG_IMAGETYPE_2D:
6039                         glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, GL_TEXTURE_2D, gl_tex, mip_level);
6040                         break;
6041                     case SG_IMAGETYPE_CUBE:
6042                         glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, _sg_gl_cubeface_target(slice), gl_tex, mip_level);
6043                         break;
6044                     default:
6045                         /* 3D- or array-texture */
6046                         #if !defined(SOKOL_GLES2)
6047                         if (!_sg.gl.gles2) {
6048                             glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_att, gl_tex, mip_level, slice);
6049                         }
6050                         #endif
6051                         break;
6052                 }
6053             }
6054         }
6055     }
6056 
6057     /* attach depth-stencil buffer to framebuffer */
6058     if (pass->gl.ds_att.image) {
6059         const GLuint gl_render_buffer = pass->gl.ds_att.image->gl.depth_render_buffer;
6060         SOKOL_ASSERT(gl_render_buffer);
6061         glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer);
6062         if (_sg_is_depth_stencil_format(pass->gl.ds_att.image->cmn.pixel_format)) {
6063             glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer);
6064         }
6065     }
6066 
6067     /* check if framebuffer is complete */
6068     if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
6069         SOKOL_LOG("Framebuffer completeness check failed!\n");
6070         return SG_RESOURCESTATE_FAILED;
6071     }
6072 
6073     /* setup color attachments for the framebuffer */
6074     #if !defined(SOKOL_GLES2)
6075     if (!_sg.gl.gles2) {
6076         GLenum att[SG_MAX_COLOR_ATTACHMENTS] = {
6077             GL_COLOR_ATTACHMENT0,
6078             GL_COLOR_ATTACHMENT1,
6079             GL_COLOR_ATTACHMENT2,
6080             GL_COLOR_ATTACHMENT3
6081         };
6082         glDrawBuffers(pass->cmn.num_color_atts, att);
6083     }
6084     #endif
6085 
6086     /* create MSAA resolve framebuffers if necessary */
6087     if (is_msaa) {
6088         for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
6089             _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[i];
6090             _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i];
6091             if (gl_att->image) {
6092                 SOKOL_ASSERT(0 == gl_att->gl_msaa_resolve_buffer);
6093                 glGenFramebuffers(1, &gl_att->gl_msaa_resolve_buffer);
6094                 glBindFramebuffer(GL_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer);
6095                 const GLuint gl_tex = gl_att->image->gl.tex[0];
6096                 SOKOL_ASSERT(gl_tex);
6097                 switch (gl_att->image->cmn.type) {
6098                     case SG_IMAGETYPE_2D:
6099                         glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
6100                             GL_TEXTURE_2D, gl_tex, cmn_att->mip_level);
6101                         break;
6102                     case SG_IMAGETYPE_CUBE:
6103                         glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
6104                             _sg_gl_cubeface_target(cmn_att->slice), gl_tex, cmn_att->mip_level);
6105                         break;
6106                     default:
6107                         #if !defined(SOKOL_GLES2)
6108                         if (!_sg.gl.gles2) {
6109                             glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, gl_tex, cmn_att->mip_level, cmn_att->slice);
6110                         }
6111                         #endif
6112                         break;
6113                 }
6114                 /* check if framebuffer is complete */
6115                 if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
6116                     SOKOL_LOG("Framebuffer completeness check failed (msaa resolve buffer)!\n");
6117                     return SG_RESOURCESTATE_FAILED;
6118                 }
6119                 /* setup color attachments for the framebuffer */
6120                 #if !defined(SOKOL_GLES2)
6121                 if (!_sg.gl.gles2) {
6122                     const GLenum gl_draw_bufs = GL_COLOR_ATTACHMENT0;
6123                     glDrawBuffers(1, &gl_draw_bufs);
6124                 }
6125                 #endif
6126             }
6127         }
6128     }
6129 
6130     /* restore original framebuffer binding */
6131     glBindFramebuffer(GL_FRAMEBUFFER, gl_orig_fb);
6132     _SG_GL_CHECK_ERROR();
6133     return SG_RESOURCESTATE_VALID;
6134 }
6135 
_sg_gl_destroy_pass(_sg_pass_t * pass)6136 _SOKOL_PRIVATE void _sg_gl_destroy_pass(_sg_pass_t* pass) {
6137     SOKOL_ASSERT(pass);
6138     _SG_GL_CHECK_ERROR();
6139     if (0 != pass->gl.fb) {
6140         glDeleteFramebuffers(1, &pass->gl.fb);
6141     }
6142     for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
6143         if (pass->gl.color_atts[i].gl_msaa_resolve_buffer) {
6144             glDeleteFramebuffers(1, &pass->gl.color_atts[i].gl_msaa_resolve_buffer);
6145         }
6146     }
6147     if (pass->gl.ds_att.gl_msaa_resolve_buffer) {
6148         glDeleteFramebuffers(1, &pass->gl.ds_att.gl_msaa_resolve_buffer);
6149     }
6150     _SG_GL_CHECK_ERROR();
6151 }
6152 
_sg_gl_pass_color_image(const _sg_pass_t * pass,int index)6153 _SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_color_image(const _sg_pass_t* pass, int index) {
6154     SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
6155     /* NOTE: may return null */
6156     return pass->gl.color_atts[index].image;
6157 }
6158 
_sg_gl_pass_ds_image(const _sg_pass_t * pass)6159 _SOKOL_PRIVATE _sg_image_t* _sg_gl_pass_ds_image(const _sg_pass_t* pass) {
6160     /* NOTE: may return null */
6161     SOKOL_ASSERT(pass);
6162     return pass->gl.ds_att.image;
6163 }
6164 
/* Begin a render pass. 'pass' may be null, which means the 'default pass'
   rendering into the current context's default framebuffer; otherwise the
   pass's offscreen framebuffer is bound. Sets viewport and scissor to the
   full pass size (w, h) and performs the clear actions requested in
   'action'. Clearing may require temporarily forcing write masks open in
   the GL state cache; when that happens, the cached pipeline is invalidated
   so the next sg_apply_pipeline() re-applies the full pipeline state. */
_SOKOL_PRIVATE void _sg_gl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    /* FIXME: what if a texture used as render target is still bound, should we
       unbind all currently bound textures in begin pass? */
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.gl.in_pass);
    _SG_GL_CHECK_ERROR();
    _sg.gl.in_pass = true;
    _sg.gl.cur_pass = pass; /* can be 0 */
    if (pass) {
        _sg.gl.cur_pass_id.id = pass->slot.id;
    }
    else {
        _sg.gl.cur_pass_id.id = SG_INVALID_ID;
    }
    _sg.gl.cur_pass_width = w;
    _sg.gl.cur_pass_height = h;

    /* number of color attachments (the default pass always has exactly one) */
    const int num_color_atts = pass ? pass->cmn.num_color_atts : 1;

    /* bind the render pass framebuffer */
    if (pass) {
        /* offscreen pass */
        SOKOL_ASSERT(pass->gl.fb);
        glBindFramebuffer(GL_FRAMEBUFFER, pass->gl.fb);
    }
    else {
        /* default pass */
        SOKOL_ASSERT(_sg.gl.cur_context);
        glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer);
    }
    glViewport(0, 0, w, h);
    glScissor(0, 0, w, h);

    /* clear color and depth-stencil attachments if needed */
    bool clear_color = false;
    for (int i = 0; i < num_color_atts; i++) {
        if (SG_ACTION_CLEAR == action->colors[i].action) {
            clear_color = true;
            break;
        }
    }
    const bool clear_depth = (action->depth.action == SG_ACTION_CLEAR);
    const bool clear_stencil = (action->stencil.action == SG_ACTION_CLEAR);

    /* glClear*() is affected by the current write masks, so force them open
       (through the state cache) for the attachments that will be cleared */
    bool need_pip_cache_flush = false;
    if (clear_color) {
        if (_sg.gl.cache.blend.color_write_mask != SG_COLORMASK_RGBA) {
            need_pip_cache_flush = true;
            _sg.gl.cache.blend.color_write_mask = SG_COLORMASK_RGBA;
            glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        }
    }
    if (clear_depth) {
        if (!_sg.gl.cache.ds.depth_write_enabled) {
            need_pip_cache_flush = true;
            _sg.gl.cache.ds.depth_write_enabled = true;
            glDepthMask(GL_TRUE);
        }
        if (_sg.gl.cache.ds.depth_compare_func != SG_COMPAREFUNC_ALWAYS) {
            need_pip_cache_flush = true;
            _sg.gl.cache.ds.depth_compare_func = SG_COMPAREFUNC_ALWAYS;
            glDepthFunc(GL_ALWAYS);
        }
    }
    if (clear_stencil) {
        if (_sg.gl.cache.ds.stencil_write_mask != 0xFF) {
            need_pip_cache_flush = true;
            _sg.gl.cache.ds.stencil_write_mask = 0xFF;
            glStencilMask(0xFF);
        }
    }
    if (need_pip_cache_flush) {
        /* we messed with the state cache directly, need to clear cached
           pipeline to force re-evaluation in next sg_apply_pipeline() */
        _sg.gl.cache.cur_pipeline = 0;
        _sg.gl.cache.cur_pipeline_id.id = SG_INVALID_ID;
    }
    /* offscreen passes clear per-attachment via glClearBuffer*() (MRT-aware),
       except on GLES2 which only supports the single glClear() path */
    bool use_mrt_clear = (0 != pass);
    #if defined(SOKOL_GLES2)
    use_mrt_clear = false;
    #else
    if (_sg.gl.gles2) {
        use_mrt_clear = false;
    }
    #endif
    if (!use_mrt_clear) {
        GLbitfield clear_mask = 0;
        if (clear_color) {
            clear_mask |= GL_COLOR_BUFFER_BIT;
            /* NOTE: only the first color attachment's clear value is used here */
            const float* c = action->colors[0].val;
            glClearColor(c[0], c[1], c[2], c[3]);
        }
        if (clear_depth) {
            clear_mask |= GL_DEPTH_BUFFER_BIT;
            /* glClearDepth takes double on desktop GL, glClearDepthf elsewhere */
            #ifdef SOKOL_GLCORE33
            glClearDepth(action->depth.val);
            #else
            glClearDepthf(action->depth.val);
            #endif
        }
        if (clear_stencil) {
            clear_mask |= GL_STENCIL_BUFFER_BIT;
            glClearStencil(action->stencil.val);
        }
        if (0 != clear_mask) {
            glClear(clear_mask);
        }
    }
    #if !defined SOKOL_GLES2
    else {
        SOKOL_ASSERT(pass);
        for (int i = 0; i < num_color_atts; i++) {
            if (action->colors[i].action == SG_ACTION_CLEAR) {
                glClearBufferfv(GL_COLOR, i, action->colors[i].val);
            }
        }
        if (pass->gl.ds_att.image) {
            if (clear_depth && clear_stencil) {
                glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.val, action->stencil.val);
            }
            else if (clear_depth) {
                glClearBufferfv(GL_DEPTH, 0, &action->depth.val);
            }
            else if (clear_stencil) {
                GLint val = (GLint) action->stencil.val;
                glClearBufferiv(GL_STENCIL, 0, &val);
            }
        }
    }
    #endif
    _SG_GL_CHECK_ERROR();
}
6298 
/* End the current render pass. If the pass was an offscreen MSAA pass
   (detected by the presence of a resolve framebuffer on the first color
   attachment), blit each multisampled color attachment into its resolve
   framebuffer so the attachment textures contain the resolved result.
   Finally rebinds the context's default framebuffer. */
_SOKOL_PRIVATE void _sg_gl_end_pass(void) {
    SOKOL_ASSERT(_sg.gl.in_pass);
    _SG_GL_CHECK_ERROR();

    /* if this was an offscreen pass, and MSAA rendering was used, need
       to resolve into the pass images */
    #if !defined(SOKOL_GLES2)
    if (!_sg.gl.gles2 && _sg.gl.cur_pass) {
        /* check if the pass object is still valid */
        const _sg_pass_t* pass = _sg.gl.cur_pass;
        SOKOL_ASSERT(pass->slot.id == _sg.gl.cur_pass_id.id);
        bool is_msaa = (0 != _sg.gl.cur_pass->gl.color_atts[0].gl_msaa_resolve_buffer);
        if (is_msaa) {
            SOKOL_ASSERT(pass->gl.fb);
            /* the pass framebuffer is the blit source... */
            glBindFramebuffer(GL_READ_FRAMEBUFFER, pass->gl.fb);
            SOKOL_ASSERT(pass->gl.color_atts[0].image);
            const int w = pass->gl.color_atts[0].image->cmn.width;
            const int h = pass->gl.color_atts[0].image->cmn.height;
            for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) {
                const _sg_gl_attachment_t* gl_att = &pass->gl.color_atts[att_index];
                if (gl_att->image) {
                    SOKOL_ASSERT(gl_att->gl_msaa_resolve_buffer);
                    /* ...and each per-attachment resolve framebuffer the target */
                    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, gl_att->gl_msaa_resolve_buffer);
                    glReadBuffer(GL_COLOR_ATTACHMENT0 + att_index);
                    glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
                }
                else {
                    /* color attachments are packed from index 0, stop at first gap */
                    break;
                }
            }
        }
    }
    #endif
    _sg.gl.cur_pass = 0;
    _sg.gl.cur_pass_id.id = SG_INVALID_ID;
    _sg.gl.cur_pass_width = 0;
    _sg.gl.cur_pass_height = 0;

    SOKOL_ASSERT(_sg.gl.cur_context);
    glBindFramebuffer(GL_FRAMEBUFFER, _sg.gl.cur_context->default_framebuffer);
    _sg.gl.in_pass = false;
    _SG_GL_CHECK_ERROR();
}
6342 
_sg_gl_apply_viewport(int x,int y,int w,int h,bool origin_top_left)6343 _SOKOL_PRIVATE void _sg_gl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
6344     SOKOL_ASSERT(_sg.gl.in_pass);
6345     y = origin_top_left ? (_sg.gl.cur_pass_height - (y+h)) : y;
6346     glViewport(x, y, w, h);
6347 }
6348 
_sg_gl_apply_scissor_rect(int x,int y,int w,int h,bool origin_top_left)6349 _SOKOL_PRIVATE void _sg_gl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
6350     SOKOL_ASSERT(_sg.gl.in_pass);
6351     y = origin_top_left ? (_sg.gl.cur_pass_height - (y+h)) : y;
6352     glScissor(x, y, w, h);
6353 }
6354 
/* Apply a pipeline's render state. All GL state changes are diffed against
   the global state cache (_sg.gl.cache) so that only values which actually
   changed result in GL calls. If the same pipeline is already current,
   nothing is done at all. */
_SOKOL_PRIVATE void _sg_gl_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
    _SG_GL_CHECK_ERROR();
    if ((_sg.gl.cache.cur_pipeline != pip) || (_sg.gl.cache.cur_pipeline_id.id != pip->slot.id)) {
        _sg.gl.cache.cur_pipeline = pip;
        _sg.gl.cache.cur_pipeline_id.id = pip->slot.id;
        /* primitive and index type are consumed later by _sg_gl_draw() */
        _sg.gl.cache.cur_primitive_type = _sg_gl_primitive_type(pip->gl.primitive_type);
        _sg.gl.cache.cur_index_type = _sg_gl_index_type(pip->cmn.index_type);

        /* update depth-stencil state */
        const sg_depth_stencil_state* new_ds = &pip->gl.depth_stencil;
        sg_depth_stencil_state* cache_ds = &_sg.gl.cache.ds;
        if (new_ds->depth_compare_func != cache_ds->depth_compare_func) {
            cache_ds->depth_compare_func = new_ds->depth_compare_func;
            glDepthFunc(_sg_gl_compare_func(new_ds->depth_compare_func));
        }
        if (new_ds->depth_write_enabled != cache_ds->depth_write_enabled) {
            cache_ds->depth_write_enabled = new_ds->depth_write_enabled;
            glDepthMask(new_ds->depth_write_enabled);
        }
        if (new_ds->stencil_enabled != cache_ds->stencil_enabled) {
            cache_ds->stencil_enabled = new_ds->stencil_enabled;
            if (new_ds->stencil_enabled) glEnable(GL_STENCIL_TEST);
            else glDisable(GL_STENCIL_TEST);
        }
        if (new_ds->stencil_write_mask != cache_ds->stencil_write_mask) {
            cache_ds->stencil_write_mask = new_ds->stencil_write_mask;
            glStencilMask(new_ds->stencil_write_mask);
        }
        /* per-face stencil state: i==0 is front face, i==1 is back face */
        for (int i = 0; i < 2; i++) {
            const sg_stencil_state* new_ss = (i==0)? &new_ds->stencil_front : &new_ds->stencil_back;
            sg_stencil_state* cache_ss = (i==0)? &cache_ds->stencil_front : &cache_ds->stencil_back;
            GLenum gl_face = (i==0)? GL_FRONT : GL_BACK;
            if ((new_ss->compare_func != cache_ss->compare_func) ||
                (new_ds->stencil_read_mask != cache_ds->stencil_read_mask) ||
                (new_ds->stencil_ref != cache_ds->stencil_ref))
            {
                cache_ss->compare_func = new_ss->compare_func;
                glStencilFuncSeparate(gl_face,
                    _sg_gl_compare_func(new_ss->compare_func),
                    new_ds->stencil_ref,
                    new_ds->stencil_read_mask);
            }
            if ((new_ss->fail_op != cache_ss->fail_op) ||
                (new_ss->depth_fail_op != cache_ss->depth_fail_op) ||
                (new_ss->pass_op != cache_ss->pass_op))
            {
                cache_ss->fail_op = new_ss->fail_op;
                cache_ss->depth_fail_op = new_ss->depth_fail_op;
                cache_ss->pass_op = new_ss->pass_op;
                glStencilOpSeparate(gl_face,
                    _sg_gl_stencil_op(new_ss->fail_op),
                    _sg_gl_stencil_op(new_ss->depth_fail_op),
                    _sg_gl_stencil_op(new_ss->pass_op));
            }
        }
        /* read_mask/ref are shared between both faces, so update the cache
           only after both loop iterations have compared against them */
        cache_ds->stencil_read_mask = new_ds->stencil_read_mask;
        cache_ds->stencil_ref = new_ds->stencil_ref;

        /* update blend state */
        const sg_blend_state* new_b = &pip->gl.blend;
        sg_blend_state* cache_b = &_sg.gl.cache.blend;
        if (new_b->enabled != cache_b->enabled) {
            cache_b->enabled = new_b->enabled;
            if (new_b->enabled) glEnable(GL_BLEND);
            else glDisable(GL_BLEND);
        }
        if ((new_b->src_factor_rgb != cache_b->src_factor_rgb) ||
            (new_b->dst_factor_rgb != cache_b->dst_factor_rgb) ||
            (new_b->src_factor_alpha != cache_b->src_factor_alpha) ||
            (new_b->dst_factor_alpha != cache_b->dst_factor_alpha))
        {
            cache_b->src_factor_rgb = new_b->src_factor_rgb;
            cache_b->dst_factor_rgb = new_b->dst_factor_rgb;
            cache_b->src_factor_alpha = new_b->src_factor_alpha;
            cache_b->dst_factor_alpha = new_b->dst_factor_alpha;
            glBlendFuncSeparate(_sg_gl_blend_factor(new_b->src_factor_rgb),
                _sg_gl_blend_factor(new_b->dst_factor_rgb),
                _sg_gl_blend_factor(new_b->src_factor_alpha),
                _sg_gl_blend_factor(new_b->dst_factor_alpha));
        }
        if ((new_b->op_rgb != cache_b->op_rgb) || (new_b->op_alpha != cache_b->op_alpha)) {
            cache_b->op_rgb = new_b->op_rgb;
            cache_b->op_alpha = new_b->op_alpha;
            glBlendEquationSeparate(_sg_gl_blend_op(new_b->op_rgb), _sg_gl_blend_op(new_b->op_alpha));
        }
        if (new_b->color_write_mask != cache_b->color_write_mask) {
            cache_b->color_write_mask = new_b->color_write_mask;
            glColorMask((new_b->color_write_mask & SG_COLORMASK_R) != 0,
                        (new_b->color_write_mask & SG_COLORMASK_G) != 0,
                        (new_b->color_write_mask & SG_COLORMASK_B) != 0,
                        (new_b->color_write_mask & SG_COLORMASK_A) != 0);
        }
        /* float comparison with epsilon, since blend color is user-provided data */
        if (!_sg_fequal(new_b->blend_color[0], cache_b->blend_color[0], 0.0001f) ||
            !_sg_fequal(new_b->blend_color[1], cache_b->blend_color[1], 0.0001f) ||
            !_sg_fequal(new_b->blend_color[2], cache_b->blend_color[2], 0.0001f) ||
            !_sg_fequal(new_b->blend_color[3], cache_b->blend_color[3], 0.0001f))
        {
            const float* bc = new_b->blend_color;
            for (int i=0; i<4; i++) {
                cache_b->blend_color[i] = bc[i];
            }
            glBlendColor(bc[0], bc[1], bc[2], bc[3]);
        }

        /* update rasterizer state */
        const sg_rasterizer_state* new_r = &pip->gl.rast;
        sg_rasterizer_state* cache_r = &_sg.gl.cache.rast;
        if (new_r->cull_mode != cache_r->cull_mode) {
            cache_r->cull_mode = new_r->cull_mode;
            if (SG_CULLMODE_NONE == new_r->cull_mode) {
                glDisable(GL_CULL_FACE);
            }
            else {
                glEnable(GL_CULL_FACE);
                GLenum gl_mode = (SG_CULLMODE_FRONT == new_r->cull_mode) ? GL_FRONT : GL_BACK;
                glCullFace(gl_mode);
            }
        }
        if (new_r->face_winding != cache_r->face_winding) {
            cache_r->face_winding = new_r->face_winding;
            GLenum gl_winding = (SG_FACEWINDING_CW == new_r->face_winding) ? GL_CW : GL_CCW;
            glFrontFace(gl_winding);
        }
        if (new_r->alpha_to_coverage_enabled != cache_r->alpha_to_coverage_enabled) {
            cache_r->alpha_to_coverage_enabled = new_r->alpha_to_coverage_enabled;
            if (new_r->alpha_to_coverage_enabled) glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
            else glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
        }
        /* GL_MULTISAMPLE toggling only exists on desktop GL */
        #ifdef SOKOL_GLCORE33
        if (new_r->sample_count != cache_r->sample_count) {
            cache_r->sample_count = new_r->sample_count;
            if (new_r->sample_count > 1) glEnable(GL_MULTISAMPLE);
            else glDisable(GL_MULTISAMPLE);
        }
        #endif
        if (!_sg_fequal(new_r->depth_bias, cache_r->depth_bias, 0.000001f) ||
            !_sg_fequal(new_r->depth_bias_slope_scale, cache_r->depth_bias_slope_scale, 0.000001f))
        {
            /* according to ANGLE's D3D11 backend:
                D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor
                D3D11 DepthBias ==> GL polygonOffsetUnits
                DepthBiasClamp has no meaning on GL
            */
            cache_r->depth_bias = new_r->depth_bias;
            cache_r->depth_bias_slope_scale = new_r->depth_bias_slope_scale;
            glPolygonOffset(new_r->depth_bias_slope_scale, new_r->depth_bias);
            /* polygon offset is only enabled when either bias value is non-zero */
            bool po_enabled = true;
            if (_sg_fequal(new_r->depth_bias, 0.0f, 0.000001f) &&
                _sg_fequal(new_r->depth_bias_slope_scale, 0.0f, 0.000001f))
            {
                po_enabled = false;
            }
            if (po_enabled != _sg.gl.cache.polygon_offset_enabled) {
                _sg.gl.cache.polygon_offset_enabled = po_enabled;
                if (po_enabled) glEnable(GL_POLYGON_OFFSET_FILL);
                else glDisable(GL_POLYGON_OFFSET_FILL);
            }
        }

        /* bind shader program */
        if (pip->shader->gl.prog != _sg.gl.cache.prog) {
            _sg.gl.cache.prog = pip->shader->gl.prog;
            glUseProgram(pip->shader->gl.prog);
        }
    }
}
6523 
_sg_gl_apply_bindings(_sg_pipeline_t * pip,_sg_buffer_t ** vbs,const int * vb_offsets,int num_vbs,_sg_buffer_t * ib,int ib_offset,_sg_image_t ** vs_imgs,int num_vs_imgs,_sg_image_t ** fs_imgs,int num_fs_imgs)6524 _SOKOL_PRIVATE void _sg_gl_apply_bindings(
6525     _sg_pipeline_t* pip,
6526     _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
6527     _sg_buffer_t* ib, int ib_offset,
6528     _sg_image_t** vs_imgs, int num_vs_imgs,
6529     _sg_image_t** fs_imgs, int num_fs_imgs)
6530 {
6531     SOKOL_ASSERT(pip);
6532     _SOKOL_UNUSED(num_fs_imgs);
6533     _SOKOL_UNUSED(num_vs_imgs);
6534     _SOKOL_UNUSED(num_vbs);
6535     _SG_GL_CHECK_ERROR();
6536 
6537     /* bind textures */
6538     _SG_GL_CHECK_ERROR();
6539     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
6540         const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index];
6541         const _sg_gl_shader_stage_t* gl_stage = &pip->shader->gl.stage[stage_index];
6542         _sg_image_t** imgs = (stage_index == SG_SHADERSTAGE_VS)? vs_imgs : fs_imgs;
6543         SOKOL_ASSERT(((stage_index == SG_SHADERSTAGE_VS)? num_vs_imgs : num_fs_imgs) == stage->num_images);
6544         for (int img_index = 0; img_index < stage->num_images; img_index++) {
6545             const _sg_gl_shader_image_t* gl_shd_img = &gl_stage->images[img_index];
6546             if (gl_shd_img->gl_tex_slot != -1) {
6547                 _sg_image_t* img = imgs[img_index];
6548                 const GLuint gl_tex = img->gl.tex[img->cmn.active_slot];
6549                 SOKOL_ASSERT(img && img->gl.target);
6550                 SOKOL_ASSERT((gl_shd_img->gl_tex_slot != -1) && gl_tex);
6551                 _sg_gl_bind_texture(gl_shd_img->gl_tex_slot, img->gl.target, gl_tex);
6552             }
6553         }
6554     }
6555     _SG_GL_CHECK_ERROR();
6556 
6557     /* index buffer (can be 0) */
6558     const GLuint gl_ib = ib ? ib->gl.buf[ib->cmn.active_slot] : 0;
6559     _sg_gl_bind_buffer(GL_ELEMENT_ARRAY_BUFFER, gl_ib);
6560     _sg.gl.cache.cur_ib_offset = ib_offset;
6561 
6562     /* vertex attributes */
6563     for (uint32_t attr_index = 0; attr_index < _sg.limits.max_vertex_attrs; attr_index++) {
6564         _sg_gl_attr_t* attr = &pip->gl.attrs[attr_index];
6565         _sg_gl_cache_attr_t* cache_attr = &_sg.gl.cache.attrs[attr_index];
6566         bool cache_attr_dirty = false;
6567         int vb_offset = 0;
6568         GLuint gl_vb = 0;
6569         if (attr->vb_index >= 0) {
6570             /* attribute is enabled */
6571             SOKOL_ASSERT(attr->vb_index < num_vbs);
6572             _sg_buffer_t* vb = vbs[attr->vb_index];
6573             SOKOL_ASSERT(vb);
6574             gl_vb = vb->gl.buf[vb->cmn.active_slot];
6575             vb_offset = vb_offsets[attr->vb_index] + attr->offset;
6576             if ((gl_vb != cache_attr->gl_vbuf) ||
6577                 (attr->size != cache_attr->gl_attr.size) ||
6578                 (attr->type != cache_attr->gl_attr.type) ||
6579                 (attr->normalized != cache_attr->gl_attr.normalized) ||
6580                 (attr->stride != cache_attr->gl_attr.stride) ||
6581                 (vb_offset != cache_attr->gl_attr.offset) ||
6582                 (cache_attr->gl_attr.divisor != attr->divisor))
6583             {
6584                 _sg_gl_bind_buffer(GL_ARRAY_BUFFER, gl_vb);
6585                 glVertexAttribPointer(attr_index, attr->size, attr->type,
6586                     attr->normalized, attr->stride,
6587                     (const GLvoid*)(GLintptr)vb_offset);
6588                 #ifdef SOKOL_INSTANCING_ENABLED
6589                     if (_sg.features.instancing) {
6590                         glVertexAttribDivisor(attr_index, attr->divisor);
6591                     }
6592                 #endif
6593                 cache_attr_dirty = true;
6594             }
6595             if (cache_attr->gl_attr.vb_index == -1) {
6596                 glEnableVertexAttribArray(attr_index);
6597                 cache_attr_dirty = true;
6598             }
6599         }
6600         else {
6601             /* attribute is disabled */
6602             if (cache_attr->gl_attr.vb_index != -1) {
6603                 glDisableVertexAttribArray(attr_index);
6604                 cache_attr_dirty = true;
6605             }
6606         }
6607         if (cache_attr_dirty) {
6608             cache_attr->gl_attr = *attr;
6609             cache_attr->gl_attr.offset = vb_offset;
6610             cache_attr->gl_vbuf = gl_vb;
6611         }
6612     }
6613     _SG_GL_CHECK_ERROR();
6614 }
6615 
_sg_gl_apply_uniforms(sg_shader_stage stage_index,int ub_index,const void * data,int num_bytes)6616 _SOKOL_PRIVATE void _sg_gl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
6617     _SOKOL_UNUSED(num_bytes);
6618     SOKOL_ASSERT(data && (num_bytes > 0));
6619     SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
6620     SOKOL_ASSERT(_sg.gl.cache.cur_pipeline);
6621     SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->slot.id == _sg.gl.cache.cur_pipeline_id.id);
6622     SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->slot.id == _sg.gl.cache.cur_pipeline->cmn.shader_id.id);
6623     SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks > ub_index);
6624     SOKOL_ASSERT(_sg.gl.cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size == num_bytes);
6625     const _sg_gl_shader_stage_t* gl_stage = &_sg.gl.cache.cur_pipeline->shader->gl.stage[stage_index];
6626     const _sg_gl_uniform_block_t* gl_ub = &gl_stage->uniform_blocks[ub_index];
6627     for (int u_index = 0; u_index < gl_ub->num_uniforms; u_index++) {
6628         const _sg_gl_uniform_t* u = &gl_ub->uniforms[u_index];
6629         SOKOL_ASSERT(u->type != SG_UNIFORMTYPE_INVALID);
6630         if (u->gl_loc == -1) {
6631             continue;
6632         }
6633         GLfloat* ptr = (GLfloat*) (((uint8_t*)data) + u->offset);
6634         switch (u->type) {
6635             case SG_UNIFORMTYPE_INVALID:
6636                 break;
6637             case SG_UNIFORMTYPE_FLOAT:
6638                 glUniform1fv(u->gl_loc, u->count, ptr);
6639                 break;
6640             case SG_UNIFORMTYPE_FLOAT2:
6641                 glUniform2fv(u->gl_loc, u->count, ptr);
6642                 break;
6643             case SG_UNIFORMTYPE_FLOAT3:
6644                 glUniform3fv(u->gl_loc, u->count, ptr);
6645                 break;
6646             case SG_UNIFORMTYPE_FLOAT4:
6647                 glUniform4fv(u->gl_loc, u->count, ptr);
6648                 break;
6649             case SG_UNIFORMTYPE_MAT4:
6650                 glUniformMatrix4fv(u->gl_loc, u->count, GL_FALSE, ptr);
6651                 break;
6652             default:
6653                 SOKOL_UNREACHABLE;
6654                 break;
6655         }
6656     }
6657 }
6658 
_sg_gl_draw(int base_element,int num_elements,int num_instances)6659 _SOKOL_PRIVATE void _sg_gl_draw(int base_element, int num_elements, int num_instances) {
6660     const GLenum i_type = _sg.gl.cache.cur_index_type;
6661     const GLenum p_type = _sg.gl.cache.cur_primitive_type;
6662     if (0 != i_type) {
6663         /* indexed rendering */
6664         const int i_size = (i_type == GL_UNSIGNED_SHORT) ? 2 : 4;
6665         const int ib_offset = _sg.gl.cache.cur_ib_offset;
6666         const GLvoid* indices = (const GLvoid*)(GLintptr)(base_element*i_size+ib_offset);
6667         if (num_instances == 1) {
6668             glDrawElements(p_type, num_elements, i_type, indices);
6669         }
6670         else {
6671             if (_sg.features.instancing) {
6672                 glDrawElementsInstanced(p_type, num_elements, i_type, indices, num_instances);
6673             }
6674         }
6675     }
6676     else {
6677         /* non-indexed rendering */
6678         if (num_instances == 1) {
6679             glDrawArrays(p_type, base_element, num_elements);
6680         }
6681         else {
6682             if (_sg.features.instancing) {
6683                 glDrawArraysInstanced(p_type, base_element, num_elements, num_instances);
6684             }
6685         }
6686     }
6687 }
6688 
/* per-frame cleanup for sg_commit(); must not be called inside a render pass;
   "soft"-clears the GL state cache's buffer and texture bindings (only
   bindings that are actually set are unbound on the GL side) */
_SOKOL_PRIVATE void _sg_gl_commit(void) {
    SOKOL_ASSERT(!_sg.gl.in_pass);
    /* "soft" clear bindings (only those that are actually bound) */
    _sg_gl_clear_buffer_bindings(false);
    _sg_gl_clear_texture_bindings(false);
}
6695 
_sg_gl_update_buffer(_sg_buffer_t * buf,const void * data_ptr,uint32_t data_size)6696 _SOKOL_PRIVATE void _sg_gl_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) {
6697     SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
6698     /* only one update per buffer per frame allowed */
6699     if (++buf->cmn.active_slot >= buf->cmn.num_slots) {
6700         buf->cmn.active_slot = 0;
6701     }
6702     GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type);
6703     SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES);
6704     GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot];
6705     SOKOL_ASSERT(gl_buf);
6706     _SG_GL_CHECK_ERROR();
6707     _sg_gl_store_buffer_binding(gl_tgt);
6708     _sg_gl_bind_buffer(gl_tgt, gl_buf);
6709     glBufferSubData(gl_tgt, 0, data_size, data_ptr);
6710     _sg_gl_restore_buffer_binding(gl_tgt);
6711     _SG_GL_CHECK_ERROR();
6712 }
6713 
_sg_gl_append_buffer(_sg_buffer_t * buf,const void * data_ptr,uint32_t data_size,bool new_frame)6714 _SOKOL_PRIVATE uint32_t _sg_gl_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) {
6715     SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
6716     if (new_frame) {
6717         if (++buf->cmn.active_slot >= buf->cmn.num_slots) {
6718             buf->cmn.active_slot = 0;
6719         }
6720     }
6721     GLenum gl_tgt = _sg_gl_buffer_target(buf->cmn.type);
6722     SOKOL_ASSERT(buf->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES);
6723     GLuint gl_buf = buf->gl.buf[buf->cmn.active_slot];
6724     SOKOL_ASSERT(gl_buf);
6725     _SG_GL_CHECK_ERROR();
6726     _sg_gl_store_buffer_binding(gl_tgt);
6727     _sg_gl_bind_buffer(gl_tgt, gl_buf);
6728     glBufferSubData(gl_tgt, buf->cmn.append_pos, data_size, data_ptr);
6729     _sg_gl_restore_buffer_binding(gl_tgt);
6730     _SG_GL_CHECK_ERROR();
6731     /* NOTE: this is a requirement from WebGPU, but we want identical behaviour across all backend */
6732     return _sg_roundup(data_size, 4);
6733 }
6734 
/* update a dynamic/stream image with new pixel data; rotates to the next
   slot of the multi-buffered texture, then uploads all faces and mipmap
   levels via glTexSubImage2D/3D; the texture binding on slot 0 is saved
   and restored around the upload */
_SOKOL_PRIVATE void _sg_gl_update_image(_sg_image_t* img, const sg_image_content* data) {
    SOKOL_ASSERT(img && data);
    /* only one update per image per frame allowed */
    if (++img->cmn.active_slot >= img->cmn.num_slots) {
        img->cmn.active_slot = 0;
    }
    SOKOL_ASSERT(img->cmn.active_slot < SG_NUM_INFLIGHT_FRAMES);
    SOKOL_ASSERT(0 != img->gl.tex[img->cmn.active_slot]);
    /* temporarily bind the target texture on texture slot 0, restored below */
    _sg_gl_store_texture_binding(0);
    _sg_gl_bind_texture(0, img->gl.target, img->gl.tex[img->cmn.active_slot]);
    const GLenum gl_img_format = _sg_gl_teximage_format(img->cmn.pixel_format);
    const GLenum gl_img_type = _sg_gl_teximage_type(img->cmn.pixel_format);
    /* cubemaps have 6 faces, all other image types have 1 */
    const int num_faces = img->cmn.type == SG_IMAGETYPE_CUBE ? 6 : 1;
    const int num_mips = img->cmn.num_mipmaps;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int mip_index = 0; mip_index < num_mips; mip_index++) {
            GLenum gl_img_target = img->gl.target;
            if (SG_IMAGETYPE_CUBE == img->cmn.type) {
                gl_img_target = _sg_gl_cubeface_target(face_index);
            }
            const GLvoid* data_ptr = data->subimage[face_index][mip_index].ptr;
            /* mip dimensions halve per level, clamped to a minimum of 1 */
            int mip_width = img->cmn.width >> mip_index;
            if (mip_width == 0) {
                mip_width = 1;
            }
            int mip_height = img->cmn.height >> mip_index;
            if (mip_height == 0) {
                mip_height = 1;
            }
            if ((SG_IMAGETYPE_2D == img->cmn.type) || (SG_IMAGETYPE_CUBE == img->cmn.type)) {
                glTexSubImage2D(gl_img_target, mip_index,
                    0, 0,
                    mip_width, mip_height,
                    gl_img_format, gl_img_type,
                    data_ptr);
            }
            #if !defined(SOKOL_GLES2)
            /* 3D and array textures need glTexSubImage3D, which is not available on GLES2 */
            else if (!_sg.gl.gles2 && ((SG_IMAGETYPE_3D == img->cmn.type) || (SG_IMAGETYPE_ARRAY == img->cmn.type))) {
                int mip_depth = img->cmn.depth >> mip_index;
                if (mip_depth == 0) {
                    mip_depth = 1;
                }
                glTexSubImage3D(gl_img_target, mip_index,
                    0, 0, 0,
                    mip_width, mip_height, mip_depth,
                    gl_img_format, gl_img_type,
                    data_ptr);

            }
            #endif
        }
    }
    _sg_gl_restore_texture_binding(0);
}
6789 
6790 /*== D3D11 BACKEND IMPLEMENTATION ============================================*/
6791 #elif defined(SOKOL_D3D11)
6792 
6793 /*-- enum translation functions ----------------------------------------------*/
_sg_d3d11_usage(sg_usage usg)6794 _SOKOL_PRIVATE D3D11_USAGE _sg_d3d11_usage(sg_usage usg) {
6795     switch (usg) {
6796         case SG_USAGE_IMMUTABLE:
6797             return D3D11_USAGE_IMMUTABLE;
6798         case SG_USAGE_DYNAMIC:
6799         case SG_USAGE_STREAM:
6800             return D3D11_USAGE_DYNAMIC;
6801         default:
6802             SOKOL_UNREACHABLE;
6803             return (D3D11_USAGE) 0;
6804     }
6805 }
6806 
_sg_d3d11_cpu_access_flags(sg_usage usg)6807 _SOKOL_PRIVATE UINT _sg_d3d11_cpu_access_flags(sg_usage usg) {
6808     switch (usg) {
6809         case SG_USAGE_IMMUTABLE:
6810             return 0;
6811         case SG_USAGE_DYNAMIC:
6812         case SG_USAGE_STREAM:
6813             return D3D11_CPU_ACCESS_WRITE;
6814         default:
6815             SOKOL_UNREACHABLE;
6816             return 0;
6817     }
6818 }
6819 
_sg_d3d11_pixel_format(sg_pixel_format fmt)6820 _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_pixel_format(sg_pixel_format fmt) {
6821     switch (fmt) {
6822         case SG_PIXELFORMAT_R8:             return DXGI_FORMAT_R8_UNORM;
6823         case SG_PIXELFORMAT_R8SN:           return DXGI_FORMAT_R8_SNORM;
6824         case SG_PIXELFORMAT_R8UI:           return DXGI_FORMAT_R8_UINT;
6825         case SG_PIXELFORMAT_R8SI:           return DXGI_FORMAT_R8_SINT;
6826         case SG_PIXELFORMAT_R16:            return DXGI_FORMAT_R16_UNORM;
6827         case SG_PIXELFORMAT_R16SN:          return DXGI_FORMAT_R16_SNORM;
6828         case SG_PIXELFORMAT_R16UI:          return DXGI_FORMAT_R16_UINT;
6829         case SG_PIXELFORMAT_R16SI:          return DXGI_FORMAT_R16_SINT;
6830         case SG_PIXELFORMAT_R16F:           return DXGI_FORMAT_R16_FLOAT;
6831         case SG_PIXELFORMAT_RG8:            return DXGI_FORMAT_R8G8_UNORM;
6832         case SG_PIXELFORMAT_RG8SN:          return DXGI_FORMAT_R8G8_SNORM;
6833         case SG_PIXELFORMAT_RG8UI:          return DXGI_FORMAT_R8G8_UINT;
6834         case SG_PIXELFORMAT_RG8SI:          return DXGI_FORMAT_R8G8_SINT;
6835         case SG_PIXELFORMAT_R32UI:          return DXGI_FORMAT_R32_UINT;
6836         case SG_PIXELFORMAT_R32SI:          return DXGI_FORMAT_R32_SINT;
6837         case SG_PIXELFORMAT_R32F:           return DXGI_FORMAT_R32_FLOAT;
6838         case SG_PIXELFORMAT_RG16:           return DXGI_FORMAT_R16G16_UNORM;
6839         case SG_PIXELFORMAT_RG16SN:         return DXGI_FORMAT_R16G16_SNORM;
6840         case SG_PIXELFORMAT_RG16UI:         return DXGI_FORMAT_R16G16_UINT;
6841         case SG_PIXELFORMAT_RG16SI:         return DXGI_FORMAT_R16G16_SINT;
6842         case SG_PIXELFORMAT_RG16F:          return DXGI_FORMAT_R16G16_FLOAT;
6843         case SG_PIXELFORMAT_RGBA8:          return DXGI_FORMAT_R8G8B8A8_UNORM;
6844         case SG_PIXELFORMAT_RGBA8SN:        return DXGI_FORMAT_R8G8B8A8_SNORM;
6845         case SG_PIXELFORMAT_RGBA8UI:        return DXGI_FORMAT_R8G8B8A8_UINT;
6846         case SG_PIXELFORMAT_RGBA8SI:        return DXGI_FORMAT_R8G8B8A8_SINT;
6847         case SG_PIXELFORMAT_BGRA8:          return DXGI_FORMAT_B8G8R8A8_UNORM;
6848         case SG_PIXELFORMAT_RGB10A2:        return DXGI_FORMAT_R10G10B10A2_UNORM;
6849         case SG_PIXELFORMAT_RG11B10F:       return DXGI_FORMAT_R11G11B10_FLOAT;
6850         case SG_PIXELFORMAT_RG32UI:         return DXGI_FORMAT_R32G32_UINT;
6851         case SG_PIXELFORMAT_RG32SI:         return DXGI_FORMAT_R32G32_SINT;
6852         case SG_PIXELFORMAT_RG32F:          return DXGI_FORMAT_R32G32_FLOAT;
6853         case SG_PIXELFORMAT_RGBA16:         return DXGI_FORMAT_R16G16B16A16_UNORM;
6854         case SG_PIXELFORMAT_RGBA16SN:       return DXGI_FORMAT_R16G16B16A16_SNORM;
6855         case SG_PIXELFORMAT_RGBA16UI:       return DXGI_FORMAT_R16G16B16A16_UINT;
6856         case SG_PIXELFORMAT_RGBA16SI:       return DXGI_FORMAT_R16G16B16A16_SINT;
6857         case SG_PIXELFORMAT_RGBA16F:        return DXGI_FORMAT_R16G16B16A16_FLOAT;
6858         case SG_PIXELFORMAT_RGBA32UI:       return DXGI_FORMAT_R32G32B32A32_UINT;
6859         case SG_PIXELFORMAT_RGBA32SI:       return DXGI_FORMAT_R32G32B32A32_SINT;
6860         case SG_PIXELFORMAT_RGBA32F:        return DXGI_FORMAT_R32G32B32A32_FLOAT;
6861         case SG_PIXELFORMAT_DEPTH:          return DXGI_FORMAT_D32_FLOAT;
6862         case SG_PIXELFORMAT_DEPTH_STENCIL:  return DXGI_FORMAT_D24_UNORM_S8_UINT;
6863         case SG_PIXELFORMAT_BC1_RGBA:       return DXGI_FORMAT_BC1_UNORM;
6864         case SG_PIXELFORMAT_BC2_RGBA:       return DXGI_FORMAT_BC2_UNORM;
6865         case SG_PIXELFORMAT_BC3_RGBA:       return DXGI_FORMAT_BC3_UNORM;
6866         case SG_PIXELFORMAT_BC4_R:          return DXGI_FORMAT_BC4_UNORM;
6867         case SG_PIXELFORMAT_BC4_RSN:        return DXGI_FORMAT_BC4_SNORM;
6868         case SG_PIXELFORMAT_BC5_RG:         return DXGI_FORMAT_BC5_UNORM;
6869         case SG_PIXELFORMAT_BC5_RGSN:       return DXGI_FORMAT_BC5_SNORM;
6870         case SG_PIXELFORMAT_BC6H_RGBF:      return DXGI_FORMAT_BC6H_SF16;
6871         case SG_PIXELFORMAT_BC6H_RGBUF:     return DXGI_FORMAT_BC6H_UF16;
6872         case SG_PIXELFORMAT_BC7_RGBA:       return DXGI_FORMAT_BC7_UNORM;
6873         default:                            return DXGI_FORMAT_UNKNOWN;
6874     };
6875 }
6876 
_sg_d3d11_primitive_topology(sg_primitive_type prim_type)6877 _SOKOL_PRIVATE D3D11_PRIMITIVE_TOPOLOGY _sg_d3d11_primitive_topology(sg_primitive_type prim_type) {
6878     switch (prim_type) {
6879         case SG_PRIMITIVETYPE_POINTS:           return D3D11_PRIMITIVE_TOPOLOGY_POINTLIST;
6880         case SG_PRIMITIVETYPE_LINES:            return D3D11_PRIMITIVE_TOPOLOGY_LINELIST;
6881         case SG_PRIMITIVETYPE_LINE_STRIP:       return D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP;
6882         case SG_PRIMITIVETYPE_TRIANGLES:        return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
6883         case SG_PRIMITIVETYPE_TRIANGLE_STRIP:   return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
6884         default: SOKOL_UNREACHABLE; return (D3D11_PRIMITIVE_TOPOLOGY) 0;
6885     }
6886 }
6887 
_sg_d3d11_index_format(sg_index_type index_type)6888 _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_index_format(sg_index_type index_type) {
6889     switch (index_type) {
6890         case SG_INDEXTYPE_NONE:     return DXGI_FORMAT_UNKNOWN;
6891         case SG_INDEXTYPE_UINT16:   return DXGI_FORMAT_R16_UINT;
6892         case SG_INDEXTYPE_UINT32:   return DXGI_FORMAT_R32_UINT;
6893         default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0;
6894     }
6895 }
6896 
_sg_d3d11_filter(sg_filter min_f,sg_filter mag_f,uint32_t max_anisotropy)6897 _SOKOL_PRIVATE D3D11_FILTER _sg_d3d11_filter(sg_filter min_f, sg_filter mag_f, uint32_t max_anisotropy) {
6898     if (max_anisotropy > 1) {
6899         return D3D11_FILTER_ANISOTROPIC;
6900     }
6901     else if (mag_f == SG_FILTER_NEAREST) {
6902         switch (min_f) {
6903             case SG_FILTER_NEAREST:
6904             case SG_FILTER_NEAREST_MIPMAP_NEAREST:
6905                 return D3D11_FILTER_MIN_MAG_MIP_POINT;
6906             case SG_FILTER_LINEAR:
6907             case SG_FILTER_LINEAR_MIPMAP_NEAREST:
6908                 return D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT;
6909             case SG_FILTER_NEAREST_MIPMAP_LINEAR:
6910                 return D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR;
6911             case SG_FILTER_LINEAR_MIPMAP_LINEAR:
6912                 return D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR;
6913             default:
6914                 SOKOL_UNREACHABLE; break;
6915         }
6916     }
6917     else if (mag_f == SG_FILTER_LINEAR) {
6918         switch (min_f) {
6919             case SG_FILTER_NEAREST:
6920             case SG_FILTER_NEAREST_MIPMAP_NEAREST:
6921                 return D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT;
6922             case SG_FILTER_LINEAR:
6923             case SG_FILTER_LINEAR_MIPMAP_NEAREST:
6924                 return D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT;
6925             case SG_FILTER_NEAREST_MIPMAP_LINEAR:
6926                 return D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR;
6927             case SG_FILTER_LINEAR_MIPMAP_LINEAR:
6928                 return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
6929             default:
6930                 SOKOL_UNREACHABLE; break;
6931         }
6932     }
6933     /* invalid value for mag filter */
6934     SOKOL_UNREACHABLE;
6935     return D3D11_FILTER_MIN_MAG_MIP_POINT;
6936 }
6937 
_sg_d3d11_address_mode(sg_wrap m)6938 _SOKOL_PRIVATE D3D11_TEXTURE_ADDRESS_MODE _sg_d3d11_address_mode(sg_wrap m) {
6939     switch (m) {
6940         case SG_WRAP_REPEAT:            return D3D11_TEXTURE_ADDRESS_WRAP;
6941         case SG_WRAP_CLAMP_TO_EDGE:     return D3D11_TEXTURE_ADDRESS_CLAMP;
6942         case SG_WRAP_CLAMP_TO_BORDER:   return D3D11_TEXTURE_ADDRESS_BORDER;
6943         case SG_WRAP_MIRRORED_REPEAT:   return D3D11_TEXTURE_ADDRESS_MIRROR;
6944         default: SOKOL_UNREACHABLE; return (D3D11_TEXTURE_ADDRESS_MODE) 0;
6945     }
6946 }
6947 
_sg_d3d11_vertex_format(sg_vertex_format fmt)6948 _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_vertex_format(sg_vertex_format fmt) {
6949     switch (fmt) {
6950         case SG_VERTEXFORMAT_FLOAT:     return DXGI_FORMAT_R32_FLOAT;
6951         case SG_VERTEXFORMAT_FLOAT2:    return DXGI_FORMAT_R32G32_FLOAT;
6952         case SG_VERTEXFORMAT_FLOAT3:    return DXGI_FORMAT_R32G32B32_FLOAT;
6953         case SG_VERTEXFORMAT_FLOAT4:    return DXGI_FORMAT_R32G32B32A32_FLOAT;
6954         case SG_VERTEXFORMAT_BYTE4:     return DXGI_FORMAT_R8G8B8A8_SINT;
6955         case SG_VERTEXFORMAT_BYTE4N:    return DXGI_FORMAT_R8G8B8A8_SNORM;
6956         case SG_VERTEXFORMAT_UBYTE4:    return DXGI_FORMAT_R8G8B8A8_UINT;
6957         case SG_VERTEXFORMAT_UBYTE4N:   return DXGI_FORMAT_R8G8B8A8_UNORM;
6958         case SG_VERTEXFORMAT_SHORT2:    return DXGI_FORMAT_R16G16_SINT;
6959         case SG_VERTEXFORMAT_SHORT2N:   return DXGI_FORMAT_R16G16_SNORM;
6960         case SG_VERTEXFORMAT_USHORT2N:  return DXGI_FORMAT_R16G16_UNORM;
6961         case SG_VERTEXFORMAT_SHORT4:    return DXGI_FORMAT_R16G16B16A16_SINT;
6962         case SG_VERTEXFORMAT_SHORT4N:   return DXGI_FORMAT_R16G16B16A16_SNORM;
6963         case SG_VERTEXFORMAT_USHORT4N:  return DXGI_FORMAT_R16G16B16A16_UNORM;
6964         case SG_VERTEXFORMAT_UINT10_N2: return DXGI_FORMAT_R10G10B10A2_UNORM;
6965         default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0;
6966     }
6967 }
6968 
_sg_d3d11_input_classification(sg_vertex_step step)6969 _SOKOL_PRIVATE D3D11_INPUT_CLASSIFICATION _sg_d3d11_input_classification(sg_vertex_step step) {
6970     switch (step) {
6971         case SG_VERTEXSTEP_PER_VERTEX:      return D3D11_INPUT_PER_VERTEX_DATA;
6972         case SG_VERTEXSTEP_PER_INSTANCE:    return D3D11_INPUT_PER_INSTANCE_DATA;
6973         default: SOKOL_UNREACHABLE; return (D3D11_INPUT_CLASSIFICATION) 0;
6974     }
6975 }
6976 
_sg_d3d11_cull_mode(sg_cull_mode m)6977 _SOKOL_PRIVATE D3D11_CULL_MODE _sg_d3d11_cull_mode(sg_cull_mode m) {
6978     switch (m) {
6979         case SG_CULLMODE_NONE:      return D3D11_CULL_NONE;
6980         case SG_CULLMODE_FRONT:     return D3D11_CULL_FRONT;
6981         case SG_CULLMODE_BACK:      return D3D11_CULL_BACK;
6982         default: SOKOL_UNREACHABLE; return (D3D11_CULL_MODE) 0;
6983     }
6984 }
6985 
_sg_d3d11_compare_func(sg_compare_func f)6986 _SOKOL_PRIVATE D3D11_COMPARISON_FUNC _sg_d3d11_compare_func(sg_compare_func f) {
6987     switch (f) {
6988         case SG_COMPAREFUNC_NEVER:          return D3D11_COMPARISON_NEVER;
6989         case SG_COMPAREFUNC_LESS:           return D3D11_COMPARISON_LESS;
6990         case SG_COMPAREFUNC_EQUAL:          return D3D11_COMPARISON_EQUAL;
6991         case SG_COMPAREFUNC_LESS_EQUAL:     return D3D11_COMPARISON_LESS_EQUAL;
6992         case SG_COMPAREFUNC_GREATER:        return D3D11_COMPARISON_GREATER;
6993         case SG_COMPAREFUNC_NOT_EQUAL:      return D3D11_COMPARISON_NOT_EQUAL;
6994         case SG_COMPAREFUNC_GREATER_EQUAL:  return D3D11_COMPARISON_GREATER_EQUAL;
6995         case SG_COMPAREFUNC_ALWAYS:         return D3D11_COMPARISON_ALWAYS;
6996         default: SOKOL_UNREACHABLE; return (D3D11_COMPARISON_FUNC) 0;
6997     }
6998 }
6999 
_sg_d3d11_stencil_op(sg_stencil_op op)7000 _SOKOL_PRIVATE D3D11_STENCIL_OP _sg_d3d11_stencil_op(sg_stencil_op op) {
7001     switch (op) {
7002         case SG_STENCILOP_KEEP:         return D3D11_STENCIL_OP_KEEP;
7003         case SG_STENCILOP_ZERO:         return D3D11_STENCIL_OP_ZERO;
7004         case SG_STENCILOP_REPLACE:      return D3D11_STENCIL_OP_REPLACE;
7005         case SG_STENCILOP_INCR_CLAMP:   return D3D11_STENCIL_OP_INCR_SAT;
7006         case SG_STENCILOP_DECR_CLAMP:   return D3D11_STENCIL_OP_DECR_SAT;
7007         case SG_STENCILOP_INVERT:       return D3D11_STENCIL_OP_INVERT;
7008         case SG_STENCILOP_INCR_WRAP:    return D3D11_STENCIL_OP_INCR;
7009         case SG_STENCILOP_DECR_WRAP:    return D3D11_STENCIL_OP_DECR;
7010         default: SOKOL_UNREACHABLE; return (D3D11_STENCIL_OP) 0;
7011     }
7012 }
7013 
_sg_d3d11_blend_factor(sg_blend_factor f)7014 _SOKOL_PRIVATE D3D11_BLEND _sg_d3d11_blend_factor(sg_blend_factor f) {
7015     switch (f) {
7016         case SG_BLENDFACTOR_ZERO:                   return D3D11_BLEND_ZERO;
7017         case SG_BLENDFACTOR_ONE:                    return D3D11_BLEND_ONE;
7018         case SG_BLENDFACTOR_SRC_COLOR:              return D3D11_BLEND_SRC_COLOR;
7019         case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR:    return D3D11_BLEND_INV_SRC_COLOR;
7020         case SG_BLENDFACTOR_SRC_ALPHA:              return D3D11_BLEND_SRC_ALPHA;
7021         case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA:    return D3D11_BLEND_INV_SRC_ALPHA;
7022         case SG_BLENDFACTOR_DST_COLOR:              return D3D11_BLEND_DEST_COLOR;
7023         case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR:    return D3D11_BLEND_INV_DEST_COLOR;
7024         case SG_BLENDFACTOR_DST_ALPHA:              return D3D11_BLEND_DEST_ALPHA;
7025         case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA:    return D3D11_BLEND_INV_DEST_ALPHA;
7026         case SG_BLENDFACTOR_SRC_ALPHA_SATURATED:    return D3D11_BLEND_SRC_ALPHA_SAT;
7027         case SG_BLENDFACTOR_BLEND_COLOR:            return D3D11_BLEND_BLEND_FACTOR;
7028         case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR:  return D3D11_BLEND_INV_BLEND_FACTOR;
7029         case SG_BLENDFACTOR_BLEND_ALPHA:            return D3D11_BLEND_BLEND_FACTOR;
7030         case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA:  return D3D11_BLEND_INV_BLEND_FACTOR;
7031         default: SOKOL_UNREACHABLE; return (D3D11_BLEND) 0;
7032     }
7033 }
7034 
_sg_d3d11_blend_op(sg_blend_op op)7035 _SOKOL_PRIVATE D3D11_BLEND_OP _sg_d3d11_blend_op(sg_blend_op op) {
7036     switch (op) {
7037         case SG_BLENDOP_ADD:                return D3D11_BLEND_OP_ADD;
7038         case SG_BLENDOP_SUBTRACT:           return D3D11_BLEND_OP_SUBTRACT;
7039         case SG_BLENDOP_REVERSE_SUBTRACT:   return D3D11_BLEND_OP_REV_SUBTRACT;
7040         default: SOKOL_UNREACHABLE; return (D3D11_BLEND_OP) 0;
7041     }
7042 }
7043 
_sg_d3d11_color_write_mask(sg_color_mask m)7044 _SOKOL_PRIVATE UINT8 _sg_d3d11_color_write_mask(sg_color_mask m) {
7045     UINT8 res = 0;
7046     if (m & SG_COLORMASK_R) {
7047         res |= D3D11_COLOR_WRITE_ENABLE_RED;
7048     }
7049     if (m & SG_COLORMASK_G) {
7050         res |= D3D11_COLOR_WRITE_ENABLE_GREEN;
7051     }
7052     if (m & SG_COLORMASK_B) {
7053         res |= D3D11_COLOR_WRITE_ENABLE_BLUE;
7054     }
7055     if (m & SG_COLORMASK_A) {
7056         res |= D3D11_COLOR_WRITE_ENABLE_ALPHA;
7057     }
7058     return res;
7059 }
7060 
7061 /* see: https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-resources-limits#resource-limits-for-feature-level-11-hardware */
/* see: https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-resources-limits#resource-limits-for-feature-level-11-hardware */
/* populate _sg.backend, _sg.features, _sg.limits and the per-pixel-format
   capability table by querying the D3D11 device */
_SOKOL_PRIVATE void _sg_d3d11_init_caps(void) {
    _sg.backend = SG_BACKEND_D3D11;

    /* feature-level-11 hardware supports all optional sokol-gfx features */
    _sg.features.instancing = true;
    _sg.features.origin_top_left = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = true;

    /* fixed resource limits for feature level 11 (see link above) */
    _sg.limits.max_image_size_2d = 16 * 1024;
    _sg.limits.max_image_size_cube = 16 * 1024;
    _sg.limits.max_image_size_3d = 2 * 1024;
    _sg.limits.max_image_size_array = 16 * 1024;
    _sg.limits.max_image_array_layers = 2 * 1024;
    _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES;

    /* see: https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_format_support */
    for (int fmt = (SG_PIXELFORMAT_NONE+1); fmt < _SG_PIXELFORMAT_NUM; fmt++) {
        UINT dxgi_fmt_caps = 0;
        const DXGI_FORMAT dxgi_fmt = _sg_d3d11_pixel_format((sg_pixel_format)fmt);
        if (dxgi_fmt != DXGI_FORMAT_UNKNOWN) {
            /* CheckFormatSupport may legitimately fail with E_FAIL for
               formats the device doesn't know, treated as 'no support' */
            HRESULT hr = ID3D11Device_CheckFormatSupport(_sg.d3d11.dev, dxgi_fmt, &dxgi_fmt_caps);
            SOKOL_ASSERT(SUCCEEDED(hr) || (E_FAIL == hr));
            if (!SUCCEEDED(hr)) {
                dxgi_fmt_caps = 0;
            }
        }
        sg_pixelformat_info* info = &_sg.formats[fmt];
        info->sample = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_TEXTURE2D);
        info->filter = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_SHADER_SAMPLE);
        info->render = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_RENDER_TARGET);
        info->blend  = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_BLENDABLE);
        info->msaa   = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET);
        info->depth  = 0 != (dxgi_fmt_caps & D3D11_FORMAT_SUPPORT_DEPTH_STENCIL);
        /* depth formats are always considered renderable */
        if (info->depth) {
            info->render = true;
        }
    }
}
7103 
/* one-time setup of the D3D11 backend; stores the externally-provided
   device, device-context and render/depth-stencil-view callbacks from
   sg_desc, then queries device capabilities */
_SOKOL_PRIVATE void _sg_d3d11_setup_backend(const sg_desc* desc) {
    /* assume _sg.d3d11 already is zero-initialized */
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.d3d11.device);
    SOKOL_ASSERT(desc->context.d3d11.device_context);
    SOKOL_ASSERT(desc->context.d3d11.render_target_view_cb);
    SOKOL_ASSERT(desc->context.d3d11.depth_stencil_view_cb);
    SOKOL_ASSERT(desc->context.d3d11.render_target_view_cb != desc->context.d3d11.depth_stencil_view_cb);
    _sg.d3d11.valid = true;
    _sg.d3d11.dev = (ID3D11Device*) desc->context.d3d11.device;
    _sg.d3d11.ctx = (ID3D11DeviceContext*) desc->context.d3d11.device_context;
    _sg.d3d11.rtv_cb = desc->context.d3d11.render_target_view_cb;
    _sg.d3d11.dsv_cb = desc->context.d3d11.depth_stencil_view_cb;
    _sg_d3d11_init_caps();
}
7119 
/* teardown counterpart to _sg_d3d11_setup_backend(); only flips the valid
   flag — the device/context are owned by the application, not sokol-gfx */
_SOKOL_PRIVATE void _sg_d3d11_discard_backend(void) {
    SOKOL_ASSERT(_sg.d3d11.valid);
    _sg.d3d11.valid = false;
}
7124 
/* unbind every resource slot on the device context (render targets,
   rasterizer/depth/blend state, vertex/index buffers, input layout,
   shaders, constant buffers, shader resources and samplers) by setting
   NULL/zero arrays, so no resource refs remain held by the context */
_SOKOL_PRIVATE void _sg_d3d11_clear_state(void) {
    /* clear all the device context state, so that resource refs don't keep stuck in the d3d device context */
    ID3D11DeviceContext_OMSetRenderTargets(_sg.d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg.d3d11.zero_rtvs, NULL);
    ID3D11DeviceContext_RSSetState(_sg.d3d11.ctx, NULL);
    ID3D11DeviceContext_OMSetDepthStencilState(_sg.d3d11.ctx, NULL, 0);
    ID3D11DeviceContext_OMSetBlendState(_sg.d3d11.ctx, NULL, NULL, 0xFFFFFFFF);
    ID3D11DeviceContext_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, _sg.d3d11.zero_vbs, _sg.d3d11.zero_vb_strides, _sg.d3d11.zero_vb_offsets);
    ID3D11DeviceContext_IASetIndexBuffer(_sg.d3d11.ctx, NULL, DXGI_FORMAT_UNKNOWN, 0);
    ID3D11DeviceContext_IASetInputLayout(_sg.d3d11.ctx, NULL);
    ID3D11DeviceContext_VSSetShader(_sg.d3d11.ctx, NULL, NULL, 0);
    ID3D11DeviceContext_PSSetShader(_sg.d3d11.ctx, NULL, NULL, 0);
    ID3D11DeviceContext_VSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg.d3d11.zero_cbs);
    ID3D11DeviceContext_PSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg.d3d11.zero_cbs);
    ID3D11DeviceContext_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_srvs);
    ID3D11DeviceContext_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_srvs);
    ID3D11DeviceContext_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_smps);
    ID3D11DeviceContext_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg.d3d11.zero_smps);
}
7143 
/* reset the backend state cache; for D3D11 this simply means clearing
   the device-context state (there is no separate CPU-side cache) */
_SOKOL_PRIVATE void _sg_d3d11_reset_state_cache(void) {
    /* just clear the d3d11 device context state */
    _sg_d3d11_clear_state();
}
7148 
/* make a context current; D3D11 contexts carry no per-context state here,
   so this only clears the device-context bindings */
_SOKOL_PRIVATE void _sg_d3d11_activate_context(_sg_context_t* ctx) {
    _SOKOL_UNUSED(ctx);
    _sg_d3d11_clear_state();
}
7153 
/* create a context object; a no-op on D3D11 (no per-context GPU
   resources needed), always succeeds */
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
7159 
/* destroy a context object; a no-op on D3D11 (nothing was allocated
   in _sg_d3d11_create_context) */
_SOKOL_PRIVATE void _sg_d3d11_destroy_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    /* empty */
}
7165 
/* create a D3D11 buffer resource, or adopt an externally injected native
   buffer (desc->d3d11_buffer); immutable buffers are created with their
   initial content, dynamic/stream buffers start empty */
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    SOKOL_ASSERT(!buf->d3d11.buf);
    _sg_buffer_common_init(&buf->cmn, desc);
    const bool injected = (0 != desc->d3d11_buffer);
    if (injected) {
        /* take a reference on the injected buffer; released again in
           _sg_d3d11_destroy_buffer */
        buf->d3d11.buf = (ID3D11Buffer*) desc->d3d11_buffer;
        ID3D11Buffer_AddRef(buf->d3d11.buf);
    }
    else {
        D3D11_BUFFER_DESC d3d11_desc;
        memset(&d3d11_desc, 0, sizeof(d3d11_desc));
        d3d11_desc.ByteWidth = buf->cmn.size;
        d3d11_desc.Usage = _sg_d3d11_usage(buf->cmn.usage);
        d3d11_desc.BindFlags = buf->cmn.type == SG_BUFFERTYPE_VERTEXBUFFER ? D3D11_BIND_VERTEX_BUFFER : D3D11_BIND_INDEX_BUFFER;
        d3d11_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(buf->cmn.usage);
        /* immutable D3D11 buffers must be created with their content */
        D3D11_SUBRESOURCE_DATA* init_data_ptr = 0;
        D3D11_SUBRESOURCE_DATA init_data;
        memset(&init_data, 0, sizeof(init_data));
        if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
            SOKOL_ASSERT(desc->content);
            init_data.pSysMem = desc->content;
            init_data_ptr = &init_data;
        }
        HRESULT hr = ID3D11Device_CreateBuffer(_sg.d3d11.dev, &d3d11_desc, init_data_ptr, &buf->d3d11.buf);
        _SOKOL_UNUSED(hr);
        SOKOL_ASSERT(SUCCEEDED(hr) && buf->d3d11.buf);
    }
    return SG_RESOURCESTATE_VALID;
}
7196 
/* release the D3D11 buffer resource (this also drops the extra reference
   taken for injected buffers in _sg_d3d11_create_buffer) */
_SOKOL_PRIVATE void _sg_d3d11_destroy_buffer(_sg_buffer_t* buf) {
    SOKOL_ASSERT(buf);
    if (buf->d3d11.buf) {
        ID3D11Buffer_Release(buf->d3d11.buf);
    }
}
7203 
/* fill the global _sg.d3d11.subres_data scratch array with one
   D3D11_SUBRESOURCE_DATA entry per subresource (face x slice x mip),
   pointing into the user-provided image content; used when creating
   immutable textures with initial data */
_SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image_t* img, const sg_image_content* content) {
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.depth:1;
    int subres_index = 0;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int slice_index = 0; slice_index < num_slices; slice_index++) {
            for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) {
                SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS));
                D3D11_SUBRESOURCE_DATA* subres_data = &_sg.d3d11.subres_data[subres_index];
                /* mip dimensions halve per level, clamped to a minimum of 1 */
                const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1;
                const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1;
                /* array slices are packed consecutively inside each subimage;
                   compute this slice's byte offset into the subimage data */
                const sg_subimage_content* subimg_content = &(content->subimage[face_index][mip_index]);
                const int slice_size = subimg_content->size / num_slices;
                const int slice_offset = slice_size * slice_index;
                const uint8_t* ptr = (const uint8_t*) subimg_content->ptr;
                subres_data->pSysMem = ptr + slice_offset;
                subres_data->SysMemPitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
                if (img->cmn.type == SG_IMAGETYPE_3D) {
                    /* FIXME? const int mip_depth = ((img->depth>>mip_index)>0) ? img->depth>>mip_index : 1; */
                    subres_data->SysMemSlicePitch = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1);
                }
                else {
                    /* SysMemSlicePitch is only used for 3D textures */
                    subres_data->SysMemSlicePitch = 0;
                }
            }
        }
    }
}
7232 
/* Create the D3D11 objects backing an image resource.
   Two top-level cases:
     - depth/stencil render-target formats: only a depth texture is created
       (no SRV, no sampler, injection not supported);
     - everything else: a 2D/cube/array or 3D texture (created, or adopted
       from desc->d3d11_texture with an AddRef when 'injected'), plus a
       shader-resource-view, an optional separate MSAA render-target texture,
       and a sampler-state object.
   Returns SG_RESOURCESTATE_FAILED if the pixel format has no D3D11 mapping,
   otherwise SG_RESOURCESTATE_VALID (creation failures are only asserted). */
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    SOKOL_ASSERT(!img->d3d11.tex2d && !img->d3d11.tex3d && !img->d3d11.texds && !img->d3d11.texmsaa);
    SOKOL_ASSERT(!img->d3d11.srv && !img->d3d11.smp);
    HRESULT hr;
    _SOKOL_UNUSED(hr);

    _sg_image_common_init(&img->cmn, desc);
    const bool injected = (0 != desc->d3d11_texture);
    const bool msaa = (img->cmn.sample_count > 1);

    /* special case depth-stencil buffer? */
    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* create only a depth-texture */
        SOKOL_ASSERT(!injected);
        img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format);
        if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) {
            SOKOL_LOG("trying to create a D3D11 depth-texture with unsupported pixel format\n");
            return SG_RESOURCESTATE_FAILED;
        }
        D3D11_TEXTURE2D_DESC d3d11_desc;
        memset(&d3d11_desc, 0, sizeof(d3d11_desc));
        d3d11_desc.Width = img->cmn.width;
        d3d11_desc.Height = img->cmn.height;
        d3d11_desc.MipLevels = 1;
        d3d11_desc.ArraySize = 1;
        d3d11_desc.Format = img->d3d11.format;
        d3d11_desc.Usage = D3D11_USAGE_DEFAULT;
        d3d11_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
        d3d11_desc.SampleDesc.Count = img->cmn.sample_count;
        d3d11_desc.SampleDesc.Quality = msaa ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0;
        hr = ID3D11Device_CreateTexture2D(_sg.d3d11.dev, &d3d11_desc, NULL, &img->d3d11.texds);
        SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texds);
    }
    else {
        /* create (or inject) color texture */

        /* prepare initial content pointers (only for immutable, non-render-target,
           non-injected images; fills the global _sg.d3d11.subres_data array) */
        D3D11_SUBRESOURCE_DATA* init_data = 0;
        if (!injected && (img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) {
            _sg_d3d11_fill_subres_data(img, &desc->content);
            init_data = _sg.d3d11.subres_data;
        }
        if (img->cmn.type != SG_IMAGETYPE_3D) {
            /* 2D-, cube- or array-texture */
            /* if this is an MSAA render target, the following texture will be the 'resolve-texture' */
            D3D11_TEXTURE2D_DESC d3d11_tex_desc;
            memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc));
            d3d11_tex_desc.Width = img->cmn.width;
            d3d11_tex_desc.Height = img->cmn.height;
            d3d11_tex_desc.MipLevels = img->cmn.num_mipmaps;
            switch (img->cmn.type) {
                case SG_IMAGETYPE_ARRAY:    d3d11_tex_desc.ArraySize = img->cmn.depth; break;
                case SG_IMAGETYPE_CUBE:     d3d11_tex_desc.ArraySize = 6; break;
                default:                    d3d11_tex_desc.ArraySize = 1; break;
            }
            d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
            if (img->cmn.render_target) {
                img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format);
                d3d11_tex_desc.Format = img->d3d11.format;
                d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT;
                /* for MSAA render targets, rendering happens into the separate
                   MSAA texture created below; this texture is only the resolve target */
                if (!msaa) {
                    d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET;
                }
                d3d11_tex_desc.CPUAccessFlags = 0;
            }
            else {
                img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format);
                d3d11_tex_desc.Format = img->d3d11.format;
                d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage);
                d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage);
            }
            if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) {
                /* trying to create a texture format that's not supported by D3D */
                SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n");
                return SG_RESOURCESTATE_FAILED;
            }
            d3d11_tex_desc.SampleDesc.Count = 1;
            d3d11_tex_desc.SampleDesc.Quality = 0;
            d3d11_tex_desc.MiscFlags = (img->cmn.type == SG_IMAGETYPE_CUBE) ? D3D11_RESOURCE_MISC_TEXTURECUBE : 0;
            if (injected) {
                /* adopt a caller-provided texture; AddRef so destroy_image can Release unconditionally */
                img->d3d11.tex2d = (ID3D11Texture2D*) desc->d3d11_texture;
                ID3D11Texture2D_AddRef(img->d3d11.tex2d);
            }
            else {
                hr = ID3D11Device_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex2d);
                SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.tex2d);
            }

            /* shader-resource-view */
            D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc;
            memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc));
            d3d11_srv_desc.Format = d3d11_tex_desc.Format;
            switch (img->cmn.type) {
                case SG_IMAGETYPE_2D:
                    d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
                    d3d11_srv_desc.Texture2D.MipLevels = img->cmn.num_mipmaps;
                    break;
                case SG_IMAGETYPE_CUBE:
                    d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE;
                    d3d11_srv_desc.TextureCube.MipLevels = img->cmn.num_mipmaps;
                    break;
                case SG_IMAGETYPE_ARRAY:
                    d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY;
                    d3d11_srv_desc.Texture2DArray.MipLevels = img->cmn.num_mipmaps;
                    d3d11_srv_desc.Texture2DArray.ArraySize = img->cmn.depth;
                    break;
                default:
                    /* SG_IMAGETYPE_3D is handled in the else-branch below */
                    SOKOL_UNREACHABLE; break;
            }
            hr = ID3D11Device_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex2d, &d3d11_srv_desc, &img->d3d11.srv);
            SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.srv);
        }
        else {
            /* 3D texture */
            D3D11_TEXTURE3D_DESC d3d11_tex_desc;
            memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc));
            d3d11_tex_desc.Width = img->cmn.width;
            d3d11_tex_desc.Height = img->cmn.height;
            d3d11_tex_desc.Depth = img->cmn.depth;
            d3d11_tex_desc.MipLevels = img->cmn.num_mipmaps;
            d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
            if (img->cmn.render_target) {
                img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format);
                d3d11_tex_desc.Format = img->d3d11.format;
                d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT;
                if (!msaa) {
                    d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET;
                }
                d3d11_tex_desc.CPUAccessFlags = 0;
            }
            else {
                img->d3d11.format = _sg_d3d11_pixel_format(img->cmn.pixel_format);
                d3d11_tex_desc.Format = img->d3d11.format;
                d3d11_tex_desc.Usage = _sg_d3d11_usage(img->cmn.usage);
                d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->cmn.usage);
            }
            if (img->d3d11.format == DXGI_FORMAT_UNKNOWN) {
                /* trying to create a texture format that's not supported by D3D */
                SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n");
                return SG_RESOURCESTATE_FAILED;
            }
            if (injected) {
                img->d3d11.tex3d = (ID3D11Texture3D*) desc->d3d11_texture;
                ID3D11Texture3D_AddRef(img->d3d11.tex3d);
            }
            else {
                hr = ID3D11Device_CreateTexture3D(_sg.d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11.tex3d);
                SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.tex3d);
            }

            /* shader resource view for 3d texture */
            D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc;
            memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc));
            d3d11_srv_desc.Format = d3d11_tex_desc.Format;
            d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D;
            d3d11_srv_desc.Texture3D.MipLevels = img->cmn.num_mipmaps;
            hr = ID3D11Device_CreateShaderResourceView(_sg.d3d11.dev, (ID3D11Resource*)img->d3d11.tex3d, &d3d11_srv_desc, &img->d3d11.srv);
            SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.srv);
        }

        /* also need to create a separate MSAA render target texture? */
        if (msaa) {
            D3D11_TEXTURE2D_DESC d3d11_tex_desc;
            memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc));
            d3d11_tex_desc.Width = img->cmn.width;
            d3d11_tex_desc.Height = img->cmn.height;
            d3d11_tex_desc.MipLevels = 1;
            d3d11_tex_desc.ArraySize = 1;
            d3d11_tex_desc.Format = img->d3d11.format;
            d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT;
            d3d11_tex_desc.BindFlags = D3D11_BIND_RENDER_TARGET;
            d3d11_tex_desc.CPUAccessFlags = 0;
            d3d11_tex_desc.SampleDesc.Count = img->cmn.sample_count;
            d3d11_tex_desc.SampleDesc.Quality = (UINT)D3D11_STANDARD_MULTISAMPLE_PATTERN;
            hr = ID3D11Device_CreateTexture2D(_sg.d3d11.dev, &d3d11_tex_desc, NULL, &img->d3d11.texmsaa);
            SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.texmsaa);
        }

        /* sampler state object, note D3D11 implements an internal shared-pool for sampler objects */
        D3D11_SAMPLER_DESC d3d11_smp_desc;
        memset(&d3d11_smp_desc, 0, sizeof(d3d11_smp_desc));
        d3d11_smp_desc.Filter = _sg_d3d11_filter(img->cmn.min_filter, img->cmn.mag_filter, img->cmn.max_anisotropy);
        d3d11_smp_desc.AddressU = _sg_d3d11_address_mode(img->cmn.wrap_u);
        d3d11_smp_desc.AddressV = _sg_d3d11_address_mode(img->cmn.wrap_v);
        d3d11_smp_desc.AddressW = _sg_d3d11_address_mode(img->cmn.wrap_w);
        switch (img->cmn.border_color) {
            case SG_BORDERCOLOR_TRANSPARENT_BLACK:
                /* all 0.0f */
                break;
            case SG_BORDERCOLOR_OPAQUE_WHITE:
                for (int i = 0; i < 4; i++) {
                    d3d11_smp_desc.BorderColor[i] = 1.0f;
                }
                break;
            default:
                /* opaque black */
                d3d11_smp_desc.BorderColor[3] = 1.0f;
                break;
        }
        d3d11_smp_desc.MaxAnisotropy = img->cmn.max_anisotropy;
        d3d11_smp_desc.ComparisonFunc = D3D11_COMPARISON_NEVER;
        d3d11_smp_desc.MinLOD = desc->min_lod;
        d3d11_smp_desc.MaxLOD = desc->max_lod;
        hr = ID3D11Device_CreateSamplerState(_sg.d3d11.dev, &d3d11_smp_desc, &img->d3d11.smp);
        SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11.smp);
    }
    return SG_RESOURCESTATE_VALID;
}
7442 
_sg_d3d11_destroy_image(_sg_image_t * img)7443 _SOKOL_PRIVATE void _sg_d3d11_destroy_image(_sg_image_t* img) {
7444     SOKOL_ASSERT(img);
7445     if (img->d3d11.tex2d) {
7446         ID3D11Texture2D_Release(img->d3d11.tex2d);
7447     }
7448     if (img->d3d11.tex3d) {
7449         ID3D11Texture3D_Release(img->d3d11.tex3d);
7450     }
7451     if (img->d3d11.texds) {
7452         ID3D11Texture2D_Release(img->d3d11.texds);
7453     }
7454     if (img->d3d11.texmsaa) {
7455         ID3D11Texture2D_Release(img->d3d11.texmsaa);
7456     }
7457     if (img->d3d11.srv) {
7458         ID3D11ShaderResourceView_Release(img->d3d11.srv);
7459     }
7460     if (img->d3d11.smp) {
7461         ID3D11SamplerState_Release(img->d3d11.smp);
7462     }
7463 }
7464 
_sg_d3d11_load_d3dcompiler_dll(void)7465 _SOKOL_PRIVATE bool _sg_d3d11_load_d3dcompiler_dll(void) {
7466     /* on UWP, don't do anything (not tested) */
7467     #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP))
7468         return true;
7469     #else
7470         /* load DLL on demand */
7471         if ((0 == _sg.d3d11.d3dcompiler_dll) && !_sg.d3d11.d3dcompiler_dll_load_failed) {
7472             _sg.d3d11.d3dcompiler_dll = LoadLibraryA("d3dcompiler_47.dll");
7473             if (0 == _sg.d3d11.d3dcompiler_dll) {
7474                 /* don't attempt to load missing DLL in the future */
7475                 SOKOL_LOG("failed to load d3dcompiler_47.dll!\n");
7476                 _sg.d3d11.d3dcompiler_dll_load_failed = true;
7477                 return false;
7478             }
7479             /* look up function pointers */
7480             _sg.d3d11.D3DCompile_func = (pD3DCompile) GetProcAddress(_sg.d3d11.d3dcompiler_dll, "D3DCompile");
7481             SOKOL_ASSERT(_sg.d3d11.D3DCompile_func);
7482         }
7483         return 0 != _sg.d3d11.d3dcompiler_dll;
7484     #endif
7485 }
7486 
/* On UWP, D3DCompile is linked statically; on desktop Windows it is resolved
   at runtime from d3dcompiler_47.dll (see _sg_d3d11_load_d3dcompiler_dll). */
#if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP))
#define _sg_d3d11_D3DCompile D3DCompile
#else
#define _sg_d3d11_D3DCompile _sg.d3d11.D3DCompile_func
#endif
7492 
_sg_d3d11_compile_shader(const sg_shader_stage_desc * stage_desc)7493 _SOKOL_PRIVATE ID3DBlob* _sg_d3d11_compile_shader(const sg_shader_stage_desc* stage_desc) {
7494     if (!_sg_d3d11_load_d3dcompiler_dll()) {
7495         return NULL;
7496     }
7497     SOKOL_ASSERT(stage_desc->d3d11_target);
7498     ID3DBlob* output = NULL;
7499     ID3DBlob* errors_or_warnings = NULL;
7500     HRESULT hr = _sg_d3d11_D3DCompile(
7501         stage_desc->source,             /* pSrcData */
7502         strlen(stage_desc->source),     /* SrcDataSize */
7503         NULL,                           /* pSourceName */
7504         NULL,                           /* pDefines */
7505         NULL,                           /* pInclude */
7506         stage_desc->entry ? stage_desc->entry : "main",     /* pEntryPoint */
7507         stage_desc->d3d11_target,       /* pTarget (vs_5_0 or ps_5_0) */
7508         D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR | D3DCOMPILE_OPTIMIZATION_LEVEL3,   /* Flags1 */
7509         0,          /* Flags2 */
7510         &output,    /* ppCode */
7511         &errors_or_warnings);   /* ppErrorMsgs */
7512     if (errors_or_warnings) {
7513         SOKOL_LOG((LPCSTR)ID3D10Blob_GetBufferPointer(errors_or_warnings));
7514         ID3D10Blob_Release(errors_or_warnings); errors_or_warnings = NULL;
7515     }
7516     if (FAILED(hr)) {
7517         /* just in case, usually output is NULL here */
7518         if (output) {
7519             ID3D10Blob_Release(output);
7520             output = NULL;
7521         }
7522     }
7523     return output;
7524 }
7525 
/* round 'val' up to the next multiple of 'round_to' (round_to must be a power of two) */
#define _sg_d3d11_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1))
7527 
/* Create the D3D11 objects for a shader resource:
     - per-attribute semantic names/indices (needed later for input layouts),
     - one constant buffer per uniform block and stage,
     - vertex- and pixel-shader objects, either from precompiled byte code
       (desc->vs/fs.byte_code) or compiled from HLSL source on the fly,
     - a heap copy of the vertex-shader byte code, which sg_create_pipeline
       needs for CreateInputLayout.
   Returns SG_RESOURCESTATE_FAILED when no usable byte code could be obtained,
   otherwise SG_RESOURCESTATE_VALID. Temporary compile blobs are always
   released before returning. */
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);
    SOKOL_ASSERT(!shd->d3d11.vs && !shd->d3d11.fs && !shd->d3d11.vs_blob);
    HRESULT hr;
    _SOKOL_UNUSED(hr);

    _sg_shader_common_init(&shd->cmn, desc);

    /* copy vertex attribute semantic names and indices */
    for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) {
        _sg_strcpy(&shd->d3d11.attrs[i].sem_name, desc->attrs[i].sem_name);
        shd->d3d11.attrs[i].sem_index = desc->attrs[i].sem_index;
    }

    /* shader stage uniform blocks and image slots */
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index];
        _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index];
        for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) {
            const _sg_uniform_block_t* ub = &cmn_stage->uniform_blocks[ub_index];

            /* create a D3D constant buffer for each uniform block */
            SOKOL_ASSERT(0 == d3d11_stage->cbufs[ub_index]);
            D3D11_BUFFER_DESC cb_desc;
            memset(&cb_desc, 0, sizeof(cb_desc));
            /* D3D11 requires constant buffer sizes to be multiples of 16 bytes */
            cb_desc.ByteWidth = _sg_d3d11_roundup(ub->size, 16);
            cb_desc.Usage = D3D11_USAGE_DEFAULT;
            cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
            hr = ID3D11Device_CreateBuffer(_sg.d3d11.dev, &cb_desc, NULL, &d3d11_stage->cbufs[ub_index]);
            SOKOL_ASSERT(SUCCEEDED(hr) && d3d11_stage->cbufs[ub_index]);
        }
    }

    /* obtain vertex- and pixel-shader byte code, either provided or compiled */
    const void* vs_ptr = 0, *fs_ptr = 0;
    SIZE_T vs_length = 0, fs_length = 0;
    ID3DBlob* vs_blob = 0, *fs_blob = 0;
    if (desc->vs.byte_code && desc->fs.byte_code) {
        /* create from shader byte code */
        vs_ptr = desc->vs.byte_code;
        fs_ptr = desc->fs.byte_code;
        vs_length = desc->vs.byte_code_size;
        fs_length = desc->fs.byte_code_size;
    }
    else {
        /* compile from shader source code */
        vs_blob = _sg_d3d11_compile_shader(&desc->vs);
        fs_blob = _sg_d3d11_compile_shader(&desc->fs);
        if (vs_blob && fs_blob) {
            vs_ptr = ID3D10Blob_GetBufferPointer(vs_blob);
            vs_length = ID3D10Blob_GetBufferSize(vs_blob);
            fs_ptr = ID3D10Blob_GetBufferPointer(fs_blob);
            fs_length = ID3D10Blob_GetBufferSize(fs_blob);
        }
    }
    sg_resource_state result = SG_RESOURCESTATE_FAILED;
    if (vs_ptr && fs_ptr && (vs_length > 0) && (fs_length > 0)) {
        /* create the D3D vertex- and pixel-shader objects */
        hr = ID3D11Device_CreateVertexShader(_sg.d3d11.dev, vs_ptr, vs_length, NULL, &shd->d3d11.vs);
        SOKOL_ASSERT(SUCCEEDED(hr) && shd->d3d11.vs);
        hr = ID3D11Device_CreatePixelShader(_sg.d3d11.dev, fs_ptr, fs_length, NULL, &shd->d3d11.fs);
        SOKOL_ASSERT(SUCCEEDED(hr) && shd->d3d11.fs);

        /* need to store the vertex shader byte code, this is needed later in sg_create_pipeline */
        shd->d3d11.vs_blob_length = (int)vs_length;
        shd->d3d11.vs_blob = SOKOL_MALLOC((int)vs_length);
        SOKOL_ASSERT(shd->d3d11.vs_blob);
        memcpy(shd->d3d11.vs_blob, vs_ptr, vs_length);

        result = SG_RESOURCESTATE_VALID;
    }
    /* the compile blobs (if any) are no longer needed */
    if (vs_blob) {
        ID3D10Blob_Release(vs_blob); vs_blob = 0;
    }
    if (fs_blob) {
        ID3D10Blob_Release(fs_blob); fs_blob = 0;
    }
    return result;
}
7606 
_sg_d3d11_destroy_shader(_sg_shader_t * shd)7607 _SOKOL_PRIVATE void _sg_d3d11_destroy_shader(_sg_shader_t* shd) {
7608     SOKOL_ASSERT(shd);
7609     if (shd->d3d11.vs) {
7610         ID3D11VertexShader_Release(shd->d3d11.vs);
7611     }
7612     if (shd->d3d11.fs) {
7613         ID3D11PixelShader_Release(shd->d3d11.fs);
7614     }
7615     if (shd->d3d11.vs_blob) {
7616         SOKOL_FREE(shd->d3d11.vs_blob);
7617     }
7618     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
7619         _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index];
7620         _sg_d3d11_shader_stage_t* d3d11_stage = &shd->d3d11.stage[stage_index];
7621         for (int ub_index = 0; ub_index < cmn_stage->num_uniform_blocks; ub_index++) {
7622             if (d3d11_stage->cbufs[ub_index]) {
7623                 ID3D11Buffer_Release(d3d11_stage->cbufs[ub_index]);
7624             }
7625         }
7626     }
7627 }
7628 
/* Create the four D3D11 state objects backing a pipeline:
     - an input layout built from the vertex-attribute layout plus the
       shader's semantic names and the vertex-shader bytecode stored by
       _sg_d3d11_create_shader,
     - a rasterizer state, a depth-stencil state, and a blend state.
   Also caches per-draw values (index format, primitive topology, stencil
   reference, per-buffer vertex strides) in pip->d3d11.
   Always returns SG_RESOURCESTATE_VALID; creation failures are asserted. */
_SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);
    SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_VALID);
    SOKOL_ASSERT(shd->d3d11.vs_blob && shd->d3d11.vs_blob_length > 0);
    SOKOL_ASSERT(!pip->d3d11.il && !pip->d3d11.rs && !pip->d3d11.dss && !pip->d3d11.bs);

    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);
    pip->d3d11.index_format = _sg_d3d11_index_format(pip->cmn.index_type);
    pip->d3d11.topology = _sg_d3d11_primitive_topology(desc->primitive_type);
    pip->d3d11.stencil_ref = desc->depth_stencil.stencil_ref;

    /* create input layout object */
    HRESULT hr;
    _SOKOL_UNUSED(hr);
    D3D11_INPUT_ELEMENT_DESC d3d11_comps[SG_MAX_VERTEX_ATTRIBUTES];
    memset(d3d11_comps, 0, sizeof(d3d11_comps));
    int attr_index = 0;
    /* attributes end at the first SG_VERTEXFORMAT_INVALID entry; attr_index
       afterwards is the element count passed to CreateInputLayout */
    for (; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
        const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index];
        const sg_vertex_step step_func = l_desc->step_func;
        const int step_rate = l_desc->step_rate;
        D3D11_INPUT_ELEMENT_DESC* d3d11_comp = &d3d11_comps[attr_index];
        d3d11_comp->SemanticName = _sg_strptr(&shd->d3d11.attrs[attr_index].sem_name);
        d3d11_comp->SemanticIndex = shd->d3d11.attrs[attr_index].sem_index;
        d3d11_comp->Format = _sg_d3d11_vertex_format(a_desc->format);
        d3d11_comp->InputSlot = a_desc->buffer_index;
        d3d11_comp->AlignedByteOffset = a_desc->offset;
        d3d11_comp->InputSlotClass = _sg_d3d11_input_classification(step_func);
        if (SG_VERTEXSTEP_PER_INSTANCE == step_func) {
            d3d11_comp->InstanceDataStepRate = step_rate;
        }
        /* mark this vertex-buffer bind slot as used by the layout */
        pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
    }
    /* cache per-slot vertex strides for IASetVertexBuffers at draw time */
    for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) {
        if (pip->cmn.vertex_layout_valid[layout_index]) {
            const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index];
            SOKOL_ASSERT(l_desc->stride > 0);
            pip->d3d11.vb_strides[layout_index] = l_desc->stride;
        }
        else {
            pip->d3d11.vb_strides[layout_index] = 0;
        }
    }
    hr = ID3D11Device_CreateInputLayout(_sg.d3d11.dev,
        d3d11_comps,                /* pInputElementDesc */
        attr_index,                 /* NumElements */
        shd->d3d11.vs_blob,         /* pShaderByteCodeWithInputSignature */
        shd->d3d11.vs_blob_length,  /* BytecodeLength */
        &pip->d3d11.il);
    SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.il);

    /* create rasterizer state */
    D3D11_RASTERIZER_DESC rs_desc;
    memset(&rs_desc, 0, sizeof(rs_desc));
    rs_desc.FillMode = D3D11_FILL_SOLID;
    rs_desc.CullMode = _sg_d3d11_cull_mode(desc->rasterizer.cull_mode);
    rs_desc.FrontCounterClockwise = desc->rasterizer.face_winding == SG_FACEWINDING_CCW;
    rs_desc.DepthBias = (INT) pip->cmn.depth_bias;
    rs_desc.DepthBiasClamp = pip->cmn.depth_bias_clamp;
    rs_desc.SlopeScaledDepthBias = pip->cmn.depth_bias_slope_scale;
    rs_desc.DepthClipEnable = TRUE;
    /* scissor is always enabled; sg_apply_scissor_rect presumably resets it
       to the full framebuffer per pass -- NOTE(review): confirm in pass code */
    rs_desc.ScissorEnable = TRUE;
    rs_desc.MultisampleEnable = desc->rasterizer.sample_count > 1;
    rs_desc.AntialiasedLineEnable = FALSE;
    hr = ID3D11Device_CreateRasterizerState(_sg.d3d11.dev, &rs_desc, &pip->d3d11.rs);
    SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.rs);

    /* create depth-stencil state */
    D3D11_DEPTH_STENCIL_DESC dss_desc;
    memset(&dss_desc, 0, sizeof(dss_desc));
    dss_desc.DepthEnable = TRUE;
    dss_desc.DepthWriteMask = desc->depth_stencil.depth_write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO;
    dss_desc.DepthFunc = _sg_d3d11_compare_func(desc->depth_stencil.depth_compare_func);
    dss_desc.StencilEnable = desc->depth_stencil.stencil_enabled;
    dss_desc.StencilReadMask = desc->depth_stencil.stencil_read_mask;
    dss_desc.StencilWriteMask = desc->depth_stencil.stencil_write_mask;
    const sg_stencil_state* sf = &desc->depth_stencil.stencil_front;
    dss_desc.FrontFace.StencilFailOp = _sg_d3d11_stencil_op(sf->fail_op);
    dss_desc.FrontFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sf->depth_fail_op);
    dss_desc.FrontFace.StencilPassOp = _sg_d3d11_stencil_op(sf->pass_op);
    dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(sf->compare_func);
    const sg_stencil_state* sb = &desc->depth_stencil.stencil_back;
    dss_desc.BackFace.StencilFailOp = _sg_d3d11_stencil_op(sb->fail_op);
    dss_desc.BackFace.StencilDepthFailOp = _sg_d3d11_stencil_op(sb->depth_fail_op);
    dss_desc.BackFace.StencilPassOp = _sg_d3d11_stencil_op(sb->pass_op);
    dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(sb->compare_func);
    hr = ID3D11Device_CreateDepthStencilState(_sg.d3d11.dev, &dss_desc, &pip->d3d11.dss);
    SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.dss);

    /* create blend state (only render target 0 is configured; IndependentBlendEnable is off) */
    D3D11_BLEND_DESC bs_desc;
    memset(&bs_desc, 0, sizeof(bs_desc));
    bs_desc.AlphaToCoverageEnable = desc->rasterizer.alpha_to_coverage_enabled;
    bs_desc.IndependentBlendEnable = FALSE;
    bs_desc.RenderTarget[0].BlendEnable = desc->blend.enabled;
    bs_desc.RenderTarget[0].SrcBlend = _sg_d3d11_blend_factor(desc->blend.src_factor_rgb);
    bs_desc.RenderTarget[0].DestBlend = _sg_d3d11_blend_factor(desc->blend.dst_factor_rgb);
    bs_desc.RenderTarget[0].BlendOp = _sg_d3d11_blend_op(desc->blend.op_rgb);
    bs_desc.RenderTarget[0].SrcBlendAlpha = _sg_d3d11_blend_factor(desc->blend.src_factor_alpha);
    bs_desc.RenderTarget[0].DestBlendAlpha = _sg_d3d11_blend_factor(desc->blend.dst_factor_alpha);
    bs_desc.RenderTarget[0].BlendOpAlpha = _sg_d3d11_blend_op(desc->blend.op_alpha);
    bs_desc.RenderTarget[0].RenderTargetWriteMask = _sg_d3d11_color_write_mask((sg_color_mask)desc->blend.color_write_mask);
    hr = ID3D11Device_CreateBlendState(_sg.d3d11.dev, &bs_desc, &pip->d3d11.bs);
    SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11.bs);

    return SG_RESOURCESTATE_VALID;
}
7743 
_sg_d3d11_destroy_pipeline(_sg_pipeline_t * pip)7744 _SOKOL_PRIVATE void _sg_d3d11_destroy_pipeline(_sg_pipeline_t* pip) {
7745     SOKOL_ASSERT(pip);
7746     if (pip->d3d11.il) {
7747         ID3D11InputLayout_Release(pip->d3d11.il);
7748     }
7749     if (pip->d3d11.rs) {
7750         ID3D11RasterizerState_Release(pip->d3d11.rs);
7751     }
7752     if (pip->d3d11.dss) {
7753         ID3D11DepthStencilState_Release(pip->d3d11.dss);
7754     }
7755     if (pip->d3d11.bs) {
7756         ID3D11BlendState_Release(pip->d3d11.bs);
7757     }
7758 }
7759 
_sg_d3d11_create_pass(_sg_pass_t * pass,_sg_image_t ** att_images,const sg_pass_desc * desc)7760 _SOKOL_PRIVATE sg_resource_state _sg_d3d11_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
7761     SOKOL_ASSERT(pass && desc);
7762     SOKOL_ASSERT(att_images && att_images[0]);
7763     SOKOL_ASSERT(_sg.d3d11.dev);
7764 
7765     _sg_pass_common_init(&pass->cmn, desc);
7766 
7767     for (int i = 0; i < pass->cmn.num_color_atts; i++) {
7768         const sg_attachment_desc* att_desc = &desc->color_attachments[i];
7769         _SOKOL_UNUSED(att_desc);
7770         SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
7771         _sg_image_t* att_img = att_images[i];
7772         SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id));
7773         SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_img->cmn.pixel_format));
7774         SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].image);
7775         pass->d3d11.color_atts[i].image = att_img;
7776 
7777         /* create D3D11 render-target-view */
7778         const _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i];
7779         SOKOL_ASSERT(0 == pass->d3d11.color_atts[i].rtv);
7780         ID3D11Resource* d3d11_res = 0;
7781         const bool is_msaa = att_img->cmn.sample_count > 1;
7782         D3D11_RENDER_TARGET_VIEW_DESC d3d11_rtv_desc;
7783         memset(&d3d11_rtv_desc, 0, sizeof(d3d11_rtv_desc));
7784         d3d11_rtv_desc.Format = att_img->d3d11.format;
7785         if ((att_img->cmn.type == SG_IMAGETYPE_2D) || is_msaa) {
7786             if (is_msaa) {
7787                 d3d11_res = (ID3D11Resource*) att_img->d3d11.texmsaa;
7788                 d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS;
7789             }
7790             else {
7791                 d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d;
7792                 d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
7793                 d3d11_rtv_desc.Texture2D.MipSlice = cmn_att->mip_level;
7794             }
7795         }
7796         else if ((att_img->cmn.type == SG_IMAGETYPE_CUBE) || (att_img->cmn.type == SG_IMAGETYPE_ARRAY)) {
7797             d3d11_res = (ID3D11Resource*) att_img->d3d11.tex2d;
7798             d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
7799             d3d11_rtv_desc.Texture2DArray.MipSlice = cmn_att->mip_level;
7800             d3d11_rtv_desc.Texture2DArray.FirstArraySlice = cmn_att->slice;
7801             d3d11_rtv_desc.Texture2DArray.ArraySize = 1;
7802         }
7803         else {
7804             SOKOL_ASSERT(att_img->cmn.type == SG_IMAGETYPE_3D);
7805             d3d11_res = (ID3D11Resource*) att_img->d3d11.tex3d;
7806             d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE3D;
7807             d3d11_rtv_desc.Texture3D.MipSlice = cmn_att->mip_level;
7808             d3d11_rtv_desc.Texture3D.FirstWSlice = cmn_att->slice;
7809             d3d11_rtv_desc.Texture3D.WSize = 1;
7810         }
7811         SOKOL_ASSERT(d3d11_res);
7812         HRESULT hr = ID3D11Device_CreateRenderTargetView(_sg.d3d11.dev, d3d11_res, &d3d11_rtv_desc, &pass->d3d11.color_atts[i].rtv);
7813         _SOKOL_UNUSED(hr);
7814         SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11.color_atts[i].rtv);
7815     }
7816 
7817     /* optional depth-stencil image */
7818     SOKOL_ASSERT(0 == pass->d3d11.ds_att.image);
7819     SOKOL_ASSERT(0 == pass->d3d11.ds_att.dsv);
7820     if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) {
7821         const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
7822         const sg_attachment_desc* att_desc = &desc->depth_stencil_attachment;
7823         _SOKOL_UNUSED(att_desc);
7824         _sg_image_t* att_img = att_images[ds_img_index];
7825         SOKOL_ASSERT(att_img && (att_img->slot.id == att_desc->image.id));
7826         SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_img->cmn.pixel_format));
7827         SOKOL_ASSERT(0 == pass->d3d11.ds_att.image);
7828         pass->d3d11.ds_att.image = att_img;
7829 
7830         /* create D3D11 depth-stencil-view */
7831         D3D11_DEPTH_STENCIL_VIEW_DESC d3d11_dsv_desc;
7832         memset(&d3d11_dsv_desc, 0, sizeof(d3d11_dsv_desc));
7833         d3d11_dsv_desc.Format = att_img->d3d11.format;
7834         const bool is_msaa = att_img->cmn.sample_count > 1;
7835         if (is_msaa) {
7836             d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
7837         }
7838         else {
7839             d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
7840         }
7841         ID3D11Resource* d3d11_res = (ID3D11Resource*) att_img->d3d11.texds;
7842         SOKOL_ASSERT(d3d11_res);
7843         HRESULT hr = ID3D11Device_CreateDepthStencilView(_sg.d3d11.dev, d3d11_res, &d3d11_dsv_desc, &pass->d3d11.ds_att.dsv);
7844         _SOKOL_UNUSED(hr);
7845         SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11.ds_att.dsv);
7846     }
7847     return SG_RESOURCESTATE_VALID;
7848 }
7849 
_sg_d3d11_destroy_pass(_sg_pass_t * pass)7850 _SOKOL_PRIVATE void _sg_d3d11_destroy_pass(_sg_pass_t* pass) {
7851     SOKOL_ASSERT(pass);
7852     for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
7853         if (pass->d3d11.color_atts[i].rtv) {
7854             ID3D11RenderTargetView_Release(pass->d3d11.color_atts[i].rtv);
7855         }
7856     }
7857     if (pass->d3d11.ds_att.dsv) {
7858         ID3D11DepthStencilView_Release(pass->d3d11.ds_att.dsv);
7859     }
7860 }
7861 
/* Return the image bound to color attachment 'index' of a pass.
   NOTE: may return null (unused attachment slot). */
_SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_color_image(const _sg_pass_t* pass, int index) {
    SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
    /* NOTE: may return null */
    return pass->d3d11.color_atts[index].image;
}
7867 
/* Return the depth-stencil attachment image of a pass.
   NOTE: may return null (pass has no depth-stencil attachment). */
_SOKOL_PRIVATE _sg_image_t* _sg_d3d11_pass_ds_image(const _sg_pass_t* pass) {
    /* NOTE: may return null */
    SOKOL_ASSERT(pass);
    return pass->d3d11.ds_att.image;
}
7873 
/* Begin a render pass: bind the render-target- and depth-stencil-views,
   reset viewport and scissor to the full render area, and perform the
   requested clear actions.
     pass:   offscreen pass object, or null to render to the default framebuffer
     action: per-attachment load/clear actions (must not be null)
     w, h:   width and height of the render area in pixels */
_SOKOL_PRIVATE void _sg_d3d11_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.d3d11.in_pass);
    _sg.d3d11.in_pass = true;
    _sg.d3d11.cur_width = w;
    _sg.d3d11.cur_height = h;
    if (pass) {
        /* offscreen pass: gather the RTVs/DSV that were created in create_pass */
        _sg.d3d11.cur_pass = pass;
        _sg.d3d11.cur_pass_id.id = pass->slot.id;
        _sg.d3d11.num_rtvs = 0;
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            _sg.d3d11.cur_rtvs[i] = pass->d3d11.color_atts[i].rtv;
            if (_sg.d3d11.cur_rtvs[i]) {
                _sg.d3d11.num_rtvs++;
            }
        }
        _sg.d3d11.cur_dsv = pass->d3d11.ds_att.dsv;
    }
    else {
        /* render to default frame buffer: the views are provided by the
           application through the rtv_cb/dsv_cb callbacks */
        _sg.d3d11.cur_pass = 0;
        _sg.d3d11.cur_pass_id.id = SG_INVALID_ID;
        _sg.d3d11.num_rtvs = 1;
        _sg.d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg.d3d11.rtv_cb();
        for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            _sg.d3d11.cur_rtvs[i] = 0;
        }
        _sg.d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg.d3d11.dsv_cb();
        SOKOL_ASSERT(_sg.d3d11.cur_rtvs[0] && _sg.d3d11.cur_dsv);
    }
    /* apply the render-target- and depth-stencil-views */
    ID3D11DeviceContext_OMSetRenderTargets(_sg.d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg.d3d11.cur_rtvs, _sg.d3d11.cur_dsv);

    /* set viewport and scissor rect to cover whole screen */
    D3D11_VIEWPORT vp;
    memset(&vp, 0, sizeof(vp));
    vp.Width = (FLOAT) w;
    vp.Height = (FLOAT) h;
    vp.MaxDepth = 1.0f;
    ID3D11DeviceContext_RSSetViewports(_sg.d3d11.ctx, 1, &vp);
    D3D11_RECT rect;
    rect.left = 0;
    rect.top = 0;
    rect.right = w;
    rect.bottom = h;
    ID3D11DeviceContext_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect);

    /* perform clear action (only for attachments with SG_ACTION_CLEAR) */
    for (int i = 0; i < _sg.d3d11.num_rtvs; i++) {
        if (action->colors[i].action == SG_ACTION_CLEAR) {
            ID3D11DeviceContext_ClearRenderTargetView(_sg.d3d11.ctx, _sg.d3d11.cur_rtvs[i], action->colors[i].val);
        }
    }
    /* depth and stencil clears are combined into a single D3D11 call */
    UINT ds_flags = 0;
    if (action->depth.action == SG_ACTION_CLEAR) {
        ds_flags |= D3D11_CLEAR_DEPTH;
    }
    if (action->stencil.action == SG_ACTION_CLEAR) {
        ds_flags |= D3D11_CLEAR_STENCIL;
    }
    if ((0 != ds_flags) && _sg.d3d11.cur_dsv) {
        ID3D11DeviceContext_ClearDepthStencilView(_sg.d3d11.ctx, _sg.d3d11.cur_dsv, ds_flags, action->depth.val, action->stencil.val);
    }
}
7938 
7939 /* D3D11CalcSubresource only exists for C++ */
_sg_d3d11_calcsubresource(UINT mip_slice,UINT array_slice,UINT mip_levels)7940 _SOKOL_PRIVATE UINT _sg_d3d11_calcsubresource(UINT mip_slice, UINT array_slice, UINT mip_levels) {
7941     return mip_slice + array_slice * mip_levels;
7942 }
7943 
/* End the current render pass: resolve MSAA color attachments into their
   non-MSAA textures (so they can be sampled), then invalidate all cached
   pass- and pipeline-state. */
_SOKOL_PRIVATE void _sg_d3d11_end_pass(void) {
    SOKOL_ASSERT(_sg.d3d11.in_pass && _sg.d3d11.ctx);
    _sg.d3d11.in_pass = false;

    /* need to resolve MSAA render target into texture? */
    if (_sg.d3d11.cur_pass) {
        SOKOL_ASSERT(_sg.d3d11.cur_pass->slot.id == _sg.d3d11.cur_pass_id.id);
        for (int i = 0; i < _sg.d3d11.num_rtvs; i++) {
            _sg_attachment_t* cmn_att = &_sg.d3d11.cur_pass->cmn.color_atts[i];
            _sg_image_t* att_img = _sg.d3d11.cur_pass->d3d11.color_atts[i].image;
            SOKOL_ASSERT(att_img && (att_img->slot.id == cmn_att->image_id.id));
            if (att_img->cmn.sample_count > 1) {
                /* FIXME: support MSAA resolve into 3D texture */
                SOKOL_ASSERT(att_img->d3d11.tex2d && att_img->d3d11.texmsaa && !att_img->d3d11.tex3d);
                SOKOL_ASSERT(DXGI_FORMAT_UNKNOWN != att_img->d3d11.format);
                UINT dst_subres = _sg_d3d11_calcsubresource(cmn_att->mip_level, cmn_att->slice, att_img->cmn.num_mipmaps);
                ID3D11DeviceContext_ResolveSubresource(_sg.d3d11.ctx,
                    (ID3D11Resource*) att_img->d3d11.tex2d,     /* pDstResource */
                    dst_subres,                                 /* DstSubresource */
                    (ID3D11Resource*) att_img->d3d11.texmsaa,   /* pSrcResource */
                    0,                                          /* SrcSubresource */
                    att_img->d3d11.format);
            }
        }
    }
    /* invalidate cached pass/pipeline state so stale objects can't be reused */
    _sg.d3d11.cur_pass = 0;
    _sg.d3d11.cur_pass_id.id = SG_INVALID_ID;
    _sg.d3d11.cur_pipeline = 0;
    _sg.d3d11.cur_pipeline_id.id = SG_INVALID_ID;
    for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
        _sg.d3d11.cur_rtvs[i] = 0;
    }
    _sg.d3d11.cur_dsv = 0;
    _sg_d3d11_clear_state();
}
7979 
_sg_d3d11_apply_viewport(int x,int y,int w,int h,bool origin_top_left)7980 _SOKOL_PRIVATE void _sg_d3d11_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
7981     SOKOL_ASSERT(_sg.d3d11.ctx);
7982     SOKOL_ASSERT(_sg.d3d11.in_pass);
7983     D3D11_VIEWPORT vp;
7984     vp.TopLeftX = (FLOAT) x;
7985     vp.TopLeftY = (FLOAT) (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h)));
7986     vp.Width = (FLOAT) w;
7987     vp.Height = (FLOAT) h;
7988     vp.MinDepth = 0.0f;
7989     vp.MaxDepth = 1.0f;
7990     ID3D11DeviceContext_RSSetViewports(_sg.d3d11.ctx, 1, &vp);
7991 }
7992 
_sg_d3d11_apply_scissor_rect(int x,int y,int w,int h,bool origin_top_left)7993 _SOKOL_PRIVATE void _sg_d3d11_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
7994     SOKOL_ASSERT(_sg.d3d11.ctx);
7995     SOKOL_ASSERT(_sg.d3d11.in_pass);
7996     D3D11_RECT rect;
7997     rect.left = x;
7998     rect.top = (origin_top_left ? y : (_sg.d3d11.cur_height - (y + h)));
7999     rect.right = x + w;
8000     rect.bottom = origin_top_left ? (y + h) : (_sg.d3d11.cur_height - y);
8001     ID3D11DeviceContext_RSSetScissorRects(_sg.d3d11.ctx, 1, &rect);
8002 }
8003 
/* Apply a pipeline object: set the rasterizer-, depth-stencil- and blend-state
   objects, the primitive topology, input layout, vertex/pixel shaders, and the
   constant buffers owned by the pipeline's shader. Must be called inside a
   begin/end pass pair, before apply_bindings/apply_uniforms/draw. */
_SOKOL_PRIVATE void _sg_d3d11_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(_sg.d3d11.in_pass);
    SOKOL_ASSERT(pip->d3d11.rs && pip->d3d11.bs && pip->d3d11.dss && pip->d3d11.il);

    /* remember the current pipeline for the following apply_uniforms/draw calls */
    _sg.d3d11.cur_pipeline = pip;
    _sg.d3d11.cur_pipeline_id.id = pip->slot.id;
    /* an index format of DXGI_FORMAT_UNKNOWN means non-indexed rendering */
    _sg.d3d11.use_indexed_draw = (pip->d3d11.index_format != DXGI_FORMAT_UNKNOWN);

    ID3D11DeviceContext_RSSetState(_sg.d3d11.ctx, pip->d3d11.rs);
    ID3D11DeviceContext_OMSetDepthStencilState(_sg.d3d11.ctx, pip->d3d11.dss, pip->d3d11.stencil_ref);
    ID3D11DeviceContext_OMSetBlendState(_sg.d3d11.ctx, pip->d3d11.bs, pip->cmn.blend_color, 0xFFFFFFFF);
    ID3D11DeviceContext_IASetPrimitiveTopology(_sg.d3d11.ctx, pip->d3d11.topology);
    ID3D11DeviceContext_IASetInputLayout(_sg.d3d11.ctx, pip->d3d11.il);
    ID3D11DeviceContext_VSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.vs, NULL, 0);
    ID3D11DeviceContext_VSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_VS].cbufs);
    ID3D11DeviceContext_PSSetShader(_sg.d3d11.ctx, pip->shader->d3d11.fs, NULL, 0);
    ID3D11DeviceContext_PSSetConstantBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->d3d11.stage[SG_SHADERSTAGE_FS].cbufs);
}
8025 
/* Apply resource bindings for the next draw call: vertex buffers with offsets,
   the optional index buffer, and per-stage shader-resource-views and samplers.
   Unused slots are explicitly set to zero so stale bindings from a previous
   draw cannot leak through. */
_SOKOL_PRIVATE void _sg_d3d11_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(_sg.d3d11.in_pass);

    /* gather all the D3D11 resources into arrays */
    ID3D11Buffer* d3d11_ib = ib ? ib->d3d11.buf : 0;
    ID3D11Buffer* d3d11_vbs[SG_MAX_SHADERSTAGE_BUFFERS];
    UINT d3d11_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS];
    ID3D11ShaderResourceView* d3d11_vs_srvs[SG_MAX_SHADERSTAGE_IMAGES];
    ID3D11SamplerState* d3d11_vs_smps[SG_MAX_SHADERSTAGE_IMAGES];
    ID3D11ShaderResourceView* d3d11_fs_srvs[SG_MAX_SHADERSTAGE_IMAGES];
    ID3D11SamplerState* d3d11_fs_smps[SG_MAX_SHADERSTAGE_IMAGES];
    int i;
    /* vertex buffers (remaining slots are cleared) */
    for (i = 0; i < num_vbs; i++) {
        SOKOL_ASSERT(vbs[i]->d3d11.buf);
        d3d11_vbs[i] = vbs[i]->d3d11.buf;
        d3d11_vb_offsets[i] = vb_offsets[i];
    }
    for (; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) {
        d3d11_vbs[i] = 0;
        d3d11_vb_offsets[i] = 0;
    }
    /* vertex-stage images (SRV + sampler pairs, remaining slots cleared) */
    for (i = 0; i < num_vs_imgs; i++) {
        SOKOL_ASSERT(vs_imgs[i]->d3d11.srv);
        SOKOL_ASSERT(vs_imgs[i]->d3d11.smp);
        d3d11_vs_srvs[i] = vs_imgs[i]->d3d11.srv;
        d3d11_vs_smps[i] = vs_imgs[i]->d3d11.smp;
    }
    for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) {
        d3d11_vs_srvs[i] = 0;
        d3d11_vs_smps[i] = 0;
    }
    /* fragment-stage images (SRV + sampler pairs, remaining slots cleared) */
    for (i = 0; i < num_fs_imgs; i++) {
        SOKOL_ASSERT(fs_imgs[i]->d3d11.srv);
        SOKOL_ASSERT(fs_imgs[i]->d3d11.smp);
        d3d11_fs_srvs[i] = fs_imgs[i]->d3d11.srv;
        d3d11_fs_smps[i] = fs_imgs[i]->d3d11.smp;
    }
    for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) {
        d3d11_fs_srvs[i] = 0;
        d3d11_fs_smps[i] = 0;
    }

    /* hand the gathered arrays to D3D11 in single calls per binding type */
    ID3D11DeviceContext_IASetVertexBuffers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, d3d11_vbs, pip->d3d11.vb_strides, d3d11_vb_offsets);
    ID3D11DeviceContext_IASetIndexBuffer(_sg.d3d11.ctx, d3d11_ib, pip->d3d11.index_format, ib_offset);
    ID3D11DeviceContext_VSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_srvs);
    ID3D11DeviceContext_VSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_smps);
    ID3D11DeviceContext_PSSetShaderResources(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_srvs);
    ID3D11DeviceContext_PSSetSamplers(_sg.d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_smps);
}
8083 
/* Copy uniform data into one of the currently-bound shader's constant buffers.
   Only a full update of the whole uniform block is supported: num_bytes must
   exactly match the uniform block's declared size (checked by assert). */
_SOKOL_PRIVATE void _sg_d3d11_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
    _SOKOL_UNUSED(num_bytes);
    SOKOL_ASSERT(_sg.d3d11.ctx && _sg.d3d11.in_pass);
    SOKOL_ASSERT(data && (num_bytes > 0));
    SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
    SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline && _sg.d3d11.cur_pipeline->slot.id == _sg.d3d11.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.d3d11.cur_pipeline->shader && _sg.d3d11.cur_pipeline->shader->slot.id == _sg.d3d11.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(num_bytes == _sg.d3d11.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
    ID3D11Buffer* cb = _sg.d3d11.cur_pipeline->shader->d3d11.stage[stage_index].cbufs[ub_index];
    SOKOL_ASSERT(cb);
    /* a full UpdateSubresource (null destination box) replaces the whole constant buffer */
    ID3D11DeviceContext_UpdateSubresource(_sg.d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data, 0, 0);
}
8098 
_sg_d3d11_draw(int base_element,int num_elements,int num_instances)8099 _SOKOL_PRIVATE void _sg_d3d11_draw(int base_element, int num_elements, int num_instances) {
8100     SOKOL_ASSERT(_sg.d3d11.in_pass);
8101     if (_sg.d3d11.use_indexed_draw) {
8102         if (1 == num_instances) {
8103             ID3D11DeviceContext_DrawIndexed(_sg.d3d11.ctx, num_elements, base_element, 0);
8104         }
8105         else {
8106             ID3D11DeviceContext_DrawIndexedInstanced(_sg.d3d11.ctx, num_elements, num_instances, base_element, 0, 0);
8107         }
8108     }
8109     else {
8110         if (1 == num_instances) {
8111             ID3D11DeviceContext_Draw(_sg.d3d11.ctx, num_elements, base_element);
8112         }
8113         else {
8114             ID3D11DeviceContext_DrawInstanced(_sg.d3d11.ctx, num_elements, num_instances, base_element, 0);
8115         }
8116     }
8117 }
8118 
/* Called once per frame after all passes; the D3D11 backend has nothing to
   flush here, only validate that no pass is still open. */
_SOKOL_PRIVATE void _sg_d3d11_commit(void) {
    SOKOL_ASSERT(!_sg.d3d11.in_pass);
}
8122 
_sg_d3d11_update_buffer(_sg_buffer_t * buf,const void * data_ptr,uint32_t data_size)8123 _SOKOL_PRIVATE void _sg_d3d11_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) {
8124     SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
8125     SOKOL_ASSERT(_sg.d3d11.ctx);
8126     SOKOL_ASSERT(buf->d3d11.buf);
8127     D3D11_MAPPED_SUBRESOURCE d3d11_msr;
8128     HRESULT hr = ID3D11DeviceContext_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr);
8129     _SOKOL_UNUSED(hr);
8130     SOKOL_ASSERT(SUCCEEDED(hr));
8131     memcpy(d3d11_msr.pData, data_ptr, data_size);
8132     ID3D11DeviceContext_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0);
8133 }
8134 
/* Append data at the buffer's current append position.
   On the first append of a frame (new_frame == true) the buffer is mapped
   with WRITE_DISCARD to orphan the old content; subsequent appends use
   WRITE_NO_OVERWRITE so earlier data written this frame stays intact.
   Returns the number of bytes by which the caller must advance the append
   position (data_size rounded up to a multiple of 4). */
_SOKOL_PRIVATE uint32_t _sg_d3d11_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) {
    SOKOL_ASSERT(buf && data_ptr && (data_size > 0));
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(buf->d3d11.buf);
    D3D11_MAP map_type = new_frame ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE;
    D3D11_MAPPED_SUBRESOURCE d3d11_msr;
    HRESULT hr = ID3D11DeviceContext_Map(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0, map_type, 0, &d3d11_msr);
    _SOKOL_UNUSED(hr);
    SOKOL_ASSERT(SUCCEEDED(hr));
    uint8_t* dst_ptr = (uint8_t*)d3d11_msr.pData + buf->cmn.append_pos;
    memcpy(dst_ptr, data_ptr, data_size);
    ID3D11DeviceContext_Unmap(_sg.d3d11.ctx, (ID3D11Resource*)buf->d3d11.buf, 0);
    /* NOTE: rounding up to 4 is a requirement from WebGPU, but we want identical behaviour across all backends */
    return _sg_roundup(data_size, 4);
}
8150 
/* Copy new pixel data into a dynamic/stream image.
   Iterates over every face (6 for cube maps), array slice and mip level,
   maps each D3D11 subresource with WRITE_DISCARD, and copies the data.
   When the source row pitch is smaller than the mapped RowPitch (row
   alignment padding), the copy is done row by row. */
_SOKOL_PRIVATE void _sg_d3d11_update_image(_sg_image_t* img, const sg_image_content* data) {
    SOKOL_ASSERT(img && data);
    SOKOL_ASSERT(_sg.d3d11.ctx);
    SOKOL_ASSERT(img->d3d11.tex2d || img->d3d11.tex3d);
    /* pick the 3D- or 2D-texture resource (cube and array images live in tex2d) */
    ID3D11Resource* d3d11_res = 0;
    if (img->d3d11.tex3d) {
        d3d11_res = (ID3D11Resource*) img->d3d11.tex3d;
    }
    else {
        d3d11_res = (ID3D11Resource*) img->d3d11.tex2d;
    }
    SOKOL_ASSERT(d3d11_res);
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.depth:1;
    int subres_index = 0;
    HRESULT hr;
    _SOKOL_UNUSED(hr);
    D3D11_MAPPED_SUBRESOURCE d3d11_msr;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int slice_index = 0; slice_index < num_slices; slice_index++) {
            for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++, subres_index++) {
                SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS));
                /* mip dimensions are clamped to at least 1 pixel */
                const int mip_width = ((img->cmn.width>>mip_index)>0) ? img->cmn.width>>mip_index : 1;
                const int mip_height = ((img->cmn.height>>mip_index)>0) ? img->cmn.height>>mip_index : 1;
                const int src_pitch = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
                /* array slices share one subimage; carve out this slice's portion */
                const sg_subimage_content* subimg_content = &(data->subimage[face_index][mip_index]);
                const int slice_size = subimg_content->size / num_slices;
                const int slice_offset = slice_size * slice_index;
                const uint8_t* slice_ptr = ((const uint8_t*)subimg_content->ptr) + slice_offset;
                hr = ID3D11DeviceContext_Map(_sg.d3d11.ctx, d3d11_res, subres_index, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr);
                SOKOL_ASSERT(SUCCEEDED(hr));
                /* FIXME: need to handle difference in depth-pitch for 3D textures as well! */
                if (src_pitch == (int)d3d11_msr.RowPitch) {
                    /* pitches match: single bulk copy */
                    memcpy(d3d11_msr.pData, slice_ptr, slice_size);
                }
                else {
                    /* destination rows are padded: copy row by row */
                    SOKOL_ASSERT(src_pitch < (int)d3d11_msr.RowPitch);
                    const uint8_t* src_ptr = slice_ptr;
                    uint8_t* dst_ptr = (uint8_t*) d3d11_msr.pData;
                    for (int row_index = 0; row_index < mip_height; row_index++) {
                        memcpy(dst_ptr, src_ptr, src_pitch);
                        src_ptr += src_pitch;
                        dst_ptr += d3d11_msr.RowPitch;
                    }
                }
                ID3D11DeviceContext_Unmap(_sg.d3d11.ctx, d3d11_res, subres_index);
            }
        }
    }
}
8201 
8202 /*== METAL BACKEND IMPLEMENTATION ============================================*/
8203 #elif defined(SOKOL_METAL)
8204 
/* Objective-C reference-counting helpers: with ARC enabled, retain is a no-op
   and assigning nil (or an NSNull placeholder) drops the reference; with
   manual reference counting, explicit retain/release messages are sent. */
#if __has_feature(objc_arc)
#define _SG_OBJC_RETAIN(obj) { }
#define _SG_OBJC_RELEASE(obj) { obj = nil; }
#define _SG_OBJC_RELEASE_WITH_NULL(obj) { obj = [NSNull null]; }
#else
#define _SG_OBJC_RETAIN(obj) { [obj retain]; }
#define _SG_OBJC_RELEASE(obj) { [obj release]; obj = nil; }
#define _SG_OBJC_RELEASE_WITH_NULL(obj) { [obj release]; obj = [NSNull null]; }
#endif
8214 
8215 /*-- enum translation functions ----------------------------------------------*/
/* translate sg_action into the corresponding Metal load action */
_SOKOL_PRIVATE MTLLoadAction _sg_mtl_load_action(sg_action a) {
    switch (a) {
        case SG_ACTION_CLEAR:
            return MTLLoadActionClear;
        case SG_ACTION_LOAD:
            return MTLLoadActionLoad;
        case SG_ACTION_DONTCARE:
            return MTLLoadActionDontCare;
        default:
            SOKOL_UNREACHABLE;
            return (MTLLoadAction)0;
    }
}
8224 
/* translate sg_usage into Metal resource options for buffer creation:
   immutable buffers use shared storage, dynamic/stream buffers use
   write-combined CPU caching (plus managed storage on macOS) */
_SOKOL_PRIVATE MTLResourceOptions _sg_mtl_buffer_resource_options(sg_usage usg) {
    if (SG_USAGE_IMMUTABLE == usg) {
        return MTLResourceStorageModeShared;
    }
    else if ((SG_USAGE_DYNAMIC == usg) || (SG_USAGE_STREAM == usg)) {
        #if defined(_SG_TARGET_MACOS)
        return MTLCPUCacheModeWriteCombined|MTLResourceStorageModeManaged;
        #else
        return MTLCPUCacheModeWriteCombined;
        #endif
    }
    else {
        SOKOL_UNREACHABLE;
        return 0;
    }
}
8241 
/* translate sg_vertex_step into the corresponding Metal vertex step function */
_SOKOL_PRIVATE MTLVertexStepFunction _sg_mtl_step_function(sg_vertex_step step) {
    switch (step) {
        case SG_VERTEXSTEP_PER_VERTEX:
            return MTLVertexStepFunctionPerVertex;
        case SG_VERTEXSTEP_PER_INSTANCE:
            return MTLVertexStepFunctionPerInstance;
        default:
            SOKOL_UNREACHABLE;
            return (MTLVertexStepFunction)0;
    }
}
8249 
/* translate sg_vertex_format into the corresponding MTLVertexFormat */
_SOKOL_PRIVATE MTLVertexFormat _sg_mtl_vertex_format(sg_vertex_format fmt) {
    switch (fmt) {
        case SG_VERTEXFORMAT_FLOAT:     return MTLVertexFormatFloat;
        case SG_VERTEXFORMAT_FLOAT2:    return MTLVertexFormatFloat2;
        case SG_VERTEXFORMAT_FLOAT3:    return MTLVertexFormatFloat3;
        case SG_VERTEXFORMAT_FLOAT4:    return MTLVertexFormatFloat4;
        case SG_VERTEXFORMAT_BYTE4:     return MTLVertexFormatChar4;
        case SG_VERTEXFORMAT_BYTE4N:    return MTLVertexFormatChar4Normalized;
        case SG_VERTEXFORMAT_UBYTE4:    return MTLVertexFormatUChar4;
        case SG_VERTEXFORMAT_UBYTE4N:   return MTLVertexFormatUChar4Normalized;
        case SG_VERTEXFORMAT_SHORT2:    return MTLVertexFormatShort2;
        case SG_VERTEXFORMAT_SHORT2N:   return MTLVertexFormatShort2Normalized;
        case SG_VERTEXFORMAT_USHORT2N:  return MTLVertexFormatUShort2Normalized;
        case SG_VERTEXFORMAT_SHORT4:    return MTLVertexFormatShort4;
        case SG_VERTEXFORMAT_SHORT4N:   return MTLVertexFormatShort4Normalized;
        case SG_VERTEXFORMAT_USHORT4N:  return MTLVertexFormatUShort4Normalized;
        case SG_VERTEXFORMAT_UINT10_N2: return MTLVertexFormatUInt1010102Normalized;
        default: SOKOL_UNREACHABLE; return (MTLVertexFormat)0;
    }
}
8270 
/* translate sg_primitive_type into the corresponding Metal primitive type */
_SOKOL_PRIVATE MTLPrimitiveType _sg_mtl_primitive_type(sg_primitive_type t) {
    switch (t) {
        case SG_PRIMITIVETYPE_POINTS:
            return MTLPrimitiveTypePoint;
        case SG_PRIMITIVETYPE_LINES:
            return MTLPrimitiveTypeLine;
        case SG_PRIMITIVETYPE_LINE_STRIP:
            return MTLPrimitiveTypeLineStrip;
        case SG_PRIMITIVETYPE_TRIANGLES:
            return MTLPrimitiveTypeTriangle;
        case SG_PRIMITIVETYPE_TRIANGLE_STRIP:
            return MTLPrimitiveTypeTriangleStrip;
        default:
            SOKOL_UNREACHABLE;
            return (MTLPrimitiveType)0;
    }
}
8281 
/* translate sg_pixel_format into the corresponding MTLPixelFormat;
   returns MTLPixelFormatInvalid for formats without a Metal equivalent.
   BC* compressed formats are only mapped on macOS, PVRTC/ETC2 formats
   only on non-macOS targets (see the _SG_TARGET_MACOS conditional). */
_SOKOL_PRIVATE MTLPixelFormat _sg_mtl_pixel_format(sg_pixel_format fmt) {
    switch (fmt) {
        case SG_PIXELFORMAT_R8:                     return MTLPixelFormatR8Unorm;
        case SG_PIXELFORMAT_R8SN:                   return MTLPixelFormatR8Snorm;
        case SG_PIXELFORMAT_R8UI:                   return MTLPixelFormatR8Uint;
        case SG_PIXELFORMAT_R8SI:                   return MTLPixelFormatR8Sint;
        case SG_PIXELFORMAT_R16:                    return MTLPixelFormatR16Unorm;
        case SG_PIXELFORMAT_R16SN:                  return MTLPixelFormatR16Snorm;
        case SG_PIXELFORMAT_R16UI:                  return MTLPixelFormatR16Uint;
        case SG_PIXELFORMAT_R16SI:                  return MTLPixelFormatR16Sint;
        case SG_PIXELFORMAT_R16F:                   return MTLPixelFormatR16Float;
        case SG_PIXELFORMAT_RG8:                    return MTLPixelFormatRG8Unorm;
        case SG_PIXELFORMAT_RG8SN:                  return MTLPixelFormatRG8Snorm;
        case SG_PIXELFORMAT_RG8UI:                  return MTLPixelFormatRG8Uint;
        case SG_PIXELFORMAT_RG8SI:                  return MTLPixelFormatRG8Sint;
        case SG_PIXELFORMAT_R32UI:                  return MTLPixelFormatR32Uint;
        case SG_PIXELFORMAT_R32SI:                  return MTLPixelFormatR32Sint;
        case SG_PIXELFORMAT_R32F:                   return MTLPixelFormatR32Float;
        case SG_PIXELFORMAT_RG16:                   return MTLPixelFormatRG16Unorm;
        case SG_PIXELFORMAT_RG16SN:                 return MTLPixelFormatRG16Snorm;
        case SG_PIXELFORMAT_RG16UI:                 return MTLPixelFormatRG16Uint;
        case SG_PIXELFORMAT_RG16SI:                 return MTLPixelFormatRG16Sint;
        case SG_PIXELFORMAT_RG16F:                  return MTLPixelFormatRG16Float;
        case SG_PIXELFORMAT_RGBA8:                  return MTLPixelFormatRGBA8Unorm;
        case SG_PIXELFORMAT_RGBA8SN:                return MTLPixelFormatRGBA8Snorm;
        case SG_PIXELFORMAT_RGBA8UI:                return MTLPixelFormatRGBA8Uint;
        case SG_PIXELFORMAT_RGBA8SI:                return MTLPixelFormatRGBA8Sint;
        case SG_PIXELFORMAT_BGRA8:                  return MTLPixelFormatBGRA8Unorm;
        case SG_PIXELFORMAT_RGB10A2:                return MTLPixelFormatRGB10A2Unorm;
        case SG_PIXELFORMAT_RG11B10F:               return MTLPixelFormatRG11B10Float;
        case SG_PIXELFORMAT_RG32UI:                 return MTLPixelFormatRG32Uint;
        case SG_PIXELFORMAT_RG32SI:                 return MTLPixelFormatRG32Sint;
        case SG_PIXELFORMAT_RG32F:                  return MTLPixelFormatRG32Float;
        case SG_PIXELFORMAT_RGBA16:                 return MTLPixelFormatRGBA16Unorm;
        case SG_PIXELFORMAT_RGBA16SN:               return MTLPixelFormatRGBA16Snorm;
        case SG_PIXELFORMAT_RGBA16UI:               return MTLPixelFormatRGBA16Uint;
        case SG_PIXELFORMAT_RGBA16SI:               return MTLPixelFormatRGBA16Sint;
        case SG_PIXELFORMAT_RGBA16F:                return MTLPixelFormatRGBA16Float;
        case SG_PIXELFORMAT_RGBA32UI:               return MTLPixelFormatRGBA32Uint;
        case SG_PIXELFORMAT_RGBA32SI:               return MTLPixelFormatRGBA32Sint;
        case SG_PIXELFORMAT_RGBA32F:                return MTLPixelFormatRGBA32Float;
        case SG_PIXELFORMAT_DEPTH:                  return MTLPixelFormatDepth32Float;
        case SG_PIXELFORMAT_DEPTH_STENCIL:          return MTLPixelFormatDepth32Float_Stencil8;
        #if defined(_SG_TARGET_MACOS)
        case SG_PIXELFORMAT_BC1_RGBA:               return MTLPixelFormatBC1_RGBA;
        case SG_PIXELFORMAT_BC2_RGBA:               return MTLPixelFormatBC2_RGBA;
        case SG_PIXELFORMAT_BC3_RGBA:               return MTLPixelFormatBC3_RGBA;
        case SG_PIXELFORMAT_BC4_R:                  return MTLPixelFormatBC4_RUnorm;
        case SG_PIXELFORMAT_BC4_RSN:                return MTLPixelFormatBC4_RSnorm;
        case SG_PIXELFORMAT_BC5_RG:                 return MTLPixelFormatBC5_RGUnorm;
        case SG_PIXELFORMAT_BC5_RGSN:               return MTLPixelFormatBC5_RGSnorm;
        case SG_PIXELFORMAT_BC6H_RGBF:              return MTLPixelFormatBC6H_RGBFloat;
        case SG_PIXELFORMAT_BC6H_RGBUF:             return MTLPixelFormatBC6H_RGBUfloat;
        case SG_PIXELFORMAT_BC7_RGBA:               return MTLPixelFormatBC7_RGBAUnorm;
        #else
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:         return MTLPixelFormatPVRTC_RGB_2BPP;
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:         return MTLPixelFormatPVRTC_RGB_4BPP;
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:        return MTLPixelFormatPVRTC_RGBA_2BPP;
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:        return MTLPixelFormatPVRTC_RGBA_4BPP;
        case SG_PIXELFORMAT_ETC2_RGB8:              return MTLPixelFormatETC2_RGB8;
        case SG_PIXELFORMAT_ETC2_RGB8A1:            return MTLPixelFormatETC2_RGB8A1;
        case SG_PIXELFORMAT_ETC2_RGBA8:             return MTLPixelFormatEAC_RGBA8;
        case SG_PIXELFORMAT_ETC2_RG11:              return MTLPixelFormatEAC_RG11Unorm;
        case SG_PIXELFORMAT_ETC2_RG11SN:            return MTLPixelFormatEAC_RG11Snorm;
        #endif
        default: return MTLPixelFormatInvalid;
    }
}
8350 
/* translate an sg_color_mask bitfield into the equivalent Metal
   color-write mask, one channel bit at a time */
_SOKOL_PRIVATE MTLColorWriteMask _sg_mtl_color_write_mask(sg_color_mask m) {
    MTLColorWriteMask res = MTLColorWriteMaskNone;
    if (0 != (m & SG_COLORMASK_R)) {
        res |= MTLColorWriteMaskRed;
    }
    if (0 != (m & SG_COLORMASK_G)) {
        res |= MTLColorWriteMaskGreen;
    }
    if (0 != (m & SG_COLORMASK_B)) {
        res |= MTLColorWriteMaskBlue;
    }
    if (0 != (m & SG_COLORMASK_A)) {
        res |= MTLColorWriteMaskAlpha;
    }
    return res;
}
8367 
/* translate sg_blend_op into the corresponding Metal blend operation */
_SOKOL_PRIVATE MTLBlendOperation _sg_mtl_blend_op(sg_blend_op op) {
    switch (op) {
        case SG_BLENDOP_ADD:
            return MTLBlendOperationAdd;
        case SG_BLENDOP_SUBTRACT:
            return MTLBlendOperationSubtract;
        case SG_BLENDOP_REVERSE_SUBTRACT:
            return MTLBlendOperationReverseSubtract;
        default:
            SOKOL_UNREACHABLE;
            return (MTLBlendOperation)0;
    }
}
8376 
/* map sokol-gfx blend factors 1:1 to their Metal counterparts */
_SOKOL_PRIVATE MTLBlendFactor _sg_mtl_blend_factor(sg_blend_factor f) {
    switch (f) {
        case SG_BLENDFACTOR_ZERO:                   return MTLBlendFactorZero;
        case SG_BLENDFACTOR_ONE:                    return MTLBlendFactorOne;
        case SG_BLENDFACTOR_SRC_COLOR:              return MTLBlendFactorSourceColor;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR:    return MTLBlendFactorOneMinusSourceColor;
        case SG_BLENDFACTOR_SRC_ALPHA:              return MTLBlendFactorSourceAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA:    return MTLBlendFactorOneMinusSourceAlpha;
        case SG_BLENDFACTOR_DST_COLOR:              return MTLBlendFactorDestinationColor;
        case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR:    return MTLBlendFactorOneMinusDestinationColor;
        case SG_BLENDFACTOR_DST_ALPHA:              return MTLBlendFactorDestinationAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA:    return MTLBlendFactorOneMinusDestinationAlpha;
        case SG_BLENDFACTOR_SRC_ALPHA_SATURATED:    return MTLBlendFactorSourceAlphaSaturated;
        case SG_BLENDFACTOR_BLEND_COLOR:            return MTLBlendFactorBlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR:  return MTLBlendFactorOneMinusBlendColor;
        case SG_BLENDFACTOR_BLEND_ALPHA:            return MTLBlendFactorBlendAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA:  return MTLBlendFactorOneMinusBlendAlpha;
        /* only reached for invalid/uninitialized enum values */
        default: SOKOL_UNREACHABLE; return (MTLBlendFactor)0;
    }
}
8397 
/* map sokol-gfx depth/stencil compare funcs 1:1 to Metal compare functions */
_SOKOL_PRIVATE MTLCompareFunction _sg_mtl_compare_func(sg_compare_func f) {
    switch (f) {
        case SG_COMPAREFUNC_NEVER:          return MTLCompareFunctionNever;
        case SG_COMPAREFUNC_LESS:           return MTLCompareFunctionLess;
        case SG_COMPAREFUNC_EQUAL:          return MTLCompareFunctionEqual;
        case SG_COMPAREFUNC_LESS_EQUAL:     return MTLCompareFunctionLessEqual;
        case SG_COMPAREFUNC_GREATER:        return MTLCompareFunctionGreater;
        case SG_COMPAREFUNC_NOT_EQUAL:      return MTLCompareFunctionNotEqual;
        case SG_COMPAREFUNC_GREATER_EQUAL:  return MTLCompareFunctionGreaterEqual;
        case SG_COMPAREFUNC_ALWAYS:         return MTLCompareFunctionAlways;
        /* only reached for invalid enum values */
        default: SOKOL_UNREACHABLE; return (MTLCompareFunction)0;
    }
}
8411 
/* map sokol-gfx stencil ops 1:1 to Metal stencil operations */
_SOKOL_PRIVATE MTLStencilOperation _sg_mtl_stencil_op(sg_stencil_op op) {
    switch (op) {
        case SG_STENCILOP_KEEP:         return MTLStencilOperationKeep;
        case SG_STENCILOP_ZERO:         return MTLStencilOperationZero;
        case SG_STENCILOP_REPLACE:      return MTLStencilOperationReplace;
        case SG_STENCILOP_INCR_CLAMP:   return MTLStencilOperationIncrementClamp;
        case SG_STENCILOP_DECR_CLAMP:   return MTLStencilOperationDecrementClamp;
        case SG_STENCILOP_INVERT:       return MTLStencilOperationInvert;
        case SG_STENCILOP_INCR_WRAP:    return MTLStencilOperationIncrementWrap;
        case SG_STENCILOP_DECR_WRAP:    return MTLStencilOperationDecrementWrap;
        /* only reached for invalid enum values */
        default: SOKOL_UNREACHABLE; return (MTLStencilOperation)0;
    }
}
8425 
/* map the sokol-gfx cull mode to the Metal cull mode enum */
_SOKOL_PRIVATE MTLCullMode _sg_mtl_cull_mode(sg_cull_mode m) {
    if (m == SG_CULLMODE_NONE) {
        return MTLCullModeNone;
    }
    else if (m == SG_CULLMODE_FRONT) {
        return MTLCullModeFront;
    }
    else if (m == SG_CULLMODE_BACK) {
        return MTLCullModeBack;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLCullMode)0;
}
8434 
/* map the sokol-gfx front-face winding to the Metal winding enum */
_SOKOL_PRIVATE MTLWinding _sg_mtl_winding(sg_face_winding w) {
    if (w == SG_FACEWINDING_CW) {
        return MTLWindingClockwise;
    }
    else if (w == SG_FACEWINDING_CCW) {
        return MTLWindingCounterClockwise;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLWinding)0;
}
8442 
/* map the sokol-gfx index type to the Metal index type
   (SG_INDEXTYPE_NONE must not be passed in, there is no Metal equivalent) */
_SOKOL_PRIVATE MTLIndexType _sg_mtl_index_type(sg_index_type t) {
    if (t == SG_INDEXTYPE_UINT16) {
        return MTLIndexTypeUInt16;
    }
    else if (t == SG_INDEXTYPE_UINT32) {
        return MTLIndexTypeUInt32;
    }
    SOKOL_UNREACHABLE;
    return (MTLIndexType)0;
}
8450 
/* size in bytes of a single index (0 for non-indexed rendering) */
_SOKOL_PRIVATE NSUInteger _sg_mtl_index_size(sg_index_type t) {
    if (t == SG_INDEXTYPE_NONE) {
        return 0;
    }
    else if (t == SG_INDEXTYPE_UINT16) {
        return 2;
    }
    else if (t == SG_INDEXTYPE_UINT32) {
        return 4;
    }
    SOKOL_UNREACHABLE;
    return 0;
}
8459 
/* map the sokol-gfx image type to the Metal texture type */
_SOKOL_PRIVATE MTLTextureType _sg_mtl_texture_type(sg_image_type t) {
    if (t == SG_IMAGETYPE_2D) {
        return MTLTextureType2D;
    }
    else if (t == SG_IMAGETYPE_CUBE) {
        return MTLTextureTypeCube;
    }
    else if (t == SG_IMAGETYPE_3D) {
        return MTLTextureType3D;
    }
    else if (t == SG_IMAGETYPE_ARRAY) {
        return MTLTextureType2DArray;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLTextureType)0;
}
8469 
/* true if fmt is one of the four PVRTC compressed pixel formats
   (these need special-case handling when copying image data) */
_SOKOL_PRIVATE bool _sg_mtl_is_pvrtc(sg_pixel_format fmt) {
    return (fmt == SG_PIXELFORMAT_PVRTC_RGB_2BPP) ||
           (fmt == SG_PIXELFORMAT_PVRTC_RGB_4BPP) ||
           (fmt == SG_PIXELFORMAT_PVRTC_RGBA_2BPP) ||
           (fmt == SG_PIXELFORMAT_PVRTC_RGBA_4BPP);
}
8481 
/* map the sokol-gfx texture wrap mode to the Metal sampler address mode,
   clamp-to-border is only available on macOS and falls back to
   clamp-to-edge on other targets */
_SOKOL_PRIVATE MTLSamplerAddressMode _sg_mtl_address_mode(sg_wrap w) {
    switch (w) {
        case SG_WRAP_REPEAT:            return MTLSamplerAddressModeRepeat;
        case SG_WRAP_CLAMP_TO_EDGE:     return MTLSamplerAddressModeClampToEdge;
        #if defined(_SG_TARGET_MACOS)
        case SG_WRAP_CLAMP_TO_BORDER:   return MTLSamplerAddressModeClampToBorderColor;
        #else
        /* clamp-to-border not supported on iOS, fall back to clamp-to-edge */
        case SG_WRAP_CLAMP_TO_BORDER:   return MTLSamplerAddressModeClampToEdge;
        #endif
        case SG_WRAP_MIRRORED_REPEAT:   return MTLSamplerAddressModeMirrorRepeat;
        /* only reached for invalid enum values */
        default: SOKOL_UNREACHABLE; return (MTLSamplerAddressMode)0;
    }
}
8496 
8497 #if defined(_SG_TARGET_MACOS)
/* map the sokol-gfx border color to the Metal sampler border color
   (only compiled on macOS, see the surrounding #if block) */
_SOKOL_PRIVATE MTLSamplerBorderColor _sg_mtl_border_color(sg_border_color c) {
    if (c == SG_BORDERCOLOR_TRANSPARENT_BLACK) {
        return MTLSamplerBorderColorTransparentBlack;
    }
    else if (c == SG_BORDERCOLOR_OPAQUE_BLACK) {
        return MTLSamplerBorderColorOpaqueBlack;
    }
    else if (c == SG_BORDERCOLOR_OPAQUE_WHITE) {
        return MTLSamplerBorderColorOpaqueWhite;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLSamplerBorderColor)0;
}
8506 #endif
8507 
/* extract the min/mag part of a sokol-gfx filter (the mipmap part is
   handled separately by _sg_mtl_mip_filter()) */
_SOKOL_PRIVATE MTLSamplerMinMagFilter _sg_mtl_minmag_filter(sg_filter f) {
    if ((f == SG_FILTER_NEAREST) ||
        (f == SG_FILTER_NEAREST_MIPMAP_NEAREST) ||
        (f == SG_FILTER_NEAREST_MIPMAP_LINEAR))
    {
        return MTLSamplerMinMagFilterNearest;
    }
    else if ((f == SG_FILTER_LINEAR) ||
             (f == SG_FILTER_LINEAR_MIPMAP_NEAREST) ||
             (f == SG_FILTER_LINEAR_MIPMAP_LINEAR))
    {
        return MTLSamplerMinMagFilterLinear;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLSamplerMinMagFilter)0;
}
8522 
/* extract the mipmap part of a sokol-gfx filter (the min/mag part is
   handled separately by _sg_mtl_minmag_filter()) */
_SOKOL_PRIVATE MTLSamplerMipFilter _sg_mtl_mip_filter(sg_filter f) {
    if ((f == SG_FILTER_NEAREST) || (f == SG_FILTER_LINEAR)) {
        /* no mipmapping requested */
        return MTLSamplerMipFilterNotMipmapped;
    }
    else if ((f == SG_FILTER_NEAREST_MIPMAP_NEAREST) ||
             (f == SG_FILTER_LINEAR_MIPMAP_NEAREST))
    {
        return MTLSamplerMipFilterNearest;
    }
    else if ((f == SG_FILTER_NEAREST_MIPMAP_LINEAR) ||
             (f == SG_FILTER_LINEAR_MIPMAP_LINEAR))
    {
        return MTLSamplerMipFilterLinear;
    }
    /* only reached for invalid enum values */
    SOKOL_UNREACHABLE;
    return (MTLSamplerMipFilter)0;
}
8538 
8539 /*-- a pool for all Metal resource objects, with deferred release queue -------*/
8540 
/* initialize the Metal object id-pool: an NSMutableArray which keeps all
   currently alive Metal objects alive (sokol-gfx structs only store uint32_t
   slot indices into this array), plus a free-slot queue and a circular
   deferred-release queue of the same capacity
*/
_SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) {
    /* slot budget: per-resource-type multipliers times 2x head-room
       (NOTE(review): the 2x presumably covers objects lingering in the
       deferred-release queue, and the multipliers the max number of Metal
       objects per sokol-gfx resource type -- confirm against resource structs)
    */
    _sg.mtl.idpool.num_slots = 2 *
        (
            2 * desc->buffer_pool_size +
            5 * desc->image_pool_size +
            4 * desc->shader_pool_size +
            2 * desc->pipeline_pool_size +
            desc->pass_pool_size
        );
    /* fill the pool with NSNull placeholders (NSMutableArray can't hold nil) */
    _sg.mtl.idpool.pool = [NSMutableArray arrayWithCapacity:_sg.mtl.idpool.num_slots];
    _SG_OBJC_RETAIN(_sg.mtl.idpool.pool);
    NSNull* null = [NSNull null];
    for (uint32_t i = 0; i < _sg.mtl.idpool.num_slots; i++) {
        [_sg.mtl.idpool.pool addObject:null];
    }
    SOKOL_ASSERT([_sg.mtl.idpool.pool count] == _sg.mtl.idpool.num_slots);
    /* a queue of currently free slot indices */
    _sg.mtl.idpool.free_queue_top = 0;
    _sg.mtl.idpool.free_queue = (uint32_t*)SOKOL_MALLOC(_sg.mtl.idpool.num_slots * sizeof(uint32_t));
    /* pool slot 0 is reserved (used as 'invalid' marker), so slot indices
       are pushed in descending order starting at num_slots-1, down to 1 */
    for (int i = _sg.mtl.idpool.num_slots-1; i >= 1; i--) {
        _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top++] = (uint32_t)i;
    }
    /* a circular queue which holds release items (the frame index when a
       resource is safe to release, and the resource's pool slot index)
    */
    _sg.mtl.idpool.release_queue_front = 0;
    _sg.mtl.idpool.release_queue_back = 0;
    _sg.mtl.idpool.release_queue = (_sg_mtl_release_item_t*)SOKOL_MALLOC(_sg.mtl.idpool.num_slots * sizeof(_sg_mtl_release_item_t));
    for (uint32_t i = 0; i < _sg.mtl.idpool.num_slots; i++) {
        _sg.mtl.idpool.release_queue[i].frame_index = 0;
        _sg.mtl.idpool.release_queue[i].slot_index = _SG_MTL_INVALID_SLOT_INDEX;
    }
}
8576 
/* tear down the id-pool: release the NSMutableArray holding the Metal
   objects and free the two queue arrays */
_SOKOL_PRIVATE void _sg_mtl_destroy_pool(void) {
    _SG_OBJC_RELEASE(_sg.mtl.idpool.pool);
    SOKOL_FREE(_sg.mtl.idpool.free_queue);
    _sg.mtl.idpool.free_queue = 0;
    SOKOL_FREE(_sg.mtl.idpool.release_queue);
    _sg.mtl.idpool.release_queue = 0;
}
8582 
/* pop the next free resource pool slot off the free-queue
   (slot index 0 is reserved and never handed out) */
_SOKOL_PRIVATE uint32_t _sg_mtl_alloc_pool_slot(void) {
    SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top > 0);
    _sg.mtl.idpool.free_queue_top--;
    const uint32_t slot = _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top];
    SOKOL_ASSERT((slot > 0) && (slot < _sg.mtl.idpool.num_slots));
    return slot;
}
8590 
/* push a no-longer-used resource pool slot back onto the free-queue */
_SOKOL_PRIVATE void _sg_mtl_free_pool_slot(uint32_t slot_index) {
    SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
    SOKOL_ASSERT(_sg.mtl.idpool.free_queue_top < _sg.mtl.idpool.num_slots);
    _sg.mtl.idpool.free_queue[_sg.mtl.idpool.free_queue_top] = slot_index;
    _sg.mtl.idpool.free_queue_top++;
}
8597 
/* store an Objective-C resource object in the pool and return its slot
   index, or _SG_MTL_INVALID_SLOT_INDEX if 'nil' was passed in */
_SOKOL_PRIVATE uint32_t _sg_mtl_add_resource(id res) {
    if (res == nil) {
        return _SG_MTL_INVALID_SLOT_INDEX;
    }
    const uint32_t slot = _sg_mtl_alloc_pool_slot();
    /* the freshly allocated slot must still hold the NSNull placeholder */
    SOKOL_ASSERT([NSNull null] == _sg.mtl.idpool.pool[slot]);
    _sg.mtl.idpool.pool[slot] = res;
    return slot;
}
8608 
/*  mark an MTLResource for deferred release: the item is put into the
    deferred-release queue together with the frame index at which the
    resource can actually be released (a few frames in the future, see
    _sg_mtl_garbage_collect()); the special pool index
    _SG_MTL_INVALID_SLOT_INDEX is ignored (this means that a nil value
    was provided to _sg_mtl_add_resource())
*/
_SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, uint32_t slot_index) {
    if (slot_index == _SG_MTL_INVALID_SLOT_INDEX) {
        return;
    }
    SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
    SOKOL_ASSERT([NSNull null] != _sg.mtl.idpool.pool[slot_index]);
    /* grab the current front item and advance the circular queue's front index */
    int release_index = _sg.mtl.idpool.release_queue_front++;
    if (_sg.mtl.idpool.release_queue_front >= _sg.mtl.idpool.num_slots) {
        /* wrap-around */
        _sg.mtl.idpool.release_queue_front = 0;
    }
    /* release queue full? */
    SOKOL_ASSERT(_sg.mtl.idpool.release_queue_front != _sg.mtl.idpool.release_queue_back);
    SOKOL_ASSERT(0 == _sg.mtl.idpool.release_queue[release_index].frame_index);
    /* the GPU may still use the resource for SG_NUM_INFLIGHT_FRAMES, +1 for safety */
    const uint32_t safe_to_release_frame_index = frame_index + SG_NUM_INFLIGHT_FRAMES + 1;
    _sg.mtl.idpool.release_queue[release_index].frame_index = safe_to_release_frame_index;
    _sg.mtl.idpool.release_queue[release_index].slot_index = slot_index;
}
8632 
/* run a garbage-collection pass: walk the deferred-release queue from the
   back and release every item whose safe-to-release frame index has been
   reached; the queue is ordered by frame index, so the walk can stop at
   the first item that is still too young */
_SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) {
    while (_sg.mtl.idpool.release_queue_back != _sg.mtl.idpool.release_queue_front) {
        if (frame_index < _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].frame_index) {
            /* don't need to check further, release-items past this are too young */
            break;
        }
        /* safe to release this resource */
        const uint32_t slot_index = _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index;
        SOKOL_ASSERT((slot_index > 0) && (slot_index < _sg.mtl.idpool.num_slots));
        SOKOL_ASSERT(_sg.mtl.idpool.pool[slot_index] != [NSNull null]);
        /* replacing the pool entry with NSNull drops the last strong reference */
        _SG_OBJC_RELEASE_WITH_NULL(_sg.mtl.idpool.pool[slot_index]);
        /* put the now free pool index back on the free queue */
        _sg_mtl_free_pool_slot(slot_index);
        /* reset the release queue slot and advance the back index */
        _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].frame_index = 0;
        _sg.mtl.idpool.release_queue[_sg.mtl.idpool.release_queue_back].slot_index = _SG_MTL_INVALID_SLOT_INDEX;
        _sg.mtl.idpool.release_queue_back++;
        if (_sg.mtl.idpool.release_queue_back >= _sg.mtl.idpool.num_slots) {
            /* wrap-around */
            _sg.mtl.idpool.release_queue_back = 0;
        }
    }
}
8657 
/* look up the Objective-C object stored in a resource pool slot */
_SOKOL_PRIVATE id _sg_mtl_id(uint32_t slot_index) {
    return [_sg.mtl.idpool.pool objectAtIndex:slot_index];
}
8661 
/* initialize the shared sampler-state cache (identical sampler descriptions
   are mapped to the same MTLSamplerState object) */
_SOKOL_PRIVATE void _sg_mtl_init_sampler_cache(const sg_desc* desc) {
    SOKOL_ASSERT(desc->sampler_cache_size > 0);
    _sg_smpcache_init(&_sg.mtl.sampler_cache, desc->sampler_cache_size);
}
8666 
/* destroy the sampler cache: queue all cached MTLSamplerState objects for
   deferred release, then discard the cache's own memory */
_SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) {
    SOKOL_ASSERT(_sg.mtl.sampler_cache.items);
    SOKOL_ASSERT(_sg.mtl.sampler_cache.num_items <= _sg.mtl.sampler_cache.capacity);
    for (int i = 0; i < _sg.mtl.sampler_cache.num_items; i++) {
        /* the cached 'sampler handle' is the object's id-pool slot index */
        _sg_mtl_release_resource(frame_index, (uint32_t)_sg_smpcache_sampler(&_sg.mtl.sampler_cache, i));
    }
    _sg_smpcache_discard(&_sg.mtl.sampler_cache);
}
8676 
8677 /*
8678     create and add an MTLSamplerStateObject and return its resource pool index,
8679     reuse identical sampler state if one exists
8680 */
_sg_mtl_create_sampler(id<MTLDevice> mtl_device,const sg_image_desc * img_desc)8681 _SOKOL_PRIVATE uint32_t _sg_mtl_create_sampler(id<MTLDevice> mtl_device, const sg_image_desc* img_desc) {
8682     SOKOL_ASSERT(img_desc);
8683     int index = _sg_smpcache_find_item(&_sg.mtl.sampler_cache, img_desc);
8684     if (index >= 0) {
8685         /* reuse existing sampler */
8686         return (uint32_t) _sg_smpcache_sampler(&_sg.mtl.sampler_cache, index);
8687     }
8688     else {
8689         /* create a new Metal sampler state object and add to sampler cache */
8690         MTLSamplerDescriptor* mtl_desc = [[MTLSamplerDescriptor alloc] init];
8691         mtl_desc.sAddressMode = _sg_mtl_address_mode(img_desc->wrap_u);
8692         mtl_desc.tAddressMode = _sg_mtl_address_mode(img_desc->wrap_v);
8693         if (SG_IMAGETYPE_3D == img_desc->type) {
8694             mtl_desc.rAddressMode = _sg_mtl_address_mode(img_desc->wrap_w);
8695         }
8696         #if defined(_SG_TARGET_MACOS)
8697             mtl_desc.borderColor = _sg_mtl_border_color(img_desc->border_color);
8698         #endif
8699         mtl_desc.minFilter = _sg_mtl_minmag_filter(img_desc->min_filter);
8700         mtl_desc.magFilter = _sg_mtl_minmag_filter(img_desc->mag_filter);
8701         mtl_desc.mipFilter = _sg_mtl_mip_filter(img_desc->min_filter);
8702         mtl_desc.lodMinClamp = img_desc->min_lod;
8703         mtl_desc.lodMaxClamp = img_desc->max_lod;
8704         mtl_desc.maxAnisotropy = img_desc->max_anisotropy;
8705         mtl_desc.normalizedCoordinates = YES;
8706         id<MTLSamplerState> mtl_sampler = [mtl_device newSamplerStateWithDescriptor:mtl_desc];
8707         _SG_OBJC_RELEASE(mtl_desc);
8708         uint32_t sampler_handle = _sg_mtl_add_resource(mtl_sampler);
8709         _sg_smpcache_add_item(&_sg.mtl.sampler_cache, img_desc, sampler_handle);
8710         return sampler_handle;
8711     }
8712 }
8713 
/* invalidate the state cache used for redundant-state-change elimination */
_SOKOL_PRIVATE void _sg_mtl_clear_state_cache(void) {
    memset(&_sg.mtl.state_cache, 0, sizeof(_sg.mtl.state_cache));
}
8717 
/* initialize the runtime capability flags, resource limits and the
   per-pixel-format feature table for the current Metal target,
   see: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf */
_SOKOL_PRIVATE void _sg_mtl_init_caps(void) {
    /* select the reported backend enum by compile target */
    #if defined(_SG_TARGET_MACOS)
        _sg.backend = SG_BACKEND_METAL_MACOS;
    #elif defined(_SG_TARGET_IOS)
        #if defined(_SG_TARGET_IOS_SIMULATOR)
            _sg.backend = SG_BACKEND_METAL_SIMULATOR;
        #else
            _sg.backend = SG_BACKEND_METAL_IOS;
        #endif
    #endif
    /* feature flags (clamp-to-border sampling is macOS-only) */
    _sg.features.instancing = true;
    _sg.features.origin_top_left = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    #if defined(_SG_TARGET_MACOS)
        _sg.features.image_clamp_to_border = true;
    #else
        _sg.features.image_clamp_to_border = false;
    #endif

    /* resource limits (conservative per-platform values) */
    #if defined(_SG_TARGET_MACOS)
        _sg.limits.max_image_size_2d = 16 * 1024;
        _sg.limits.max_image_size_cube = 16 * 1024;
        _sg.limits.max_image_size_3d = 2 * 1024;
        _sg.limits.max_image_size_array = 16 * 1024;
        _sg.limits.max_image_array_layers = 2 * 1024;
    #else
        /* newer iOS devices support 16k textures */
        _sg.limits.max_image_size_2d = 8 * 1024;
        _sg.limits.max_image_size_cube = 8 * 1024;
        _sg.limits.max_image_size_3d = 2 * 1024;
        _sg.limits.max_image_size_array = 8 * 1024;
        _sg.limits.max_image_array_layers = 2 * 1024;
    #endif
    _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES;

    /* per-pixel-format capabilities (the _sg_pixelformat_* helpers set
       combinations of sample/filter/render/blend/msaa/depth flags) */
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_R16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R32F]);
    #else
        _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RG16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG11B10F]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    #else
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    #else
        _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    #endif
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
    #else
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16]);
        _sg_pixelformat_sfbr(&_sg.formats[SG_PIXELFORMAT_RGBA16SN]);
    #endif
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
        _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    #else
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
        _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    #endif
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);
    /* compressed formats: BC on macOS, PVRTC/ETC2 on iOS */
    #if defined(_SG_TARGET_MACOS)
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]);
    #else
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_2BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGB_4BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_2BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_PVRTC_RGBA_4BPP]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGB8A1]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RGBA8]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11]);
        _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_ETC2_RG11SN]);
    #endif
}
8855 
/*-- main Metal backend state and functions ----------------------------------*/

/* one-time Metal backend setup: id-pool, sampler cache, command queue,
   per-inflight-frame uniform buffers and capability tables */
_SOKOL_PRIVATE void _sg_mtl_setup_backend(const sg_desc* desc) {
    /* assume already zero-initialized */
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.metal.device);
    SOKOL_ASSERT(desc->context.metal.renderpass_descriptor_cb);
    SOKOL_ASSERT(desc->context.metal.drawable_cb);
    SOKOL_ASSERT(desc->uniform_buffer_size > 0);
    _sg_mtl_init_pool(desc);
    _sg_mtl_init_sampler_cache(desc);
    _sg_mtl_clear_state_cache();
    _sg.mtl.valid = true;
    /* callbacks provided by the application to obtain the per-frame
       render pass descriptor and drawable */
    _sg.mtl.renderpass_descriptor_cb = desc->context.metal.renderpass_descriptor_cb;
    _sg.mtl.drawable_cb = desc->context.metal.drawable_cb;
    _sg.mtl.frame_index = 1;
    _sg.mtl.ub_size = desc->uniform_buffer_size;
    /* semaphore to throttle the CPU to at most SG_NUM_INFLIGHT_FRAMES ahead */
    _sg.mtl.sem = dispatch_semaphore_create(SG_NUM_INFLIGHT_FRAMES);
    _sg.mtl.device = (__bridge id<MTLDevice>) desc->context.metal.device;
    _sg.mtl.cmd_queue = [_sg.mtl.device newCommandQueue];
    /* the uniform buffers are CPU-write-only; on macOS they also need
       managed storage mode (explicit didModifyRange flushes) */
    MTLResourceOptions res_opts = MTLResourceCPUCacheModeWriteCombined;
    #if defined(_SG_TARGET_MACOS)
    res_opts |= MTLResourceStorageModeManaged;
    #endif
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        _sg.mtl.uniform_buffers[i] = [_sg.mtl.device
            newBufferWithLength:_sg.mtl.ub_size
            options:res_opts
        ];
    }
    _sg_mtl_init_caps();
}
8887 
/* tear down the Metal backend: drain in-flight frames, release all
   remaining Metal objects and free the backend-owned resources */
_SOKOL_PRIVATE void _sg_mtl_discard_backend(void) {
    SOKOL_ASSERT(_sg.mtl.valid);
    /* wait for the last frame to finish */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER);
    }
    /* semaphore must be "relinquished" before destruction */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        dispatch_semaphore_signal(_sg.mtl.sem);
    }
    _sg_mtl_destroy_sampler_cache(_sg.mtl.frame_index);
    /* pass a frame index far enough in the future that every queued item
       (including the samplers released just above) is collected now */
    _sg_mtl_garbage_collect(_sg.mtl.frame_index + SG_NUM_INFLIGHT_FRAMES + 2);
    _sg_mtl_destroy_pool();
    _sg.mtl.valid = false;

    _SG_OBJC_RELEASE(_sg.mtl.sem);
    _SG_OBJC_RELEASE(_sg.mtl.device);
    _SG_OBJC_RELEASE(_sg.mtl.cmd_queue);
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        _SG_OBJC_RELEASE(_sg.mtl.uniform_buffers[i]);
    }
    /* NOTE: MTLCommandBuffer and MTLRenderCommandEncoder are auto-released */
    _sg.mtl.cmd_buffer = nil;
    _sg.mtl.cmd_encoder = nil;
}
8913 
/* bind this frame's uniform buffer to every vertex- and fragment-stage
   uniform-block slot of the current render command encoder */
_SOKOL_PRIVATE void _sg_mtl_bind_uniform_buffers(void) {
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    id<MTLBuffer> ub = _sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index];
    for (int ub_slot = 0; ub_slot < SG_MAX_SHADERSTAGE_UBS; ub_slot++) {
        [_sg.mtl.cmd_encoder setVertexBuffer:ub offset:0 atIndex:ub_slot];
        [_sg.mtl.cmd_encoder setFragmentBuffer:ub offset:0 atIndex:ub_slot];
    }
}
8927 
/* clear the state cache, and if currently inside a render pass, restore
   the uniform buffer bindings (these are normally established in
   _sg_mtl_begin_pass()) */
_SOKOL_PRIVATE void _sg_mtl_reset_state_cache(void) {
    _sg_mtl_clear_state_cache();
    if (_sg.mtl.cmd_encoder != nil) {
        _sg_mtl_bind_uniform_buffers();
    }
}
8938 
/* context creation is a no-op on Metal, contexts carry no backend state here */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
8944 
/* context destruction is a no-op on Metal (see _sg_mtl_create_context) */
_SOKOL_PRIVATE void _sg_mtl_destroy_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    /* empty */
}
8950 
/* switching contexts only requires invalidating the state cache */
_SOKOL_PRIVATE void _sg_mtl_activate_context(_sg_context_t* ctx) {
    _SOKOL_UNUSED(ctx);
    _sg_mtl_clear_state_cache();
}
8955 
/* create a sokol-gfx buffer: either wrap externally injected MTLBuffer
   objects, or create one MTLBuffer per update slot (immutable buffers get
   their initial content uploaded here) */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    _sg_buffer_common_init(&buf->cmn, desc);
    /* a non-zero first entry means the application injected native buffers */
    const bool injected = (0 != desc->mtl_buffers[0]);
    MTLResourceOptions mtl_options = _sg_mtl_buffer_resource_options(buf->cmn.usage);
    for (int slot = 0; slot < buf->cmn.num_slots; slot++) {
        id<MTLBuffer> mtl_buf;
        if (injected) {
            SOKOL_ASSERT(desc->mtl_buffers[slot]);
            mtl_buf = (__bridge id<MTLBuffer>) desc->mtl_buffers[slot];
        }
        else {
            if (buf->cmn.usage == SG_USAGE_IMMUTABLE) {
                /* immutable buffers must provide their content upfront */
                SOKOL_ASSERT(desc->content);
                mtl_buf = [_sg.mtl.device newBufferWithBytes:desc->content length:buf->cmn.size options:mtl_options];
            }
            else {
                mtl_buf = [_sg.mtl.device newBufferWithLength:buf->cmn.size options:mtl_options];
            }
        }
        /* only the id-pool slot index is stored in the sokol-gfx buffer struct */
        buf->mtl.buf[slot] = _sg_mtl_add_resource(mtl_buf);
    }
    return SG_RESOURCESTATE_VALID;
}
8980 
/* queue all per-slot MTLBuffer objects of a sokol-gfx buffer for
   deferred release (_sg_mtl_release_resource() ignores the invalid
   slot index stored for nil buffers) */
_SOKOL_PRIVATE void _sg_mtl_destroy_buffer(_sg_buffer_t* buf) {
    SOKOL_ASSERT(buf);
    for (int i = 0; i < buf->cmn.num_slots; i++) {
        _sg_mtl_release_resource(_sg.mtl.frame_index, buf->mtl.buf[i]);
    }
}
8988 
/* copy the CPU-side pixel data described by an sg_image_content struct into
   a Metal texture via replaceRegion, iterating over cube faces, mipmap
   levels and array slices */
_SOKOL_PRIVATE void _sg_mtl_copy_image_content(const _sg_image_t* img, __unsafe_unretained id<MTLTexture> mtl_tex, const sg_image_content* content) {
    /* cube maps have 6 faces, all other image types have 1 */
    const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    /* for array textures, cmn.depth holds the number of array layers */
    const int num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.depth : 1;
    for (int face_index = 0; face_index < num_faces; face_index++) {
        for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) {
            SOKOL_ASSERT(content->subimage[face_index][mip_index].ptr);
            SOKOL_ASSERT(content->subimage[face_index][mip_index].size > 0);
            const uint8_t* data_ptr = (const uint8_t*)content->subimage[face_index][mip_index].ptr;
            /* mipmap dimensions are halved per level, but never drop below 1 */
            const int mip_width = _sg_max(img->cmn.width >> mip_index, 1);
            const int mip_height = _sg_max(img->cmn.height >> mip_index, 1);
            /* special case PVRTC formats: bytePerRow must be 0 */
            int bytes_per_row = 0;
            int bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1);
            if (!_sg_mtl_is_pvrtc(img->cmn.pixel_format)) {
                bytes_per_row = _sg_row_pitch(img->cmn.pixel_format, mip_width, 1);
            }
            MTLRegion region;
            if (img->cmn.type == SG_IMAGETYPE_3D) {
                const int mip_depth = _sg_max(img->cmn.depth >> mip_index, 1);
                region = MTLRegionMake3D(0, 0, 0, mip_width, mip_height, mip_depth);
                /* FIXME: apparently the minimal bytes_per_image size for 3D texture
                 is 4 KByte... somehow need to handle this */
            }
            else {
                region = MTLRegionMake2D(0, 0, mip_width, mip_height);
            }
            for (int slice_index = 0; slice_index < num_slices; slice_index++) {
                /* for cube maps the Metal slice selects the face, for array textures the layer */
                const int mtl_slice_index = (img->cmn.type == SG_IMAGETYPE_CUBE) ? face_index : slice_index;
                const int slice_offset = slice_index * bytes_per_slice;
                /* all slices of one mip level are packed back-to-back in the subimage data */
                SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)content->subimage[face_index][mip_index].size);
                [mtl_tex replaceRegion:region
                    mipmapLevel:mip_index
                    slice:mtl_slice_index
                    withBytes:data_ptr + slice_offset
                    bytesPerRow:bytes_per_row
                    bytesPerImage:bytes_per_slice];
            }
        }
    }
}
9029 
9030 /*
9031     FIXME: METAL RESOURCE STORAGE MODE FOR macOS AND iOS
9032 
9033     For immutable textures on macOS, the recommended procedure is to create
9034     a MTLStorageModeManaged texture with the immutable content first,
9035     and then use the GPU to blit the content into a MTLStorageModePrivate
9036     texture before the first use.
9037 
9038     On iOS use the same one-time-blit procedure, but from a
9039     MTLStorageModeShared to a MTLStorageModePrivate texture.
9040 
9041     It probably makes sense to handle this in a separate 'resource manager'
    with a recyclable pool of blit-source-textures?
9043 */
9044 
/* initialize MTLTextureDescriptor with common attributes */
/* fill a MTLTextureDescriptor with the attributes shared by all image types;
   returns false if the requested pixel format is not supported */
_SOKOL_PRIVATE bool _sg_mtl_init_texdesc_common(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    mtl_desc.textureType = _sg_mtl_texture_type(img->cmn.type);
    mtl_desc.pixelFormat = _sg_mtl_pixel_format(img->cmn.pixel_format);
    if (mtl_desc.pixelFormat == MTLPixelFormatInvalid) {
        SOKOL_LOG("Unsupported texture pixel format!\n");
        return false;
    }
    mtl_desc.width = img->cmn.width;
    mtl_desc.height = img->cmn.height;
    /* cmn.depth is the depth dimension for 3D images, and the layer count for array images */
    mtl_desc.depth = (SG_IMAGETYPE_3D == img->cmn.type) ? img->cmn.depth : 1;
    mtl_desc.mipmapLevelCount = img->cmn.num_mipmaps;
    mtl_desc.arrayLength = (SG_IMAGETYPE_ARRAY == img->cmn.type) ? img->cmn.depth : 1;
    mtl_desc.usage = MTLTextureUsageShaderRead;
    if (SG_USAGE_IMMUTABLE != img->cmn.usage) {
        /* dynamic/stream images are updated from the CPU, use write-combined cache mode */
        mtl_desc.cpuCacheMode = MTLCPUCacheModeWriteCombined;
    }
    #if defined(_SG_TARGET_MACOS)
        /* macOS: use managed textures */
        mtl_desc.resourceOptions = MTLResourceStorageModeManaged;
        mtl_desc.storageMode = MTLStorageModeManaged;
    #else
        /* iOS: use CPU/GPU shared memory */
        mtl_desc.resourceOptions = MTLResourceStorageModeShared;
        mtl_desc.storageMode = MTLStorageModeShared;
    #endif
    return true;
}
9083 
/* initialize MTLTextureDescriptor with render-target attributes */
/* adjust a common-initialized MTLTextureDescriptor for use as a
   non-MSAA render target */
_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    SOKOL_ASSERT(img->cmn.render_target);
    _SOKOL_UNUSED(img);
    /* GPU-only storage, render targets are not accessed by the CPU */
    mtl_desc.resourceOptions = MTLResourceStorageModePrivate;
    mtl_desc.storageMode = MTLStorageModePrivate;
    /* undo a possible write-combined cache mode from the common init */
    mtl_desc.cpuCacheMode = MTLCPUCacheModeDefaultCache;
    /* non-MSAA render targets can also be sampled in shaders */
    mtl_desc.usage = MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget;
}
9096 
/* initialize MTLTextureDescriptor with MSAA attributes */
/* adjust a common-initialized MTLTextureDescriptor for use as an MSAA
   render surface (a 2D multisample texture with a single mip level) */
_SOKOL_PRIVATE void _sg_mtl_init_texdesc_rt_msaa(MTLTextureDescriptor* mtl_desc, _sg_image_t* img) {
    SOKOL_ASSERT(img->cmn.sample_count > 1);
    mtl_desc.textureType = MTLTextureType2DMultisample;
    mtl_desc.sampleCount = img->cmn.sample_count;
    mtl_desc.depth = 1;
    mtl_desc.arrayLength = 1;
    mtl_desc.mipmapLevelCount = 1;
    /* GPU-only storage, render targets are not accessed by the CPU */
    mtl_desc.resourceOptions = MTLResourceStorageModePrivate;
    mtl_desc.storageMode = MTLStorageModePrivate;
    /* undo a possible write-combined cache mode from the common init */
    mtl_desc.cpuCacheMode = MTLCPUCacheModeDefaultCache;
    /* MSAA render targets are not shader-readable (instead they are resolved) */
    mtl_desc.usage = MTLTextureUsageRenderTarget;
}
9113 
/* create an image resource: either a depth-stencil render-target texture,
   or one or more color textures (optionally with an extra MSAA render
   surface) plus a shared sampler-state object */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    _sg_image_common_init(&img->cmn, desc);
    /* caller may inject pre-existing MTLTexture objects instead of having them created here */
    const bool injected = (0 != desc->mtl_textures[0]);
    const bool msaa = (img->cmn.sample_count > 1);

    /* first initialize all Metal resource pool slots to 'empty' */
    for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) {
        img->mtl.tex[i] = _sg_mtl_add_resource(nil);
    }
    img->mtl.sampler_state = _sg_mtl_add_resource(nil);
    img->mtl.depth_tex = _sg_mtl_add_resource(nil);
    img->mtl.msaa_tex = _sg_mtl_add_resource(nil);

    /* initialize a Metal texture descriptor with common attributes */
    MTLTextureDescriptor* mtl_desc = [[MTLTextureDescriptor alloc] init];
    if (!_sg_mtl_init_texdesc_common(mtl_desc, img)) {
        /* unsupported pixel format */
        _SG_OBJC_RELEASE(mtl_desc);
        return SG_RESOURCESTATE_FAILED;
    }

    /* special case depth-stencil-buffer? */
    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* depth-stencil buffer texture must always be a render target */
        SOKOL_ASSERT(img->cmn.render_target);
        SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D);
        SOKOL_ASSERT(img->cmn.num_mipmaps == 1);
        SOKOL_ASSERT(!injected);
        if (msaa) {
            _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img);
        }
        else {
            _sg_mtl_init_texdesc_rt(mtl_desc, img);
        }
        id<MTLTexture> tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
        SOKOL_ASSERT(nil != tex);
        img->mtl.depth_tex = _sg_mtl_add_resource(tex);
    }
    else {
        /* create the color texture
            In case this is a render target without MSAA, add the relevant
            render-target descriptor attributes.
            In case this is a render target *with* MSAA, the color texture
            will serve as MSAA-resolve target (not as render target), and rendering
            will go into a separate render target texture of type
            MTLTextureType2DMultisample.
        */
        if (img->cmn.render_target && !msaa) {
            _sg_mtl_init_texdesc_rt(mtl_desc, img);
        }
        /* one texture per in-flight frame for dynamic/stream images, one otherwise */
        for (int slot = 0; slot < img->cmn.num_slots; slot++) {
            id<MTLTexture> tex;
            if (injected) {
                SOKOL_ASSERT(desc->mtl_textures[slot]);
                tex = (__bridge id<MTLTexture>) desc->mtl_textures[slot];
            }
            else {
                tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
                /* immutable non-render-target images get their initial content uploaded here */
                if ((img->cmn.usage == SG_USAGE_IMMUTABLE) && !img->cmn.render_target) {
                    _sg_mtl_copy_image_content(img, tex, &desc->content);
                }
            }
            img->mtl.tex[slot] = _sg_mtl_add_resource(tex);
        }

        /* if MSAA color render target, create an additional MSAA render-surface texture */
        if (img->cmn.render_target && msaa) {
            _sg_mtl_init_texdesc_rt_msaa(mtl_desc, img);
            id<MTLTexture> tex = [_sg.mtl.device newTextureWithDescriptor:mtl_desc];
            img->mtl.msaa_tex = _sg_mtl_add_resource(tex);
        }

        /* create (possibly shared) sampler state */
        img->mtl.sampler_state = _sg_mtl_create_sampler(_sg.mtl.device, desc);
    }
    _SG_OBJC_RELEASE(mtl_desc);
    return SG_RESOURCESTATE_VALID;
}
9192 
/* destroy an image resource: schedule all owned texture objects for
   deferred release (releasing a 'null resource' is a valid no-op) */
_SOKOL_PRIVATE void _sg_mtl_destroy_image(_sg_image_t* img) {
    SOKOL_ASSERT(img);
    const uint32_t frame_index = _sg.mtl.frame_index;
    for (int i = 0; i < img->cmn.num_slots; i++) {
        _sg_mtl_release_resource(frame_index, img->mtl.tex[i]);
    }
    _sg_mtl_release_resource(frame_index, img->mtl.depth_tex);
    _sg_mtl_release_resource(frame_index, img->mtl.msaa_tex);
    /* NOTE: sampler state objects are shared and not released until shutdown */
}
9203 
/* compile a MTLLibrary from Metal shader source code; any compile error
   is logged, and a nil library is returned to the caller */
_SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_compile_library(const char* src) {
    NSError* err = NULL;
    NSString* src_str = [NSString stringWithUTF8String:src];
    id<MTLLibrary> lib = [_sg.mtl.device newLibraryWithSource:src_str options:nil error:&err];
    if (err) {
        SOKOL_LOG([err.localizedDescription UTF8String]);
    }
    return lib;
}
9216 
/* create a MTLLibrary from precompiled Metal bytecode; any error is
   logged, and a nil library is returned to the caller */
_SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_library_from_bytecode(const uint8_t* ptr, int num_bytes) {
    NSError* err = NULL;
    /* wrap the raw bytes into a dispatch_data_t, as required by newLibraryWithData */
    dispatch_data_t lib_data = dispatch_data_create(ptr, num_bytes, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
    id<MTLLibrary> lib = [_sg.mtl.device newLibraryWithData:lib_data error:&err];
    if (err) {
        SOKOL_LOG([err.localizedDescription UTF8String]);
    }
    /* the dispatch data is no longer needed once the library has been created */
    _SG_OBJC_RELEASE(lib_data);
    return lib;
}
9227 
/* create a shader resource: build one MTLLibrary per stage (from bytecode
   or from source), look up the entry point functions, and register the
   objects in the Metal resource pool */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);

    _sg_shader_common_init(&shd->cmn, desc);

    /* create Metal library objects; both stages must provide the same
       kind of input (either precompiled bytecode, or source code) */
    id<MTLLibrary> vs_lib = nil;
    id<MTLLibrary> fs_lib = nil;
    if (desc->vs.byte_code && desc->fs.byte_code) {
        /* separate byte code provided */
        vs_lib = _sg_mtl_library_from_bytecode(desc->vs.byte_code, desc->vs.byte_code_size);
        fs_lib = _sg_mtl_library_from_bytecode(desc->fs.byte_code, desc->fs.byte_code_size);
    }
    else if (desc->vs.source && desc->fs.source) {
        /* separate sources provided */
        vs_lib = _sg_mtl_compile_library(desc->vs.source);
        fs_lib = _sg_mtl_compile_library(desc->fs.source);
    }
    else {
        /* neither bytecode nor source provided for both stages */
        return SG_RESOURCESTATE_FAILED;
    }
    if ((nil == vs_lib) || (nil == fs_lib)) {
        return SG_RESOURCESTATE_FAILED;
    }
    /* look up the entry point functions in the compiled libraries */
    id<MTLFunction> vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:desc->vs.entry]];
    if (nil == vs_func) {
        SOKOL_LOG("vertex shader entry function not found\n");
        return SG_RESOURCESTATE_FAILED;
    }
    id<MTLFunction> fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:desc->fs.entry]];
    if (nil == fs_func) {
        SOKOL_LOG("fragment shader entry function not found\n");
        return SG_RESOURCESTATE_FAILED;
    }
    /* it is legal to call _sg_mtl_add_resource with a nil value, this will return a special 0xFFFFFFFF index */
    shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_lib  = _sg_mtl_add_resource(vs_lib);
    shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_lib  = _sg_mtl_add_resource(fs_lib);
    shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func = _sg_mtl_add_resource(vs_func);
    shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func = _sg_mtl_add_resource(fs_func);
    return SG_RESOURCESTATE_VALID;
}
9278 
/* destroy a shader resource: schedule the library and entry-function
   objects of both stages for deferred release */
_SOKOL_PRIVATE void _sg_mtl_destroy_shader(_sg_shader_t* shd) {
    SOKOL_ASSERT(shd);
    /* it is valid to call _sg_mtl_release_resource with a 'null resource' */
    for (int stage = 0; stage < SG_NUM_SHADER_STAGES; stage++) {
        _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[stage].mtl_func);
        _sg_mtl_release_resource(_sg.mtl.frame_index, shd->mtl.stage[stage].mtl_lib);
    }
}
9287 
/* create a pipeline resource: translate the sg_pipeline_desc into a
   MTLRenderPipelineState and a MTLDepthStencilState object, plus a few
   dynamic-state values (cull mode, winding, stencil ref) that are applied
   per draw call */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(pip && shd && desc);
    SOKOL_ASSERT(desc->shader.id == shd->slot.id);

    pip->shader = shd;
    _sg_pipeline_common_init(&pip->cmn, desc);

    /* states that Metal sets on the command encoder, not in the pipeline object */
    sg_primitive_type prim_type = desc->primitive_type;
    pip->mtl.prim_type = _sg_mtl_primitive_type(prim_type);
    pip->mtl.index_size = _sg_mtl_index_size(pip->cmn.index_type);
    if (SG_INDEXTYPE_NONE != pip->cmn.index_type) {
        pip->mtl.index_type = _sg_mtl_index_type(pip->cmn.index_type);
    }
    pip->mtl.cull_mode = _sg_mtl_cull_mode(desc->rasterizer.cull_mode);
    pip->mtl.winding = _sg_mtl_winding(desc->rasterizer.face_winding);
    pip->mtl.stencil_ref = desc->depth_stencil.stencil_ref;

    /* create vertex-descriptor */
    MTLVertexDescriptor* vtx_desc = [MTLVertexDescriptor vertexDescriptor];
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
        /* attribute list ends at the first invalid format */
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
        vtx_desc.attributes[attr_index].format = _sg_mtl_vertex_format(a_desc->format);
        vtx_desc.attributes[attr_index].offset = a_desc->offset;
        /* vertex buffers are bound after the uniform-buffer slots in the Metal buffer table */
        vtx_desc.attributes[attr_index].bufferIndex = a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS;
        pip->cmn.vertex_layout_valid[a_desc->buffer_index] = true;
    }
    /* only describe layouts for buffer bind slots actually referenced by an attribute */
    for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) {
        if (pip->cmn.vertex_layout_valid[layout_index]) {
            const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index];
            const int mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS;
            SOKOL_ASSERT(l_desc->stride > 0);
            vtx_desc.layouts[mtl_vb_slot].stride = l_desc->stride;
            vtx_desc.layouts[mtl_vb_slot].stepFunction = _sg_mtl_step_function(l_desc->step_func);
            vtx_desc.layouts[mtl_vb_slot].stepRate = l_desc->step_rate;
        }
    }

    /* render-pipeline descriptor */
    MTLRenderPipelineDescriptor* rp_desc = [[MTLRenderPipelineDescriptor alloc] init];
    rp_desc.vertexDescriptor = vtx_desc;
    SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX);
    rp_desc.vertexFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_VS].mtl_func);
    SOKOL_ASSERT(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func != _SG_MTL_INVALID_SLOT_INDEX);
    rp_desc.fragmentFunction = _sg_mtl_id(shd->mtl.stage[SG_SHADERSTAGE_FS].mtl_func);
    rp_desc.sampleCount = desc->rasterizer.sample_count;
    rp_desc.alphaToCoverageEnabled = desc->rasterizer.alpha_to_coverage_enabled;
    rp_desc.alphaToOneEnabled = NO;
    rp_desc.rasterizationEnabled = YES;
    rp_desc.depthAttachmentPixelFormat = _sg_mtl_pixel_format(desc->blend.depth_format);
    /* a combined depth-stencil format is also assigned as stencil attachment format */
    if (desc->blend.depth_format == SG_PIXELFORMAT_DEPTH_STENCIL) {
        rp_desc.stencilAttachmentPixelFormat = _sg_mtl_pixel_format(desc->blend.depth_format);
    }
    /* FIXME: this only works on macOS 10.13!
    for (int i = 0; i < (SG_MAX_SHADERSTAGE_UBS+SG_MAX_SHADERSTAGE_BUFFERS); i++) {
        rp_desc.vertexBuffers[i].mutability = MTLMutabilityImmutable;
    }
    for (int i = 0; i < SG_MAX_SHADERSTAGE_UBS; i++) {
        rp_desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable;
    }
    */
    /* the same blend state is applied to all color attachments */
    const int att_count = desc->blend.color_attachment_count;
    for (int i = 0; i < att_count; i++) {
        rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_pixel_format(desc->blend.color_format);
        rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask((sg_color_mask)desc->blend.color_write_mask);
        rp_desc.colorAttachments[i].blendingEnabled = desc->blend.enabled;
        rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(desc->blend.op_alpha);
        rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(desc->blend.op_rgb);
        rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(desc->blend.dst_factor_alpha);
        rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(desc->blend.dst_factor_rgb);
        rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(desc->blend.src_factor_alpha);
        rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(desc->blend.src_factor_rgb);
    }
    NSError* err = NULL;
    id<MTLRenderPipelineState> mtl_rps = [_sg.mtl.device newRenderPipelineStateWithDescriptor:rp_desc error:&err];
    _SG_OBJC_RELEASE(rp_desc);
    if (nil == mtl_rps) {
        SOKOL_ASSERT(err);
        SOKOL_LOG([err.localizedDescription UTF8String]);
        return SG_RESOURCESTATE_FAILED;
    }

    /* depth-stencil-state */
    MTLDepthStencilDescriptor* ds_desc = [[MTLDepthStencilDescriptor alloc] init];
    ds_desc.depthCompareFunction = _sg_mtl_compare_func(desc->depth_stencil.depth_compare_func);
    ds_desc.depthWriteEnabled = desc->depth_stencil.depth_write_enabled;
    if (desc->depth_stencil.stencil_enabled) {
        const sg_stencil_state* sb = &desc->depth_stencil.stencil_back;
        ds_desc.backFaceStencil = [[MTLStencilDescriptor alloc] init];
        ds_desc.backFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sb->fail_op);
        ds_desc.backFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sb->depth_fail_op);
        ds_desc.backFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sb->pass_op);
        ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sb->compare_func);
        ds_desc.backFaceStencil.readMask = desc->depth_stencil.stencil_read_mask;
        ds_desc.backFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask;
        const sg_stencil_state* sf = &desc->depth_stencil.stencil_front;
        ds_desc.frontFaceStencil = [[MTLStencilDescriptor alloc] init];
        ds_desc.frontFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(sf->fail_op);
        ds_desc.frontFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(sf->depth_fail_op);
        ds_desc.frontFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(sf->pass_op);
        ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(sf->compare_func);
        ds_desc.frontFaceStencil.readMask = desc->depth_stencil.stencil_read_mask;
        ds_desc.frontFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask;
    }
    id<MTLDepthStencilState> mtl_dss = [_sg.mtl.device newDepthStencilStateWithDescriptor:ds_desc];
    _SG_OBJC_RELEASE(ds_desc);
    pip->mtl.rps = _sg_mtl_add_resource(mtl_rps);
    pip->mtl.dss = _sg_mtl_add_resource(mtl_dss);
    return SG_RESOURCESTATE_VALID;
}
9401 
/* destroy a pipeline resource: schedule the render-pipeline-state and
   depth-stencil-state objects for deferred release (releasing a 'null
   resource' is a valid no-op) */
_SOKOL_PRIVATE void _sg_mtl_destroy_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    _sg_mtl_release_resource(_sg.mtl.frame_index, pip->mtl.dss);
    _sg_mtl_release_resource(_sg.mtl.frame_index, pip->mtl.rps);
}
9408 
/* create a pass resource: resolve and validate the attachment image
   pointers (the actual MTLRenderPassDescriptor is built later, in
   _sg_mtl_begin_pass) */
_SOKOL_PRIVATE sg_resource_state _sg_mtl_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass && desc);
    SOKOL_ASSERT(att_images && att_images[0]);

    _sg_pass_common_init(&pass->cmn, desc);

    /* copy color attachment image pointers */
    const sg_attachment_desc* att_desc;
    for (int i = 0; i < pass->cmn.num_color_atts; i++) {
        att_desc = &desc->color_attachments[i];
        if (att_desc->image.id != SG_INVALID_ID) {
            /* NOTE: the redundant assert on att_desc->image.id has been removed,
               the enclosing if already guarantees the condition */
            SOKOL_ASSERT(0 == pass->mtl.color_atts[i].image);
            SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id));
            SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->cmn.pixel_format));
            pass->mtl.color_atts[i].image = att_images[i];
        }
    }
    /* copy the optional depth-stencil attachment image pointer, the image
       is passed in after the color attachment images */
    SOKOL_ASSERT(0 == pass->mtl.ds_att.image);
    att_desc = &desc->depth_stencil_attachment;
    if (att_desc->image.id != SG_INVALID_ID) {
        const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
        SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
        SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
        pass->mtl.ds_att.image = att_images[ds_img_index];
    }
    return SG_RESOURCESTATE_VALID;
}
9437 
/* destroy a pass resource: a no-op in the Metal backend, the pass only
   stores pointers to attachment images, which are destroyed separately */
_SOKOL_PRIVATE void _sg_mtl_destroy_pass(_sg_pass_t* pass) {
    SOKOL_ASSERT(pass);
    _SOKOL_UNUSED(pass);
}
9442 
/* return the color attachment image of a pass at the given index */
_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_color_image(const _sg_pass_t* pass, int index) {
    SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
    /* NOTE: may return null */
    return pass->mtl.color_atts[index].image;
}
9448 
/* return the depth-stencil attachment image of a pass */
_SOKOL_PRIVATE _sg_image_t* _sg_mtl_pass_ds_image(const _sg_pass_t* pass) {
    /* NOTE: may return null */
    SOKOL_ASSERT(pass);
    return pass->mtl.ds_att.image;
}
9454 
/* begin a render pass: 'pass' is null for the default (swapchain) pass,
   or a valid pass object for offscreen rendering; builds (or obtains via
   user callback) a MTLRenderPassDescriptor and creates the render command
   encoder for this pass */
_SOKOL_PRIVATE void _sg_mtl_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    SOKOL_ASSERT(action);
    SOKOL_ASSERT(!_sg.mtl.in_pass);
    SOKOL_ASSERT(_sg.mtl.cmd_queue);
    SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(_sg.mtl.renderpass_descriptor_cb);
    _sg.mtl.in_pass = true;
    _sg.mtl.cur_width = w;
    _sg.mtl.cur_height = h;
    _sg_mtl_clear_state_cache();

    /* if this is the first pass in the frame, create a command buffer */
    if (nil == _sg.mtl.cmd_buffer) {
        /* block until the oldest frame in flight has finished */
        dispatch_semaphore_wait(_sg.mtl.sem, DISPATCH_TIME_FOREVER);
        /* unretained references: the backend keeps its own resources alive
           via the resource pool, see _sg_mtl_add_resource/_sg_mtl_release_resource */
        _sg.mtl.cmd_buffer = [_sg.mtl.cmd_queue commandBufferWithUnretainedReferences];
    }

    /* if this is first pass in frame, get uniform buffer base pointer */
    if (0 == _sg.mtl.cur_ub_base_ptr) {
        _sg.mtl.cur_ub_base_ptr = (uint8_t*)[_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] contents];
    }

    /* initialize a render pass descriptor */
    MTLRenderPassDescriptor* pass_desc = nil;
    if (pass) {
        /* offscreen render pass */
        pass_desc = [MTLRenderPassDescriptor renderPassDescriptor];
    }
    else {
        /* default render pass, call user-provided callback to provide render pass descriptor */
        pass_desc = (__bridge MTLRenderPassDescriptor*) _sg.mtl.renderpass_descriptor_cb();
    }
    if (pass_desc) {
        _sg.mtl.pass_valid = true;
    }
    else {
        /* default pass descriptor will not be valid if window is minimized,
           don't do any rendering in this case */
        _sg.mtl.pass_valid = false;
        return;
    }
    if (pass) {
        /* setup pass descriptor for offscreen rendering */
        SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID);
        for (int i = 0; i < pass->cmn.num_color_atts; i++) {
            const _sg_attachment_t* cmn_att = &pass->cmn.color_atts[i];
            const _sg_mtl_attachment_t* mtl_att = &pass->mtl.color_atts[i];
            const _sg_image_t* att_img = mtl_att->image;
            SOKOL_ASSERT(att_img->slot.state == SG_RESOURCESTATE_VALID);
            SOKOL_ASSERT(att_img->slot.id == cmn_att->image_id.id);
            const bool is_msaa = (att_img->cmn.sample_count > 1);
            pass_desc.colorAttachments[i].loadAction = _sg_mtl_load_action(action->colors[i].action);
            /* MSAA content is resolved into the non-MSAA color texture at the end of the pass */
            pass_desc.colorAttachments[i].storeAction = is_msaa ? MTLStoreActionMultisampleResolve : MTLStoreActionStore;
            const float* c = &(action->colors[i].val[0]);
            pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]);
            if (is_msaa) {
                /* render into the MSAA surface, resolve into the regular color texture */
                SOKOL_ASSERT(att_img->mtl.msaa_tex != _SG_MTL_INVALID_SLOT_INDEX);
                SOKOL_ASSERT(att_img->mtl.tex[mtl_att->image->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
                pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.msaa_tex);
                pass_desc.colorAttachments[i].resolveTexture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]);
                pass_desc.colorAttachments[i].resolveLevel = cmn_att->mip_level;
                switch (att_img->cmn.type) {
                    case SG_IMAGETYPE_CUBE:
                    case SG_IMAGETYPE_ARRAY:
                        pass_desc.colorAttachments[i].resolveSlice = cmn_att->slice;
                        break;
                    case SG_IMAGETYPE_3D:
                        pass_desc.colorAttachments[i].resolveDepthPlane = cmn_att->slice;
                        break;
                    default: break;
                }
            }
            else {
                /* non-MSAA: render directly into the color texture */
                SOKOL_ASSERT(att_img->mtl.tex[att_img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
                pass_desc.colorAttachments[i].texture = _sg_mtl_id(att_img->mtl.tex[att_img->cmn.active_slot]);
                pass_desc.colorAttachments[i].level = cmn_att->mip_level;
                switch (att_img->cmn.type) {
                    case SG_IMAGETYPE_CUBE:
                    case SG_IMAGETYPE_ARRAY:
                        pass_desc.colorAttachments[i].slice = cmn_att->slice;
                        break;
                    case SG_IMAGETYPE_3D:
                        pass_desc.colorAttachments[i].depthPlane = cmn_att->slice;
                        break;
                    default: break;
                }
            }
        }
        const _sg_image_t* ds_att_img = pass->mtl.ds_att.image;
        if (0 != ds_att_img) {
            SOKOL_ASSERT(ds_att_img->slot.state == SG_RESOURCESTATE_VALID);
            SOKOL_ASSERT(ds_att_img->slot.id == pass->cmn.ds_att.image_id.id);
            SOKOL_ASSERT(ds_att_img->mtl.depth_tex != _SG_MTL_INVALID_SLOT_INDEX);
            pass_desc.depthAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex);
            pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action);
            pass_desc.depthAttachment.clearDepth = action->depth.val;
            /* a combined depth-stencil texture also serves as the stencil attachment */
            if (_sg_is_depth_stencil_format(ds_att_img->cmn.pixel_format)) {
                pass_desc.stencilAttachment.texture = _sg_mtl_id(ds_att_img->mtl.depth_tex);
                pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action);
                pass_desc.stencilAttachment.clearStencil = action->stencil.val;
            }
        }
    }
    else {
        /* setup pass descriptor for default rendering
           (the callback-provided descriptor already has its textures set) */
        pass_desc.colorAttachments[0].loadAction = _sg_mtl_load_action(action->colors[0].action);
        const float* c = &(action->colors[0].val[0]);
        pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]);
        pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action);
        pass_desc.depthAttachment.clearDepth = action->depth.val;
        pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action);
        pass_desc.stencilAttachment.clearStencil = action->stencil.val;
    }

    /* create a render command encoder, this might return nil if window is minimized */
    _sg.mtl.cmd_encoder = [_sg.mtl.cmd_buffer renderCommandEncoderWithDescriptor:pass_desc];
    if (nil == _sg.mtl.cmd_encoder) {
        _sg.mtl.pass_valid = false;
        return;
    }

    /* bind the global uniform buffer, this only happens once per pass */
    _sg_mtl_bind_uniform_buffers();
}
9581 
/* finish the current render pass and drop the command encoder */
_SOKOL_PRIVATE void _sg_mtl_end_pass(void) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    _sg.mtl.in_pass = false;
    _sg.mtl.pass_valid = false;
    if (nil == _sg.mtl.cmd_encoder) {
        /* nothing to end (e.g. encoder creation failed in begin_pass) */
        return;
    }
    [_sg.mtl.cmd_encoder endEncoding];
    /* NOTE: MTLRenderCommandEncoder is autoreleased */
    _sg.mtl.cmd_encoder = nil;
}
9592 
/* finish the frame: flush uniform data (macOS), present the drawable,
   commit the command buffer and rotate per-frame state;
   must be called outside of a render pass (see asserts below) */
_SOKOL_PRIVATE void _sg_mtl_commit(void) {
    SOKOL_ASSERT(!_sg.mtl.in_pass);
    SOKOL_ASSERT(!_sg.mtl.pass_valid);
    SOKOL_ASSERT(_sg.mtl.drawable_cb);
    SOKOL_ASSERT(nil == _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(nil != _sg.mtl.cmd_buffer);

    /* macOS only: make the CPU writes to the current uniform buffer
       visible to the GPU (managed storage mode needs didModifyRange) */
    #if defined(_SG_TARGET_MACOS)
    [_sg.mtl.uniform_buffers[_sg.mtl.cur_frame_rotate_index] didModifyRange:NSMakeRange(0, _sg.mtl.cur_ub_offset)];
    #endif

    /* present, commit and signal semaphore when done */
    id<MTLDrawable> cur_drawable = (__bridge id<MTLDrawable>) _sg.mtl.drawable_cb();
    [_sg.mtl.cmd_buffer presentDrawable:cur_drawable];
    [_sg.mtl.cmd_buffer addCompletedHandler:^(id<MTLCommandBuffer> cmd_buffer) {
        _SOKOL_UNUSED(cmd_buffer);
        /* NOTE(review): the semaphore presumably throttles the CPU to
           SG_NUM_INFLIGHT_FRAMES frames ahead of the GPU — the matching
           wait is outside this view */
        dispatch_semaphore_signal(_sg.mtl.sem);
    }];
    [_sg.mtl.cmd_buffer commit];

    /* garbage-collect resources pending for release */
    _sg_mtl_garbage_collect(_sg.mtl.frame_index);

    /* rotate uniform buffer slot */
    if (++_sg.mtl.cur_frame_rotate_index >= SG_NUM_INFLIGHT_FRAMES) {
        _sg.mtl.cur_frame_rotate_index = 0;
    }
    _sg.mtl.frame_index++;
    /* reset the uniform-buffer write cursor for the next frame */
    _sg.mtl.cur_ub_offset = 0;
    _sg.mtl.cur_ub_base_ptr = 0;
    /* NOTE: MTLCommandBuffer is autoreleased */
    _sg.mtl.cmd_buffer = nil;
}
9626 
/* set the viewport on the current render command encoder; y is flipped
   when the origin is bottom-left */
_SOKOL_PRIVATE void _sg_mtl_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    const int top_y = origin_top_left ? y : (_sg.mtl.cur_height - (y + h));
    const MTLViewport vp = {
        .originX = (double) x,
        .originY = (double) top_y,
        .width   = (double) w,
        .height  = (double) h,
        .znear   = 0.0,
        .zfar    = 1.0,
    };
    [_sg.mtl.cmd_encoder setViewport:vp];
}
9642 
/* set the scissor rect on the current render command encoder; the rect is
   clipped against the framebuffer and forced to a minimum size of 1x1 */
_SOKOL_PRIVATE void _sg_mtl_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    const int fb_width = _sg.mtl.cur_width;
    const int fb_height = _sg.mtl.cur_height;
    /* clip against framebuffer rect */
    x = _sg_min(_sg_max(0, x), fb_width - 1);
    y = _sg_min(_sg_max(0, y), fb_height - 1);
    if ((x + w) > fb_width) {
        w = fb_width - x;
    }
    if ((y + h) > fb_height) {
        h = fb_height - y;
    }
    /* Metal rejects zero-sized scissor rects */
    w = _sg_max(w, 1);
    h = _sg_max(h, 1);

    const MTLScissorRect rect = {
        .x = x,
        .y = origin_top_left ? y : (fb_height - (y + h)),
        .width = w,
        .height = h,
    };
    [_sg.mtl.cmd_encoder setScissorRect:rect];
}
9668 
/* bind a pipeline and its associated render state; redundant re-binds are
   filtered through the state cache */
_SOKOL_PRIVATE void _sg_mtl_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);

    /* pointer AND id must both match, the id check guards against a
       recycled pipeline slot */
    const bool same_pipeline = (_sg.mtl.state_cache.cur_pipeline == pip) &&
                               (_sg.mtl.state_cache.cur_pipeline_id.id == pip->slot.id);
    if (same_pipeline) {
        return;
    }
    _sg.mtl.state_cache.cur_pipeline = pip;
    _sg.mtl.state_cache.cur_pipeline_id.id = pip->slot.id;
    const float* bc = pip->cmn.blend_color;
    [_sg.mtl.cmd_encoder setBlendColorRed:bc[0] green:bc[1] blue:bc[2] alpha:bc[3]];
    [_sg.mtl.cmd_encoder setCullMode:pip->mtl.cull_mode];
    [_sg.mtl.cmd_encoder setFrontFacingWinding:pip->mtl.winding];
    [_sg.mtl.cmd_encoder setStencilReferenceValue:pip->mtl.stencil_ref];
    [_sg.mtl.cmd_encoder setDepthBias:pip->cmn.depth_bias slopeScale:pip->cmn.depth_bias_slope_scale clamp:pip->cmn.depth_bias_clamp];
    SOKOL_ASSERT(pip->mtl.rps != _SG_MTL_INVALID_SLOT_INDEX);
    [_sg.mtl.cmd_encoder setRenderPipelineState:_sg_mtl_id(pip->mtl.rps)];
    SOKOL_ASSERT(pip->mtl.dss != _SG_MTL_INVALID_SLOT_INDEX);
    [_sg.mtl.cmd_encoder setDepthStencilState:_sg_mtl_id(pip->mtl.dss)];
}
9693 
/* apply vertex-buffer, index-buffer and image bindings for subsequent
   draw calls; redundant bindings are filtered through the state cache;
   'pip' is only used for validation asserts here */
_SOKOL_PRIVATE void _sg_mtl_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    _SOKOL_UNUSED(pip);
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);

    /* store index buffer binding, this will be needed later in sg_draw() */
    _sg.mtl.state_cache.cur_indexbuffer = ib;
    _sg.mtl.state_cache.cur_indexbuffer_offset = ib_offset;
    if (ib) {
        SOKOL_ASSERT(pip->cmn.index_type != SG_INDEXTYPE_NONE);
        _sg.mtl.state_cache.cur_indexbuffer_id.id = ib->slot.id;
    }
    else {
        SOKOL_ASSERT(pip->cmn.index_type == SG_INDEXTYPE_NONE);
        _sg.mtl.state_cache.cur_indexbuffer_id.id = SG_INVALID_ID;
    }

    /* apply vertex buffers */
    int slot;
    for (slot = 0; slot < num_vbs; slot++) {
        const _sg_buffer_t* vb = vbs[slot];
        /* re-bind if the buffer, its offset, or its id changed (the id
           check guards against a recycled buffer slot) */
        if ((_sg.mtl.state_cache.cur_vertexbuffers[slot] != vb) ||
            (_sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] != vb_offsets[slot]) ||
            (_sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id != vb->slot.id))
        {
            _sg.mtl.state_cache.cur_vertexbuffers[slot] = vb;
            _sg.mtl.state_cache.cur_vertexbuffer_offsets[slot] = vb_offsets[slot];
            _sg.mtl.state_cache.cur_vertexbuffer_ids[slot].id = vb->slot.id;
            /* Metal buffer slots 0..SG_MAX_SHADERSTAGE_UBS-1 are reserved
               for uniform buffers, vertex buffers start after them */
            const NSUInteger mtl_slot = SG_MAX_SHADERSTAGE_UBS + slot;
            SOKOL_ASSERT(vb->mtl.buf[vb->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexBuffer:_sg_mtl_id(vb->mtl.buf[vb->cmn.active_slot])
                offset:vb_offsets[slot]
                atIndex:mtl_slot];
        }
    }

    /* apply vertex shader images */
    for (slot = 0; slot < num_vs_imgs; slot++) {
        const _sg_image_t* img = vs_imgs[slot];
        if ((_sg.mtl.state_cache.cur_vs_images[slot] != img) || (_sg.mtl.state_cache.cur_vs_image_ids[slot].id != img->slot.id)) {
            _sg.mtl.state_cache.cur_vs_images[slot] = img;
            _sg.mtl.state_cache.cur_vs_image_ids[slot].id = img->slot.id;
            SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot];
            SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setVertexSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot];
        }
    }

    /* apply fragment shader images */
    for (slot = 0; slot < num_fs_imgs; slot++) {
        const _sg_image_t* img = fs_imgs[slot];
        if ((_sg.mtl.state_cache.cur_fs_images[slot] != img) || (_sg.mtl.state_cache.cur_fs_image_ids[slot].id != img->slot.id)) {
            _sg.mtl.state_cache.cur_fs_images[slot] = img;
            _sg.mtl.state_cache.cur_fs_image_ids[slot].id = img->slot.id;
            SOKOL_ASSERT(img->mtl.tex[img->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setFragmentTexture:_sg_mtl_id(img->mtl.tex[img->cmn.active_slot]) atIndex:slot];
            SOKOL_ASSERT(img->mtl.sampler_state != _SG_MTL_INVALID_SLOT_INDEX);
            [_sg.mtl.cmd_encoder setFragmentSamplerState:_sg_mtl_id(img->mtl.sampler_state) atIndex:slot];
        }
    }
}
9765 
/* copy uniform data into the per-frame global uniform buffer and record
   the buffer offset on the command encoder for the given shader stage
   and uniform-block index; advances the buffer's write cursor */
_SOKOL_PRIVATE void _sg_mtl_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(data && (num_bytes > 0));
    SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
    SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
    /* the data must fit into the remaining uniform-buffer space, and the
       current write cursor must be _SG_MTL_UB_ALIGN-aligned */
    SOKOL_ASSERT((_sg.mtl.cur_ub_offset + num_bytes) <= _sg.mtl.ub_size);
    SOKOL_ASSERT((_sg.mtl.cur_ub_offset & (_SG_MTL_UB_ALIGN-1)) == 0);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && _sg.mtl.state_cache.cur_pipeline->shader);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline->shader->slot.id == _sg.mtl.state_cache.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(num_bytes <= _sg.mtl.state_cache.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);

    /* copy to global uniform buffer, record offset into cmd encoder, and advance offset */
    uint8_t* dst = &_sg.mtl.cur_ub_base_ptr[_sg.mtl.cur_ub_offset];
    memcpy(dst, data, num_bytes);
    if (stage_index == SG_SHADERSTAGE_VS) {
        [_sg.mtl.cmd_encoder setVertexBufferOffset:_sg.mtl.cur_ub_offset atIndex:ub_index];
    }
    else {
        [_sg.mtl.cmd_encoder setFragmentBufferOffset:_sg.mtl.cur_ub_offset atIndex:ub_index];
    }
    /* keep the write cursor aligned for the next uniform update */
    _sg.mtl.cur_ub_offset = _sg_roundup(_sg.mtl.cur_ub_offset + num_bytes, _SG_MTL_UB_ALIGN);
}
9794 
/* issue an (indexed or non-indexed) draw call using the currently bound
   pipeline and bindings; the pipeline's index type selects the path */
_SOKOL_PRIVATE void _sg_mtl_draw(int base_element, int num_elements, int num_instances) {
    SOKOL_ASSERT(_sg.mtl.in_pass);
    if (!_sg.mtl.pass_valid) {
        return;
    }
    SOKOL_ASSERT(nil != _sg.mtl.cmd_encoder);
    SOKOL_ASSERT(_sg.mtl.state_cache.cur_pipeline && (_sg.mtl.state_cache.cur_pipeline->slot.id == _sg.mtl.state_cache.cur_pipeline_id.id));
    if (SG_INDEXTYPE_NONE != _sg.mtl.state_cache.cur_pipeline->cmn.index_type) {
        /* indexed rendering */
        SOKOL_ASSERT(_sg.mtl.state_cache.cur_indexbuffer && (_sg.mtl.state_cache.cur_indexbuffer->slot.id == _sg.mtl.state_cache.cur_indexbuffer_id.id));
        const _sg_buffer_t* ib = _sg.mtl.state_cache.cur_indexbuffer;
        SOKOL_ASSERT(ib->mtl.buf[ib->cmn.active_slot] != _SG_MTL_INVALID_SLOT_INDEX);
        /* base_element is expressed as an index-buffer byte offset, since
           Metal has no 'first index' draw parameter */
        const NSUInteger index_buffer_offset = _sg.mtl.state_cache.cur_indexbuffer_offset +
            base_element * _sg.mtl.state_cache.cur_pipeline->mtl.index_size;
        [_sg.mtl.cmd_encoder drawIndexedPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type
            indexCount:num_elements
            indexType:_sg.mtl.state_cache.cur_pipeline->mtl.index_type
            indexBuffer:_sg_mtl_id(ib->mtl.buf[ib->cmn.active_slot])
            indexBufferOffset:index_buffer_offset
            instanceCount:num_instances];
    }
    else {
        /* non-indexed rendering */
        [_sg.mtl.cmd_encoder drawPrimitives:_sg.mtl.state_cache.cur_pipeline->mtl.prim_type
            vertexStart:base_element
            vertexCount:num_elements
            instanceCount:num_instances];
    }
}
9824 
/* overwrite a dynamic buffer's content; rotates to the next buffering
   slot first so in-flight GPU frames are not disturbed */
_SOKOL_PRIVATE void _sg_mtl_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size) {
    SOKOL_ASSERT(buf && data && (data_size > 0));
    buf->cmn.active_slot++;
    if (buf->cmn.active_slot >= buf->cmn.num_slots) {
        buf->cmn.active_slot = 0;
    }
    __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]);
    memcpy([mtl_buf contents], data, data_size);
    #if defined(_SG_TARGET_MACOS)
    /* managed storage mode: flush the CPU writes to the GPU */
    [mtl_buf didModifyRange:NSMakeRange(0, data_size)];
    #endif
}
9837 
/* append data at the buffer's current append position; the buffering
   slot is only rotated on the first append of a frame; returns the
   4-byte-rounded number of bytes consumed */
_SOKOL_PRIVATE uint32_t _sg_mtl_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t data_size, bool new_frame) {
    SOKOL_ASSERT(buf && data && (data_size > 0));
    if (new_frame) {
        buf->cmn.active_slot++;
        if (buf->cmn.active_slot >= buf->cmn.num_slots) {
            buf->cmn.active_slot = 0;
        }
    }
    __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_id(buf->mtl.buf[buf->cmn.active_slot]);
    uint8_t* base_ptr = (uint8_t*) [mtl_buf contents];
    memcpy(base_ptr + buf->cmn.append_pos, data, data_size);
    #if defined(_SG_TARGET_MACOS)
    /* managed storage mode: flush only the freshly written range */
    [mtl_buf didModifyRange:NSMakeRange(buf->cmn.append_pos, data_size)];
    #endif
    /* NOTE: rounding up to 4 is a requirement from WebGPU, but we want
       identical behaviour across all backends */
    return _sg_roundup(data_size, 4);
}
9855 
/* overwrite a dynamic image's content; rotates to the next buffering
   slot first, then delegates the actual copy */
_SOKOL_PRIVATE void _sg_mtl_update_image(_sg_image_t* img, const sg_image_content* data) {
    SOKOL_ASSERT(img && data);
    img->cmn.active_slot++;
    if (img->cmn.active_slot >= img->cmn.num_slots) {
        img->cmn.active_slot = 0;
    }
    __unsafe_unretained id<MTLTexture> mtl_tex = _sg_mtl_id(img->mtl.tex[img->cmn.active_slot]);
    _sg_mtl_copy_image_content(img, mtl_tex, data);
}
9864 
9865 /*== WEBGPU BACKEND IMPLEMENTATION ===========================================*/
9866 #elif defined(SOKOL_WGPU)
9867 
_sg_wgpu_buffer_usage(sg_buffer_type t,sg_usage u)9868 _SOKOL_PRIVATE WGPUBufferUsageFlags _sg_wgpu_buffer_usage(sg_buffer_type t, sg_usage u) {
9869     WGPUBufferUsageFlags res = 0;
9870     if (SG_BUFFERTYPE_VERTEXBUFFER == t) {
9871         res |= WGPUBufferUsage_Vertex;
9872     }
9873     else {
9874         res |= WGPUBufferUsage_Index;
9875     }
9876     if (SG_USAGE_IMMUTABLE != u) {
9877         res |= WGPUBufferUsage_CopyDst;
9878     }
9879     return res;
9880 }
9881 
_sg_wgpu_load_op(sg_action a)9882 _SOKOL_PRIVATE WGPULoadOp _sg_wgpu_load_op(sg_action a) {
9883     switch (a) {
9884         case SG_ACTION_CLEAR:
9885         case SG_ACTION_DONTCARE:
9886             return WGPULoadOp_Clear;
9887         case SG_ACTION_LOAD:
9888             return WGPULoadOp_Load;
9889         default:
9890             SOKOL_UNREACHABLE;
9891             return (WGPULoadOp)0;
9892     }
9893 }
9894 
_sg_wgpu_tex_viewdim(sg_image_type t)9895 _SOKOL_PRIVATE WGPUTextureViewDimension _sg_wgpu_tex_viewdim(sg_image_type t) {
9896     switch (t) {
9897         case SG_IMAGETYPE_2D:       return WGPUTextureViewDimension_2D;
9898         case SG_IMAGETYPE_CUBE:     return WGPUTextureViewDimension_Cube;
9899         case SG_IMAGETYPE_3D:       return WGPUTextureViewDimension_3D;
9900         case SG_IMAGETYPE_ARRAY:    return WGPUTextureViewDimension_2DArray;
9901         default: SOKOL_UNREACHABLE; return WGPUTextureViewDimension_Force32;
9902     }
9903 }
9904 
_sg_wgpu_tex_comptype(sg_sampler_type t)9905 _SOKOL_PRIVATE WGPUTextureComponentType _sg_wgpu_tex_comptype(sg_sampler_type t) {
9906     switch (t) {
9907         case SG_SAMPLERTYPE_FLOAT:  return WGPUTextureComponentType_Float;
9908         case SG_SAMPLERTYPE_SINT:   return WGPUTextureComponentType_Sint;
9909         case SG_SAMPLERTYPE_UINT:   return WGPUTextureComponentType_Uint;
9910         default: SOKOL_UNREACHABLE; return WGPUTextureComponentType_Force32;
9911     }
9912 }
9913 
_sg_wgpu_tex_dim(sg_image_type t)9914 _SOKOL_PRIVATE WGPUTextureDimension _sg_wgpu_tex_dim(sg_image_type t) {
9915     if (SG_IMAGETYPE_3D == t) {
9916         return WGPUTextureDimension_3D;
9917     }
9918     else {
9919         return WGPUTextureDimension_2D;
9920     }
9921 }
9922 
_sg_wgpu_sampler_addrmode(sg_wrap m)9923 _SOKOL_PRIVATE WGPUAddressMode _sg_wgpu_sampler_addrmode(sg_wrap m) {
9924     switch (m) {
9925         case SG_WRAP_REPEAT:
9926             return WGPUAddressMode_Repeat;
9927         case SG_WRAP_CLAMP_TO_EDGE:
9928         case SG_WRAP_CLAMP_TO_BORDER:
9929             return WGPUAddressMode_ClampToEdge;
9930         case SG_WRAP_MIRRORED_REPEAT:
9931             return WGPUAddressMode_MirrorRepeat;
9932         default:
9933             SOKOL_UNREACHABLE;
9934             return WGPUAddressMode_Force32;
9935     }
9936 }
9937 
_sg_wgpu_sampler_minmagfilter(sg_filter f)9938 _SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_minmagfilter(sg_filter f) {
9939     switch (f) {
9940         case SG_FILTER_NEAREST:
9941         case SG_FILTER_NEAREST_MIPMAP_NEAREST:
9942         case SG_FILTER_NEAREST_MIPMAP_LINEAR:
9943             return WGPUFilterMode_Nearest;
9944         case SG_FILTER_LINEAR:
9945         case SG_FILTER_LINEAR_MIPMAP_NEAREST:
9946         case SG_FILTER_LINEAR_MIPMAP_LINEAR:
9947             return WGPUFilterMode_Linear;
9948         default:
9949             SOKOL_UNREACHABLE;
9950             return WGPUFilterMode_Force32;
9951     }
9952 }
9953 
_sg_wgpu_sampler_mipfilter(sg_filter f)9954 _SOKOL_PRIVATE WGPUFilterMode _sg_wgpu_sampler_mipfilter(sg_filter f) {
9955     switch (f) {
9956         case SG_FILTER_NEAREST:
9957         case SG_FILTER_LINEAR:
9958         case SG_FILTER_NEAREST_MIPMAP_NEAREST:
9959         case SG_FILTER_LINEAR_MIPMAP_NEAREST:
9960             return WGPUFilterMode_Nearest;
9961         case SG_FILTER_NEAREST_MIPMAP_LINEAR:
9962         case SG_FILTER_LINEAR_MIPMAP_LINEAR:
9963             return WGPUFilterMode_Linear;
9964         default:
9965             SOKOL_UNREACHABLE;
9966             return WGPUFilterMode_Force32;
9967     }
9968 }
9969 
_sg_wgpu_indexformat(sg_index_type t)9970 _SOKOL_PRIVATE WGPUIndexFormat _sg_wgpu_indexformat(sg_index_type t) {
9971     /* NOTE: there's no WGPUIndexFormat_None */
9972     return (t == SG_INDEXTYPE_UINT16) ? WGPUIndexFormat_Uint16 : WGPUIndexFormat_Uint32;
9973 }
9974 
_sg_wgpu_stepmode(sg_vertex_step s)9975 _SOKOL_PRIVATE WGPUInputStepMode _sg_wgpu_stepmode(sg_vertex_step s) {
9976     return (s == SG_VERTEXSTEP_PER_VERTEX) ? WGPUInputStepMode_Vertex : WGPUInputStepMode_Instance;
9977 }
9978 
/* translate sg_vertex_format into a WebGPU vertex format;
   SG_VERTEXFORMAT_UINT10_N2 has no WebGPU equivalent (see FIXME) */
_SOKOL_PRIVATE WGPUVertexFormat _sg_wgpu_vertexformat(sg_vertex_format f) {
    switch (f) {
        case SG_VERTEXFORMAT_FLOAT:         return WGPUVertexFormat_Float;
        case SG_VERTEXFORMAT_FLOAT2:        return WGPUVertexFormat_Float2;
        case SG_VERTEXFORMAT_FLOAT3:        return WGPUVertexFormat_Float3;
        case SG_VERTEXFORMAT_FLOAT4:        return WGPUVertexFormat_Float4;
        case SG_VERTEXFORMAT_BYTE4:         return WGPUVertexFormat_Char4;
        case SG_VERTEXFORMAT_BYTE4N:        return WGPUVertexFormat_Char4Norm;
        case SG_VERTEXFORMAT_UBYTE4:        return WGPUVertexFormat_UChar4;
        case SG_VERTEXFORMAT_UBYTE4N:       return WGPUVertexFormat_UChar4Norm;
        case SG_VERTEXFORMAT_SHORT2:        return WGPUVertexFormat_Short2;
        case SG_VERTEXFORMAT_SHORT2N:       return WGPUVertexFormat_Short2Norm;
        case SG_VERTEXFORMAT_USHORT2N:      return WGPUVertexFormat_UShort2Norm;
        case SG_VERTEXFORMAT_SHORT4:        return WGPUVertexFormat_Short4;
        case SG_VERTEXFORMAT_SHORT4N:       return WGPUVertexFormat_Short4Norm;
        case SG_VERTEXFORMAT_USHORT4N:      return WGPUVertexFormat_UShort4Norm;
        /* FIXME! UINT10_N2 */
        case SG_VERTEXFORMAT_UINT10_N2:
        default:
            SOKOL_UNREACHABLE;
            return WGPUVertexFormat_Force32;
    }
}
10002 
_sg_wgpu_topology(sg_primitive_type t)10003 _SOKOL_PRIVATE WGPUPrimitiveTopology _sg_wgpu_topology(sg_primitive_type t) {
10004     switch (t) {
10005         case SG_PRIMITIVETYPE_POINTS:           return WGPUPrimitiveTopology_PointList;
10006         case SG_PRIMITIVETYPE_LINES:            return WGPUPrimitiveTopology_LineList;
10007         case SG_PRIMITIVETYPE_LINE_STRIP:       return WGPUPrimitiveTopology_LineStrip;
10008         case SG_PRIMITIVETYPE_TRIANGLES:        return WGPUPrimitiveTopology_TriangleList;
10009         case SG_PRIMITIVETYPE_TRIANGLE_STRIP:   return WGPUPrimitiveTopology_TriangleStrip;
10010         default: SOKOL_UNREACHABLE; return WGPUPrimitiveTopology_Force32;
10011     }
10012 }
10013 
_sg_wgpu_frontface(sg_face_winding fw)10014 _SOKOL_PRIVATE WGPUFrontFace _sg_wgpu_frontface(sg_face_winding fw) {
10015     return (fw == SG_FACEWINDING_CCW) ? WGPUFrontFace_CCW : WGPUFrontFace_CW;
10016 }
10017 
_sg_wgpu_cullmode(sg_cull_mode cm)10018 _SOKOL_PRIVATE WGPUCullMode _sg_wgpu_cullmode(sg_cull_mode cm) {
10019     switch (cm) {
10020         case SG_CULLMODE_NONE:      return WGPUCullMode_None;
10021         case SG_CULLMODE_FRONT:     return WGPUCullMode_Front;
10022         case SG_CULLMODE_BACK:      return WGPUCullMode_Back;
10023         default: SOKOL_UNREACHABLE; return WGPUCullMode_Force32;
10024     }
10025 }
10026 
/* translate sg_pixel_format into a WebGPU texture format; formats with
   no WebGPU equivalent (16-bit normalized, PVRTC, ETC2) fall through to
   the unreachable default */
_SOKOL_PRIVATE WGPUTextureFormat _sg_wgpu_textureformat(sg_pixel_format p) {
    switch (p) {
        case SG_PIXELFORMAT_NONE:           return WGPUTextureFormat_Undefined;
        case SG_PIXELFORMAT_R8:             return WGPUTextureFormat_R8Unorm;
        case SG_PIXELFORMAT_R8SN:           return WGPUTextureFormat_R8Snorm;
        case SG_PIXELFORMAT_R8UI:           return WGPUTextureFormat_R8Uint;
        case SG_PIXELFORMAT_R8SI:           return WGPUTextureFormat_R8Sint;
        case SG_PIXELFORMAT_R16UI:          return WGPUTextureFormat_R16Uint;
        case SG_PIXELFORMAT_R16SI:          return WGPUTextureFormat_R16Sint;
        case SG_PIXELFORMAT_R16F:           return WGPUTextureFormat_R16Float;
        case SG_PIXELFORMAT_RG8:            return WGPUTextureFormat_RG8Unorm;
        case SG_PIXELFORMAT_RG8SN:          return WGPUTextureFormat_RG8Snorm;
        case SG_PIXELFORMAT_RG8UI:          return WGPUTextureFormat_RG8Uint;
        case SG_PIXELFORMAT_RG8SI:          return WGPUTextureFormat_RG8Sint;
        case SG_PIXELFORMAT_R32UI:          return WGPUTextureFormat_R32Uint;
        case SG_PIXELFORMAT_R32SI:          return WGPUTextureFormat_R32Sint;
        case SG_PIXELFORMAT_R32F:           return WGPUTextureFormat_R32Float;
        case SG_PIXELFORMAT_RG16UI:         return WGPUTextureFormat_RG16Uint;
        case SG_PIXELFORMAT_RG16SI:         return WGPUTextureFormat_RG16Sint;
        case SG_PIXELFORMAT_RG16F:          return WGPUTextureFormat_RG16Float;
        case SG_PIXELFORMAT_RGBA8:          return WGPUTextureFormat_RGBA8Unorm;
        case SG_PIXELFORMAT_RGBA8SN:        return WGPUTextureFormat_RGBA8Snorm;
        case SG_PIXELFORMAT_RGBA8UI:        return WGPUTextureFormat_RGBA8Uint;
        case SG_PIXELFORMAT_RGBA8SI:        return WGPUTextureFormat_RGBA8Sint;
        case SG_PIXELFORMAT_BGRA8:          return WGPUTextureFormat_BGRA8Unorm;
        case SG_PIXELFORMAT_RGB10A2:        return WGPUTextureFormat_RGB10A2Unorm;
        case SG_PIXELFORMAT_RG11B10F:       return WGPUTextureFormat_RG11B10Float;
        case SG_PIXELFORMAT_RG32UI:         return WGPUTextureFormat_RG32Uint;
        case SG_PIXELFORMAT_RG32SI:         return WGPUTextureFormat_RG32Sint;
        case SG_PIXELFORMAT_RG32F:          return WGPUTextureFormat_RG32Float;
        case SG_PIXELFORMAT_RGBA16UI:       return WGPUTextureFormat_RGBA16Uint;
        case SG_PIXELFORMAT_RGBA16SI:       return WGPUTextureFormat_RGBA16Sint;
        case SG_PIXELFORMAT_RGBA16F:        return WGPUTextureFormat_RGBA16Float;
        case SG_PIXELFORMAT_RGBA32UI:       return WGPUTextureFormat_RGBA32Uint;
        case SG_PIXELFORMAT_RGBA32SI:       return WGPUTextureFormat_RGBA32Sint;
        case SG_PIXELFORMAT_RGBA32F:        return WGPUTextureFormat_RGBA32Float;
        case SG_PIXELFORMAT_DEPTH:          return WGPUTextureFormat_Depth24Plus;
        case SG_PIXELFORMAT_DEPTH_STENCIL:  return WGPUTextureFormat_Depth24PlusStencil8;
        case SG_PIXELFORMAT_BC1_RGBA:       return WGPUTextureFormat_BC1RGBAUnorm;
        case SG_PIXELFORMAT_BC2_RGBA:       return WGPUTextureFormat_BC2RGBAUnorm;
        case SG_PIXELFORMAT_BC3_RGBA:       return WGPUTextureFormat_BC3RGBAUnorm;
        case SG_PIXELFORMAT_BC4_R:          return WGPUTextureFormat_BC4RUnorm;
        case SG_PIXELFORMAT_BC4_RSN:        return WGPUTextureFormat_BC4RSnorm;
        case SG_PIXELFORMAT_BC5_RG:         return WGPUTextureFormat_BC5RGUnorm;
        case SG_PIXELFORMAT_BC5_RGSN:       return WGPUTextureFormat_BC5RGSnorm;
        case SG_PIXELFORMAT_BC6H_RGBF:      return WGPUTextureFormat_BC6HRGBSfloat;
        case SG_PIXELFORMAT_BC6H_RGBUF:     return WGPUTextureFormat_BC6HRGBUfloat;
        case SG_PIXELFORMAT_BC7_RGBA:       return WGPUTextureFormat_BC7RGBAUnorm;

        /* NOT SUPPORTED */
        case SG_PIXELFORMAT_R16:
        case SG_PIXELFORMAT_R16SN:
        case SG_PIXELFORMAT_RG16:
        case SG_PIXELFORMAT_RG16SN:
        case SG_PIXELFORMAT_RGBA16:
        case SG_PIXELFORMAT_RGBA16SN:
        case SG_PIXELFORMAT_PVRTC_RGB_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGB_4BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_2BPP:
        case SG_PIXELFORMAT_PVRTC_RGBA_4BPP:
        case SG_PIXELFORMAT_ETC2_RGB8:
        case SG_PIXELFORMAT_ETC2_RGB8A1:
        case SG_PIXELFORMAT_ETC2_RGBA8:
        case SG_PIXELFORMAT_ETC2_RG11:
        case SG_PIXELFORMAT_ETC2_RG11SN:
        default:
            SOKOL_UNREACHABLE;
            return WGPUTextureFormat_Force32;
    }
}
10097 
10098 /*
FIXME: this function is currently unused — remove it, or wire it up where a texture aspect is needed?
10100 _SOKOL_PRIVATE WGPUTextureAspect _sg_wgpu_texture_aspect(sg_pixel_format fmt) {
10101     if (_sg_is_valid_rendertarget_depth_format(fmt)) {
10102         if (!_sg_is_depth_stencil_format(fmt)) {
10103             return WGPUTextureAspect_DepthOnly;
10104         }
10105     }
10106     return WGPUTextureAspect_All;
10107 }
10108 */
10109 
/* translate sg_compare_func into a WebGPU compare function (1:1 mapping) */
_SOKOL_PRIVATE WGPUCompareFunction _sg_wgpu_comparefunc(sg_compare_func f) {
    switch (f) {
        case SG_COMPAREFUNC_NEVER:          return WGPUCompareFunction_Never;
        case SG_COMPAREFUNC_LESS:           return WGPUCompareFunction_Less;
        case SG_COMPAREFUNC_EQUAL:          return WGPUCompareFunction_Equal;
        case SG_COMPAREFUNC_LESS_EQUAL:     return WGPUCompareFunction_LessEqual;
        case SG_COMPAREFUNC_GREATER:        return WGPUCompareFunction_Greater;
        case SG_COMPAREFUNC_NOT_EQUAL:      return WGPUCompareFunction_NotEqual;
        case SG_COMPAREFUNC_GREATER_EQUAL:  return WGPUCompareFunction_GreaterEqual;
        case SG_COMPAREFUNC_ALWAYS:         return WGPUCompareFunction_Always;
        default: SOKOL_UNREACHABLE; return WGPUCompareFunction_Force32;
    }
}
10123 
/* translate sg_stencil_op into a WebGPU stencil operation (1:1 mapping) */
_SOKOL_PRIVATE WGPUStencilOperation _sg_wgpu_stencilop(sg_stencil_op op) {
    switch (op) {
        case SG_STENCILOP_KEEP:         return WGPUStencilOperation_Keep;
        case SG_STENCILOP_ZERO:         return WGPUStencilOperation_Zero;
        case SG_STENCILOP_REPLACE:      return WGPUStencilOperation_Replace;
        case SG_STENCILOP_INCR_CLAMP:   return WGPUStencilOperation_IncrementClamp;
        case SG_STENCILOP_DECR_CLAMP:   return WGPUStencilOperation_DecrementClamp;
        case SG_STENCILOP_INVERT:       return WGPUStencilOperation_Invert;
        case SG_STENCILOP_INCR_WRAP:    return WGPUStencilOperation_IncrementWrap;
        case SG_STENCILOP_DECR_WRAP:    return WGPUStencilOperation_DecrementWrap;
        default: SOKOL_UNREACHABLE; return WGPUStencilOperation_Force32;
    }
}
10137 
_sg_wgpu_blendop(sg_blend_op op)10138 _SOKOL_PRIVATE WGPUBlendOperation _sg_wgpu_blendop(sg_blend_op op) {
10139     switch (op) {
10140         case SG_BLENDOP_ADD:                return WGPUBlendOperation_Add;
10141         case SG_BLENDOP_SUBTRACT:           return WGPUBlendOperation_Subtract;
10142         case SG_BLENDOP_REVERSE_SUBTRACT:   return WGPUBlendOperation_ReverseSubtract;
10143         default: SOKOL_UNREACHABLE; return WGPUBlendOperation_Force32;
10144     }
10145 }
10146 
/* translate sg_blend_factor into a WebGPU blend factor; note that the
   BLEND_ALPHA factors intentionally alias the BLEND_COLOR factors
   (see FIXME below) */
_SOKOL_PRIVATE WGPUBlendFactor _sg_wgpu_blendfactor(sg_blend_factor f) {
    switch (f) {
        case SG_BLENDFACTOR_ZERO:                   return WGPUBlendFactor_Zero;
        case SG_BLENDFACTOR_ONE:                    return WGPUBlendFactor_One;
        case SG_BLENDFACTOR_SRC_COLOR:              return WGPUBlendFactor_SrcColor;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR:    return WGPUBlendFactor_OneMinusSrcColor;
        case SG_BLENDFACTOR_SRC_ALPHA:              return WGPUBlendFactor_SrcAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA:    return WGPUBlendFactor_OneMinusSrcAlpha;
        case SG_BLENDFACTOR_DST_COLOR:              return WGPUBlendFactor_DstColor;
        case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR:    return WGPUBlendFactor_OneMinusDstColor;
        case SG_BLENDFACTOR_DST_ALPHA:              return WGPUBlendFactor_DstAlpha;
        case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA:    return WGPUBlendFactor_OneMinusDstAlpha;
        case SG_BLENDFACTOR_SRC_ALPHA_SATURATED:    return WGPUBlendFactor_SrcAlphaSaturated;
        case SG_BLENDFACTOR_BLEND_COLOR:            return WGPUBlendFactor_BlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR:  return WGPUBlendFactor_OneMinusBlendColor;
        /* FIXME: separate blend alpha value not supported? */
        case SG_BLENDFACTOR_BLEND_ALPHA:            return WGPUBlendFactor_BlendColor;
        case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA:  return WGPUBlendFactor_OneMinusBlendColor;
        default:
            SOKOL_UNREACHABLE; return WGPUBlendFactor_Force32;
    }
}
10169 
_sg_wgpu_colorwritemask(uint8_t m)10170 _SOKOL_PRIVATE WGPUColorWriteMaskFlags _sg_wgpu_colorwritemask(uint8_t m) {
10171     WGPUColorWriteMaskFlags res = 0;
10172     if (0 != (m & SG_COLORMASK_R)) {
10173         res |= WGPUColorWriteMask_Red;
10174     }
10175     if (0 != (m & SG_COLORMASK_G)) {
10176         res |= WGPUColorWriteMask_Green;
10177     }
10178     if (0 != (m & SG_COLORMASK_B)) {
10179         res |= WGPUColorWriteMask_Blue;
10180     }
10181     if (0 != (m & SG_COLORMASK_A)) {
10182         res |= WGPUColorWriteMask_Alpha;
10183     }
10184     return res;
10185 }
10186 
/* initialize the backend feature flags, resource limits and the
   per-pixel-format capability table for the WebGPU backend
   (the _sg_pixelformat_* helpers set capability-flag combinations,
   they are defined earlier in this file)
*/
_SOKOL_PRIVATE void _sg_wgpu_init_caps(void) {
    _sg.backend = SG_BACKEND_WGPU;
    _sg.features.instancing = true;
    _sg.features.origin_top_left = true;
    _sg.features.multiple_render_targets = true;
    _sg.features.msaa_render_targets = true;
    _sg.features.imagetype_3d = true;
    _sg.features.imagetype_array = true;
    _sg.features.image_clamp_to_border = false;

    /* FIXME: max images size??? */
    /* NOTE(review): these limits are hardwired, not queried from the device */
    _sg.limits.max_image_size_2d = 8 * 1024;
    _sg.limits.max_image_size_cube = 8 * 1024;
    _sg.limits.max_image_size_3d = 2 * 1024;
    _sg.limits.max_image_size_array = 8 * 1024;
    _sg.limits.max_image_array_layers = 2 * 1024;
    _sg.limits.max_vertex_attrs = SG_MAX_VERTEX_ATTRIBUTES;

    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_R8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R8SI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_R16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_R16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RG8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG8SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_R32SI]);
    _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_R32F]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RG16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RG16F]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA8]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_RGBA8SN]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA8SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_BGRA8]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGB10A2]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RG32SI]);
    _sg_pixelformat_sbr(&_sg.formats[SG_PIXELFORMAT_RG32F]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16UI]);
    _sg_pixelformat_srm(&_sg.formats[SG_PIXELFORMAT_RGBA16SI]);
    _sg_pixelformat_all(&_sg.formats[SG_PIXELFORMAT_RGBA16F]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32UI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32SI]);
    _sg_pixelformat_sr(&_sg.formats[SG_PIXELFORMAT_RGBA32F]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH]);
    _sg_pixelformat_srmd(&_sg.formats[SG_PIXELFORMAT_DEPTH_STENCIL]);

    /* FIXME FIXME FIXME: need to check if BC texture compression is
        actually supported, currently the WebGPU C-API doesn't allow this
    */
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC1_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC2_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC3_RGBA]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_R]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC4_RSN]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RG]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC5_RGSN]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBF]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC6H_RGBUF]);
    _sg_pixelformat_sf(&_sg.formats[SG_PIXELFORMAT_BC7_RGBA]);
}
10254 
10255 /*
10256     WGPU uniform buffer pool implementation:
10257 
10258     At start of frame, a mapped buffer is grabbed from the pool,
10259     or a new buffer is created if there is no mapped buffer available.
10260 
10261     At end of frame, the current buffer is unmapped before queue submit,
10262     and async-mapped immediately again.
10263 
10264     UNIFORM BUFFER FIXME:
10265 
10266     - As per WebGPU spec, it should be possible to create a Uniform|MapWrite
10267       buffer, but this isn't currently allowed in Dawn.
10268 */
/* one-time setup of the uniform buffer pool: creates the GPU-side uniform
   buffer plus the bind group layout and bind group which expose it to
   shaders as (SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS) dynamic-offset
   uniform-buffer bindings
*/
_SOKOL_PRIVATE void _sg_wgpu_ubpool_init(const sg_desc* desc) {

    /* Add the max-uniform-update size (64 KB) to the requested buffer size,
       this is to prevent validation errors in the WebGPU implementation
       if the entire buffer size is used per frame. 64 KB is the allowed
       max uniform update size on NVIDIA
    */
    _sg.wgpu.ub.num_bytes = desc->uniform_buffer_size + _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE;

    /* the single GPU-side uniform buffer, filled via staging-buffer copies */
    WGPUBufferDescriptor ub_desc;
    memset(&ub_desc, 0, sizeof(ub_desc));
    ub_desc.size = _sg.wgpu.ub.num_bytes;
    ub_desc.usage = WGPUBufferUsage_Uniform|WGPUBufferUsage_CopyDst;
    _sg.wgpu.ub.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &ub_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.buf);

    /* one dynamic-offset uniform binding per (stage, ub-slot) pair,
       vertex-stage bindings first, then fragment-stage bindings */
    WGPUBindGroupLayoutBinding ub_bglb_desc[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    memset(ub_bglb_desc, 0, sizeof(ub_bglb_desc));
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment;
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index;
            ub_bglb_desc[stage_index][ub_index].binding = bind_index;
            ub_bglb_desc[stage_index][ub_index].visibility = vis;
            ub_bglb_desc[stage_index][ub_index].type = WGPUBindingType_UniformBuffer;
            /* dynamic offsets allow reusing one big buffer for all uniform updates */
            ub_bglb_desc[stage_index][ub_index].hasDynamicOffset = true;
        }
    }

    WGPUBindGroupLayoutDescriptor ub_bgl_desc;
    memset(&ub_bgl_desc, 0, sizeof(ub_bgl_desc));
    ub_bgl_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS;
    ub_bgl_desc.bindings = &ub_bglb_desc[0][0];
    _sg.wgpu.ub.bindgroup_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &ub_bgl_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.bindgroup_layout);

    /* the bind group pointing all bindings at the one uniform buffer */
    WGPUBindGroupBinding ub_bgb[SG_NUM_SHADER_STAGES][SG_MAX_SHADERSTAGE_UBS];
    memset(ub_bgb, 0, sizeof(ub_bgb));
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            int bind_index = stage_index * SG_MAX_SHADERSTAGE_UBS + ub_index;
            ub_bgb[stage_index][ub_index].binding = bind_index;
            ub_bgb[stage_index][ub_index].buffer = _sg.wgpu.ub.buf;
            // FIXME FIXME FIXME FIXME: HACK FOR VALIDATION BUG IN DAWN
            ub_bgb[stage_index][ub_index].size = (1<<16);
        }
    }
    WGPUBindGroupDescriptor bg_desc;
    memset(&bg_desc, 0, sizeof(bg_desc));
    bg_desc.layout = _sg.wgpu.ub.bindgroup_layout;
    bg_desc.bindingCount = SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS;
    bg_desc.bindings = &ub_bgb[0][0];
    _sg.wgpu.ub.bindgroup = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc);
    SOKOL_ASSERT(_sg.wgpu.ub.bindgroup);
}
10324 
_sg_wgpu_ubpool_discard(void)10325 _SOKOL_PRIVATE void _sg_wgpu_ubpool_discard(void) {
10326     if (_sg.wgpu.ub.buf) {
10327         wgpuBufferRelease(_sg.wgpu.ub.buf);
10328         _sg.wgpu.ub.buf = 0;
10329     }
10330     if (_sg.wgpu.ub.bindgroup) {
10331         wgpuBindGroupRelease(_sg.wgpu.ub.bindgroup);
10332         _sg.wgpu.ub.bindgroup = 0;
10333     }
10334     if (_sg.wgpu.ub.bindgroup_layout) {
10335         wgpuBindGroupLayoutRelease(_sg.wgpu.ub.bindgroup_layout);
10336         _sg.wgpu.ub.bindgroup_layout = 0;
10337     }
10338     for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) {
10339         if (_sg.wgpu.ub.stage.buf[i]) {
10340             wgpuBufferRelease(_sg.wgpu.ub.stage.buf[i]);
10341             _sg.wgpu.ub.stage.buf[i] = 0;
10342             _sg.wgpu.ub.stage.ptr[i] = 0;
10343         }
10344     }
10345 }
10346 
/* async callback invoked when a uniform staging buffer has been mapped,
   user_data carries the staging-buffer slot index
*/
_SOKOL_PRIVATE void _sg_wgpu_ubpool_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) {
    /* a late callback may arrive after the backend was shut down, ignore it */
    if (!_sg.wgpu.valid) {
        return;
    }
    /* FIXME: better handling for this */
    if (WGPUBufferMapAsyncStatus_Success != status) {
        SOKOL_LOG("Mapping uniform buffer failed!\n");
        SOKOL_ASSERT(false);
    }
    SOKOL_ASSERT(data && (data_len == _sg.wgpu.ub.num_bytes));
    /* the slot index was smuggled through the user-data pointer */
    int index = (int)(intptr_t) user_data;
    SOKOL_ASSERT(index < _sg.wgpu.ub.stage.num);
    SOKOL_ASSERT(0 == _sg.wgpu.ub.stage.ptr[index]);
    /* a non-null ptr marks this staging buffer as mapped and available */
    _sg.wgpu.ub.stage.ptr[index] = (uint8_t*) data;
}
10362 
/* called at the start of each frame: re-map last frame's staging buffer,
   rewind the per-frame offsets, and pick (or create on demand) a mapped
   staging buffer for this frame
*/
_SOKOL_PRIVATE void _sg_wgpu_ubpool_next_frame(bool first_frame) {

    /* immediately request a new mapping for the last frame's current staging buffer */
    if (!first_frame) {
        WGPUBuffer ub_src = _sg.wgpu.ub.stage.buf[_sg.wgpu.ub.stage.cur];
        wgpuBufferMapWriteAsync(ub_src, _sg_wgpu_ubpool_mapped_callback, (void*)(intptr_t)_sg.wgpu.ub.stage.cur);
    }

    /* rewind per-frame offsets */
    _sg.wgpu.ub.offset = 0;
    memset(&_sg.wgpu.ub.bind_offsets, 0, sizeof(_sg.wgpu.ub.bind_offsets));

    /* check if a mapped staging buffer is available, otherwise create one */
    for (int i = 0; i < _sg.wgpu.ub.stage.num; i++) {
        if (_sg.wgpu.ub.stage.ptr[i]) {
            _sg.wgpu.ub.stage.cur = i;
            return;
        }
    }

    /* no mapped uniform buffer available, create one */
    /* (created buffers are reused, so stage.num only ever grows up to the
       effective frame-pipeline depth of the WebGPU implementation) */
    SOKOL_ASSERT(_sg.wgpu.ub.stage.num < _SG_WGPU_STAGING_PIPELINE_SIZE);
    _sg.wgpu.ub.stage.cur = _sg.wgpu.ub.stage.num++;
    const int cur = _sg.wgpu.ub.stage.cur;

    WGPUBufferDescriptor desc;
    memset(&desc, 0, sizeof(desc));
    desc.size = _sg.wgpu.ub.num_bytes;
    desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite;
    /* create the buffer in an already-mapped state so it's usable this frame */
    WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc);
    _sg.wgpu.ub.stage.buf[cur] = res.buffer;
    _sg.wgpu.ub.stage.ptr[cur] = (uint8_t*) res.data;
    SOKOL_ASSERT(_sg.wgpu.ub.stage.buf[cur]);
    SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]);
    SOKOL_ASSERT(res.dataLength == _sg.wgpu.ub.num_bytes);
}
10399 
_sg_wgpu_ubpool_flush(void)10400 _SOKOL_PRIVATE void _sg_wgpu_ubpool_flush(void) {
10401     /* unmap staging buffer and copy to uniform buffer */
10402     const int cur = _sg.wgpu.ub.stage.cur;
10403     SOKOL_ASSERT(_sg.wgpu.ub.stage.ptr[cur]);
10404     _sg.wgpu.ub.stage.ptr[cur] = 0;
10405     WGPUBuffer src_buf = _sg.wgpu.ub.stage.buf[cur];
10406     wgpuBufferUnmap(src_buf);
10407     if (_sg.wgpu.ub.offset > 0) {
10408         WGPUBuffer dst_buf = _sg.wgpu.ub.buf;
10409         wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.render_cmd_enc, src_buf, 0, dst_buf, 0, _sg.wgpu.ub.offset);
10410     }
10411 }
10412 
10413 /* helper function to compute number of bytes needed in staging buffer to copy image data */
_sg_wgpu_image_content_buffer_size(const _sg_image_t * img,const sg_image_content * content)10414 _SOKOL_PRIVATE uint32_t _sg_wgpu_image_content_buffer_size(const _sg_image_t* img, const sg_image_content* content) {
10415     uint32_t num_bytes = 0;
10416     const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
10417     const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.depth : 1;
10418     for (int mip_index = 0; mip_index < img->cmn.num_mipmaps; mip_index++) {
10419         const uint32_t mip_width = _sg_max(img->cmn.width >> mip_index, 1);
10420         const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1);
10421         /* row-pitch must be 256-aligend */
10422         const uint32_t bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN);
10423         num_bytes += bytes_per_slice * num_slices * num_faces;
10424     }
10425     return num_bytes;
10426 }
10427 
/* helper function to copy image data into a texture via a staging buffer, returns number of
   bytes copied
*/
/* The source data (tightly packed) is copied row-by-row into the mapped
   staging buffer at 256-byte-aligned row pitch, and one buffer-to-texture
   copy is recorded per face/mip/slice into the staging command encoder.
*/
_SOKOL_PRIVATE uint32_t _sg_wgpu_copy_image_content(WGPUBuffer stg_buf, uint8_t* stg_base_ptr, uint32_t stg_base_offset, _sg_image_t* img, const sg_image_content* content) {
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
    SOKOL_ASSERT(stg_buf && stg_base_ptr);
    SOKOL_ASSERT(img);
    SOKOL_ASSERT(content);
    uint32_t stg_offset = stg_base_offset;
    const uint32_t num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6:1;
    const uint32_t num_slices = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? img->cmn.depth : 1;
    const sg_pixel_format fmt = img->cmn.pixel_format;
    /* reusable copy-view structs, per-iteration fields are patched in the loop */
    WGPUBufferCopyView src_view;
    memset(&src_view, 0, sizeof(src_view));
    src_view.buffer = stg_buf;
    WGPUTextureCopyView dst_view;
    memset(&dst_view, 0, sizeof(dst_view));
    dst_view.texture = img->wgpu.tex;
    WGPUExtent3D extent;
    memset(&extent, 0, sizeof(extent));

    for (uint32_t face_index = 0; face_index < num_faces; face_index++) {
        for (uint32_t mip_index = 0; mip_index < (uint32_t)img->cmn.num_mipmaps; mip_index++) {
            SOKOL_ASSERT(content->subimage[face_index][mip_index].ptr);
            SOKOL_ASSERT(content->subimage[face_index][mip_index].size > 0);
            const uint8_t* src_base_ptr = (const uint8_t*)content->subimage[face_index][mip_index].ptr;
            SOKOL_ASSERT(src_base_ptr);
            uint8_t* dst_base_ptr = stg_base_ptr + stg_offset;

            /* src pitches are tightly packed, dst pitches are 256-byte aligned */
            const uint32_t mip_width  = _sg_max(img->cmn.width >> mip_index, 1);
            const uint32_t mip_height = _sg_max(img->cmn.height >> mip_index, 1);
            const uint32_t mip_depth  = (img->cmn.type == SG_IMAGETYPE_3D) ? _sg_max(img->cmn.depth >> mip_index, 1) : 1;
            const uint32_t num_rows   = _sg_num_rows(fmt, mip_height);
            const uint32_t src_bytes_per_row   = _sg_row_pitch(fmt, mip_width, 1);
            const uint32_t dst_bytes_per_row   = _sg_row_pitch(fmt, mip_width, _SG_WGPU_ROWPITCH_ALIGN);
            const uint32_t src_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, 1);
            const uint32_t dst_bytes_per_slice = _sg_surface_pitch(fmt, mip_width, mip_height, _SG_WGPU_ROWPITCH_ALIGN);
            SOKOL_ASSERT((uint32_t)content->subimage[face_index][mip_index].size == (src_bytes_per_slice * num_slices));
            SOKOL_ASSERT(src_bytes_per_row <= dst_bytes_per_row);
            SOKOL_ASSERT(src_bytes_per_slice == (src_bytes_per_row * num_rows));
            SOKOL_ASSERT(dst_bytes_per_slice == (dst_bytes_per_row * num_rows));
            _SOKOL_UNUSED(src_bytes_per_slice);

            /* copy content into mapped staging buffer */
            if (src_bytes_per_row == dst_bytes_per_row) {
                /* can do a single memcpy */
                uint32_t num_bytes = content->subimage[face_index][mip_index].size;
                memcpy(dst_base_ptr, src_base_ptr, num_bytes);
            }
            else {
                /* src/dst pitch doesn't match, need to copy row by row */
                uint8_t* dst_ptr = dst_base_ptr;
                const uint8_t* src_ptr = src_base_ptr;
                for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) {
                    SOKOL_ASSERT(dst_ptr == dst_base_ptr + slice_index * dst_bytes_per_slice);
                    for (uint32_t row_index = 0; row_index < num_rows; row_index++) {
                        memcpy(dst_ptr, src_ptr, src_bytes_per_row);
                        src_ptr += src_bytes_per_row;
                        dst_ptr += dst_bytes_per_row;
                    }
                }
            }

            /* record the staging copy operation into command encoder */
            src_view.imageHeight = mip_height;
            src_view.rowPitch = dst_bytes_per_row;
            dst_view.mipLevel = mip_index;
            extent.width = mip_width;
            extent.height = mip_height;
            extent.depth = mip_depth;
            SOKOL_ASSERT((img->cmn.type != SG_IMAGETYPE_CUBE) || (num_slices == 1));
            for (uint32_t slice_index = 0; slice_index < num_slices; slice_index++) {
                /* cube maps address by face, array textures by slice */
                const uint32_t layer_index = (img->cmn.type == SG_IMAGETYPE_ARRAY) ? slice_index : face_index;
                src_view.offset = stg_offset;
                dst_view.arrayLayer = layer_index;
                wgpuCommandEncoderCopyBufferToTexture(_sg.wgpu.staging_cmd_enc, &src_view, &dst_view, &extent);
                stg_offset += dst_bytes_per_slice;
                SOKOL_ASSERT(stg_offset <= _sg.wgpu.staging.num_bytes);
            }
        }
    }
    SOKOL_ASSERT(stg_offset >= stg_base_offset);
    return (stg_offset - stg_base_offset);
}
10512 
10513 /*
10514     The WGPU staging buffer implementation:
10515 
10516     Very similar to the uniform buffer pool, there's a pool of big
10517     per-frame staging buffers, each must be big enough to hold
10518     all data uploaded to dynamic resources for one frame.
10519 
10520     Staging buffers are created on demand and reused, because the
10521     'frame pipeline depth' of WGPU isn't predictable.
10522 
10523     The difference to the uniform buffer system is that there isn't
10524     a 1:1 relationship for source- and destination for the
10525     data-copy operation. There's always one staging buffer as copy-source
10526     per frame, but many copy-destinations (regular vertex/index buffers
10527     or images). Instead of one big copy-operation at the end of the frame,
10528     multiple copy-operations will be written throughout the frame.
10529 */
_sg_wgpu_staging_init(const sg_desc * desc)10530 _SOKOL_PRIVATE void _sg_wgpu_staging_init(const sg_desc* desc) {
10531     SOKOL_ASSERT(desc && (desc->staging_buffer_size > 0));
10532     _sg.wgpu.staging.num_bytes = desc->staging_buffer_size;
10533     /* there's actually nothing more to do here */
10534 }
10535 
_sg_wgpu_staging_discard(void)10536 _SOKOL_PRIVATE void _sg_wgpu_staging_discard(void) {
10537     for (int i = 0; i < _sg.wgpu.staging.num; i++) {
10538         if (_sg.wgpu.staging.buf[i]) {
10539             wgpuBufferRelease(_sg.wgpu.staging.buf[i]);
10540             _sg.wgpu.staging.buf[i] = 0;
10541             _sg.wgpu.staging.ptr[i] = 0;
10542         }
10543     }
10544 }
10545 
_sg_wgpu_staging_mapped_callback(WGPUBufferMapAsyncStatus status,void * data,uint64_t data_len,void * user_data)10546 _SOKOL_PRIVATE void _sg_wgpu_staging_mapped_callback(WGPUBufferMapAsyncStatus status, void* data, uint64_t data_len, void* user_data) {
10547     if (!_sg.wgpu.valid) {
10548         return;
10549     }
10550     /* FIXME: better handling for this */
10551     if (WGPUBufferMapAsyncStatus_Success != status) {
10552         SOKOL_ASSERT("Mapping staging buffer failed!\n");
10553         SOKOL_ASSERT(false);
10554     }
10555     SOKOL_ASSERT(data && (data_len == _sg.wgpu.staging.num_bytes));
10556     int index = (int)(intptr_t) user_data;
10557     SOKOL_ASSERT(index < _sg.wgpu.staging.num);
10558     SOKOL_ASSERT(0 == _sg.wgpu.staging.ptr[index]);
10559     _sg.wgpu.staging.ptr[index] = (uint8_t*) data;
10560 }
10561 
/* called at the start of each frame: re-map last frame's staging buffer,
   rewind the staging offset, and pick (or create on demand) a mapped
   staging buffer for this frame
*/
_SOKOL_PRIVATE void _sg_wgpu_staging_next_frame(bool first_frame) {

    /* immediately request a new mapping for the last frame's current staging buffer */
    if (!first_frame) {
        WGPUBuffer cur_buf = _sg.wgpu.staging.buf[_sg.wgpu.staging.cur];
        wgpuBufferMapWriteAsync(cur_buf, _sg_wgpu_staging_mapped_callback, (void*)(intptr_t)_sg.wgpu.staging.cur);
    }

    /* rewind staging-buffer offset */
    _sg.wgpu.staging.offset = 0;

    /* check if mapped staging buffer is available, otherwise create one */
    for (int i = 0; i < _sg.wgpu.staging.num; i++) {
        if (_sg.wgpu.staging.ptr[i]) {
            _sg.wgpu.staging.cur = i;
            return;
        }
    }

    /* no mapped buffer available, create one */
    /* (buffers are reused, so staging.num only ever grows up to the
       effective frame-pipeline depth of the WebGPU implementation) */
    SOKOL_ASSERT(_sg.wgpu.staging.num < _SG_WGPU_STAGING_PIPELINE_SIZE);
    _sg.wgpu.staging.cur = _sg.wgpu.staging.num++;
    const int cur = _sg.wgpu.staging.cur;

    WGPUBufferDescriptor desc;
    memset(&desc, 0, sizeof(desc));
    desc.size = _sg.wgpu.staging.num_bytes;
    desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_MapWrite;
    /* create the buffer in an already-mapped state so it's usable this frame */
    WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &desc);
    _sg.wgpu.staging.buf[cur] = res.buffer;
    _sg.wgpu.staging.ptr[cur] = (uint8_t*) res.data;
    SOKOL_ASSERT(_sg.wgpu.staging.buf[cur]);
    SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]);
    SOKOL_ASSERT(res.dataLength == _sg.wgpu.staging.num_bytes);
}
10597 
_sg_wgpu_staging_copy_to_buffer(WGPUBuffer dst_buf,uint32_t dst_buf_offset,const void * data,uint32_t data_num_bytes)10598 _SOKOL_PRIVATE uint32_t _sg_wgpu_staging_copy_to_buffer(WGPUBuffer dst_buf, uint32_t dst_buf_offset, const void* data, uint32_t data_num_bytes) {
10599     /* Copy a chunk of data into the staging buffer, and record a blit-operation into
10600         the command encoder, bump the offset for the next data chunk, return 0 if there
10601         was not enough room in the staging buffer, return the number of actually
10602         copied bytes on success.
10603 
10604         NOTE: that the number of staging bytes to be copied must be a multiple of 4.
10605 
10606     */
10607     SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
10608     SOKOL_ASSERT((dst_buf_offset & 3) == 0);
10609     SOKOL_ASSERT(data_num_bytes > 0);
10610     uint32_t copy_num_bytes = _sg_roundup(data_num_bytes, 4);
10611     if ((_sg.wgpu.staging.offset + copy_num_bytes) >= _sg.wgpu.staging.num_bytes) {
10612         SOKOL_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_buffer())!\n");
10613         return false;
10614     }
10615     const int cur = _sg.wgpu.staging.cur;
10616     SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]);
10617     uint32_t stg_buf_offset = _sg.wgpu.staging.offset;
10618     uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur] + stg_buf_offset;
10619     memcpy(stg_ptr, data, data_num_bytes);
10620     WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur];
10621     wgpuCommandEncoderCopyBufferToBuffer(_sg.wgpu.staging_cmd_enc, stg_buf, stg_buf_offset, dst_buf, dst_buf_offset, copy_num_bytes);
10622     _sg.wgpu.staging.offset = stg_buf_offset + copy_num_bytes;
10623     return copy_num_bytes;
10624 }
10625 
/* copy image content into a texture via the current staging buffer,
   returns false if the staging buffer is full for this frame
*/
_SOKOL_PRIVATE bool _sg_wgpu_staging_copy_to_texture(_sg_image_t* img, const sg_image_content* content) {
    /* similar to _sg_wgpu_staging_copy_to_buffer(), but with image data instead */
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
    uint32_t num_bytes = _sg_wgpu_image_content_buffer_size(img, content);
    if ((_sg.wgpu.staging.offset + num_bytes) >= _sg.wgpu.staging.num_bytes) {
        SOKOL_LOG("WGPU: Per frame staging buffer full (in _sg_wgpu_staging_copy_to_texture)!\n");
        return false;
    }
    const int cur = _sg.wgpu.staging.cur;
    SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]);
    uint32_t stg_offset = _sg.wgpu.staging.offset;
    uint8_t* stg_ptr = _sg.wgpu.staging.ptr[cur];
    WGPUBuffer stg_buf = _sg.wgpu.staging.buf[cur];
    /* writes the image data into the mapped staging buffer and records the
       buffer-to-texture copy operations */
    uint32_t bytes_copied = _sg_wgpu_copy_image_content(stg_buf, stg_ptr, stg_offset, img, content);
    _SOKOL_UNUSED(bytes_copied);
    SOKOL_ASSERT(bytes_copied == num_bytes);
    /* advance the staging offset, aligned for the next copy operation */
    _sg.wgpu.staging.offset = _sg_roundup(stg_offset + num_bytes, _SG_WGPU_STAGING_ALIGN);
    return true;
}
10645 
_sg_wgpu_staging_unmap(void)10646 _SOKOL_PRIVATE void _sg_wgpu_staging_unmap(void) {
10647     /* called at end of frame before queue-submit */
10648     const int cur = _sg.wgpu.staging.cur;
10649     SOKOL_ASSERT(_sg.wgpu.staging.ptr[cur]);
10650     _sg.wgpu.staging.ptr[cur] = 0;
10651     wgpuBufferUnmap(_sg.wgpu.staging.buf[cur]);
10652 }
10653 
10654 /*--- WGPU sampler cache functions ---*/
_sg_wgpu_init_sampler_cache(const sg_desc * desc)10655 _SOKOL_PRIVATE void _sg_wgpu_init_sampler_cache(const sg_desc* desc) {
10656     SOKOL_ASSERT(desc->sampler_cache_size > 0);
10657     _sg_smpcache_init(&_sg.wgpu.sampler_cache, desc->sampler_cache_size);
10658 }
10659 
_sg_wgpu_destroy_sampler_cache(void)10660 _SOKOL_PRIVATE void _sg_wgpu_destroy_sampler_cache(void) {
10661     SOKOL_ASSERT(_sg.wgpu.sampler_cache.items);
10662     SOKOL_ASSERT(_sg.wgpu.sampler_cache.num_items <= _sg.wgpu.sampler_cache.capacity);
10663     for (int i = 0; i < _sg.wgpu.sampler_cache.num_items; i++) {
10664         wgpuSamplerRelease((WGPUSampler)_sg_smpcache_sampler(&_sg.wgpu.sampler_cache, i));
10665     }
10666     _sg_smpcache_discard(&_sg.wgpu.sampler_cache);
10667 }
10668 
_sg_wgpu_create_sampler(const sg_image_desc * img_desc)10669 _SOKOL_PRIVATE WGPUSampler _sg_wgpu_create_sampler(const sg_image_desc* img_desc) {
10670     SOKOL_ASSERT(img_desc);
10671     int index = _sg_smpcache_find_item(&_sg.wgpu.sampler_cache, img_desc);
10672     if (index >= 0) {
10673         /* reuse existing sampler */
10674         return (WGPUSampler) _sg_smpcache_sampler(&_sg.wgpu.sampler_cache, index);
10675     }
10676     else {
10677         /* create a new WGPU sampler and add to sampler cache */
10678         /* FIXME: anisotropic filtering not supported? */
10679         WGPUSamplerDescriptor smp_desc;
10680         memset(&smp_desc, 0, sizeof(smp_desc));
10681         smp_desc.addressModeU = _sg_wgpu_sampler_addrmode(img_desc->wrap_u);
10682         smp_desc.addressModeV = _sg_wgpu_sampler_addrmode(img_desc->wrap_v);
10683         smp_desc.addressModeW = _sg_wgpu_sampler_addrmode(img_desc->wrap_w);
10684         smp_desc.magFilter = _sg_wgpu_sampler_minmagfilter(img_desc->mag_filter);
10685         smp_desc.minFilter = _sg_wgpu_sampler_minmagfilter(img_desc->min_filter);
10686         smp_desc.mipmapFilter = _sg_wgpu_sampler_mipfilter(img_desc->min_filter);
10687         smp_desc.lodMinClamp = img_desc->min_lod;
10688         smp_desc.lodMaxClamp = img_desc->max_lod;
10689         WGPUSampler smp = wgpuDeviceCreateSampler(_sg.wgpu.dev, &smp_desc);
10690         SOKOL_ASSERT(smp);
10691         _sg_smpcache_add_item(&_sg.wgpu.sampler_cache, img_desc, (uintptr_t)smp);
10692         return smp;
10693     }
10694 }
10695 
/*--- WGPU backend API functions ---*/
/* one-time setup of the WebGPU backend: takes over the externally created
   device and frame-view callbacks, creates the queue, capability table,
   sampler cache, uniform/staging buffer pools, an empty bind group for
   unused image slots, and the initial per-frame command encoders
*/
_SOKOL_PRIVATE void _sg_wgpu_setup_backend(const sg_desc* desc) {
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT(desc->context.wgpu.device);
    SOKOL_ASSERT(desc->context.wgpu.render_view_cb);
    SOKOL_ASSERT(desc->context.wgpu.resolve_view_cb);
    SOKOL_ASSERT(desc->context.wgpu.depth_stencil_view_cb);
    SOKOL_ASSERT(desc->uniform_buffer_size > 0);
    SOKOL_ASSERT(desc->staging_buffer_size > 0);
    _sg.backend = SG_BACKEND_WGPU;
    _sg.wgpu.valid = true;
    /* the device and per-frame texture-view callbacks are provided by the
       application through the sg_desc context struct */
    _sg.wgpu.dev = (WGPUDevice) desc->context.wgpu.device;
    _sg.wgpu.render_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.render_view_cb;
    _sg.wgpu.resolve_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.resolve_view_cb;
    _sg.wgpu.depth_stencil_view_cb = (WGPUTextureView(*)(void)) desc->context.wgpu.depth_stencil_view_cb;
    _sg.wgpu.queue = wgpuDeviceCreateQueue(_sg.wgpu.dev);
    SOKOL_ASSERT(_sg.wgpu.queue);

    /* setup WebGPU features and limits */
    _sg_wgpu_init_caps();

    /* setup the sampler cache, uniform and staging buffer pools */
    _sg_wgpu_init_sampler_cache(&_sg.desc);
    _sg_wgpu_ubpool_init(desc);
    _sg_wgpu_ubpool_next_frame(true);
    _sg_wgpu_staging_init(desc);
    _sg_wgpu_staging_next_frame(true);

    /* create an empty bind group for shader stages without bound images */
    WGPUBindGroupLayoutDescriptor bgl_desc;
    memset(&bgl_desc, 0, sizeof(bgl_desc));
    WGPUBindGroupLayout empty_bgl = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &bgl_desc);
    SOKOL_ASSERT(empty_bgl);
    WGPUBindGroupDescriptor bg_desc;
    memset(&bg_desc, 0, sizeof(bg_desc));
    bg_desc.layout = empty_bgl;
    _sg.wgpu.empty_bind_group = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc);
    SOKOL_ASSERT(_sg.wgpu.empty_bind_group);
    /* the layout is only needed to create the bind group, release it here */
    wgpuBindGroupLayoutRelease(empty_bgl);

    /* create initial per-frame command encoders */
    WGPUCommandEncoderDescriptor cmd_enc_desc;
    memset(&cmd_enc_desc, 0, sizeof(cmd_enc_desc));
    _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
}
10744 
/* tear down the WebGPU backend, releasing all backend-owned objects
   (NOTE(review): the injected WGPUDevice itself is never released here,
   presumably ownership stays with the application -- confirm)
*/
_SOKOL_PRIVATE void _sg_wgpu_discard_backend(void) {
    SOKOL_ASSERT(_sg.wgpu.valid);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);
    _sg.wgpu.valid = false;
    _sg_wgpu_ubpool_discard();
    _sg_wgpu_staging_discard();
    _sg_wgpu_destroy_sampler_cache();
    wgpuBindGroupRelease(_sg.wgpu.empty_bind_group);
    wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.render_cmd_enc = 0;
    wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc);
    _sg.wgpu.staging_cmd_enc = 0;
    if (_sg.wgpu.queue) {
        wgpuQueueRelease(_sg.wgpu.queue);
        _sg.wgpu.queue = 0;
    }
}
10763 
/* no-op: the WebGPU backend doesn't implement a state cache yet */
_SOKOL_PRIVATE void _sg_wgpu_reset_state_cache(void) {
    SOKOL_LOG("_sg_wgpu_reset_state_cache: FIXME\n");
}
10767 
/* contexts need no WebGPU-specific setup, always succeeds */
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
    return SG_RESOURCESTATE_VALID;
}
10773 
/* contexts own no WebGPU objects, nothing to release */
_SOKOL_PRIVATE void _sg_wgpu_destroy_context(_sg_context_t* ctx) {
    SOKOL_ASSERT(ctx);
    _SOKOL_UNUSED(ctx);
}
10778 
_sg_wgpu_activate_context(_sg_context_t * ctx)10779 _SOKOL_PRIVATE void _sg_wgpu_activate_context(_sg_context_t* ctx) {
10780     SOKOL_LOG("_sg_wgpu_activate_context: FIXME\n");
10781 }
10782 
/* create a WebGPU buffer: either adopt an externally injected WGPUBuffer,
   or create a new one (immutable buffers are created mapped and the
   initial content is copied in before unmapping)
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    SOKOL_ASSERT(buf && desc);
    const bool injected = (0 != desc->wgpu_buffer);
    _sg_buffer_common_init(&buf->cmn, desc);
    if (injected) {
        /* take over an externally created buffer, bumping its refcount */
        buf->wgpu.buf = (WGPUBuffer) desc->wgpu_buffer;
        wgpuBufferReference(buf->wgpu.buf);
    }
    else {
        WGPUBufferDescriptor wgpu_buf_desc;
        memset(&wgpu_buf_desc, 0, sizeof(wgpu_buf_desc));
        wgpu_buf_desc.usage = _sg_wgpu_buffer_usage(buf->cmn.type, buf->cmn.usage);
        wgpu_buf_desc.size = buf->cmn.size;
        if (SG_USAGE_IMMUTABLE == buf->cmn.usage) {
            /* immutable buffers must provide their content up front */
            SOKOL_ASSERT(desc->content);
            WGPUCreateBufferMappedResult res = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc);
            buf->wgpu.buf = res.buffer;
            SOKOL_ASSERT(res.data && ((int)res.dataLength == buf->cmn.size));
            memcpy(res.data, desc->content, buf->cmn.size);
            wgpuBufferUnmap(res.buffer);
        }
        else {
            /* dynamic/stream buffers get their content written later */
            buf->wgpu.buf = wgpuDeviceCreateBuffer(_sg.wgpu.dev, &wgpu_buf_desc);
        }
    }
    return SG_RESOURCESTATE_VALID;
}
10810 
_sg_wgpu_destroy_buffer(_sg_buffer_t * buf)10811 _SOKOL_PRIVATE void _sg_wgpu_destroy_buffer(_sg_buffer_t* buf) {
10812     SOKOL_ASSERT(buf);
10813     WGPUBuffer wgpu_buf = buf->wgpu.buf;
10814     if (0 != wgpu_buf) {
10815         wgpuBufferRelease(wgpu_buf);
10816     }
10817 }
10818 
_sg_wgpu_init_texdesc_common(WGPUTextureDescriptor * wgpu_tex_desc,const sg_image_desc * desc)10819 _SOKOL_PRIVATE void _sg_wgpu_init_texdesc_common(WGPUTextureDescriptor* wgpu_tex_desc, const sg_image_desc* desc) {
10820     wgpu_tex_desc->usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_CopyDst;
10821     wgpu_tex_desc->dimension = _sg_wgpu_tex_dim(desc->type);
10822     wgpu_tex_desc->size.width = desc->width;
10823     wgpu_tex_desc->size.height = desc->height;
10824     if (desc->type == SG_IMAGETYPE_3D) {
10825         wgpu_tex_desc->size.depth = desc->depth;
10826         wgpu_tex_desc->arrayLayerCount = 1;
10827     }
10828     else if (desc->type == SG_IMAGETYPE_CUBE) {
10829         wgpu_tex_desc->size.depth = 1;
10830         wgpu_tex_desc->arrayLayerCount = 6;
10831     }
10832     else {
10833         wgpu_tex_desc->size.depth = 1;
10834         wgpu_tex_desc->arrayLayerCount = desc->layers;
10835     }
10836     wgpu_tex_desc->format = _sg_wgpu_textureformat(desc->pixel_format);
10837     wgpu_tex_desc->mipLevelCount = desc->num_mipmaps;
10838     wgpu_tex_desc->sampleCount = 1;
10839 }
10840 
/* create a WebGPU texture (or adopt an injected one), the associated
   texture-view, an optional separate MSAA render texture, and fetch a
   sampler from the shared sampler-cache
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    SOKOL_ASSERT(img && desc);
    SOKOL_ASSERT(_sg.wgpu.dev);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);

    _sg_image_common_init(&img->cmn, desc);

    const bool injected = (0 != desc->wgpu_texture);
    const bool is_msaa = desc->sample_count > 1;
    WGPUTextureDescriptor wgpu_tex_desc;
    memset(&wgpu_tex_desc, 0, sizeof(wgpu_tex_desc));
    _sg_wgpu_init_texdesc_common(&wgpu_tex_desc, desc);
    if (_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format)) {
        /* depth-stencil render target: only valid as a 2D, single-mip,
           non-injected render target image */
        SOKOL_ASSERT(img->cmn.render_target);
        SOKOL_ASSERT(img->cmn.type == SG_IMAGETYPE_2D);
        SOKOL_ASSERT(img->cmn.num_mipmaps == 1);
        SOKOL_ASSERT(!injected);
        /* NOTE: a depth-stencil texture will never be MSAA-resolved, so there
           won't be a separate MSAA- and resolve-texture
        */
        wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment;
        wgpu_tex_desc.sampleCount = desc->sample_count;
        img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
        SOKOL_ASSERT(img->wgpu.tex);
    }
    else {
        if (injected) {
            /* adopt an externally created texture, bumping its refcount */
            img->wgpu.tex = (WGPUTexture) desc->wgpu_texture;
            wgpuTextureReference(img->wgpu.tex);
        }
        else {
            /* NOTE: in the MSAA-rendertarget case, both the MSAA texture *and*
               the resolve texture need OutputAttachment usage
            */
            if (img->cmn.render_target) {
                wgpu_tex_desc.usage = WGPUTextureUsage_Sampled|WGPUTextureUsage_OutputAttachment;
            }
            img->wgpu.tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
            SOKOL_ASSERT(img->wgpu.tex);

            /* copy content into texture via a throw-away staging buffer
               (NOTE(review): _sg_wgpu_copy_image_content presumably also
               records the buffer-to-texture copies on the staging command
               encoder, otherwise the released buffer would be lost --
               verify in its implementation)
            */
            if (desc->usage == SG_USAGE_IMMUTABLE && !desc->render_target) {
                WGPUBufferDescriptor wgpu_buf_desc;
                memset(&wgpu_buf_desc, 0, sizeof(wgpu_buf_desc));
                wgpu_buf_desc.size = _sg_wgpu_image_content_buffer_size(img, &desc->content);
                wgpu_buf_desc.usage = WGPUBufferUsage_CopySrc|WGPUBufferUsage_CopyDst;
                WGPUCreateBufferMappedResult map = wgpuDeviceCreateBufferMapped(_sg.wgpu.dev, &wgpu_buf_desc);
                SOKOL_ASSERT(map.buffer && map.data);
                uint32_t num_bytes = _sg_wgpu_copy_image_content(map.buffer, (uint8_t*)map.data, 0, img, &desc->content);
                _SOKOL_UNUSED(num_bytes);
                SOKOL_ASSERT(num_bytes == wgpu_buf_desc.size);
                wgpuBufferUnmap(map.buffer);
                wgpuBufferRelease(map.buffer);
            }
        }

        /* create texture view object */
        WGPUTextureViewDescriptor wgpu_view_desc;
        memset(&wgpu_view_desc, 0, sizeof(wgpu_view_desc));
        wgpu_view_desc.dimension = _sg_wgpu_tex_viewdim(desc->type);
        img->wgpu.tex_view = wgpuTextureCreateView(img->wgpu.tex, &wgpu_view_desc);

        /* if render target and MSAA, then a separate texture in MSAA format is needed
           which will be resolved into the regular texture at the end of the
           offscreen-render pass
        */
        if (desc->render_target && is_msaa) {
            wgpu_tex_desc.dimension = WGPUTextureDimension_2D;
            wgpu_tex_desc.size.depth = 1;
            wgpu_tex_desc.arrayLayerCount = 1;
            wgpu_tex_desc.mipLevelCount = 1;
            wgpu_tex_desc.usage = WGPUTextureUsage_OutputAttachment;
            wgpu_tex_desc.sampleCount = desc->sample_count;
            img->wgpu.msaa_tex = wgpuDeviceCreateTexture(_sg.wgpu.dev, &wgpu_tex_desc);
            SOKOL_ASSERT(img->wgpu.msaa_tex);
        }

        /* create sampler via shared-sampler-cache */
        img->wgpu.sampler = _sg_wgpu_create_sampler(desc);
        SOKOL_ASSERT(img->wgpu.sampler);
    }
    return SG_RESOURCESTATE_VALID;
}
10924 
_sg_wgpu_destroy_image(_sg_image_t * img)10925 _SOKOL_PRIVATE void _sg_wgpu_destroy_image(_sg_image_t* img) {
10926     SOKOL_ASSERT(img);
10927     if (img->wgpu.tex) {
10928         wgpuTextureRelease(img->wgpu.tex);
10929         img->wgpu.tex = 0;
10930     }
10931     if (img->wgpu.tex_view) {
10932         wgpuTextureViewRelease(img->wgpu.tex_view);
10933         img->wgpu.tex_view = 0;
10934     }
10935     if (img->wgpu.msaa_tex) {
10936         wgpuTextureRelease(img->wgpu.msaa_tex);
10937         img->wgpu.msaa_tex = 0;
10938     }
10939     /* NOTE: do *not* destroy the sampler from the shared-sampler-cache */
10940     img->wgpu.sampler = 0;
10941 }
10942 
10943 /*
10944     How BindGroups work in WebGPU:
10945 
    - up to 4 bind groups can be bound simultaneously
10947     - up to 16 bindings per bind group
10948     - 'binding' slots are local per bind group
10949     - in the shader:
10950         layout(set=0, binding=1) corresponds to bind group 0, binding 1
10951 
10952     Now how to map this to sokol-gfx's bind model:
10953 
10954     Reduce SG_MAX_SHADERSTAGE_IMAGES to 8, then:
10955 
10956         1 bind group for all 8 uniform buffers
10957         1 bind group for vertex shader textures + samplers
        1 bind group for fragment shader textures + samplers
10959 
10960     Alternatively:
10961 
10962         1 bind group for 8 uniform buffer slots
10963         1 bind group for 8 vs images + 8 vs samplers
10964         1 bind group for 12 fs images
10965         1 bind group for 12 fs samplers
10966 
10967     I guess this means that we need to create BindGroups on the
10968     fly during sg_apply_bindings() :/
10969 */
/* create the per-stage shader modules from SPIRV byte code, and a
   bind-group-layout per stage describing its texture/sampler bindings
*/
_SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    SOKOL_ASSERT(shd && desc);
    /* the WebGPU backend requires byte code, not source code */
    SOKOL_ASSERT(desc->vs.byte_code && desc->fs.byte_code);
    _sg_shader_common_init(&shd->cmn, desc);

    bool success = true;
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs;
        /* byte code size must be a multiple of 4 (SPIRV 32-bit words) */
        SOKOL_ASSERT((stage_desc->byte_code_size & 3) == 0);

        _sg_shader_stage_t* cmn_stage = &shd->cmn.stage[stage_index];
        _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index];

        _sg_strcpy(&wgpu_stage->entry, stage_desc->entry);
        WGPUShaderModuleDescriptor wgpu_shdmod_desc;
        memset(&wgpu_shdmod_desc, 0, sizeof(wgpu_shdmod_desc));
        /* codeSize is in 32-bit words, not bytes */
        wgpu_shdmod_desc.codeSize = stage_desc->byte_code_size >> 2;
        wgpu_shdmod_desc.code = (const uint32_t*) stage_desc->byte_code;
        wgpu_stage->module = wgpuDeviceCreateShaderModule(_sg.wgpu.dev, &wgpu_shdmod_desc);
        if (0 == wgpu_stage->module) {
            success = false;
        }

        /* create image/sampler bind group layout for the shader stage:
           texture bindings occupy slots [0..num_imgs), sampler bindings
           start at _SG_WGPU_MAX_SHADERSTAGE_IMAGES */
        WGPUShaderStage vis = (stage_index == SG_SHADERSTAGE_VS) ? WGPUShaderStage_Vertex : WGPUShaderStage_Fragment;
        int num_imgs = cmn_stage->num_images;
        if (num_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
            num_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
        }
        WGPUBindGroupLayoutBinding bglb_desc[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2];
        memset(bglb_desc, 0, sizeof(bglb_desc));
        for (int img_index = 0; img_index < num_imgs; img_index++) {
            /* texture- and sampler-bindings */
            WGPUBindGroupLayoutBinding* tex_desc = &bglb_desc[img_index*2 + 0];
            WGPUBindGroupLayoutBinding* smp_desc = &bglb_desc[img_index*2 + 1];

            tex_desc->binding = img_index;
            tex_desc->visibility = vis;
            tex_desc->type = WGPUBindingType_SampledTexture;
            tex_desc->textureDimension = _sg_wgpu_tex_viewdim(cmn_stage->images[img_index].type);
            tex_desc->textureComponentType = _sg_wgpu_tex_comptype(cmn_stage->images[img_index].sampler_type);

            smp_desc->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
            smp_desc->visibility = vis;
            smp_desc->type = WGPUBindingType_Sampler;
        }
        WGPUBindGroupLayoutDescriptor img_bgl_desc;
        memset(&img_bgl_desc, 0, sizeof(img_bgl_desc));
        img_bgl_desc.bindingCount = num_imgs * 2;
        img_bgl_desc.bindings = &bglb_desc[0];
        wgpu_stage->bind_group_layout = wgpuDeviceCreateBindGroupLayout(_sg.wgpu.dev, &img_bgl_desc);
        SOKOL_ASSERT(wgpu_stage->bind_group_layout);
    }
    return success ? SG_RESOURCESTATE_VALID : SG_RESOURCESTATE_FAILED;
}
11025 
_sg_wgpu_destroy_shader(_sg_shader_t * shd)11026 _SOKOL_PRIVATE void _sg_wgpu_destroy_shader(_sg_shader_t* shd) {
11027     SOKOL_ASSERT(shd);
11028     for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
11029         _sg_wgpu_shader_stage_t* wgpu_stage = &shd->wgpu.stage[stage_index];
11030         if (wgpu_stage->module) {
11031             wgpuShaderModuleRelease(wgpu_stage->module);
11032             wgpu_stage->module = 0;
11033         }
11034         if (wgpu_stage->bind_group_layout) {
11035             wgpuBindGroupLayoutRelease(wgpu_stage->bind_group_layout);
11036             wgpu_stage->bind_group_layout = 0;
11037         }
11038     }
11039 }
11040 
_sg_wgpu_create_pipeline(_sg_pipeline_t * pip,_sg_shader_t * shd,const sg_pipeline_desc * desc)11041 _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
11042     SOKOL_ASSERT(pip && shd && desc);
11043     SOKOL_ASSERT(desc->shader.id == shd->slot.id);
11044     SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout);
11045     SOKOL_ASSERT(shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout);
11046     pip->shader = shd;
11047     _sg_pipeline_common_init(&pip->cmn, desc);
11048     pip->wgpu.stencil_ref = (uint32_t) desc->depth_stencil.stencil_ref;
11049 
11050     WGPUBindGroupLayout pip_bgl[3] = {
11051         _sg.wgpu.ub.bindgroup_layout,
11052         shd->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout,
11053         shd->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout
11054     };
11055     WGPUPipelineLayoutDescriptor pl_desc;
11056     memset(&pl_desc, 0, sizeof(pl_desc));
11057     pl_desc.bindGroupLayoutCount = 3;
11058     pl_desc.bindGroupLayouts = &pip_bgl[0];
11059     WGPUPipelineLayout pip_layout = wgpuDeviceCreatePipelineLayout(_sg.wgpu.dev, &pl_desc);
11060 
11061     WGPUVertexBufferLayoutDescriptor vb_desc[SG_MAX_SHADERSTAGE_BUFFERS];
11062     memset(&vb_desc, 0, sizeof(vb_desc));
11063     WGPUVertexAttributeDescriptor va_desc[SG_MAX_SHADERSTAGE_BUFFERS][SG_MAX_VERTEX_ATTRIBUTES];
11064     memset(&va_desc, 0, sizeof(va_desc));
11065     int vb_idx = 0;
11066     for (; vb_idx < SG_MAX_SHADERSTAGE_BUFFERS; vb_idx++) {
11067         const sg_buffer_layout_desc* src_vb_desc = &desc->layout.buffers[vb_idx];
11068         if (0 == src_vb_desc->stride) {
11069             break;
11070         }
11071         vb_desc[vb_idx].arrayStride = src_vb_desc->stride;
11072         vb_desc[vb_idx].stepMode = _sg_wgpu_stepmode(src_vb_desc->step_func);
11073         /* NOTE: WebGPU has no support for vertex step rate (because that's
11074            not supported by Core Vulkan
11075         */
11076         int va_idx = 0;
11077         for (int va_loc = 0; va_loc < SG_MAX_VERTEX_ATTRIBUTES; va_loc++) {
11078             const sg_vertex_attr_desc* src_va_desc = &desc->layout.attrs[va_loc];
11079             if (SG_VERTEXFORMAT_INVALID == src_va_desc->format) {
11080                 break;
11081             }
11082             pip->cmn.vertex_layout_valid[src_va_desc->buffer_index] = true;
11083             if (vb_idx == src_va_desc->buffer_index) {
11084                 va_desc[vb_idx][va_idx].format = _sg_wgpu_vertexformat(src_va_desc->format);
11085                 va_desc[vb_idx][va_idx].offset = src_va_desc->offset;
11086                 va_desc[vb_idx][va_idx].shaderLocation = va_loc;
11087                 va_idx++;
11088             }
11089         }
11090         vb_desc[vb_idx].attributeCount = va_idx;
11091         vb_desc[vb_idx].attributes = &va_desc[vb_idx][0];
11092     }
11093     WGPUVertexStateDescriptor vx_state_desc;
11094     memset(&vx_state_desc, 0, sizeof(vx_state_desc));
11095     vx_state_desc.indexFormat = _sg_wgpu_indexformat(desc->index_type);
11096     vx_state_desc.vertexBufferCount = vb_idx;
11097     vx_state_desc.vertexBuffers = vb_desc;
11098 
11099     WGPURasterizationStateDescriptor rs_desc;
11100     memset(&rs_desc, 0, sizeof(rs_desc));
11101     rs_desc.frontFace = _sg_wgpu_frontface(desc->rasterizer.face_winding);
11102     rs_desc.cullMode = _sg_wgpu_cullmode(desc->rasterizer.cull_mode);
11103     rs_desc.depthBias = (int32_t) desc->rasterizer.depth_bias;
11104     rs_desc.depthBiasClamp = desc->rasterizer.depth_bias_clamp;
11105     rs_desc.depthBiasSlopeScale = desc->rasterizer.depth_bias_slope_scale;
11106 
11107     WGPUDepthStencilStateDescriptor ds_desc;
11108     memset(&ds_desc, 0, sizeof(ds_desc));
11109     ds_desc.format = _sg_wgpu_textureformat(desc->blend.depth_format);
11110     ds_desc.depthWriteEnabled = desc->depth_stencil.depth_write_enabled;
11111     ds_desc.depthCompare = _sg_wgpu_comparefunc(desc->depth_stencil.depth_compare_func);
11112     ds_desc.stencilReadMask = desc->depth_stencil.stencil_read_mask;
11113     ds_desc.stencilWriteMask = desc->depth_stencil.stencil_write_mask;
11114     ds_desc.stencilFront.compare = _sg_wgpu_comparefunc(desc->depth_stencil.stencil_front.compare_func);
11115     ds_desc.stencilFront.failOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_front.fail_op);
11116     ds_desc.stencilFront.depthFailOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_front.depth_fail_op);
11117     ds_desc.stencilFront.passOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_front.pass_op);
11118     ds_desc.stencilBack.compare = _sg_wgpu_comparefunc(desc->depth_stencil.stencil_back.compare_func);
11119     ds_desc.stencilBack.failOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.fail_op);
11120     ds_desc.stencilBack.depthFailOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.depth_fail_op);
11121     ds_desc.stencilBack.passOp = _sg_wgpu_stencilop(desc->depth_stencil.stencil_back.pass_op);
11122 
11123     WGPUProgrammableStageDescriptor fs_desc;
11124     memset(&fs_desc, 0, sizeof(fs_desc));
11125     fs_desc.module = shd->wgpu.stage[SG_SHADERSTAGE_FS].module;
11126     fs_desc.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf;
11127 
11128     WGPUColorStateDescriptor cs_desc[SG_MAX_COLOR_ATTACHMENTS];
11129     memset(cs_desc, 0, sizeof(cs_desc));
11130     cs_desc[0].format = _sg_wgpu_textureformat(desc->blend.color_format);
11131     cs_desc[0].colorBlend.operation = _sg_wgpu_blendop(desc->blend.op_rgb);
11132     cs_desc[0].colorBlend.srcFactor = _sg_wgpu_blendfactor(desc->blend.src_factor_rgb);
11133     cs_desc[0].colorBlend.dstFactor = _sg_wgpu_blendfactor(desc->blend.dst_factor_rgb);
11134     cs_desc[0].alphaBlend.operation = _sg_wgpu_blendop(desc->blend.op_alpha);
11135     cs_desc[0].alphaBlend.srcFactor = _sg_wgpu_blendfactor(desc->blend.src_factor_alpha);
11136     cs_desc[0].alphaBlend.dstFactor = _sg_wgpu_blendfactor(desc->blend.dst_factor_alpha);
11137     cs_desc[0].writeMask = _sg_wgpu_colorwritemask(desc->blend.color_write_mask);
11138     SOKOL_ASSERT(desc->blend.color_attachment_count <= SG_MAX_COLOR_ATTACHMENTS);
11139     for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
11140         cs_desc[i] = cs_desc[0];
11141     }
11142 
11143     WGPURenderPipelineDescriptor pip_desc;
11144     memset(&pip_desc, 0, sizeof(pip_desc));
11145     pip_desc.layout = pip_layout;
11146     pip_desc.vertexStage.module = shd->wgpu.stage[SG_SHADERSTAGE_VS].module;
11147     pip_desc.vertexStage.entryPoint = shd->wgpu.stage[SG_SHADERSTAGE_VS].entry.buf;
11148     pip_desc.fragmentStage = &fs_desc;
11149     pip_desc.vertexState = &vx_state_desc;
11150     pip_desc.primitiveTopology  = _sg_wgpu_topology(desc->primitive_type);
11151     pip_desc.rasterizationState = &rs_desc;
11152     pip_desc.sampleCount = desc->rasterizer.sample_count;
11153     if (SG_PIXELFORMAT_NONE != desc->blend.depth_format) {
11154         pip_desc.depthStencilState = &ds_desc;
11155     }
11156     pip_desc.colorStateCount = desc->blend.color_attachment_count;
11157     pip_desc.colorStates = cs_desc;
11158     pip_desc.sampleMask = 0xFFFFFFFF;   /* FIXME: ??? */
11159     pip->wgpu.pip = wgpuDeviceCreateRenderPipeline(_sg.wgpu.dev, &pip_desc);
11160     SOKOL_ASSERT(0 != pip->wgpu.pip);
11161     wgpuPipelineLayoutRelease(pip_layout);
11162 
11163     return SG_RESOURCESTATE_VALID;
11164 }
11165 
_sg_wgpu_destroy_pipeline(_sg_pipeline_t * pip)11166 _SOKOL_PRIVATE void _sg_wgpu_destroy_pipeline(_sg_pipeline_t* pip) {
11167     SOKOL_ASSERT(pip);
11168     if (pip->wgpu.pip) {
11169         wgpuRenderPipelineRelease(pip->wgpu.pip);
11170         pip->wgpu.pip = 0;
11171     }
11172 }
11173 
_sg_wgpu_create_pass(_sg_pass_t * pass,_sg_image_t ** att_images,const sg_pass_desc * desc)11174 _SOKOL_PRIVATE sg_resource_state _sg_wgpu_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
11175     SOKOL_ASSERT(pass && desc);
11176     SOKOL_ASSERT(att_images && att_images[0]);
11177     _sg_pass_common_init(&pass->cmn, desc);
11178 
11179     /* copy image pointers and create render-texture views */
11180     const sg_attachment_desc* att_desc;
11181     for (int i = 0; i < pass->cmn.num_color_atts; i++) {
11182         att_desc = &desc->color_attachments[i];
11183         if (att_desc->image.id != SG_INVALID_ID) {
11184             SOKOL_ASSERT(att_desc->image.id != SG_INVALID_ID);
11185             SOKOL_ASSERT(0 == pass->wgpu.color_atts[i].image);
11186             _sg_image_t* img = att_images[i];
11187             SOKOL_ASSERT(img && (img->slot.id == att_desc->image.id));
11188             SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format));
11189             pass->wgpu.color_atts[i].image = img;
11190             /* create a render-texture-view to render into the right sub-surface */
11191             const bool is_msaa = img->cmn.sample_count > 1;
11192             WGPUTextureViewDescriptor view_desc;
11193             memset(&view_desc, 0, sizeof(view_desc));
11194             view_desc.baseMipLevel = is_msaa ? 0 : att_desc->mip_level;
11195             view_desc.mipLevelCount = 1;
11196             view_desc.baseArrayLayer = is_msaa ? 0 : att_desc->slice;
11197             view_desc.arrayLayerCount = 1;
11198             WGPUTexture wgpu_tex = is_msaa ? img->wgpu.msaa_tex : img->wgpu.tex;
11199             SOKOL_ASSERT(wgpu_tex);
11200             pass->wgpu.color_atts[i].render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
11201             SOKOL_ASSERT(pass->wgpu.color_atts[i].render_tex_view);
11202             /* ... and if needed a separate resolve texture view */
11203             if (is_msaa) {
11204                 view_desc.baseMipLevel = att_desc->mip_level;
11205                 view_desc.baseArrayLayer = att_desc->slice;
11206                 WGPUTexture wgpu_tex = img->wgpu.tex;
11207                 pass->wgpu.color_atts[i].resolve_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
11208                 SOKOL_ASSERT(pass->wgpu.color_atts[i].resolve_tex_view);
11209             }
11210         }
11211     }
11212     SOKOL_ASSERT(0 == pass->wgpu.ds_att.image);
11213     att_desc = &desc->depth_stencil_attachment;
11214     if (att_desc->image.id != SG_INVALID_ID) {
11215         const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS;
11216         SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id));
11217         SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->cmn.pixel_format));
11218         _sg_image_t* ds_img = att_images[ds_img_index];
11219         pass->wgpu.ds_att.image = ds_img;
11220         /* create a render-texture view */
11221         SOKOL_ASSERT(0 == att_desc->mip_level);
11222         SOKOL_ASSERT(0 == att_desc->slice);
11223         WGPUTextureViewDescriptor view_desc;
11224         memset(&view_desc, 0, sizeof(view_desc));
11225         WGPUTexture wgpu_tex = ds_img->wgpu.tex;
11226         SOKOL_ASSERT(wgpu_tex);
11227         pass->wgpu.ds_att.render_tex_view = wgpuTextureCreateView(wgpu_tex, &view_desc);
11228         SOKOL_ASSERT(pass->wgpu.ds_att.render_tex_view);
11229     }
11230     return SG_RESOURCESTATE_VALID;
11231 }
11232 
_sg_wgpu_destroy_pass(_sg_pass_t * pass)11233 _SOKOL_PRIVATE void _sg_wgpu_destroy_pass(_sg_pass_t* pass) {
11234     SOKOL_ASSERT(pass);
11235     for (int i = 0; i < pass->cmn.num_color_atts; i++) {
11236         if (pass->wgpu.color_atts[i].render_tex_view) {
11237             wgpuTextureViewRelease(pass->wgpu.color_atts[i].render_tex_view);
11238             pass->wgpu.color_atts[i].render_tex_view = 0;
11239         }
11240         if (pass->wgpu.color_atts[i].resolve_tex_view) {
11241             wgpuTextureViewRelease(pass->wgpu.color_atts[i].resolve_tex_view);
11242             pass->wgpu.color_atts[i].resolve_tex_view = 0;
11243         }
11244     }
11245     if (pass->wgpu.ds_att.render_tex_view) {
11246         wgpuTextureViewRelease(pass->wgpu.ds_att.render_tex_view);
11247         pass->wgpu.ds_att.render_tex_view = 0;
11248     }
11249 }
11250 
_sg_wgpu_pass_color_image(const _sg_pass_t * pass,int index)11251 _SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_color_image(const _sg_pass_t* pass, int index) {
11252     SOKOL_ASSERT(pass && (index >= 0) && (index < SG_MAX_COLOR_ATTACHMENTS));
11253     /* NOTE: may return null */
11254     return pass->wgpu.color_atts[index].image;
11255 }
11256 
_sg_wgpu_pass_ds_image(const _sg_pass_t * pass)11257 _SOKOL_PRIVATE _sg_image_t* _sg_wgpu_pass_ds_image(const _sg_pass_t* pass) {
11258     /* NOTE: may return null */
11259     SOKOL_ASSERT(pass);
11260     return pass->wgpu.ds_att.image;
11261 }
11262 
_sg_wgpu_begin_pass(_sg_pass_t * pass,const sg_pass_action * action,int w,int h)11263 _SOKOL_PRIVATE void _sg_wgpu_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
11264     SOKOL_ASSERT(action);
11265     SOKOL_ASSERT(!_sg.wgpu.in_pass);
11266     SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
11267     SOKOL_ASSERT(_sg.wgpu.dev);
11268     SOKOL_ASSERT(_sg.wgpu.render_view_cb);
11269     SOKOL_ASSERT(_sg.wgpu.resolve_view_cb);
11270     SOKOL_ASSERT(_sg.wgpu.depth_stencil_view_cb);
11271     _sg.wgpu.in_pass = true;
11272     _sg.wgpu.cur_width = w;
11273     _sg.wgpu.cur_height = h;
11274     _sg.wgpu.cur_pipeline = 0;
11275     _sg.wgpu.cur_pipeline_id.id = SG_INVALID_ID;
11276 
11277     SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
11278     if (pass) {
11279         WGPURenderPassDescriptor wgpu_pass_desc;
11280         memset(&wgpu_pass_desc, 0, sizeof(wgpu_pass_desc));
11281         WGPURenderPassColorAttachmentDescriptor wgpu_color_att_desc[SG_MAX_COLOR_ATTACHMENTS];
11282         memset(&wgpu_color_att_desc, 0, sizeof(wgpu_color_att_desc));
11283         SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID);
11284         for (int i = 0; i < pass->cmn.num_color_atts; i++) {
11285             const _sg_wgpu_attachment_t* wgpu_att = &pass->wgpu.color_atts[i];
11286             wgpu_color_att_desc[i].loadOp = _sg_wgpu_load_op(action->colors[i].action);
11287             wgpu_color_att_desc[i].storeOp = WGPUStoreOp_Store;
11288             wgpu_color_att_desc[i].clearColor.r = action->colors[i].val[0];
11289             wgpu_color_att_desc[i].clearColor.g = action->colors[i].val[1];
11290             wgpu_color_att_desc[i].clearColor.b = action->colors[i].val[2];
11291             wgpu_color_att_desc[i].clearColor.a = action->colors[i].val[3];
11292             wgpu_color_att_desc[i].attachment = wgpu_att->render_tex_view;
11293             if (wgpu_att->image->cmn.sample_count > 1) {
11294                 wgpu_color_att_desc[i].resolveTarget = wgpu_att->resolve_tex_view;
11295             }
11296         }
11297         wgpu_pass_desc.colorAttachmentCount = pass->cmn.num_color_atts;
11298         wgpu_pass_desc.colorAttachments = &wgpu_color_att_desc[0];
11299         if (pass->wgpu.ds_att.image) {
11300             WGPURenderPassDepthStencilAttachmentDescriptor wgpu_ds_att_desc;
11301             memset(&wgpu_ds_att_desc, 0, sizeof(wgpu_ds_att_desc));
11302             wgpu_ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action);
11303             wgpu_ds_att_desc.clearDepth = action->depth.val;
11304             wgpu_ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action);
11305             wgpu_ds_att_desc.clearStencil = action->stencil.val;
11306             wgpu_ds_att_desc.attachment = pass->wgpu.ds_att.render_tex_view;
11307             wgpu_pass_desc.depthStencilAttachment = &wgpu_ds_att_desc;
11308             _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &wgpu_pass_desc);
11309         }
11310     }
11311     else {
11312         /* default render pass */
11313         WGPUTextureView wgpu_render_view = _sg.wgpu.render_view_cb();
11314         WGPUTextureView wgpu_resolve_view = _sg.wgpu.resolve_view_cb();
11315         WGPUTextureView wgpu_depth_stencil_view = _sg.wgpu.depth_stencil_view_cb();
11316 
11317         WGPURenderPassDescriptor pass_desc;
11318         memset(&pass_desc, 0, sizeof(pass_desc));
11319         WGPURenderPassColorAttachmentDescriptor color_att_desc;
11320         memset(&color_att_desc, 0, sizeof(color_att_desc));
11321         color_att_desc.loadOp = _sg_wgpu_load_op(action->colors[0].action);
11322         color_att_desc.clearColor.r = action->colors[0].val[0];
11323         color_att_desc.clearColor.g = action->colors[0].val[1];
11324         color_att_desc.clearColor.b = action->colors[0].val[2];
11325         color_att_desc.clearColor.a = action->colors[0].val[3];
11326         color_att_desc.attachment = wgpu_render_view;
11327         color_att_desc.resolveTarget = wgpu_resolve_view;   /* null if no MSAA rendering */
11328         pass_desc.colorAttachmentCount = 1;
11329         pass_desc.colorAttachments = &color_att_desc;
11330         WGPURenderPassDepthStencilAttachmentDescriptor ds_att_desc;
11331         memset(&ds_att_desc, 0, sizeof(ds_att_desc));
11332         ds_att_desc.attachment = wgpu_depth_stencil_view;
11333         SOKOL_ASSERT(0 != ds_att_desc.attachment);
11334         ds_att_desc.depthLoadOp = _sg_wgpu_load_op(action->depth.action);
11335         ds_att_desc.clearDepth = action->depth.val;
11336         ds_att_desc.stencilLoadOp = _sg_wgpu_load_op(action->stencil.action);
11337         ds_att_desc.clearStencil = action->stencil.val;
11338         pass_desc.depthStencilAttachment = &ds_att_desc;
11339         _sg.wgpu.pass_enc = wgpuCommandEncoderBeginRenderPass(_sg.wgpu.render_cmd_enc, &pass_desc);
11340     }
11341     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11342 
11343     /* initial uniform buffer binding (required even if no uniforms are set in the frame) */
11344     wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc,
11345                                       0, /* groupIndex 0 is reserved for uniform buffers */
11346                                       _sg.wgpu.ub.bindgroup,
11347                                       SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS,
11348                                       &_sg.wgpu.ub.bind_offsets[0][0]);
11349 }
11350 
/* Finish the currently-recording WGPU render pass and drop the pass encoder. */
_SOKOL_PRIVATE void _sg_wgpu_end_pass(void) {
    SOKOL_ASSERT(_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.pass_enc);
    _sg.wgpu.in_pass = false;
    /* the pass must be ended before the encoder object is released */
    wgpuRenderPassEncoderEndPass(_sg.wgpu.pass_enc);
    wgpuRenderPassEncoderRelease(_sg.wgpu.pass_enc);
    _sg.wgpu.pass_enc = 0;
}
11359 
/* End-of-frame: finish both command encoders, submit their command buffers
   to the queue, then create fresh encoders and grab new staging buffers
   for the next frame. Must be called outside a render pass. */
_SOKOL_PRIVATE void _sg_wgpu_commit(void) {
    SOKOL_ASSERT(!_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.queue);
    SOKOL_ASSERT(_sg.wgpu.render_cmd_enc);
    SOKOL_ASSERT(_sg.wgpu.staging_cmd_enc);

    /* finish and submit this frame's work */
    _sg_wgpu_ubpool_flush();
    _sg_wgpu_staging_unmap();

    WGPUCommandBuffer cmd_bufs[2];

    WGPUCommandBufferDescriptor cmd_buf_desc;
    memset(&cmd_buf_desc, 0, sizeof(cmd_buf_desc));
    /* staging copies go into slot 0 so they are submitted before the
       render commands that consume the uploaded data */
    cmd_bufs[0] = wgpuCommandEncoderFinish(_sg.wgpu.staging_cmd_enc, &cmd_buf_desc);
    SOKOL_ASSERT(cmd_bufs[0]);
    wgpuCommandEncoderRelease(_sg.wgpu.staging_cmd_enc);
    _sg.wgpu.staging_cmd_enc = 0;

    cmd_bufs[1] = wgpuCommandEncoderFinish(_sg.wgpu.render_cmd_enc, &cmd_buf_desc);
    SOKOL_ASSERT(cmd_bufs[1]);
    wgpuCommandEncoderRelease(_sg.wgpu.render_cmd_enc);
    _sg.wgpu.render_cmd_enc = 0;

    /* single submit of both command buffers, in staging-then-render order */
    wgpuQueueSubmit(_sg.wgpu.queue, 2, &cmd_bufs[0]);

    wgpuCommandBufferRelease(cmd_bufs[0]);
    wgpuCommandBufferRelease(cmd_bufs[1]);

    /* create a new render- and staging-command-encoders for next frame */
    WGPUCommandEncoderDescriptor cmd_enc_desc;
    memset(&cmd_enc_desc, 0, sizeof(cmd_enc_desc));
    _sg.wgpu.staging_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);
    _sg.wgpu.render_cmd_enc = wgpuDeviceCreateCommandEncoder(_sg.wgpu.dev, &cmd_enc_desc);

    /* grab new staging buffers for uniform- and vertex/image-updates */
    _sg_wgpu_ubpool_next_frame(false);
    _sg_wgpu_staging_next_frame(false);
}
11399 
_sg_wgpu_apply_viewport(int x,int y,int w,int h,bool origin_top_left)11400 _SOKOL_PRIVATE void _sg_wgpu_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
11401     SOKOL_ASSERT(_sg.wgpu.in_pass);
11402     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11403     float xf = (float) x;
11404     float yf = (float) (origin_top_left ? y : (_sg.wgpu.cur_height - (y + h)));
11405     float wf = (float) w;
11406     float hf = (float) h;
11407     wgpuRenderPassEncoderSetViewport(_sg.wgpu.pass_enc, xf, yf, wf, hf, 0.0f, 1.0f);
11408 }
11409 
_sg_wgpu_apply_scissor_rect(int x,int y,int w,int h,bool origin_top_left)11410 _SOKOL_PRIVATE void _sg_wgpu_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
11411     SOKOL_ASSERT(_sg.wgpu.in_pass);
11412     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11413     SOKOL_ASSERT(_sg.wgpu.in_pass);
11414     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11415 
11416     /* clip against framebuffer rect */
11417     x = _sg_min(_sg_max(0, x), _sg.wgpu.cur_width-1);
11418     y = _sg_min(_sg_max(0, y), _sg.wgpu.cur_height-1);
11419     if ((x + w) > _sg.wgpu.cur_width) {
11420         w = _sg.wgpu.cur_width - x;
11421     }
11422     if ((y + h) > _sg.wgpu.cur_height) {
11423         h = _sg.wgpu.cur_height - y;
11424     }
11425     w = _sg_max(w, 1);
11426     h = _sg_max(h, 1);
11427 
11428     uint32_t sx = (uint32_t) x;
11429     uint32_t sy = origin_top_left ? y : (_sg.wgpu.cur_height - (y + h));
11430     uint32_t sw = w;
11431     uint32_t sh = h;
11432     wgpuRenderPassEncoderSetScissorRect(_sg.wgpu.pass_enc, sx, sy, sw, sh);
11433 }
11434 
/* Bind a pipeline object plus its associated dynamic state (blend color,
   stencil reference) on the active render pass encoder, and cache the
   pipeline for later lookups by apply_uniforms/draw. */
_SOKOL_PRIVATE void _sg_wgpu_apply_pipeline(_sg_pipeline_t* pip) {
    SOKOL_ASSERT(pip);
    SOKOL_ASSERT(pip->wgpu.pip);
    SOKOL_ASSERT(_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.pass_enc);
    /* remember whether subsequent draw calls take the indexed path */
    _sg.wgpu.draw_indexed = (pip->cmn.index_type != SG_INDEXTYPE_NONE);
    _sg.wgpu.cur_pipeline = pip;
    _sg.wgpu.cur_pipeline_id.id = pip->slot.id;
    wgpuRenderPassEncoderSetPipeline(_sg.wgpu.pass_enc, pip->wgpu.pip);
    wgpuRenderPassEncoderSetBlendColor(_sg.wgpu.pass_enc, (WGPUColor*)pip->cmn.blend_color);
    wgpuRenderPassEncoderSetStencilReference(_sg.wgpu.pass_enc, pip->wgpu.stencil_ref);
}
11447 
_sg_wgpu_create_images_bindgroup(WGPUBindGroupLayout bgl,_sg_image_t ** imgs,int num_imgs)11448 _SOKOL_PRIVATE WGPUBindGroup _sg_wgpu_create_images_bindgroup(WGPUBindGroupLayout bgl, _sg_image_t** imgs, int num_imgs) {
11449     SOKOL_ASSERT(_sg.wgpu.dev);
11450     SOKOL_ASSERT(num_imgs <= _SG_WGPU_MAX_SHADERSTAGE_IMAGES);
11451     WGPUBindGroupBinding img_bgb[_SG_WGPU_MAX_SHADERSTAGE_IMAGES * 2];
11452     memset(&img_bgb, 0, sizeof(img_bgb));
11453     for (int img_index = 0; img_index < num_imgs; img_index++) {
11454         WGPUBindGroupBinding* tex_bdg = &img_bgb[img_index*2 + 0];
11455         WGPUBindGroupBinding* smp_bdg = &img_bgb[img_index*2 + 1];
11456         tex_bdg->binding = img_index;
11457         tex_bdg->textureView = imgs[img_index]->wgpu.tex_view;
11458         smp_bdg->binding = img_index + _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
11459         smp_bdg->sampler = imgs[img_index]->wgpu.sampler;
11460     }
11461     WGPUBindGroupDescriptor bg_desc;
11462     memset(&bg_desc, 0, sizeof(bg_desc));
11463     bg_desc.layout = bgl;
11464     bg_desc.bindingCount = 2 * num_imgs;
11465     bg_desc.bindings = &img_bgb[0];
11466     WGPUBindGroup bg = wgpuDeviceCreateBindGroup(_sg.wgpu.dev, &bg_desc);
11467     SOKOL_ASSERT(bg);
11468     return bg;
11469 }
11470 
_sg_wgpu_apply_bindings(_sg_pipeline_t * pip,_sg_buffer_t ** vbs,const int * vb_offsets,int num_vbs,_sg_buffer_t * ib,int ib_offset,_sg_image_t ** vs_imgs,int num_vs_imgs,_sg_image_t ** fs_imgs,int num_fs_imgs)11471 _SOKOL_PRIVATE void _sg_wgpu_apply_bindings(
11472     _sg_pipeline_t* pip,
11473     _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
11474     _sg_buffer_t* ib, int ib_offset,
11475     _sg_image_t** vs_imgs, int num_vs_imgs,
11476     _sg_image_t** fs_imgs, int num_fs_imgs)
11477 {
11478     SOKOL_ASSERT(_sg.wgpu.in_pass);
11479     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11480     SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));
11481 
11482     /* index buffer */
11483     if (ib) {
11484         wgpuRenderPassEncoderSetIndexBuffer(_sg.wgpu.pass_enc, ib->wgpu.buf, ib_offset);
11485     }
11486 
11487     /* vertex buffers */
11488     for (uint32_t slot = 0; slot < (uint32_t)num_vbs; slot++) {
11489         wgpuRenderPassEncoderSetVertexBuffer(_sg.wgpu.pass_enc, slot, vbs[slot]->wgpu.buf, (uint64_t)vb_offsets[slot]);
11490     }
11491 
11492     /* need to create throw-away bind groups for images */
11493     if (num_vs_imgs > 0) {
11494         if (num_vs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
11495             num_vs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
11496         }
11497         WGPUBindGroupLayout vs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_VS].bind_group_layout;
11498         SOKOL_ASSERT(vs_bgl);
11499         WGPUBindGroup vs_img_bg = _sg_wgpu_create_images_bindgroup(vs_bgl, vs_imgs, num_vs_imgs);
11500         wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, vs_img_bg, 0, 0);
11501         wgpuBindGroupRelease(vs_img_bg);
11502     }
11503     else {
11504         wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 1, _sg.wgpu.empty_bind_group, 0, 0);
11505     }
11506     if (num_fs_imgs > 0) {
11507         if (num_fs_imgs > _SG_WGPU_MAX_SHADERSTAGE_IMAGES) {
11508             num_fs_imgs = _SG_WGPU_MAX_SHADERSTAGE_IMAGES;
11509         }
11510         WGPUBindGroupLayout fs_bgl = pip->shader->wgpu.stage[SG_SHADERSTAGE_FS].bind_group_layout;
11511         SOKOL_ASSERT(fs_bgl);
11512         WGPUBindGroup fs_img_bg = _sg_wgpu_create_images_bindgroup(fs_bgl, fs_imgs, num_fs_imgs);
11513         wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, fs_img_bg, 0, 0);
11514         wgpuBindGroupRelease(fs_img_bg);
11515     }
11516     else {
11517         wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc, 2, _sg.wgpu.empty_bind_group, 0, 0);
11518     }
11519 }
11520 
/* Copy uniform data into the current uniform staging buffer and rebind
   bind group 0 with updated dynamic offsets. The write cursor
   (_sg.wgpu.ub.offset) is advanced and re-aligned afterwards so each
   update starts on a _SG_WGPU_STAGING_ALIGN boundary. */
_SOKOL_PRIVATE void _sg_wgpu_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
    SOKOL_ASSERT(_sg.wgpu.in_pass);
    SOKOL_ASSERT(_sg.wgpu.pass_enc);
    SOKOL_ASSERT(data && (num_bytes > 0));
    SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES));
    SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
    SOKOL_ASSERT((_sg.wgpu.ub.offset + num_bytes) <= _sg.wgpu.ub.num_bytes);
    SOKOL_ASSERT((_sg.wgpu.ub.offset & (_SG_WGPU_STAGING_ALIGN-1)) == 0);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline && _sg.wgpu.cur_pipeline->shader);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline->slot.id == _sg.wgpu.cur_pipeline_id.id);
    SOKOL_ASSERT(_sg.wgpu.cur_pipeline->shader->slot.id == _sg.wgpu.cur_pipeline->cmn.shader_id.id);
    SOKOL_ASSERT(ub_index < _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].num_uniform_blocks);
    SOKOL_ASSERT(num_bytes <= _sg.wgpu.cur_pipeline->shader->cmn.stage[stage_index].uniform_blocks[ub_index].size);
    SOKOL_ASSERT(num_bytes <= _SG_WGPU_MAX_UNIFORM_UPDATE_SIZE);
    SOKOL_ASSERT(0 != _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur]);

    /* copy into the CPU-visible staging area of the current frame */
    uint8_t* dst_ptr = _sg.wgpu.ub.stage.ptr[_sg.wgpu.ub.stage.cur] + _sg.wgpu.ub.offset;
    memcpy(dst_ptr, data, num_bytes);
    /* record the new dynamic offset for this stage/uniform-block slot */
    _sg.wgpu.ub.bind_offsets[stage_index][ub_index] = _sg.wgpu.ub.offset;
    wgpuRenderPassEncoderSetBindGroup(_sg.wgpu.pass_enc,
                                      0, /* groupIndex 0 is reserved for uniform buffers */
                                      _sg.wgpu.ub.bindgroup,
                                      SG_NUM_SHADER_STAGES * SG_MAX_SHADERSTAGE_UBS,
                                      &_sg.wgpu.ub.bind_offsets[0][0]);
    /* advance and re-align the write cursor for the next update */
    _sg.wgpu.ub.offset = _sg_roundup(_sg.wgpu.ub.offset + num_bytes, _SG_WGPU_STAGING_ALIGN);
}
11547 
_sg_wgpu_draw(int base_element,int num_elements,int num_instances)11548 _SOKOL_PRIVATE void _sg_wgpu_draw(int base_element, int num_elements, int num_instances) {
11549     SOKOL_ASSERT(_sg.wgpu.in_pass);
11550     SOKOL_ASSERT(_sg.wgpu.pass_enc);
11551     if (_sg.wgpu.draw_indexed) {
11552         wgpuRenderPassEncoderDrawIndexed(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0, 0);
11553     }
11554     else {
11555         wgpuRenderPassEncoderDraw(_sg.wgpu.pass_enc, num_elements, num_instances, base_element, 0);
11556     }
11557 }
11558 
_sg_wgpu_update_buffer(_sg_buffer_t * buf,const void * data,uint32_t num_bytes)11559 _SOKOL_PRIVATE void _sg_wgpu_update_buffer(_sg_buffer_t* buf, const void* data, uint32_t num_bytes) {
11560     SOKOL_ASSERT(buf && data && (num_bytes > 0));
11561     uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, 0, data, (uint32_t)num_bytes);
11562     SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes);
11563 }
11564 
_sg_wgpu_append_buffer(_sg_buffer_t * buf,const void * data,uint32_t num_bytes,bool new_frame)11565 _SOKOL_PRIVATE uint32_t _sg_wgpu_append_buffer(_sg_buffer_t* buf, const void* data, uint32_t num_bytes, bool new_frame) {
11566     SOKOL_ASSERT(buf && data && (num_bytes > 0));
11567     _SOKOL_UNUSED(new_frame);
11568     uint32_t copied_num_bytes = _sg_wgpu_staging_copy_to_buffer(buf->wgpu.buf, buf->cmn.append_pos, data, num_bytes);
11569     SOKOL_ASSERT(copied_num_bytes > 0); _SOKOL_UNUSED(copied_num_bytes);
11570     return copied_num_bytes;
11571 }
11572 
_sg_wgpu_update_image(_sg_image_t * img,const sg_image_content * data)11573 _SOKOL_PRIVATE void _sg_wgpu_update_image(_sg_image_t* img, const sg_image_content* data) {
11574     SOKOL_ASSERT(img && data);
11575     bool success = _sg_wgpu_staging_copy_to_texture(img, data);
11576     SOKOL_ASSERT(success);
11577     _SOKOL_UNUSED(success);
11578 }
11579 #endif
11580 
11581 /*== BACKEND API WRAPPERS ====================================================*/
/* Backend dispatch: forward setup to the compile-time selected backend. */
static inline void _sg_setup_backend(const sg_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_setup_backend(desc);
    #elif defined(SOKOL_METAL)
    _sg_mtl_setup_backend(desc);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_setup_backend(desc);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_setup_backend(desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_setup_backend(desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11597 
/* Backend dispatch: tear down the compile-time selected backend. */
static inline void _sg_discard_backend(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_discard_backend();
    #elif defined(SOKOL_METAL)
    _sg_mtl_discard_backend();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_discard_backend();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_discard_backend();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_discard_backend();
    #else
    #error("INVALID BACKEND");
    #endif
}
11613 
/* Backend dispatch: reset the backend's internal state cache. */
static inline void _sg_reset_state_cache(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_reset_state_cache();
    #elif defined(SOKOL_METAL)
    _sg_mtl_reset_state_cache();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_reset_state_cache();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_reset_state_cache();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_reset_state_cache();
    #else
    #error("INVALID BACKEND");
    #endif
}
11629 
/* Backend dispatch: make the given context current. */
static inline void _sg_activate_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_activate_context(ctx);
    #elif defined(SOKOL_METAL)
    _sg_mtl_activate_context(ctx);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_activate_context(ctx);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_activate_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_activate_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
11645 
/* Backend dispatch: create context resources; returns the resource state. */
static inline sg_resource_state _sg_create_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_context(ctx);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_context(ctx);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_context(ctx);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
11661 
/* Backend dispatch: destroy context resources. */
static inline void _sg_destroy_context(_sg_context_t* ctx) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_destroy_context(ctx);
    #elif defined(SOKOL_METAL)
    _sg_mtl_destroy_context(ctx);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_destroy_context(ctx);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_destroy_context(ctx);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_destroy_context(ctx);
    #else
    #error("INVALID BACKEND");
    #endif
}
11677 
/* Backend dispatch: create buffer resources; returns the resource state. */
static inline sg_resource_state _sg_create_buffer(_sg_buffer_t* buf, const sg_buffer_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_buffer(buf, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_buffer(buf, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_buffer(buf, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_buffer(buf, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_buffer(buf, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11693 
/* Backend dispatch: destroy buffer resources. */
static inline void _sg_destroy_buffer(_sg_buffer_t* buf) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_destroy_buffer(buf);
    #elif defined(SOKOL_METAL)
    _sg_mtl_destroy_buffer(buf);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_destroy_buffer(buf);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_destroy_buffer(buf);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_destroy_buffer(buf);
    #else
    #error("INVALID BACKEND");
    #endif
}
11709 
/* Backend dispatch: create image resources; returns the resource state. */
static inline sg_resource_state _sg_create_image(_sg_image_t* img, const sg_image_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_image(img, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_image(img, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_image(img, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_image(img, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_image(img, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11725 
/* Backend dispatch: destroy image resources. */
static inline void _sg_destroy_image(_sg_image_t* img) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_destroy_image(img);
    #elif defined(SOKOL_METAL)
    _sg_mtl_destroy_image(img);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_destroy_image(img);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_destroy_image(img);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_destroy_image(img);
    #else
    #error("INVALID BACKEND");
    #endif
}
11741 
/* Backend dispatch: create shader resources; returns the resource state. */
static inline sg_resource_state _sg_create_shader(_sg_shader_t* shd, const sg_shader_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_shader(shd, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_shader(shd, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_shader(shd, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_shader(shd, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_shader(shd, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11757 
/* Backend dispatch: destroy shader resources. */
static inline void _sg_destroy_shader(_sg_shader_t* shd) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_destroy_shader(shd);
    #elif defined(SOKOL_METAL)
    _sg_mtl_destroy_shader(shd);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_destroy_shader(shd);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_destroy_shader(shd);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_destroy_shader(shd);
    #else
    #error("INVALID BACKEND");
    #endif
}
11773 
/* Backend dispatch: create pipeline resources; returns the resource state. */
static inline sg_resource_state _sg_create_pipeline(_sg_pipeline_t* pip, _sg_shader_t* shd, const sg_pipeline_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_pipeline(pip, shd, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_pipeline(pip, shd, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11789 
/* Backend dispatch: destroy pipeline resources. */
static inline void _sg_destroy_pipeline(_sg_pipeline_t* pip) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_destroy_pipeline(pip);
    #elif defined(SOKOL_METAL)
    _sg_mtl_destroy_pipeline(pip);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_destroy_pipeline(pip);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_destroy_pipeline(pip);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_destroy_pipeline(pip);
    #else
    #error("INVALID BACKEND");
    #endif
}
11805 
/* Backend dispatch: create pass resources; returns the resource state. */
static inline sg_resource_state _sg_create_pass(_sg_pass_t* pass, _sg_image_t** att_images, const sg_pass_desc* desc) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_create_pass(pass, att_images, desc);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_create_pass(pass, att_images, desc);
    #else
    #error("INVALID BACKEND");
    #endif
}
11821 
_sg_destroy_pass(_sg_pass_t * pass)11822 static inline void _sg_destroy_pass(_sg_pass_t* pass) {
11823     #if defined(_SOKOL_ANY_GL)
11824     _sg_gl_destroy_pass(pass);
11825     #elif defined(SOKOL_METAL)
11826     _sg_mtl_destroy_pass(pass);
11827     #elif defined(SOKOL_D3D11)
11828     _sg_d3d11_destroy_pass(pass);
11829     #elif defined(SOKOL_WGPU)
11830     return _sg_wgpu_destroy_pass(pass);
11831     #elif defined(SOKOL_DUMMY_BACKEND)
11832     _sg_dummy_destroy_pass(pass);
11833     #else
11834     #error("INVALID BACKEND");
11835     #endif
11836 }
11837 
/* Backend dispatch: look up a pass's color attachment image by index. */
static inline _sg_image_t* _sg_pass_color_image(const _sg_pass_t* pass, int index) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_pass_color_image(pass, index);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_pass_color_image(pass, index);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_pass_color_image(pass, index);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_pass_color_image(pass, index);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_pass_color_image(pass, index);
    #else
    #error("INVALID BACKEND");
    #endif
}
11853 
/* Backend dispatch: look up a pass's depth-stencil attachment image. */
static inline _sg_image_t* _sg_pass_ds_image(const _sg_pass_t* pass) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_pass_ds_image(pass);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_pass_ds_image(pass);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_pass_ds_image(pass);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_pass_ds_image(pass);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_pass_ds_image(pass);
    #else
    #error("INVALID BACKEND");
    #endif
}
11869 
/* Backend dispatch: begin a render pass (pass == 0 means default pass). */
static inline void _sg_begin_pass(_sg_pass_t* pass, const sg_pass_action* action, int w, int h) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_METAL)
    _sg_mtl_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_begin_pass(pass, action, w, h);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_begin_pass(pass, action, w, h);
    #else
    #error("INVALID BACKEND");
    #endif
}
11885 
/* Backend dispatch: end the current render pass. */
static inline void _sg_end_pass(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_end_pass();
    #elif defined(SOKOL_METAL)
    _sg_mtl_end_pass();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_end_pass();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_end_pass();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_end_pass();
    #else
    #error("INVALID BACKEND");
    #endif
}
11901 
/* Backend dispatch: set the viewport inside the current pass. */
static inline void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_viewport(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_viewport(x, y, w, h, origin_top_left);
    #else
    #error("INVALID BACKEND");
    #endif
}
11917 
/* Backend dispatch: set the scissor rect inside the current pass. */
static inline void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_scissor_rect(x, y, w, h, origin_top_left);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_scissor_rect(x, y, w, h, origin_top_left);
    #else
    #error("INVALID BACKEND");
    #endif
}
11933 
/* Backend dispatch: bind a pipeline inside the current pass. */
static inline void _sg_apply_pipeline(_sg_pipeline_t* pip) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_pipeline(pip);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_pipeline(pip);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_pipeline(pip);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_pipeline(pip);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_pipeline(pip);
    #else
    #error("INVALID BACKEND");
    #endif
}
11949 
/* Backend dispatch: bind vertex/index buffers and stage images. */
static inline void _sg_apply_bindings(
    _sg_pipeline_t* pip,
    _sg_buffer_t** vbs, const int* vb_offsets, int num_vbs,
    _sg_buffer_t* ib, int ib_offset,
    _sg_image_t** vs_imgs, int num_vs_imgs,
    _sg_image_t** fs_imgs, int num_fs_imgs)
{
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
    #else
    #error("INVALID BACKEND");
    #endif
}
11971 
/* Backend dispatch: update a uniform block for the given shader stage. */
static inline void _sg_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_apply_uniforms(stage_index, ub_index, data, num_bytes);
    #elif defined(SOKOL_METAL)
    _sg_mtl_apply_uniforms(stage_index, ub_index, data, num_bytes);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_apply_uniforms(stage_index, ub_index, data, num_bytes);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_apply_uniforms(stage_index, ub_index, data, num_bytes);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_apply_uniforms(stage_index, ub_index, data, num_bytes);
    #else
    #error("INVALID BACKEND");
    #endif
}
11987 
/* Backend dispatch: issue a (possibly instanced) draw call. */
static inline void _sg_draw(int base_element, int num_elements, int num_instances) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_METAL)
    _sg_mtl_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_draw(base_element, num_elements, num_instances);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_draw(base_element, num_elements, num_instances);
    #else
    #error("INVALID BACKEND");
    #endif
}
12003 
/* Backend dispatch: commit the frame's rendering work. */
static inline void _sg_commit(void) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_commit();
    #elif defined(SOKOL_METAL)
    _sg_mtl_commit();
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_commit();
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_commit();
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_commit();
    #else
    #error("INVALID BACKEND");
    #endif
}
12019 
/* route a buffer content update to the compile-time selected backend */
static inline void _sg_update_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_update_buffer(buf, data_ptr, data_size);
    #elif defined(SOKOL_METAL)
    _sg_mtl_update_buffer(buf, data_ptr, data_size);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_update_buffer(buf, data_ptr, data_size);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_update_buffer(buf, data_ptr, data_size);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_update_buffer(buf, data_ptr, data_size);
    #else
    #error("INVALID BACKEND");
    #endif
}
12035 
/* route a buffer append to the compile-time selected backend,
   returns the byte offset at which the data was appended */
static inline uint32_t _sg_append_buffer(_sg_buffer_t* buf, const void* data_ptr, uint32_t data_size, bool new_frame) {
    #if defined(_SOKOL_ANY_GL)
    return _sg_gl_append_buffer(buf, data_ptr, data_size, new_frame);
    #elif defined(SOKOL_METAL)
    return _sg_mtl_append_buffer(buf, data_ptr, data_size, new_frame);
    #elif defined(SOKOL_D3D11)
    return _sg_d3d11_append_buffer(buf, data_ptr, data_size, new_frame);
    #elif defined(SOKOL_WGPU)
    return _sg_wgpu_append_buffer(buf, data_ptr, data_size, new_frame);
    #elif defined(SOKOL_DUMMY_BACKEND)
    return _sg_dummy_append_buffer(buf, data_ptr, data_size, new_frame);
    #else
    #error("INVALID BACKEND");
    #endif
}
12051 
/* route an image content update to the compile-time selected backend */
static inline void _sg_update_image(_sg_image_t* img, const sg_image_content* data) {
    #if defined(_SOKOL_ANY_GL)
    _sg_gl_update_image(img, data);
    #elif defined(SOKOL_METAL)
    _sg_mtl_update_image(img, data);
    #elif defined(SOKOL_D3D11)
    _sg_d3d11_update_image(img, data);
    #elif defined(SOKOL_WGPU)
    _sg_wgpu_update_image(img, data);
    #elif defined(SOKOL_DUMMY_BACKEND)
    _sg_dummy_update_image(img, data);
    #else
    #error("INVALID BACKEND");
    #endif
}
12067 
12068 /*== RESOURCE POOLS ==========================================================*/
12069 
_sg_init_pool(_sg_pool_t * pool,int num)12070 _SOKOL_PRIVATE void _sg_init_pool(_sg_pool_t* pool, int num) {
12071     SOKOL_ASSERT(pool && (num >= 1));
12072     /* slot 0 is reserved for the 'invalid id', so bump the pool size by 1 */
12073     pool->size = num + 1;
12074     pool->queue_top = 0;
12075     /* generation counters indexable by pool slot index, slot 0 is reserved */
12076     size_t gen_ctrs_size = sizeof(uint32_t) * pool->size;
12077     pool->gen_ctrs = (uint32_t*) SOKOL_MALLOC(gen_ctrs_size);
12078     SOKOL_ASSERT(pool->gen_ctrs);
12079     memset(pool->gen_ctrs, 0, gen_ctrs_size);
12080     /* it's not a bug to only reserve 'num' here */
12081     pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int)*num);
12082     SOKOL_ASSERT(pool->free_queue);
12083     /* never allocate the zero-th pool item since the invalid id is 0 */
12084     for (int i = pool->size-1; i >= 1; i--) {
12085         pool->free_queue[pool->queue_top++] = i;
12086     }
12087 }
12088 
_sg_discard_pool(_sg_pool_t * pool)12089 _SOKOL_PRIVATE void _sg_discard_pool(_sg_pool_t* pool) {
12090     SOKOL_ASSERT(pool);
12091     SOKOL_ASSERT(pool->free_queue);
12092     SOKOL_FREE(pool->free_queue);
12093     pool->free_queue = 0;
12094     SOKOL_ASSERT(pool->gen_ctrs);
12095     SOKOL_FREE(pool->gen_ctrs);
12096     pool->gen_ctrs = 0;
12097     pool->size = 0;
12098     pool->queue_top = 0;
12099 }
12100 
_sg_pool_alloc_index(_sg_pool_t * pool)12101 _SOKOL_PRIVATE int _sg_pool_alloc_index(_sg_pool_t* pool) {
12102     SOKOL_ASSERT(pool);
12103     SOKOL_ASSERT(pool->free_queue);
12104     if (pool->queue_top > 0) {
12105         int slot_index = pool->free_queue[--pool->queue_top];
12106         SOKOL_ASSERT((slot_index > 0) && (slot_index < pool->size));
12107         return slot_index;
12108     }
12109     else {
12110         /* pool exhausted */
12111         return _SG_INVALID_SLOT_INDEX;
12112     }
12113 }
12114 
_sg_pool_free_index(_sg_pool_t * pool,int slot_index)12115 _SOKOL_PRIVATE void _sg_pool_free_index(_sg_pool_t* pool, int slot_index) {
12116     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size));
12117     SOKOL_ASSERT(pool);
12118     SOKOL_ASSERT(pool->free_queue);
12119     SOKOL_ASSERT(pool->queue_top < pool->size);
12120     #ifdef SOKOL_DEBUG
12121     /* debug check against double-free */
12122     for (int i = 0; i < pool->queue_top; i++) {
12123         SOKOL_ASSERT(pool->free_queue[i] != slot_index);
12124     }
12125     #endif
12126     pool->free_queue[pool->queue_top++] = slot_index;
12127     SOKOL_ASSERT(pool->queue_top <= (pool->size-1));
12128 }
12129 
_sg_reset_buffer(_sg_buffer_t * buf)12130 _SOKOL_PRIVATE void _sg_reset_buffer(_sg_buffer_t* buf) {
12131     SOKOL_ASSERT(buf);
12132     memset(buf, 0, sizeof(_sg_buffer_t));
12133 }
12134 
_sg_reset_image(_sg_image_t * img)12135 _SOKOL_PRIVATE void _sg_reset_image(_sg_image_t* img) {
12136     SOKOL_ASSERT(img);
12137     memset(img, 0, sizeof(_sg_image_t));
12138 }
12139 
_sg_reset_shader(_sg_shader_t * shd)12140 _SOKOL_PRIVATE void _sg_reset_shader(_sg_shader_t* shd) {
12141     SOKOL_ASSERT(shd);
12142     memset(shd, 0, sizeof(_sg_shader_t));
12143 }
12144 
_sg_reset_pipeline(_sg_pipeline_t * pip)12145 _SOKOL_PRIVATE void _sg_reset_pipeline(_sg_pipeline_t* pip) {
12146     SOKOL_ASSERT(pip);
12147     memset(pip, 0, sizeof(_sg_pipeline_t));
12148 }
12149 
_sg_reset_pass(_sg_pass_t * pass)12150 _SOKOL_PRIVATE void _sg_reset_pass(_sg_pass_t* pass) {
12151     SOKOL_ASSERT(pass);
12152     memset(pass, 0, sizeof(_sg_pass_t));
12153 }
12154 
_sg_reset_context(_sg_context_t * ctx)12155 _SOKOL_PRIVATE void _sg_reset_context(_sg_context_t* ctx) {
12156     SOKOL_ASSERT(ctx);
12157     memset(ctx, 0, sizeof(_sg_context_t));
12158 }
12159 
_sg_setup_pools(_sg_pools_t * p,const sg_desc * desc)12160 _SOKOL_PRIVATE void _sg_setup_pools(_sg_pools_t* p, const sg_desc* desc) {
12161     SOKOL_ASSERT(p);
12162     SOKOL_ASSERT(desc);
12163     /* note: the pools here will have an additional item, since slot 0 is reserved */
12164     SOKOL_ASSERT((desc->buffer_pool_size > 0) && (desc->buffer_pool_size < _SG_MAX_POOL_SIZE));
12165     _sg_init_pool(&p->buffer_pool, desc->buffer_pool_size);
12166     size_t buffer_pool_byte_size = sizeof(_sg_buffer_t) * p->buffer_pool.size;
12167     p->buffers = (_sg_buffer_t*) SOKOL_MALLOC(buffer_pool_byte_size);
12168     SOKOL_ASSERT(p->buffers);
12169     memset(p->buffers, 0, buffer_pool_byte_size);
12170 
12171     SOKOL_ASSERT((desc->image_pool_size > 0) && (desc->image_pool_size < _SG_MAX_POOL_SIZE));
12172     _sg_init_pool(&p->image_pool, desc->image_pool_size);
12173     size_t image_pool_byte_size = sizeof(_sg_image_t) * p->image_pool.size;
12174     p->images = (_sg_image_t*) SOKOL_MALLOC(image_pool_byte_size);
12175     SOKOL_ASSERT(p->images);
12176     memset(p->images, 0, image_pool_byte_size);
12177 
12178     SOKOL_ASSERT((desc->shader_pool_size > 0) && (desc->shader_pool_size < _SG_MAX_POOL_SIZE));
12179     _sg_init_pool(&p->shader_pool, desc->shader_pool_size);
12180     size_t shader_pool_byte_size = sizeof(_sg_shader_t) * p->shader_pool.size;
12181     p->shaders = (_sg_shader_t*) SOKOL_MALLOC(shader_pool_byte_size);
12182     SOKOL_ASSERT(p->shaders);
12183     memset(p->shaders, 0, shader_pool_byte_size);
12184 
12185     SOKOL_ASSERT((desc->pipeline_pool_size > 0) && (desc->pipeline_pool_size < _SG_MAX_POOL_SIZE));
12186     _sg_init_pool(&p->pipeline_pool, desc->pipeline_pool_size);
12187     size_t pipeline_pool_byte_size = sizeof(_sg_pipeline_t) * p->pipeline_pool.size;
12188     p->pipelines = (_sg_pipeline_t*) SOKOL_MALLOC(pipeline_pool_byte_size);
12189     SOKOL_ASSERT(p->pipelines);
12190     memset(p->pipelines, 0, pipeline_pool_byte_size);
12191 
12192     SOKOL_ASSERT((desc->pass_pool_size > 0) && (desc->pass_pool_size < _SG_MAX_POOL_SIZE));
12193     _sg_init_pool(&p->pass_pool, desc->pass_pool_size);
12194     size_t pass_pool_byte_size = sizeof(_sg_pass_t) * p->pass_pool.size;
12195     p->passes = (_sg_pass_t*) SOKOL_MALLOC(pass_pool_byte_size);
12196     SOKOL_ASSERT(p->passes);
12197     memset(p->passes, 0, pass_pool_byte_size);
12198 
12199     SOKOL_ASSERT((desc->context_pool_size > 0) && (desc->context_pool_size < _SG_MAX_POOL_SIZE));
12200     _sg_init_pool(&p->context_pool, desc->context_pool_size);
12201     size_t context_pool_byte_size = sizeof(_sg_context_t) * p->context_pool.size;
12202     p->contexts = (_sg_context_t*) SOKOL_MALLOC(context_pool_byte_size);
12203     SOKOL_ASSERT(p->contexts);
12204     memset(p->contexts, 0, context_pool_byte_size);
12205 }
12206 
_sg_discard_pools(_sg_pools_t * p)12207 _SOKOL_PRIVATE void _sg_discard_pools(_sg_pools_t* p) {
12208     SOKOL_ASSERT(p);
12209     SOKOL_FREE(p->contexts);    p->contexts = 0;
12210     SOKOL_FREE(p->passes);      p->passes = 0;
12211     SOKOL_FREE(p->pipelines);   p->pipelines = 0;
12212     SOKOL_FREE(p->shaders);     p->shaders = 0;
12213     SOKOL_FREE(p->images);      p->images = 0;
12214     SOKOL_FREE(p->buffers);     p->buffers = 0;
12215     _sg_discard_pool(&p->context_pool);
12216     _sg_discard_pool(&p->pass_pool);
12217     _sg_discard_pool(&p->pipeline_pool);
12218     _sg_discard_pool(&p->shader_pool);
12219     _sg_discard_pool(&p->image_pool);
12220     _sg_discard_pool(&p->buffer_pool);
12221 }
12222 
12223 /* allocate the slot at slot_index:
12224     - bump the slot's generation counter
12225     - create a resource id from the generation counter and slot index
12226     - set the slot's id to this id
12227     - set the slot's state to ALLOC
12228     - return the resource id
12229 */
_sg_slot_alloc(_sg_pool_t * pool,_sg_slot_t * slot,int slot_index)12230 _SOKOL_PRIVATE uint32_t _sg_slot_alloc(_sg_pool_t* pool, _sg_slot_t* slot, int slot_index) {
12231     /* FIXME: add handling for an overflowing generation counter,
12232        for now, just overflow (another option is to disable
12233        the slot)
12234     */
12235     SOKOL_ASSERT(pool && pool->gen_ctrs);
12236     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < pool->size));
12237     SOKOL_ASSERT((slot->state == SG_RESOURCESTATE_INITIAL) && (slot->id == SG_INVALID_ID));
12238     uint32_t ctr = ++pool->gen_ctrs[slot_index];
12239     slot->id = (ctr<<_SG_SLOT_SHIFT)|(slot_index & _SG_SLOT_MASK);
12240     slot->state = SG_RESOURCESTATE_ALLOC;
12241     return slot->id;
12242 }
12243 
12244 /* extract slot index from id */
_sg_slot_index(uint32_t id)12245 _SOKOL_PRIVATE int _sg_slot_index(uint32_t id) {
12246     int slot_index = (int) (id & _SG_SLOT_MASK);
12247     SOKOL_ASSERT(_SG_INVALID_SLOT_INDEX != slot_index);
12248     return slot_index;
12249 }
12250 
12251 /* returns pointer to resource by id without matching id check */
_sg_buffer_at(const _sg_pools_t * p,uint32_t buf_id)12252 _SOKOL_PRIVATE _sg_buffer_t* _sg_buffer_at(const _sg_pools_t* p, uint32_t buf_id) {
12253     SOKOL_ASSERT(p && (SG_INVALID_ID != buf_id));
12254     int slot_index = _sg_slot_index(buf_id);
12255     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->buffer_pool.size));
12256     return &p->buffers[slot_index];
12257 }
12258 
_sg_image_at(const _sg_pools_t * p,uint32_t img_id)12259 _SOKOL_PRIVATE _sg_image_t* _sg_image_at(const _sg_pools_t* p, uint32_t img_id) {
12260     SOKOL_ASSERT(p && (SG_INVALID_ID != img_id));
12261     int slot_index = _sg_slot_index(img_id);
12262     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->image_pool.size));
12263     return &p->images[slot_index];
12264 }
12265 
_sg_shader_at(const _sg_pools_t * p,uint32_t shd_id)12266 _SOKOL_PRIVATE _sg_shader_t* _sg_shader_at(const _sg_pools_t* p, uint32_t shd_id) {
12267     SOKOL_ASSERT(p && (SG_INVALID_ID != shd_id));
12268     int slot_index = _sg_slot_index(shd_id);
12269     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->shader_pool.size));
12270     return &p->shaders[slot_index];
12271 }
12272 
_sg_pipeline_at(const _sg_pools_t * p,uint32_t pip_id)12273 _SOKOL_PRIVATE _sg_pipeline_t* _sg_pipeline_at(const _sg_pools_t* p, uint32_t pip_id) {
12274     SOKOL_ASSERT(p && (SG_INVALID_ID != pip_id));
12275     int slot_index = _sg_slot_index(pip_id);
12276     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pipeline_pool.size));
12277     return &p->pipelines[slot_index];
12278 }
12279 
_sg_pass_at(const _sg_pools_t * p,uint32_t pass_id)12280 _SOKOL_PRIVATE _sg_pass_t* _sg_pass_at(const _sg_pools_t* p, uint32_t pass_id) {
12281     SOKOL_ASSERT(p && (SG_INVALID_ID != pass_id));
12282     int slot_index = _sg_slot_index(pass_id);
12283     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->pass_pool.size));
12284     return &p->passes[slot_index];
12285 }
12286 
_sg_context_at(const _sg_pools_t * p,uint32_t context_id)12287 _SOKOL_PRIVATE _sg_context_t* _sg_context_at(const _sg_pools_t* p, uint32_t context_id) {
12288     SOKOL_ASSERT(p && (SG_INVALID_ID != context_id));
12289     int slot_index = _sg_slot_index(context_id);
12290     SOKOL_ASSERT((slot_index > _SG_INVALID_SLOT_INDEX) && (slot_index < p->context_pool.size));
12291     return &p->contexts[slot_index];
12292 }
12293 
12294 /* returns pointer to resource with matching id check, may return 0 */
_sg_lookup_buffer(const _sg_pools_t * p,uint32_t buf_id)12295 _SOKOL_PRIVATE _sg_buffer_t* _sg_lookup_buffer(const _sg_pools_t* p, uint32_t buf_id) {
12296     if (SG_INVALID_ID != buf_id) {
12297         _sg_buffer_t* buf = _sg_buffer_at(p, buf_id);
12298         if (buf->slot.id == buf_id) {
12299             return buf;
12300         }
12301     }
12302     return 0;
12303 }
12304 
_sg_lookup_image(const _sg_pools_t * p,uint32_t img_id)12305 _SOKOL_PRIVATE _sg_image_t* _sg_lookup_image(const _sg_pools_t* p, uint32_t img_id) {
12306     if (SG_INVALID_ID != img_id) {
12307         _sg_image_t* img = _sg_image_at(p, img_id);
12308         if (img->slot.id == img_id) {
12309             return img;
12310         }
12311     }
12312     return 0;
12313 }
12314 
_sg_lookup_shader(const _sg_pools_t * p,uint32_t shd_id)12315 _SOKOL_PRIVATE _sg_shader_t* _sg_lookup_shader(const _sg_pools_t* p, uint32_t shd_id) {
12316     SOKOL_ASSERT(p);
12317     if (SG_INVALID_ID != shd_id) {
12318         _sg_shader_t* shd = _sg_shader_at(p, shd_id);
12319         if (shd->slot.id == shd_id) {
12320             return shd;
12321         }
12322     }
12323     return 0;
12324 }
12325 
_sg_lookup_pipeline(const _sg_pools_t * p,uint32_t pip_id)12326 _SOKOL_PRIVATE _sg_pipeline_t* _sg_lookup_pipeline(const _sg_pools_t* p, uint32_t pip_id) {
12327     SOKOL_ASSERT(p);
12328     if (SG_INVALID_ID != pip_id) {
12329         _sg_pipeline_t* pip = _sg_pipeline_at(p, pip_id);
12330         if (pip->slot.id == pip_id) {
12331             return pip;
12332         }
12333     }
12334     return 0;
12335 }
12336 
_sg_lookup_pass(const _sg_pools_t * p,uint32_t pass_id)12337 _SOKOL_PRIVATE _sg_pass_t* _sg_lookup_pass(const _sg_pools_t* p, uint32_t pass_id) {
12338     SOKOL_ASSERT(p);
12339     if (SG_INVALID_ID != pass_id) {
12340         _sg_pass_t* pass = _sg_pass_at(p, pass_id);
12341         if (pass->slot.id == pass_id) {
12342             return pass;
12343         }
12344     }
12345     return 0;
12346 }
12347 
_sg_lookup_context(const _sg_pools_t * p,uint32_t ctx_id)12348 _SOKOL_PRIVATE _sg_context_t* _sg_lookup_context(const _sg_pools_t* p, uint32_t ctx_id) {
12349     SOKOL_ASSERT(p);
12350     if (SG_INVALID_ID != ctx_id) {
12351         _sg_context_t* ctx = _sg_context_at(p, ctx_id);
12352         if (ctx->slot.id == ctx_id) {
12353             return ctx;
12354         }
12355     }
12356     return 0;
12357 }
12358 
_sg_destroy_all_resources(_sg_pools_t * p,uint32_t ctx_id)12359 _SOKOL_PRIVATE void _sg_destroy_all_resources(_sg_pools_t* p, uint32_t ctx_id) {
12360     /*  this is a bit dumb since it loops over all pool slots to
12361         find the occupied slots, on the other hand it is only ever
12362         executed at shutdown
12363         NOTE: ONLY EXECUTE THIS AT SHUTDOWN
12364               ...because the free queues will not be reset
12365               and the resource slots not be cleared!
12366     */
12367     for (int i = 1; i < p->buffer_pool.size; i++) {
12368         if (p->buffers[i].slot.ctx_id == ctx_id) {
12369             sg_resource_state state = p->buffers[i].slot.state;
12370             if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) {
12371                 _sg_destroy_buffer(&p->buffers[i]);
12372             }
12373         }
12374     }
12375     for (int i = 1; i < p->image_pool.size; i++) {
12376         if (p->images[i].slot.ctx_id == ctx_id) {
12377             sg_resource_state state = p->images[i].slot.state;
12378             if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) {
12379                 _sg_destroy_image(&p->images[i]);
12380             }
12381         }
12382     }
12383     for (int i = 1; i < p->shader_pool.size; i++) {
12384         if (p->shaders[i].slot.ctx_id == ctx_id) {
12385             sg_resource_state state = p->shaders[i].slot.state;
12386             if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) {
12387                 _sg_destroy_shader(&p->shaders[i]);
12388             }
12389         }
12390     }
12391     for (int i = 1; i < p->pipeline_pool.size; i++) {
12392         if (p->pipelines[i].slot.ctx_id == ctx_id) {
12393             sg_resource_state state = p->pipelines[i].slot.state;
12394             if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) {
12395                 _sg_destroy_pipeline(&p->pipelines[i]);
12396             }
12397         }
12398     }
12399     for (int i = 1; i < p->pass_pool.size; i++) {
12400         if (p->passes[i].slot.ctx_id == ctx_id) {
12401             sg_resource_state state = p->passes[i].slot.state;
12402             if ((state == SG_RESOURCESTATE_VALID) || (state == SG_RESOURCESTATE_FAILED)) {
12403                 _sg_destroy_pass(&p->passes[i]);
12404             }
12405         }
12406     }
12407 }
12408 
12409 /*== VALIDATION LAYER ========================================================*/
12410 #if defined(SOKOL_DEBUG)
12411 /* return a human readable string for an _sg_validate_error */
/* map a validation error code to a human-readable message string
   (debug builds only, used by _sg_validate for SOKOL_LOG output) */
_SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error_t err) {
    switch (err) {
        /* buffer creation validation errors */
        case _SG_VALIDATE_BUFFERDESC_CANARY:        return "sg_buffer_desc not initialized";
        case _SG_VALIDATE_BUFFERDESC_SIZE:          return "sg_buffer_desc.size cannot be 0";
        case _SG_VALIDATE_BUFFERDESC_CONTENT:       return "immutable buffers must be initialized with content (sg_buffer_desc.content)";
        case _SG_VALIDATE_BUFFERDESC_NO_CONTENT:    return "dynamic/stream usage buffers cannot be initialized with content";

        /* image creation validation errors */
        case _SG_VALIDATE_IMAGEDESC_CANARY:             return "sg_image_desc not initialized";
        case _SG_VALIDATE_IMAGEDESC_WIDTH:              return "sg_image_desc.width must be > 0";
        case _SG_VALIDATE_IMAGEDESC_HEIGHT:             return "sg_image_desc.height must be > 0";
        case _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT:     return "invalid pixel format for render-target image";
        case _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT:  return "invalid pixel format for non-render-target image";
        case _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT:     return "non-render-target images cannot be multisampled";
        case _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT: return "MSAA not supported for this pixel format";
        case _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE:       return "render target images must be SG_USAGE_IMMUTABLE";
        case _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT:      return "render target images cannot be initialized with content";
        case _SG_VALIDATE_IMAGEDESC_CONTENT:            return "missing or invalid content for immutable image";
        case _SG_VALIDATE_IMAGEDESC_NO_CONTENT:         return "dynamic/stream usage images cannot be initialized with content";

        /* shader creation */
        case _SG_VALIDATE_SHADERDESC_CANARY:                return "sg_shader_desc not initialized";
        case _SG_VALIDATE_SHADERDESC_SOURCE:                return "shader source code required";
        case _SG_VALIDATE_SHADERDESC_BYTECODE:              return "shader byte code required";
        case _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE:    return "shader source or byte code required";
        case _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE:      return "shader byte code length (in bytes) required";
        case _SG_VALIDATE_SHADERDESC_NO_CONT_UBS:           return "shader uniform blocks must occupy continuous slots";
        case _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS:    return "uniform block members must occupy continuous slots";
        case _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS:         return "GL backend requires uniform block member declarations";
        case _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME:        return "uniform block member name missing";
        case _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH:      return "size of uniform block members doesn't match uniform block size";
        case _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS:          return "shader images must occupy continuous slots";
        case _SG_VALIDATE_SHADERDESC_IMG_NAME:              return "GL backend requires uniform block member names";
        case _SG_VALIDATE_SHADERDESC_ATTR_NAMES:            return "GLES2 backend requires vertex attribute names";
        case _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS:        return "D3D11 backend requires vertex attribute semantics";
        case _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG:  return "vertex attribute name/semantic string too long (max len 16)";

        /* pipeline creation */
        case _SG_VALIDATE_PIPELINEDESC_CANARY:          return "sg_pipeline_desc not initialized";
        case _SG_VALIDATE_PIPELINEDESC_SHADER:          return "sg_pipeline_desc.shader missing or invalid";
        case _SG_VALIDATE_PIPELINEDESC_NO_ATTRS:        return "sg_pipeline_desc.layout.attrs is empty or not continuous";
        case _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4:  return "sg_pipeline_desc.layout.buffers[].stride must be multiple of 4";
        case _SG_VALIDATE_PIPELINEDESC_ATTR_NAME:       return "GLES2/WebGL missing vertex attribute name in shader";
        case _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS:  return "D3D11 missing vertex attribute semantics in shader";

        /* pass creation */
        case _SG_VALIDATE_PASSDESC_CANARY:                  return "sg_pass_desc not initialized";
        case _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS:           return "sg_pass_desc.color_attachments[0] must be valid";
        case _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS:      return "color attachments must occupy continuous slots";
        case _SG_VALIDATE_PASSDESC_IMAGE:                   return "pass attachment image is not valid";
        case _SG_VALIDATE_PASSDESC_MIPLEVEL:                return "pass attachment mip level is bigger than image has mipmaps";
        case _SG_VALIDATE_PASSDESC_FACE:                    return "pass attachment image is cubemap, but face index is too big";
        case _SG_VALIDATE_PASSDESC_LAYER:                   return "pass attachment image is array texture, but layer index is too big";
        case _SG_VALIDATE_PASSDESC_SLICE:                   return "pass attachment image is 3d texture, but slice value is too big";
        case _SG_VALIDATE_PASSDESC_IMAGE_NO_RT:             return "pass attachment image must be render targets";
        case _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS:      return "all pass color attachment images must have the same pixel format";
        case _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT:   return "pass color-attachment images must have a renderable pixel format";
        case _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT:   return "pass depth-attachment image must have depth pixel format";
        case _SG_VALIDATE_PASSDESC_IMAGE_SIZES:             return "all pass attachments must have the same size";
        case _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS:     return "all pass attachments must have the same sample count";

        /* sg_begin_pass */
        case _SG_VALIDATE_BEGINPASS_PASS:       return "sg_begin_pass: pass must be valid";
        case _SG_VALIDATE_BEGINPASS_IMAGE:      return "sg_begin_pass: one or more attachment images are not valid";

        /* sg_apply_pipeline */
        case _SG_VALIDATE_APIP_PIPELINE_VALID_ID:   return "sg_apply_pipeline: invalid pipeline id provided";
        case _SG_VALIDATE_APIP_PIPELINE_EXISTS:     return "sg_apply_pipeline: pipeline object no longer alive";
        case _SG_VALIDATE_APIP_PIPELINE_VALID:      return "sg_apply_pipeline: pipeline object not in valid state";
        case _SG_VALIDATE_APIP_SHADER_EXISTS:       return "sg_apply_pipeline: shader object no longer alive";
        case _SG_VALIDATE_APIP_SHADER_VALID:        return "sg_apply_pipeline: shader object not in valid state";
        case _SG_VALIDATE_APIP_ATT_COUNT:           return "sg_apply_pipeline: color_attachment_count in pipeline doesn't match number of pass color attachments";
        case _SG_VALIDATE_APIP_COLOR_FORMAT:        return "sg_apply_pipeline: color_format in pipeline doesn't match pass color attachment pixel format";
        case _SG_VALIDATE_APIP_DEPTH_FORMAT:        return "sg_apply_pipeline: depth_format in pipeline doesn't match pass depth attachment pixel format";
        case _SG_VALIDATE_APIP_SAMPLE_COUNT:        return "sg_apply_pipeline: MSAA sample count in pipeline doesn't match render pass attachment sample count";

        /* sg_apply_bindings */
        case _SG_VALIDATE_ABND_PIPELINE:            return "sg_apply_bindings: must be called after sg_apply_pipeline";
        case _SG_VALIDATE_ABND_PIPELINE_EXISTS:     return "sg_apply_bindings: currently applied pipeline object no longer alive";
        case _SG_VALIDATE_ABND_PIPELINE_VALID:      return "sg_apply_bindings: currently applied pipeline object not in valid state";
        case _SG_VALIDATE_ABND_VBS:                 return "sg_apply_bindings: number of vertex buffers doesn't match number of pipeline vertex layouts";
        case _SG_VALIDATE_ABND_VB_EXISTS:           return "sg_apply_bindings: vertex buffer no longer alive";
        case _SG_VALIDATE_ABND_VB_TYPE:             return "sg_apply_bindings: buffer in vertex buffer slot is not a SG_BUFFERTYPE_VERTEXBUFFER";
        case _SG_VALIDATE_ABND_VB_OVERFLOW:         return "sg_apply_bindings: buffer in vertex buffer slot is overflown";
        case _SG_VALIDATE_ABND_NO_IB:               return "sg_apply_bindings: pipeline object defines indexed rendering, but no index buffer provided";
        case _SG_VALIDATE_ABND_IB:                  return "sg_apply_bindings: pipeline object defines non-indexed rendering, but index buffer provided";
        case _SG_VALIDATE_ABND_IB_EXISTS:           return "sg_apply_bindings: index buffer no longer alive";
        case _SG_VALIDATE_ABND_IB_TYPE:             return "sg_apply_bindings: buffer in index buffer slot is not a SG_BUFFERTYPE_INDEXBUFFER";
        case _SG_VALIDATE_ABND_IB_OVERFLOW:         return "sg_apply_bindings: buffer in index buffer slot is overflown";
        case _SG_VALIDATE_ABND_VS_IMGS:             return "sg_apply_bindings: vertex shader image count doesn't match sg_shader_desc";
        case _SG_VALIDATE_ABND_VS_IMG_EXISTS:       return "sg_apply_bindings: vertex shader image no longer alive";
        case _SG_VALIDATE_ABND_VS_IMG_TYPES:        return "sg_apply_bindings: one or more vertex shader image types don't match sg_shader_desc";
        case _SG_VALIDATE_ABND_FS_IMGS:             return "sg_apply_bindings: fragment shader image count doesn't match sg_shader_desc";
        case _SG_VALIDATE_ABND_FS_IMG_EXISTS:       return "sg_apply_bindings: fragment shader image no longer alive";
        case _SG_VALIDATE_ABND_FS_IMG_TYPES:        return "sg_apply_bindings: one or more fragment shader image types don't match sg_shader_desc";

        /* sg_apply_uniforms */
        case _SG_VALIDATE_AUB_NO_PIPELINE:      return "sg_apply_uniforms: must be called after sg_apply_pipeline()";
        case _SG_VALIDATE_AUB_NO_UB_AT_SLOT:    return "sg_apply_uniforms: no uniform block declaration at this shader stage UB slot";
        case _SG_VALIDATE_AUB_SIZE:             return "sg_apply_uniforms: data size exceeds declared uniform block size";

        /* sg_update_buffer */
        case _SG_VALIDATE_UPDATEBUF_USAGE:      return "sg_update_buffer: cannot update immutable buffer";
        case _SG_VALIDATE_UPDATEBUF_SIZE:       return "sg_update_buffer: update size is bigger than buffer size";
        case _SG_VALIDATE_UPDATEBUF_ONCE:       return "sg_update_buffer: only one update allowed per buffer and frame";
        case _SG_VALIDATE_UPDATEBUF_APPEND:     return "sg_update_buffer: cannot call sg_update_buffer and sg_append_buffer in same frame";

        /* sg_append_buffer */
        case _SG_VALIDATE_APPENDBUF_USAGE:      return "sg_append_buffer: cannot append to immutable buffer";
        case _SG_VALIDATE_APPENDBUF_SIZE:       return "sg_append_buffer: overall appended size is bigger than buffer size";
        case _SG_VALIDATE_APPENDBUF_UPDATE:     return "sg_append_buffer: cannot call sg_append_buffer and sg_update_buffer in same frame";

        /* sg_update_image */
        case _SG_VALIDATE_UPDIMG_USAGE:         return "sg_update_image: cannot update immutable image";
        case _SG_VALIDATE_UPDIMG_NOTENOUGHDATA: return "sg_update_image: not enough subimage data provided";
        case _SG_VALIDATE_UPDIMG_SIZE:          return "sg_update_image: provided subimage data size too big";
        case _SG_VALIDATE_UPDIMG_COMPRESSED:    return "sg_update_image: cannot update images with compressed format";
        case _SG_VALIDATE_UPDIMG_ONCE:          return "sg_update_image: only one update allowed per image and frame";

        default: return "unknown validation error";
    }
}
12535 #endif /* defined(SOKOL_DEBUG) */
12536 
12537 /*-- validation checks -------------------------------------------------------*/
12538 #if defined(SOKOL_DEBUG)
/* reset the recorded validation error state before running a batch of
   validation checks (presumably invoked through the SOKOL_VALIDATE_BEGIN()
   macro — confirm macro definition) */
_SOKOL_PRIVATE void _sg_validate_begin(void) {
    _sg.validate_error = _SG_VALIDATE_SUCCESS;
}
12542 
_sg_validate(bool cond,_sg_validate_error_t err)12543 _SOKOL_PRIVATE void _sg_validate(bool cond, _sg_validate_error_t err) {
12544     if (!cond) {
12545         _sg.validate_error = err;
12546         SOKOL_LOG(_sg_validate_string(err));
12547     }
12548 }
12549 
_sg_validate_end(void)12550 _SOKOL_PRIVATE bool _sg_validate_end(void) {
12551     if (_sg.validate_error != _SG_VALIDATE_SUCCESS) {
12552         #if !defined(SOKOL_VALIDATE_NON_FATAL)
12553             SOKOL_LOG("^^^^  VALIDATION FAILED, TERMINATING ^^^^");
12554             SOKOL_ASSERT(false);
12555         #endif
12556         return false;
12557     }
12558     else {
12559         return true;
12560     }
12561 }
12562 #endif
12563 
_sg_validate_buffer_desc(const sg_buffer_desc * desc)12564 _SOKOL_PRIVATE bool _sg_validate_buffer_desc(const sg_buffer_desc* desc) {
12565     #if !defined(SOKOL_DEBUG)
12566         _SOKOL_UNUSED(desc);
12567         return true;
12568     #else
12569         SOKOL_ASSERT(desc);
12570         SOKOL_VALIDATE_BEGIN();
12571         SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY);
12572         SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY);
12573         SOKOL_VALIDATE(desc->size > 0, _SG_VALIDATE_BUFFERDESC_SIZE);
12574         bool injected = (0 != desc->gl_buffers[0]) ||
12575                         (0 != desc->mtl_buffers[0]) ||
12576                         (0 != desc->d3d11_buffer) ||
12577                         (0 != desc->wgpu_buffer);
12578         if (!injected && (desc->usage == SG_USAGE_IMMUTABLE)) {
12579             SOKOL_VALIDATE(0 != desc->content, _SG_VALIDATE_BUFFERDESC_CONTENT);
12580         }
12581         else {
12582             SOKOL_VALIDATE(0 == desc->content, _SG_VALIDATE_BUFFERDESC_NO_CONTENT);
12583         }
12584         return SOKOL_VALIDATE_END();
12585     #endif
12586 }
12587 
/* validate an sg_image_desc before image creation; compiled out (always true)
   without SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) {
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(desc);
        return true;
    #else
        SOKOL_ASSERT(desc);
        SOKOL_VALIDATE_BEGIN();
        /* canaries are non-zero when the struct was not zero-initialized */
        SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY);
        SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY);
        SOKOL_VALIDATE(desc->width > 0, _SG_VALIDATE_IMAGEDESC_WIDTH);
        SOKOL_VALIDATE(desc->height > 0, _SG_VALIDATE_IMAGEDESC_HEIGHT);
        const sg_pixel_format fmt = desc->pixel_format;
        const sg_usage usage = desc->usage;
        /* 'injected' means the caller provided a native backend texture handle */
        const bool injected = (0 != desc->gl_textures[0]) ||
                              (0 != desc->mtl_textures[0]) ||
                              (0 != desc->d3d11_texture) ||
                              (0 != desc->wgpu_texture);
        if (desc->render_target) {
            /* render-target images: pixel format must be renderable,
               usage must be immutable, and no initial content allowed */
            SOKOL_ASSERT(((int)fmt >= 0) && ((int)fmt < _SG_PIXELFORMAT_NUM));
            SOKOL_VALIDATE(_sg.formats[fmt].render, _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT);
            /* on GLES2, sample count for render targets is completely ignored */
            #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
            if (!_sg.gl.gles2) {
            #endif
                /* MSAA render targets require both the feature flag and
                   per-format MSAA support */
                if (desc->sample_count > 1) {
                    SOKOL_VALIDATE(_sg.features.msaa_render_targets && _sg.formats[fmt].msaa, _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT);
                }
            #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
            }
            #endif
            SOKOL_VALIDATE(usage == SG_USAGE_IMMUTABLE, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE);
            SOKOL_VALIDATE(desc->content.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT);
        }
        else {
            /* regular (non-render-target) images: single-sampled only,
               and depth formats are reserved for render targets */
            SOKOL_VALIDATE(desc->sample_count <= 1, _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT);
            const bool valid_nonrt_fmt = !_sg_is_valid_rendertarget_depth_format(fmt);
            SOKOL_VALIDATE(valid_nonrt_fmt, _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT);
            /* FIXME: should use the same "expected size" computation as in _sg_validate_update_image() here */
            if (!injected && (usage == SG_USAGE_IMMUTABLE)) {
                /* immutable, non-injected images must provide content data
                   for every face and mipmap (only presence/size>0 is checked,
                   not the exact byte count — see FIXME above) */
                const int num_faces = desc->type == SG_IMAGETYPE_CUBE ? 6:1;
                const int num_mips = desc->num_mipmaps;
                for (int face_index = 0; face_index < num_faces; face_index++) {
                    for (int mip_index = 0; mip_index < num_mips; mip_index++) {
                        const bool has_data = desc->content.subimage[face_index][mip_index].ptr != 0;
                        const bool has_size = desc->content.subimage[face_index][mip_index].size > 0;
                        SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDESC_CONTENT);
                    }
                }
            }
            else {
                /* dynamic/stream-usage or injected images must not provide
                   any initial content (checked over the full subimage table) */
                for (int face_index = 0; face_index < SG_CUBEFACE_NUM; face_index++) {
                    for (int mip_index = 0; mip_index < SG_MAX_MIPMAPS; mip_index++) {
                        const bool no_data = 0 == desc->content.subimage[face_index][mip_index].ptr;
                        const bool no_size = 0 == desc->content.subimage[face_index][mip_index].size;
                        SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_NO_CONTENT);
                    }
                }
            }
        }
        return SOKOL_VALIDATE_END();
    #endif
}
12650 
/* validate an sg_shader_desc before shader creation; requirements differ per
   backend (source vs byte code, attribute names vs semantics); compiled out
   (always true) without SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) {
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(desc);
        return true;
    #else
        SOKOL_ASSERT(desc);
        SOKOL_VALIDATE_BEGIN();
        /* canaries are non-zero when the struct was not zero-initialized */
        SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY);
        SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY);
        /* note: only the first attribute slot is checked here; per-attribute
           checks against the pipeline happen in _sg_validate_pipeline_desc() */
        #if defined(SOKOL_GLES2)
            SOKOL_VALIDATE(0 != desc->attrs[0].name, _SG_VALIDATE_SHADERDESC_ATTR_NAMES);
        #elif defined(SOKOL_D3D11)
            SOKOL_VALIDATE(0 != desc->attrs[0].sem_name, _SG_VALIDATE_SHADERDESC_ATTR_SEMANTICS);
        #endif
        #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
            /* on GL, must provide shader source code */
            SOKOL_VALIDATE(0 != desc->vs.source, _SG_VALIDATE_SHADERDESC_SOURCE);
            SOKOL_VALIDATE(0 != desc->fs.source, _SG_VALIDATE_SHADERDESC_SOURCE);
        #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11)
            /* on Metal or D3D11, must provide shader source code or byte code */
            SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE);
            SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE);
        #elif defined(SOKOL_WGPU)
            /* on WGPU byte code must be provided */
            SOKOL_VALIDATE((0 != desc->vs.byte_code), _SG_VALIDATE_SHADERDESC_BYTECODE);
            SOKOL_VALIDATE((0 != desc->fs.byte_code), _SG_VALIDATE_SHADERDESC_BYTECODE);
        #else
            /* Dummy Backend, don't require source or bytecode */
        #endif
        /* attribute name/semantic strings must fit the internal string buffers */
        for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) {
            if (desc->attrs[i].name) {
                SOKOL_VALIDATE(strlen(desc->attrs[i].name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG);
            }
            if (desc->attrs[i].sem_name) {
                SOKOL_VALIDATE(strlen(desc->attrs[i].sem_name) < _SG_STRING_SIZE, _SG_VALIDATE_SHADERDESC_ATTR_STRING_TOO_LONG);
            }
        }
        /* if shader byte code, the size must also be provided */
        if (0 != desc->vs.byte_code) {
            SOKOL_VALIDATE(desc->vs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE);
        }
        if (0 != desc->fs.byte_code) {
            SOKOL_VALIDATE(desc->fs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE);
        }
        /* per-stage checks (0 == vertex stage, 1 == fragment stage) */
        for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
            const sg_shader_stage_desc* stage_desc = (stage_index == 0)? &desc->vs : &desc->fs;
            /* uniform block slots must be occupied without gaps */
            bool uniform_blocks_continuous = true;
            for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
                const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
                if (ub_desc->size > 0) {
                    SOKOL_VALIDATE(uniform_blocks_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UBS);
                    /* uniform members must also be declared without gaps */
                    bool uniforms_continuous = true;
                    int uniform_offset = 0;
                    int num_uniforms = 0;
                    for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) {
                        const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index];
                        if (u_desc->type != SG_UNIFORMTYPE_INVALID) {
                            SOKOL_VALIDATE(uniforms_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS);
                            /* GLES requires uniform names for glGetUniformLocation-style lookup */
                            #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
                            SOKOL_VALIDATE(u_desc->name, _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME);
                            #endif
                            const int array_count = u_desc->array_count;
                            uniform_offset += _sg_uniform_size(u_desc->type, array_count);
                            num_uniforms++;
                        }
                        else {
                            uniforms_continuous = false;
                        }
                    }
                    /* on GL backends the summed member sizes must exactly match
                       the declared uniform block size */
                    #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3)
                    SOKOL_VALIDATE(uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH);
                    SOKOL_VALIDATE(num_uniforms > 0, _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS);
                    #endif
                }
                else {
                    uniform_blocks_continuous = false;
                }
            }
            /* image slots must be occupied without gaps */
            bool images_continuous = true;
            for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) {
                const sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
                if (img_desc->type != _SG_IMAGETYPE_DEFAULT) {
                    SOKOL_VALIDATE(images_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS);
                    /* GLES2 requires sampler names for uniform location lookup */
                    #if defined(SOKOL_GLES2)
                    SOKOL_VALIDATE(img_desc->name, _SG_VALIDATE_SHADERDESC_IMG_NAME);
                    #endif
                }
                else {
                    images_continuous = false;
                }
            }
        }
        return SOKOL_VALIDATE_END();
    #endif
}
12746 
_sg_validate_pipeline_desc(const sg_pipeline_desc * desc)12747 _SOKOL_PRIVATE bool _sg_validate_pipeline_desc(const sg_pipeline_desc* desc) {
12748     #if !defined(SOKOL_DEBUG)
12749         _SOKOL_UNUSED(desc);
12750         return true;
12751     #else
12752         SOKOL_ASSERT(desc);
12753         SOKOL_VALIDATE_BEGIN();
12754         SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY);
12755         SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY);
12756         SOKOL_VALIDATE(desc->shader.id != SG_INVALID_ID, _SG_VALIDATE_PIPELINEDESC_SHADER);
12757         const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id);
12758         SOKOL_VALIDATE(shd && shd->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PIPELINEDESC_SHADER);
12759         for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) {
12760             const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[buf_index];
12761             if (l_desc->stride == 0) {
12762                 continue;
12763             }
12764             SOKOL_VALIDATE((l_desc->stride & 3) == 0, _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4);
12765         }
12766         SOKOL_VALIDATE(desc->layout.attrs[0].format != SG_VERTEXFORMAT_INVALID, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS);
12767         bool attrs_cont = true;
12768         for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
12769             const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index];
12770             if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
12771                 attrs_cont = false;
12772                 continue;
12773             }
12774             SOKOL_VALIDATE(attrs_cont, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS);
12775             SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS);
12776             #if defined(SOKOL_GLES2)
12777             /* on GLES2, vertex attribute names must be provided */
12778             SOKOL_VALIDATE(!_sg_strempty(&shd->gl.attrs[attr_index].name), _SG_VALIDATE_PIPELINEDESC_ATTR_NAME);
12779             #elif defined(SOKOL_D3D11)
12780             /* on D3D11, semantic names (and semantic indices) must be provided */
12781             SOKOL_VALIDATE(!_sg_strempty(&shd->d3d11.attrs[attr_index].sem_name), _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS);
12782             #endif
12783         }
12784         return SOKOL_VALIDATE_END();
12785     #endif
12786 }
12787 
_sg_validate_pass_desc(const sg_pass_desc * desc)12788 _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) {
12789     #if !defined(SOKOL_DEBUG)
12790         _SOKOL_UNUSED(desc);
12791         return true;
12792     #else
12793         SOKOL_ASSERT(desc);
12794         SOKOL_VALIDATE_BEGIN();
12795         SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PASSDESC_CANARY);
12796         SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PASSDESC_CANARY);
12797         bool atts_cont = true;
12798         sg_pixel_format color_fmt = SG_PIXELFORMAT_NONE;
12799         int width = -1, height = -1, sample_count = -1;
12800         for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) {
12801             const sg_attachment_desc* att = &desc->color_attachments[att_index];
12802             if (att->image.id == SG_INVALID_ID) {
12803                 SOKOL_VALIDATE(att_index > 0, _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS);
12804                 atts_cont = false;
12805                 continue;
12806             }
12807             SOKOL_VALIDATE(atts_cont, _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS);
12808             const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id);
12809             SOKOL_VALIDATE(img && img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE);
12810             SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL);
12811             if (img->cmn.type == SG_IMAGETYPE_CUBE) {
12812                 SOKOL_VALIDATE(att->face < 6, _SG_VALIDATE_PASSDESC_FACE);
12813             }
12814             else if (img->cmn.type == SG_IMAGETYPE_ARRAY) {
12815                 SOKOL_VALIDATE(att->layer < img->cmn.depth, _SG_VALIDATE_PASSDESC_LAYER);
12816             }
12817             else if (img->cmn.type == SG_IMAGETYPE_3D) {
12818                 SOKOL_VALIDATE(att->slice < img->cmn.depth, _SG_VALIDATE_PASSDESC_SLICE);
12819             }
12820             SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT);
12821             if (att_index == 0) {
12822                 color_fmt = img->cmn.pixel_format;
12823                 width = img->cmn.width >> att->mip_level;
12824                 height = img->cmn.height >> att->mip_level;
12825                 sample_count = img->cmn.sample_count;
12826             }
12827             else {
12828                 SOKOL_VALIDATE(img->cmn.pixel_format == color_fmt, _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS);
12829                 SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES);
12830                 SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES);
12831                 SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS);
12832             }
12833             SOKOL_VALIDATE(_sg_is_valid_rendertarget_color_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT);
12834         }
12835         if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) {
12836             const sg_attachment_desc* att = &desc->depth_stencil_attachment;
12837             const _sg_image_t* img = _sg_lookup_image(&_sg.pools, att->image.id);
12838             SOKOL_VALIDATE(img && img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE);
12839             SOKOL_VALIDATE(att->mip_level < img->cmn.num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL);
12840             if (img->cmn.type == SG_IMAGETYPE_CUBE) {
12841                 SOKOL_VALIDATE(att->face < 6, _SG_VALIDATE_PASSDESC_FACE);
12842             }
12843             else if (img->cmn.type == SG_IMAGETYPE_ARRAY) {
12844                 SOKOL_VALIDATE(att->layer < img->cmn.depth, _SG_VALIDATE_PASSDESC_LAYER);
12845             }
12846             else if (img->cmn.type == SG_IMAGETYPE_3D) {
12847                 SOKOL_VALIDATE(att->slice < img->cmn.depth, _SG_VALIDATE_PASSDESC_SLICE);
12848             }
12849             SOKOL_VALIDATE(img->cmn.render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT);
12850             SOKOL_VALIDATE(width == img->cmn.width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES);
12851             SOKOL_VALIDATE(height == img->cmn.height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES);
12852             SOKOL_VALIDATE(sample_count == img->cmn.sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS);
12853             SOKOL_VALIDATE(_sg_is_valid_rendertarget_depth_format(img->cmn.pixel_format), _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT);
12854         }
12855         return SOKOL_VALIDATE_END();
12856     #endif
12857 }
12858 
_sg_validate_begin_pass(_sg_pass_t * pass)12859 _SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass_t* pass) {
12860     #if !defined(SOKOL_DEBUG)
12861         _SOKOL_UNUSED(pass);
12862         return true;
12863     #else
12864         SOKOL_VALIDATE_BEGIN();
12865         SOKOL_VALIDATE(pass->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_PASS);
12866 
12867         for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
12868             const _sg_attachment_t* att = &pass->cmn.color_atts[i];
12869             const _sg_image_t* img = _sg_pass_color_image(pass, i);
12870             if (img) {
12871                 SOKOL_VALIDATE(img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE);
12872                 SOKOL_VALIDATE(img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE);
12873             }
12874         }
12875         const _sg_image_t* ds_img = _sg_pass_ds_image(pass);
12876         if (ds_img) {
12877             const _sg_attachment_t* att = &pass->cmn.ds_att;
12878             SOKOL_VALIDATE(ds_img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE);
12879             SOKOL_VALIDATE(ds_img->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE);
12880         }
12881         return SOKOL_VALIDATE_END();
12882     #endif
12883 }
12884 
/* validate sg_apply_pipeline(): the pipeline and its shader must be alive and
   valid, and the pipeline's attachment configuration (count, color/depth
   formats, sample count) must match the currently active pass (or the default
   framebuffer when no pass is active); compiled out without SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_apply_pipeline(sg_pipeline pip_id) {
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(pip_id);
        return true;
    #else
        SOKOL_VALIDATE_BEGIN();
        /* the pipeline object must be alive and valid */
        SOKOL_VALIDATE(pip_id.id != SG_INVALID_ID, _SG_VALIDATE_APIP_PIPELINE_VALID_ID);
        const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
        SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_APIP_PIPELINE_EXISTS);
        if (!pip) {
            /* early out: the remaining checks all dereference pip */
            return SOKOL_VALIDATE_END();
        }
        SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_PIPELINE_VALID);
        /* the pipeline's shader must be alive and valid */
        SOKOL_ASSERT(pip->shader);
        SOKOL_VALIDATE(pip->shader->slot.id == pip->cmn.shader_id.id, _SG_VALIDATE_APIP_SHADER_EXISTS);
        SOKOL_VALIDATE(pip->shader->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_APIP_SHADER_VALID);
        /* check that pipeline attributes match current pass attributes */
        const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, _sg.cur_pass.id);
        if (pass) {
            /* an offscreen pass */
            /* NOTE(review): att_img is dereferenced below without a NULL
               check — presumably a pass in the pool always has a valid color
               attachment 0; confirm against pass creation */
            const _sg_image_t* att_img = _sg_pass_color_image(pass, 0);
            SOKOL_VALIDATE(pip->cmn.color_attachment_count == pass->cmn.num_color_atts, _SG_VALIDATE_APIP_ATT_COUNT);
            SOKOL_VALIDATE(pip->cmn.color_format == att_img->cmn.pixel_format, _SG_VALIDATE_APIP_COLOR_FORMAT);
            SOKOL_VALIDATE(pip->cmn.sample_count == att_img->cmn.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT);
            const _sg_image_t* att_dsimg = _sg_pass_ds_image(pass);
            if (att_dsimg) {
                SOKOL_VALIDATE(pip->cmn.depth_format == att_dsimg->cmn.pixel_format, _SG_VALIDATE_APIP_DEPTH_FORMAT);
            }
            else {
                /* pass has no depth-stencil attachment: pipeline must not expect one */
                SOKOL_VALIDATE(pip->cmn.depth_format == SG_PIXELFORMAT_NONE, _SG_VALIDATE_APIP_DEPTH_FORMAT);
            }
        }
        else {
            /* default pass: compare against the default framebuffer attributes
               provided in sg_desc.context */
            SOKOL_VALIDATE(pip->cmn.color_attachment_count == 1, _SG_VALIDATE_APIP_ATT_COUNT);
            SOKOL_VALIDATE(pip->cmn.color_format == _sg.desc.context.color_format, _SG_VALIDATE_APIP_COLOR_FORMAT);
            SOKOL_VALIDATE(pip->cmn.depth_format == _sg.desc.context.depth_format, _SG_VALIDATE_APIP_DEPTH_FORMAT);
            SOKOL_VALIDATE(pip->cmn.sample_count == _sg.desc.context.sample_count, _SG_VALIDATE_APIP_SAMPLE_COUNT);
        }
        return SOKOL_VALIDATE_END();
    #endif
}
12929 
/* validate sg_apply_bindings(): the provided vertex/index buffers and
   vertex/fragment-stage images must match what the currently applied pipeline
   and its shader expect; compiled out without SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_apply_bindings(const sg_bindings* bindings) {
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(bindings);
        return true;
    #else
        SOKOL_VALIDATE_BEGIN();

        /* a pipeline object must have been applied */
        SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_ABND_PIPELINE);
        const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id);
        SOKOL_VALIDATE(pip != 0, _SG_VALIDATE_ABND_PIPELINE_EXISTS);
        if (!pip) {
            /* early out: the remaining checks all dereference pip */
            return SOKOL_VALIDATE_END();
        }
        SOKOL_VALIDATE(pip->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_ABND_PIPELINE_VALID);
        SOKOL_ASSERT(pip->shader && (pip->cmn.shader_id.id == pip->shader->slot.id));

        /* has expected vertex buffers, and vertex buffers still exist */
        for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) {
            if (bindings->vertex_buffers[i].id != SG_INVALID_ID) {
                SOKOL_VALIDATE(pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS);
                /* buffers in vertex-buffer-slots must be of type SG_BUFFERTYPE_VERTEXBUFFER */
                const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id);
                SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_VB_EXISTS);
                if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) {
                    SOKOL_VALIDATE(SG_BUFFERTYPE_VERTEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_VB_TYPE);
                    /* a buffer whose sg_append_buffer() calls overflowed this
                       frame must not be bound */
                    SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_VB_OVERFLOW);
                }
            }
            else {
                /* vertex buffer provided in a slot which has no vertex layout in pipeline */
                SOKOL_VALIDATE(!pip->cmn.vertex_layout_valid[i], _SG_VALIDATE_ABND_VBS);
            }
        }

        /* index buffer expected or not, and index buffer still exists */
        if (pip->cmn.index_type == SG_INDEXTYPE_NONE) {
            /* pipeline defines non-indexed rendering, but index buffer provided */
            SOKOL_VALIDATE(bindings->index_buffer.id == SG_INVALID_ID, _SG_VALIDATE_ABND_IB);
        }
        else {
            /* pipeline defines indexed rendering, but no index buffer provided */
            SOKOL_VALIDATE(bindings->index_buffer.id != SG_INVALID_ID, _SG_VALIDATE_ABND_NO_IB);
        }
        if (bindings->index_buffer.id != SG_INVALID_ID) {
            /* buffer in index-buffer-slot must be of type SG_BUFFERTYPE_INDEXBUFFER */
            const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id);
            SOKOL_VALIDATE(buf != 0, _SG_VALIDATE_ABND_IB_EXISTS);
            if (buf && buf->slot.state == SG_RESOURCESTATE_VALID) {
                SOKOL_VALIDATE(SG_BUFFERTYPE_INDEXBUFFER == buf->cmn.type, _SG_VALIDATE_ABND_IB_TYPE);
                SOKOL_VALIDATE(!buf->cmn.append_overflow, _SG_VALIDATE_ABND_IB_OVERFLOW);
            }
        }

        /* has expected vertex shader images: bound images and the shader's
           declared image slots must match exactly (type included) */
        for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) {
            _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_VS];
            if (bindings->vs_images[i].id != SG_INVALID_ID) {
                SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_VS_IMGS);
                const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id);
                SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_VS_IMG_EXISTS);
                if (img && img->slot.state == SG_RESOURCESTATE_VALID) {
                    SOKOL_VALIDATE(img->cmn.type == stage->images[i].type, _SG_VALIDATE_ABND_VS_IMG_TYPES);
                }
            }
            else {
                /* no image bound: the shader must not declare one at this slot */
                SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_VS_IMGS);
            }
        }

        /* has expected fragment shader images (same checks as vertex stage) */
        for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) {
            _sg_shader_stage_t* stage = &pip->shader->cmn.stage[SG_SHADERSTAGE_FS];
            if (bindings->fs_images[i].id != SG_INVALID_ID) {
                SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ABND_FS_IMGS);
                const _sg_image_t* img = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id);
                SOKOL_VALIDATE(img != 0, _SG_VALIDATE_ABND_FS_IMG_EXISTS);
                if (img && img->slot.state == SG_RESOURCESTATE_VALID) {
                    SOKOL_VALIDATE(img->cmn.type == stage->images[i].type, _SG_VALIDATE_ABND_FS_IMG_TYPES);
                }
            }
            else {
                SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ABND_FS_IMGS);
            }
        }
        return SOKOL_VALIDATE_END();
    #endif
}
13018 
/* validate sg_apply_uniforms(): a pipeline must be applied, the target shader
   stage must declare a uniform block at ub_index, and the provided data must
   not exceed the declared block size; 'data' itself is never inspected.
   Compiled out without SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_apply_uniforms(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) {
    _SOKOL_UNUSED(data);
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(stage_index);
        _SOKOL_UNUSED(ub_index);
        _SOKOL_UNUSED(num_bytes);
        return true;
    #else
        /* stage/slot range violations are programming errors, so they assert
           instead of producing validation errors */
        SOKOL_ASSERT((stage_index == SG_SHADERSTAGE_VS) || (stage_index == SG_SHADERSTAGE_FS));
        SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
        SOKOL_VALIDATE_BEGIN();
        SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_AUB_NO_PIPELINE);
        const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id);
        SOKOL_ASSERT(pip && (pip->slot.id == _sg.cur_pipeline.id));
        SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id));

        /* check that there is a uniform block at 'stage' and 'ub_index' */
        const _sg_shader_stage_t* stage = &pip->shader->cmn.stage[stage_index];
        SOKOL_VALIDATE(ub_index < stage->num_uniform_blocks, _SG_VALIDATE_AUB_NO_UB_AT_SLOT);

        /* check that the provided data size doesn't exceed the uniform block size */
        SOKOL_VALIDATE(num_bytes <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE);

        return SOKOL_VALIDATE_END();
    #endif
}
13045 
_sg_validate_update_buffer(const _sg_buffer_t * buf,const void * data,int size)13046 _SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer_t* buf, const void* data, int size) {
13047     #if !defined(SOKOL_DEBUG)
13048         _SOKOL_UNUSED(buf);
13049         _SOKOL_UNUSED(data);
13050         _SOKOL_UNUSED(size);
13051         return true;
13052     #else
13053         SOKOL_ASSERT(buf && data);
13054         SOKOL_VALIDATE_BEGIN();
13055         SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDATEBUF_USAGE);
13056         SOKOL_VALIDATE(buf->cmn.size >= size, _SG_VALIDATE_UPDATEBUF_SIZE);
13057         SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_ONCE);
13058         SOKOL_VALIDATE(buf->cmn.append_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_APPEND);
13059         return SOKOL_VALIDATE_END();
13060     #endif
13061 }
13062 
_sg_validate_append_buffer(const _sg_buffer_t * buf,const void * data,int size)13063 _SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer_t* buf, const void* data, int size) {
13064     #if !defined(SOKOL_DEBUG)
13065         _SOKOL_UNUSED(buf);
13066         _SOKOL_UNUSED(data);
13067         _SOKOL_UNUSED(size);
13068         return true;
13069     #else
13070         SOKOL_ASSERT(buf && data);
13071         SOKOL_VALIDATE_BEGIN();
13072         SOKOL_VALIDATE(buf->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_APPENDBUF_USAGE);
13073         SOKOL_VALIDATE(buf->cmn.size >= (buf->cmn.append_pos+size), _SG_VALIDATE_APPENDBUF_SIZE);
13074         SOKOL_VALIDATE(buf->cmn.update_frame_index != _sg.frame_index, _SG_VALIDATE_APPENDBUF_UPDATE);
13075         return SOKOL_VALIDATE_END();
13076     #endif
13077 }
13078 
/* validate sg_update_image(): the image must not be immutable or compressed,
   must not have been updated this frame, and every face/mip level must
   provide data that fits the expected surface size; compiled out without
   SOKOL_DEBUG */
_SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image_t* img, const sg_image_content* data) {
    #if !defined(SOKOL_DEBUG)
        _SOKOL_UNUSED(img);
        _SOKOL_UNUSED(data);
        return true;
    #else
        SOKOL_ASSERT(img && data);
        SOKOL_VALIDATE_BEGIN();
        SOKOL_VALIDATE(img->cmn.usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDIMG_USAGE);
        /* at most one sg_update_image() per image per frame */
        SOKOL_VALIDATE(img->cmn.upd_frame_index != _sg.frame_index, _SG_VALIDATE_UPDIMG_ONCE);
        SOKOL_VALIDATE(!_sg_is_compressed_pixel_format(img->cmn.pixel_format), _SG_VALIDATE_UPDIMG_COMPRESSED);
        const int num_faces = (img->cmn.type == SG_IMAGETYPE_CUBE) ? 6 : 1;
        const int num_mips = img->cmn.num_mipmaps;
        for (int face_index = 0; face_index < num_faces; face_index++) {
            for (int mip_index = 0; mip_index < num_mips; mip_index++) {
                /* every face/mip level needs a data pointer */
                SOKOL_VALIDATE(0 != data->subimage[face_index][mip_index].ptr, _SG_VALIDATE_UPDIMG_NOTENOUGHDATA);
                /* mip dimensions are halved per level, clamped to 1 */
                const int mip_width = _sg_max(img->cmn.width >> mip_index, 1);
                const int mip_height = _sg_max(img->cmn.height >> mip_index, 1);
                const int bytes_per_slice = _sg_surface_pitch(img->cmn.pixel_format, mip_width, mip_height, 1);
                /* NOTE(review): multiplies by the full (un-mipped) depth for
                   every mip level — presumably intentional for array images,
                   but for 3D images the depth should shrink per mip; confirm */
                const int expected_size = bytes_per_slice * img->cmn.depth;
                SOKOL_VALIDATE(data->subimage[face_index][mip_index].size <= expected_size, _SG_VALIDATE_UPDIMG_SIZE);
            }
        }
        return SOKOL_VALIDATE_END();
    #endif
}
13105 
13106 /*== fill in desc default values =============================================*/
_sg_buffer_desc_defaults(const sg_buffer_desc * desc)13107 _SOKOL_PRIVATE sg_buffer_desc _sg_buffer_desc_defaults(const sg_buffer_desc* desc) {
13108     sg_buffer_desc def = *desc;
13109     def.type = _sg_def(def.type, SG_BUFFERTYPE_VERTEXBUFFER);
13110     def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE);
13111     return def;
13112 }
13113 
/* return a copy of an sg_image_desc with zero-initialized members
   replaced by their default values
*/
_SOKOL_PRIVATE sg_image_desc _sg_image_desc_defaults(const sg_image_desc* desc) {
    sg_image_desc def = *desc;
    def.type = _sg_def(def.type, SG_IMAGETYPE_2D);
    def.depth = _sg_def(def.depth, 1);
    def.num_mipmaps = _sg_def(def.num_mipmaps, 1);
    def.usage = _sg_def(def.usage, SG_USAGE_IMMUTABLE);
    /* render targets default to the swapchain's pixel format and MSAA
       sample count, regular images to RGBA8 without MSAA
    */
    if (desc->render_target) {
        def.pixel_format = _sg_def(def.pixel_format, _sg.desc.context.color_format);
        def.sample_count = _sg_def(def.sample_count, _sg.desc.context.sample_count);
    }
    else {
        def.pixel_format = _sg_def(def.pixel_format, SG_PIXELFORMAT_RGBA8);
        def.sample_count = _sg_def(def.sample_count, 1);
    }
    /* sampler state defaults */
    def.min_filter = _sg_def(def.min_filter, SG_FILTER_NEAREST);
    def.mag_filter = _sg_def(def.mag_filter, SG_FILTER_NEAREST);
    def.wrap_u = _sg_def(def.wrap_u, SG_WRAP_REPEAT);
    def.wrap_v = _sg_def(def.wrap_v, SG_WRAP_REPEAT);
    def.wrap_w = _sg_def(def.wrap_w, SG_WRAP_REPEAT);
    def.border_color = _sg_def(def.border_color, SG_BORDERCOLOR_OPAQUE_BLACK);
    def.max_anisotropy = _sg_def(def.max_anisotropy, 1);
    def.max_lod = _sg_def_flt(def.max_lod, FLT_MAX);
    return def;
}
13138 
/* return a copy of an sg_shader_desc with zero-initialized members
   replaced by their default values
*/
_SOKOL_PRIVATE sg_shader_desc _sg_shader_desc_defaults(const sg_shader_desc* desc) {
    sg_shader_desc def = *desc;
    /* default entry point name differs per backend (Metal prefixes with '_') */
    #if defined(SOKOL_METAL)
        def.vs.entry = _sg_def(def.vs.entry, "_main");
        def.fs.entry = _sg_def(def.fs.entry, "_main");
    #else
        def.vs.entry = _sg_def(def.vs.entry, "main");
        def.fs.entry = _sg_def(def.fs.entry, "main");
    #endif
    /* D3D11: default shader-model targets, only relevant when compiling from source */
    #if defined(SOKOL_D3D11)
        if (def.vs.source) {
            def.vs.d3d11_target = _sg_def(def.vs.d3d11_target, "vs_4_0");
        }
        if (def.fs.source) {
            def.fs.d3d11_target = _sg_def(def.fs.d3d11_target, "ps_4_0");
        }
    #endif
    for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) {
        sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &def.vs : &def.fs;
        for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) {
            sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index];
            /* a zero size marks the end of the used uniform-block slots */
            if (0 == ub_desc->size) {
                break;
            }
            for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) {
                sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index];
                /* an INVALID type marks the end of the used uniform slots */
                if (u_desc->type == SG_UNIFORMTYPE_INVALID) {
                    break;
                }
                /* non-array uniforms are treated as arrays of length 1 */
                u_desc->array_count = _sg_def(u_desc->array_count, 1);
            }
        }
        for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) {
            sg_shader_image_desc* img_desc = &stage_desc->images[img_index];
            /* a DEFAULT image type marks the end of the used image slots */
            if (img_desc->type == _SG_IMAGETYPE_DEFAULT) {
                break;
            }
            img_desc->sampler_type = _sg_def(img_desc->sampler_type, SG_SAMPLERTYPE_FLOAT);
        }
    }
    return def;
}
13181 
/* return a copy of an sg_pipeline_desc with zero-initialized members
   replaced by their default values, and with vertex-layout offsets and
   strides computed where the caller left them at zero
*/
_SOKOL_PRIVATE sg_pipeline_desc _sg_pipeline_desc_defaults(const sg_pipeline_desc* desc) {
    sg_pipeline_desc def = *desc;

    def.primitive_type = _sg_def(def.primitive_type, SG_PRIMITIVETYPE_TRIANGLES);
    def.index_type = _sg_def(def.index_type, SG_INDEXTYPE_NONE);

    /* depth-stencil state: defaults are 'keep everything, always pass' */
    def.depth_stencil.stencil_front.fail_op = _sg_def(def.depth_stencil.stencil_front.fail_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_front.depth_fail_op = _sg_def(def.depth_stencil.stencil_front.depth_fail_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_front.pass_op = _sg_def(def.depth_stencil.stencil_front.pass_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_front.compare_func = _sg_def(def.depth_stencil.stencil_front.compare_func, SG_COMPAREFUNC_ALWAYS);
    def.depth_stencil.stencil_back.fail_op = _sg_def(def.depth_stencil.stencil_back.fail_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_back.depth_fail_op = _sg_def(def.depth_stencil.stencil_back.depth_fail_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_back.pass_op = _sg_def(def.depth_stencil.stencil_back.pass_op, SG_STENCILOP_KEEP);
    def.depth_stencil.stencil_back.compare_func = _sg_def(def.depth_stencil.stencil_back.compare_func, SG_COMPAREFUNC_ALWAYS);
    def.depth_stencil.depth_compare_func = _sg_def(def.depth_stencil.depth_compare_func, SG_COMPAREFUNC_ALWAYS);

    /* blend state: defaults are 'blending disabled' (ONE/ZERO, ADD) */
    def.blend.src_factor_rgb = _sg_def(def.blend.src_factor_rgb, SG_BLENDFACTOR_ONE);
    def.blend.dst_factor_rgb = _sg_def(def.blend.dst_factor_rgb, SG_BLENDFACTOR_ZERO);
    def.blend.op_rgb = _sg_def(def.blend.op_rgb, SG_BLENDOP_ADD);
    def.blend.src_factor_alpha = _sg_def(def.blend.src_factor_alpha, SG_BLENDFACTOR_ONE);
    def.blend.dst_factor_alpha = _sg_def(def.blend.dst_factor_alpha, SG_BLENDFACTOR_ZERO);
    def.blend.op_alpha = _sg_def(def.blend.op_alpha, SG_BLENDOP_ADD);
    /* SG_COLORMASK_NONE is a special non-zero sentinel for 'write no channels',
       since a zero-initialized mask must default to RGBA
    */
    if (def.blend.color_write_mask == SG_COLORMASK_NONE) {
        def.blend.color_write_mask = 0;
    }
    else {
        def.blend.color_write_mask = (uint8_t) _sg_def((sg_color_mask)def.blend.color_write_mask, SG_COLORMASK_RGBA);
    }
    def.blend.color_attachment_count = _sg_def(def.blend.color_attachment_count, 1);
    def.blend.color_format = _sg_def(def.blend.color_format, _sg.desc.context.color_format);
    def.blend.depth_format = _sg_def(def.blend.depth_format, _sg.desc.context.depth_format);

    def.rasterizer.cull_mode = _sg_def(def.rasterizer.cull_mode, SG_CULLMODE_NONE);
    def.rasterizer.face_winding = _sg_def(def.rasterizer.face_winding, SG_FACEWINDING_CW);
    def.rasterizer.sample_count = _sg_def(def.rasterizer.sample_count, _sg.desc.context.sample_count);

    /* per-buffer step defaults for every buffer slot referenced by an attribute */
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index];
        /* an INVALID format marks the end of the used attribute slots */
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
        sg_buffer_layout_desc* b_desc = &def.layout.buffers[a_desc->buffer_index];
        b_desc->step_func = _sg_def(b_desc->step_func, SG_VERTEXSTEP_PER_VERTEX);
        b_desc->step_rate = _sg_def(b_desc->step_rate, 1);
    }

    /* resolve vertex layout strides and offsets */
    int auto_offset[SG_MAX_SHADERSTAGE_BUFFERS];
    memset(auto_offset, 0, sizeof(auto_offset));
    bool use_auto_offset = true;
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        /* to use computed offsets, *all* attr offsets must be 0 */
        if (def.layout.attrs[attr_index].offset != 0) {
            use_auto_offset = false;
        }
    }
    for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) {
        sg_vertex_attr_desc* a_desc = &def.layout.attrs[attr_index];
        if (a_desc->format == SG_VERTEXFORMAT_INVALID) {
            break;
        }
        SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS));
        if (use_auto_offset) {
            a_desc->offset = auto_offset[a_desc->buffer_index];
        }
        /* auto_offset keeps accumulating even when explicit offsets are used,
           because it doubles as the auto-computed stride below
        */
        auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format);
    }
    /* compute vertex strides if needed */
    for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) {
        sg_buffer_layout_desc* l_desc = &def.layout.buffers[buf_index];
        if (l_desc->stride == 0) {
            l_desc->stride = auto_offset[buf_index];
        }
    }

    return def;
}
13260 
_sg_pass_desc_defaults(const sg_pass_desc * desc)13261 _SOKOL_PRIVATE sg_pass_desc _sg_pass_desc_defaults(const sg_pass_desc* desc) {
13262     /* FIXME: no values to replace in sg_pass_desc? */
13263     sg_pass_desc def = *desc;
13264     return def;
13265 }
13266 
13267 /*== allocate/initialize resource private functions ==========================*/
_sg_alloc_buffer(void)13268 _SOKOL_PRIVATE sg_buffer _sg_alloc_buffer(void) {
13269     sg_buffer res;
13270     int slot_index = _sg_pool_alloc_index(&_sg.pools.buffer_pool);
13271     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13272         res.id = _sg_slot_alloc(&_sg.pools.buffer_pool, &_sg.pools.buffers[slot_index].slot, slot_index);
13273     }
13274     else {
13275         /* pool is exhausted */
13276         res.id = SG_INVALID_ID;
13277     }
13278     return res;
13279 }
13280 
_sg_alloc_image(void)13281 _SOKOL_PRIVATE sg_image _sg_alloc_image(void) {
13282     sg_image res;
13283     int slot_index = _sg_pool_alloc_index(&_sg.pools.image_pool);
13284     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13285         res.id = _sg_slot_alloc(&_sg.pools.image_pool, &_sg.pools.images[slot_index].slot, slot_index);
13286     }
13287     else {
13288         /* pool is exhausted */
13289         res.id = SG_INVALID_ID;
13290     }
13291     return res;
13292 }
13293 
_sg_alloc_shader(void)13294 _SOKOL_PRIVATE sg_shader _sg_alloc_shader(void) {
13295     sg_shader res;
13296     int slot_index = _sg_pool_alloc_index(&_sg.pools.shader_pool);
13297     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13298         res.id = _sg_slot_alloc(&_sg.pools.shader_pool, &_sg.pools.shaders[slot_index].slot, slot_index);
13299     }
13300     else {
13301         /* pool is exhausted */
13302         res.id = SG_INVALID_ID;
13303     }
13304     return res;
13305 }
13306 
_sg_alloc_pipeline(void)13307 _SOKOL_PRIVATE sg_pipeline _sg_alloc_pipeline(void) {
13308     sg_pipeline res;
13309     int slot_index = _sg_pool_alloc_index(&_sg.pools.pipeline_pool);
13310     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13311         res.id =_sg_slot_alloc(&_sg.pools.pipeline_pool, &_sg.pools.pipelines[slot_index].slot, slot_index);
13312     }
13313     else {
13314         /* pool is exhausted */
13315         res.id = SG_INVALID_ID;
13316     }
13317     return res;
13318 }
13319 
_sg_alloc_pass(void)13320 _SOKOL_PRIVATE sg_pass _sg_alloc_pass(void) {
13321     sg_pass res;
13322     int slot_index = _sg_pool_alloc_index(&_sg.pools.pass_pool);
13323     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13324         res.id = _sg_slot_alloc(&_sg.pools.pass_pool, &_sg.pools.passes[slot_index].slot, slot_index);
13325     }
13326     else {
13327         /* pool is exhausted */
13328         res.id = SG_INVALID_ID;
13329     }
13330     return res;
13331 }
13332 
_sg_init_buffer(sg_buffer buf_id,const sg_buffer_desc * desc)13333 _SOKOL_PRIVATE void _sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) {
13334     SOKOL_ASSERT(buf_id.id != SG_INVALID_ID && desc);
13335     _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
13336     SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC);
13337     buf->slot.ctx_id = _sg.active_context.id;
13338     if (_sg_validate_buffer_desc(desc)) {
13339         buf->slot.state = _sg_create_buffer(buf, desc);
13340     }
13341     else {
13342         buf->slot.state = SG_RESOURCESTATE_FAILED;
13343     }
13344     SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID)||(buf->slot.state == SG_RESOURCESTATE_FAILED));
13345 }
13346 
_sg_init_image(sg_image img_id,const sg_image_desc * desc)13347 _SOKOL_PRIVATE void _sg_init_image(sg_image img_id, const sg_image_desc* desc) {
13348     SOKOL_ASSERT(img_id.id != SG_INVALID_ID && desc);
13349     _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
13350     SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC);
13351     img->slot.ctx_id = _sg.active_context.id;
13352     if (_sg_validate_image_desc(desc)) {
13353         img->slot.state = _sg_create_image(img, desc);
13354     }
13355     else {
13356         img->slot.state = SG_RESOURCESTATE_FAILED;
13357     }
13358     SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID)||(img->slot.state == SG_RESOURCESTATE_FAILED));
13359 }
13360 
_sg_init_shader(sg_shader shd_id,const sg_shader_desc * desc)13361 _SOKOL_PRIVATE void _sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) {
13362     SOKOL_ASSERT(shd_id.id != SG_INVALID_ID && desc);
13363     _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id);
13364     SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC);
13365     shd->slot.ctx_id = _sg.active_context.id;
13366     if (_sg_validate_shader_desc(desc)) {
13367         shd->slot.state = _sg_create_shader(shd, desc);
13368     }
13369     else {
13370         shd->slot.state = SG_RESOURCESTATE_FAILED;
13371     }
13372     SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID)||(shd->slot.state == SG_RESOURCESTATE_FAILED));
13373 }
13374 
_sg_init_pipeline(sg_pipeline pip_id,const sg_pipeline_desc * desc)13375 _SOKOL_PRIVATE void _sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) {
13376     SOKOL_ASSERT(pip_id.id != SG_INVALID_ID && desc);
13377     _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
13378     SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC);
13379     pip->slot.ctx_id = _sg.active_context.id;
13380     if (_sg_validate_pipeline_desc(desc)) {
13381         _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id);
13382         SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_VALID);
13383         pip->slot.state = _sg_create_pipeline(pip, shd, desc);
13384     }
13385     else {
13386         pip->slot.state = SG_RESOURCESTATE_FAILED;
13387     }
13388     SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID)||(pip->slot.state == SG_RESOURCESTATE_FAILED));
13389 }
13390 
/* validate the desc, resolve all attachment image pointers and create
   the backend pass resource for an allocated pass handle; ends in
   either VALID or FAILED state
*/
_SOKOL_PRIVATE void _sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) {
    SOKOL_ASSERT(pass_id.id != SG_INVALID_ID && desc);
    _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
    SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC);
    pass->slot.ctx_id = _sg.active_context.id;
    if (_sg_validate_pass_desc(desc)) {
        /* lookup pass attachment image pointers
           (array layout: [0..SG_MAX_COLOR_ATTACHMENTS-1] = color attachments,
           [SG_MAX_COLOR_ATTACHMENTS] = optional depth-stencil attachment)
        */
        _sg_image_t* att_imgs[SG_MAX_COLOR_ATTACHMENTS + 1];
        for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) {
            if (desc->color_attachments[i].image.id) {
                att_imgs[i] = _sg_lookup_image(&_sg.pools, desc->color_attachments[i].image.id);
                /* attachment images must already be in VALID state */
                SOKOL_ASSERT(att_imgs[i] && att_imgs[i]->slot.state == SG_RESOURCESTATE_VALID);
            }
            else {
                att_imgs[i] = 0;
            }
        }
        const int ds_att_index = SG_MAX_COLOR_ATTACHMENTS;
        if (desc->depth_stencil_attachment.image.id) {
            att_imgs[ds_att_index] = _sg_lookup_image(&_sg.pools, desc->depth_stencil_attachment.image.id);
            SOKOL_ASSERT(att_imgs[ds_att_index] && att_imgs[ds_att_index]->slot.state == SG_RESOURCESTATE_VALID);
        }
        else {
            att_imgs[ds_att_index] = 0;
        }
        pass->slot.state = _sg_create_pass(pass, att_imgs, desc);
    }
    else {
        pass->slot.state = SG_RESOURCESTATE_FAILED;
    }
    SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID)||(pass->slot.state == SG_RESOURCESTATE_FAILED));
}
13423 
13424 /*== PUBLIC API FUNCTIONS ====================================================*/
13425 
/* _SG_CLEAR(type, item): zero-initialize 'item'; on the Metal backend an
   assignment-based clear is used instead of memset (per the original
   comment, this keeps the clear ARC-compatible when the state struct
   holds ObjC object references)
*/
#if defined(SOKOL_METAL)
    // this is ARC compatible
    #if defined(__cplusplus)
        #define _SG_CLEAR(type, item) { item = { }; }
    #else
        #define _SG_CLEAR(type, item) { item = (type) { 0 }; }
    #endif
#else
    #define _SG_CLEAR(type, item) { memset(&item, 0, sizeof(item)); }
#endif
13436 
/* sg_setup(): initialize sokol-gfx; must be called once before any other
   sokol-gfx function; the desc canaries guard against passing an
   uninitialized or differently-versioned sg_desc struct
*/
SOKOL_API_IMPL void sg_setup(const sg_desc* desc) {
    SOKOL_ASSERT(desc);
    SOKOL_ASSERT((desc->_start_canary == 0) && (desc->_end_canary == 0));
    _SG_CLEAR(_sg_state_t, _sg);
    _sg.desc = *desc;

    /* replace zero-init items with their default values
        NOTE: on WebGPU, the default color pixel format MUST be provided,
        cannot be a default compile-time constant.
    */
    #if defined(SOKOL_WGPU)
        SOKOL_ASSERT(SG_PIXELFORMAT_NONE != _sg.desc.context.color_format);
    #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11)
        _sg.desc.context.color_format = _sg_def(_sg.desc.context.color_format, SG_PIXELFORMAT_BGRA8);
    #else
        _sg.desc.context.color_format = _sg_def(_sg.desc.context.color_format, SG_PIXELFORMAT_RGBA8);
    #endif
    _sg.desc.context.depth_format = _sg_def(_sg.desc.context.depth_format, SG_PIXELFORMAT_DEPTH_STENCIL);
    _sg.desc.context.sample_count = _sg_def(_sg.desc.context.sample_count, 1);
    /* resource pool sizes and buffer sizes fall back to compile-time defaults */
    _sg.desc.buffer_pool_size = _sg_def(_sg.desc.buffer_pool_size, _SG_DEFAULT_BUFFER_POOL_SIZE);
    _sg.desc.image_pool_size = _sg_def(_sg.desc.image_pool_size, _SG_DEFAULT_IMAGE_POOL_SIZE);
    _sg.desc.shader_pool_size = _sg_def(_sg.desc.shader_pool_size, _SG_DEFAULT_SHADER_POOL_SIZE);
    _sg.desc.pipeline_pool_size = _sg_def(_sg.desc.pipeline_pool_size, _SG_DEFAULT_PIPELINE_POOL_SIZE);
    _sg.desc.pass_pool_size = _sg_def(_sg.desc.pass_pool_size, _SG_DEFAULT_PASS_POOL_SIZE);
    _sg.desc.context_pool_size = _sg_def(_sg.desc.context_pool_size, _SG_DEFAULT_CONTEXT_POOL_SIZE);
    _sg.desc.uniform_buffer_size = _sg_def(_sg.desc.uniform_buffer_size, _SG_DEFAULT_UB_SIZE);
    _sg.desc.staging_buffer_size = _sg_def(_sg.desc.staging_buffer_size, _SG_DEFAULT_STAGING_SIZE);
    _sg.desc.sampler_cache_size = _sg_def(_sg.desc.sampler_cache_size, _SG_DEFAULT_SAMPLER_CACHE_CAPACITY);

    _sg_setup_pools(&_sg.pools, &_sg.desc);
    /* frame index starts at 1, presumably so a zero-initialized per-resource
       frame index never matches the current frame (see the update/append
       validators) — TODO(review): confirm
    */
    _sg.frame_index = 1;
    _sg_setup_backend(&_sg.desc);
    _sg.valid = true;
    sg_setup_context();
}
13472 
sg_shutdown(void)13473 SOKOL_API_IMPL void sg_shutdown(void) {
13474     /* can only delete resources for the currently set context here, if multiple
13475     contexts are used, the app code must take care of properly releasing them
13476     (since only the app code can switch between 3D-API contexts)
13477     */
13478     if (_sg.active_context.id != SG_INVALID_ID) {
13479         _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, _sg.active_context.id);
13480         if (ctx) {
13481             _sg_destroy_all_resources(&_sg.pools, _sg.active_context.id);
13482             _sg_destroy_context(ctx);
13483         }
13484     }
13485     _sg_discard_backend();
13486     _sg_discard_pools(&_sg.pools);
13487     _sg.valid = false;
13488 }
13489 
/* return true while sokol-gfx is initialized (between sg_setup() and sg_shutdown()) */
SOKOL_API_IMPL bool sg_isvalid(void) {
    return _sg.valid;
}
13493 
/* return a copy of the sg_desc used in sg_setup(), with defaults resolved */
SOKOL_API_IMPL sg_desc sg_query_desc(void) {
    SOKOL_ASSERT(_sg.valid);
    return _sg.desc;
}
13498 
/* return the active rendering backend identifier */
SOKOL_API_IMPL sg_backend sg_query_backend(void) {
    SOKOL_ASSERT(_sg.valid);
    return _sg.backend;
}
13503 
/* return the optional-feature flags detected for the active backend */
SOKOL_API_IMPL sg_features sg_query_features(void) {
    SOKOL_ASSERT(_sg.valid);
    return _sg.features;
}
13508 
/* return the runtime resource limits detected for the active backend */
SOKOL_API_IMPL sg_limits sg_query_limits(void) {
    SOKOL_ASSERT(_sg.valid);
    return _sg.limits;
}
13513 
sg_query_pixelformat(sg_pixel_format fmt)13514 SOKOL_API_IMPL sg_pixelformat_info sg_query_pixelformat(sg_pixel_format fmt) {
13515     SOKOL_ASSERT(_sg.valid);
13516     int fmt_index = (int) fmt;
13517     SOKOL_ASSERT((fmt_index > SG_PIXELFORMAT_NONE) && (fmt_index < _SG_PIXELFORMAT_NUM));
13518     return _sg.formats[fmt_index];
13519 }
13520 
sg_setup_context(void)13521 SOKOL_API_IMPL sg_context sg_setup_context(void) {
13522     SOKOL_ASSERT(_sg.valid);
13523     sg_context res;
13524     int slot_index = _sg_pool_alloc_index(&_sg.pools.context_pool);
13525     if (_SG_INVALID_SLOT_INDEX != slot_index) {
13526         res.id = _sg_slot_alloc(&_sg.pools.context_pool, &_sg.pools.contexts[slot_index].slot, slot_index);
13527         _sg_context_t* ctx = _sg_context_at(&_sg.pools, res.id);
13528         ctx->slot.state = _sg_create_context(ctx);
13529         SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_VALID);
13530         _sg_activate_context(ctx);
13531     }
13532     else {
13533         /* pool is exhausted */
13534         res.id = SG_INVALID_ID;
13535     }
13536     _sg.active_context = res;
13537     return res;
13538 }
13539 
sg_discard_context(sg_context ctx_id)13540 SOKOL_API_IMPL void sg_discard_context(sg_context ctx_id) {
13541     SOKOL_ASSERT(_sg.valid);
13542     _sg_destroy_all_resources(&_sg.pools, ctx_id.id);
13543     _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id);
13544     if (ctx) {
13545         _sg_destroy_context(ctx);
13546         _sg_reset_context(ctx);
13547         _sg_pool_free_index(&_sg.pools.context_pool, _sg_slot_index(ctx_id.id));
13548     }
13549     _sg.active_context.id = SG_INVALID_ID;
13550     _sg_activate_context(0);
13551 }
13552 
sg_activate_context(sg_context ctx_id)13553 SOKOL_API_IMPL void sg_activate_context(sg_context ctx_id) {
13554     SOKOL_ASSERT(_sg.valid);
13555     _sg.active_context = ctx_id;
13556     _sg_context_t* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id);
13557     /* NOTE: ctx can be 0 here if the context is no longer valid */
13558     _sg_activate_context(ctx);
13559 }
13560 
sg_install_trace_hooks(const sg_trace_hooks * trace_hooks)13561 SOKOL_API_IMPL sg_trace_hooks sg_install_trace_hooks(const sg_trace_hooks* trace_hooks) {
13562     SOKOL_ASSERT(_sg.valid);
13563     SOKOL_ASSERT(trace_hooks);
13564     _SOKOL_UNUSED(trace_hooks);
13565     #if defined(SOKOL_TRACE_HOOKS)
13566         sg_trace_hooks old_hooks = _sg.hooks;
13567         _sg.hooks = *trace_hooks;
13568     #else
13569         static sg_trace_hooks old_hooks;
13570         SOKOL_LOG("sg_install_trace_hooks() called, but SG_TRACE_HOOKS is not defined!");
13571     #endif
13572     return old_hooks;
13573 }
13574 
sg_alloc_buffer(void)13575 SOKOL_API_IMPL sg_buffer sg_alloc_buffer(void) {
13576     SOKOL_ASSERT(_sg.valid);
13577     sg_buffer res = _sg_alloc_buffer();
13578     _SG_TRACE_ARGS(alloc_buffer, res);
13579     return res;
13580 }
13581 
sg_alloc_image(void)13582 SOKOL_API_IMPL sg_image sg_alloc_image(void) {
13583     SOKOL_ASSERT(_sg.valid);
13584     sg_image res = _sg_alloc_image();
13585     _SG_TRACE_ARGS(alloc_image, res);
13586     return res;
13587 }
13588 
sg_alloc_shader(void)13589 SOKOL_API_IMPL sg_shader sg_alloc_shader(void) {
13590     SOKOL_ASSERT(_sg.valid);
13591     sg_shader res = _sg_alloc_shader();
13592     _SG_TRACE_ARGS(alloc_shader, res);
13593     return res;
13594 }
13595 
sg_alloc_pipeline(void)13596 SOKOL_API_IMPL sg_pipeline sg_alloc_pipeline(void) {
13597     SOKOL_ASSERT(_sg.valid);
13598     sg_pipeline res = _sg_alloc_pipeline();
13599     _SG_TRACE_ARGS(alloc_pipeline, res);
13600     return res;
13601 }
13602 
sg_alloc_pass(void)13603 SOKOL_API_IMPL sg_pass sg_alloc_pass(void) {
13604     SOKOL_ASSERT(_sg.valid);
13605     sg_pass res = _sg_alloc_pass();
13606     _SG_TRACE_ARGS(alloc_pass, res);
13607     return res;
13608 }
13609 
sg_init_buffer(sg_buffer buf_id,const sg_buffer_desc * desc)13610 SOKOL_API_IMPL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) {
13611     SOKOL_ASSERT(_sg.valid);
13612     sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc);
13613     _sg_init_buffer(buf_id, &desc_def);
13614     _SG_TRACE_ARGS(init_buffer, buf_id, &desc_def);
13615 }
13616 
sg_init_image(sg_image img_id,const sg_image_desc * desc)13617 SOKOL_API_IMPL void sg_init_image(sg_image img_id, const sg_image_desc* desc) {
13618     SOKOL_ASSERT(_sg.valid);
13619     sg_image_desc desc_def = _sg_image_desc_defaults(desc);
13620     _sg_init_image(img_id, &desc_def);
13621     _SG_TRACE_ARGS(init_image, img_id, &desc_def);
13622 }
13623 
sg_init_shader(sg_shader shd_id,const sg_shader_desc * desc)13624 SOKOL_API_IMPL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) {
13625     SOKOL_ASSERT(_sg.valid);
13626     sg_shader_desc desc_def = _sg_shader_desc_defaults(desc);
13627     _sg_init_shader(shd_id, &desc_def);
13628     _SG_TRACE_ARGS(init_shader, shd_id, &desc_def);
13629 }
13630 
sg_init_pipeline(sg_pipeline pip_id,const sg_pipeline_desc * desc)13631 SOKOL_API_IMPL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) {
13632     SOKOL_ASSERT(_sg.valid);
13633     sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc);
13634     _sg_init_pipeline(pip_id, &desc_def);
13635     _SG_TRACE_ARGS(init_pipeline, pip_id, &desc_def);
13636 }
13637 
sg_init_pass(sg_pass pass_id,const sg_pass_desc * desc)13638 SOKOL_API_IMPL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) {
13639     SOKOL_ASSERT(_sg.valid);
13640     sg_pass_desc desc_def = _sg_pass_desc_defaults(desc);
13641     _sg_init_pass(pass_id, &desc_def);
13642     _SG_TRACE_ARGS(init_pass, pass_id, &desc_def);
13643 }
13644 
13645 /*-- set allocated resource to failed state ----------------------------------*/
sg_fail_buffer(sg_buffer buf_id)13646 SOKOL_API_IMPL void sg_fail_buffer(sg_buffer buf_id) {
13647     SOKOL_ASSERT(_sg.valid);
13648     SOKOL_ASSERT(buf_id.id != SG_INVALID_ID);
13649     _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
13650     SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC);
13651     buf->slot.ctx_id = _sg.active_context.id;
13652     buf->slot.state = SG_RESOURCESTATE_FAILED;
13653     _SG_TRACE_ARGS(fail_buffer, buf_id);
13654 }
13655 
sg_fail_image(sg_image img_id)13656 SOKOL_API_IMPL void sg_fail_image(sg_image img_id) {
13657     SOKOL_ASSERT(_sg.valid);
13658     SOKOL_ASSERT(img_id.id != SG_INVALID_ID);
13659     _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
13660     SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC);
13661     img->slot.ctx_id = _sg.active_context.id;
13662     img->slot.state = SG_RESOURCESTATE_FAILED;
13663     _SG_TRACE_ARGS(fail_image, img_id);
13664 }
13665 
sg_fail_shader(sg_shader shd_id)13666 SOKOL_API_IMPL void sg_fail_shader(sg_shader shd_id) {
13667     SOKOL_ASSERT(_sg.valid);
13668     SOKOL_ASSERT(shd_id.id != SG_INVALID_ID);
13669     _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id);
13670     SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC);
13671     shd->slot.ctx_id = _sg.active_context.id;
13672     shd->slot.state = SG_RESOURCESTATE_FAILED;
13673     _SG_TRACE_ARGS(fail_shader, shd_id);
13674 }
13675 
sg_fail_pipeline(sg_pipeline pip_id)13676 SOKOL_API_IMPL void sg_fail_pipeline(sg_pipeline pip_id) {
13677     SOKOL_ASSERT(_sg.valid);
13678     SOKOL_ASSERT(pip_id.id != SG_INVALID_ID);
13679     _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
13680     SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC);
13681     pip->slot.ctx_id = _sg.active_context.id;
13682     pip->slot.state = SG_RESOURCESTATE_FAILED;
13683     _SG_TRACE_ARGS(fail_pipeline, pip_id);
13684 }
13685 
sg_fail_pass(sg_pass pass_id)13686 SOKOL_API_IMPL void sg_fail_pass(sg_pass pass_id) {
13687     SOKOL_ASSERT(_sg.valid);
13688     SOKOL_ASSERT(pass_id.id != SG_INVALID_ID);
13689     _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
13690     SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC);
13691     pass->slot.ctx_id = _sg.active_context.id;
13692     pass->slot.state = SG_RESOURCESTATE_FAILED;
13693     _SG_TRACE_ARGS(fail_pass, pass_id);
13694 }
13695 
13696 /*-- get resource state */
sg_query_buffer_state(sg_buffer buf_id)13697 SOKOL_API_IMPL sg_resource_state sg_query_buffer_state(sg_buffer buf_id) {
13698     SOKOL_ASSERT(_sg.valid);
13699     _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
13700     sg_resource_state res = buf ? buf->slot.state : SG_RESOURCESTATE_INVALID;
13701     return res;
13702 }
13703 
sg_query_image_state(sg_image img_id)13704 SOKOL_API_IMPL sg_resource_state sg_query_image_state(sg_image img_id) {
13705     SOKOL_ASSERT(_sg.valid);
13706     _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
13707     sg_resource_state res = img ? img->slot.state : SG_RESOURCESTATE_INVALID;
13708     return res;
13709 }
13710 
sg_query_shader_state(sg_shader shd_id)13711 SOKOL_API_IMPL sg_resource_state sg_query_shader_state(sg_shader shd_id) {
13712     SOKOL_ASSERT(_sg.valid);
13713     _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id);
13714     sg_resource_state res = shd ? shd->slot.state : SG_RESOURCESTATE_INVALID;
13715     return res;
13716 }
13717 
sg_query_pipeline_state(sg_pipeline pip_id)13718 SOKOL_API_IMPL sg_resource_state sg_query_pipeline_state(sg_pipeline pip_id) {
13719     SOKOL_ASSERT(_sg.valid);
13720     _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
13721     sg_resource_state res = pip ? pip->slot.state : SG_RESOURCESTATE_INVALID;
13722     return res;
13723 }
13724 
sg_query_pass_state(sg_pass pass_id)13725 SOKOL_API_IMPL sg_resource_state sg_query_pass_state(sg_pass pass_id) {
13726     SOKOL_ASSERT(_sg.valid);
13727     _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
13728     sg_resource_state res = pass ? pass->slot.state : SG_RESOURCESTATE_INVALID;
13729     return res;
13730 }
13731 
13732 /*-- allocate and initialize resource ----------------------------------------*/
sg_make_buffer(const sg_buffer_desc * desc)13733 SOKOL_API_IMPL sg_buffer sg_make_buffer(const sg_buffer_desc* desc) {
13734     SOKOL_ASSERT(_sg.valid);
13735     SOKOL_ASSERT(desc);
13736     sg_buffer_desc desc_def = _sg_buffer_desc_defaults(desc);
13737     sg_buffer buf_id = _sg_alloc_buffer();
13738     if (buf_id.id != SG_INVALID_ID) {
13739         _sg_init_buffer(buf_id, &desc_def);
13740     }
13741     else {
13742         SOKOL_LOG("buffer pool exhausted!");
13743         _SG_TRACE_NOARGS(err_buffer_pool_exhausted);
13744     }
13745     _SG_TRACE_ARGS(make_buffer, &desc_def, buf_id);
13746     return buf_id;
13747 }
13748 
sg_make_image(const sg_image_desc * desc)13749 SOKOL_API_IMPL sg_image sg_make_image(const sg_image_desc* desc) {
13750     SOKOL_ASSERT(_sg.valid);
13751     SOKOL_ASSERT(desc);
13752     sg_image_desc desc_def = _sg_image_desc_defaults(desc);
13753     sg_image img_id = _sg_alloc_image();
13754     if (img_id.id != SG_INVALID_ID) {
13755         _sg_init_image(img_id, &desc_def);
13756     }
13757     else {
13758         SOKOL_LOG("image pool exhausted!");
13759         _SG_TRACE_NOARGS(err_image_pool_exhausted);
13760     }
13761     _SG_TRACE_ARGS(make_image, &desc_def, img_id);
13762     return img_id;
13763 }
13764 
sg_make_shader(const sg_shader_desc * desc)13765 SOKOL_API_IMPL sg_shader sg_make_shader(const sg_shader_desc* desc) {
13766     SOKOL_ASSERT(_sg.valid);
13767     SOKOL_ASSERT(desc);
13768     sg_shader_desc desc_def = _sg_shader_desc_defaults(desc);
13769     sg_shader shd_id = _sg_alloc_shader();
13770     if (shd_id.id != SG_INVALID_ID) {
13771         _sg_init_shader(shd_id, &desc_def);
13772     }
13773     else {
13774         SOKOL_LOG("shader pool exhausted!");
13775         _SG_TRACE_NOARGS(err_shader_pool_exhausted);
13776     }
13777     _SG_TRACE_ARGS(make_shader, &desc_def, shd_id);
13778     return shd_id;
13779 }
13780 
sg_make_pipeline(const sg_pipeline_desc * desc)13781 SOKOL_API_IMPL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc) {
13782     SOKOL_ASSERT(_sg.valid);
13783     SOKOL_ASSERT(desc);
13784     sg_pipeline_desc desc_def = _sg_pipeline_desc_defaults(desc);
13785     sg_pipeline pip_id = _sg_alloc_pipeline();
13786     if (pip_id.id != SG_INVALID_ID) {
13787         _sg_init_pipeline(pip_id, &desc_def);
13788     }
13789     else {
13790         SOKOL_LOG("pipeline pool exhausted!");
13791         _SG_TRACE_NOARGS(err_pipeline_pool_exhausted);
13792     }
13793     _SG_TRACE_ARGS(make_pipeline, &desc_def, pip_id);
13794     return pip_id;
13795 }
13796 
sg_make_pass(const sg_pass_desc * desc)13797 SOKOL_API_IMPL sg_pass sg_make_pass(const sg_pass_desc* desc) {
13798     SOKOL_ASSERT(_sg.valid);
13799     SOKOL_ASSERT(desc);
13800     sg_pass_desc desc_def = _sg_pass_desc_defaults(desc);
13801     sg_pass pass_id = _sg_alloc_pass();
13802     if (pass_id.id != SG_INVALID_ID) {
13803         _sg_init_pass(pass_id, &desc_def);
13804     }
13805     else {
13806         SOKOL_LOG("pass pool exhausted!");
13807         _SG_TRACE_NOARGS(err_pass_pool_exhausted);
13808     }
13809     _SG_TRACE_ARGS(make_pass, &desc_def, pass_id);
13810     return pass_id;
13811 }
13812 
13813 /*-- destroy resource --------------------------------------------------------*/
sg_destroy_buffer(sg_buffer buf_id)13814 SOKOL_API_IMPL void sg_destroy_buffer(sg_buffer buf_id) {
13815     SOKOL_ASSERT(_sg.valid);
13816     _SG_TRACE_ARGS(destroy_buffer, buf_id);
13817     _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
13818     if (buf) {
13819         if (buf->slot.ctx_id == _sg.active_context.id) {
13820             _sg_destroy_buffer(buf);
13821             _sg_reset_buffer(buf);
13822             _sg_pool_free_index(&_sg.pools.buffer_pool, _sg_slot_index(buf_id.id));
13823         }
13824         else {
13825             SOKOL_LOG("sg_destroy_buffer: active context mismatch (must be same as for creation)");
13826             _SG_TRACE_NOARGS(err_context_mismatch);
13827         }
13828     }
13829 }
13830 
sg_destroy_image(sg_image img_id)13831 SOKOL_API_IMPL void sg_destroy_image(sg_image img_id) {
13832     SOKOL_ASSERT(_sg.valid);
13833     _SG_TRACE_ARGS(destroy_image, img_id);
13834     _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
13835     if (img) {
13836         if (img->slot.ctx_id == _sg.active_context.id) {
13837             _sg_destroy_image(img);
13838             _sg_reset_image(img);
13839             _sg_pool_free_index(&_sg.pools.image_pool, _sg_slot_index(img_id.id));
13840         }
13841         else {
13842             SOKOL_LOG("sg_destroy_image: active context mismatch (must be same as for creation)");
13843             _SG_TRACE_NOARGS(err_context_mismatch);
13844         }
13845     }
13846 }
13847 
sg_destroy_shader(sg_shader shd_id)13848 SOKOL_API_IMPL void sg_destroy_shader(sg_shader shd_id) {
13849     SOKOL_ASSERT(_sg.valid);
13850     _SG_TRACE_ARGS(destroy_shader, shd_id);
13851     _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id);
13852     if (shd) {
13853         if (shd->slot.ctx_id == _sg.active_context.id) {
13854             _sg_destroy_shader(shd);
13855             _sg_reset_shader(shd);
13856             _sg_pool_free_index(&_sg.pools.shader_pool, _sg_slot_index(shd_id.id));
13857         }
13858         else {
13859             SOKOL_LOG("sg_destroy_shader: active context mismatch (must be same as for creation)");
13860             _SG_TRACE_NOARGS(err_context_mismatch);
13861         }
13862     }
13863 }
13864 
sg_destroy_pipeline(sg_pipeline pip_id)13865 SOKOL_API_IMPL void sg_destroy_pipeline(sg_pipeline pip_id) {
13866     SOKOL_ASSERT(_sg.valid);
13867     _SG_TRACE_ARGS(destroy_pipeline, pip_id);
13868     _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
13869     if (pip) {
13870         if (pip->slot.ctx_id == _sg.active_context.id) {
13871             _sg_destroy_pipeline(pip);
13872             _sg_reset_pipeline(pip);
13873             _sg_pool_free_index(&_sg.pools.pipeline_pool, _sg_slot_index(pip_id.id));
13874         }
13875         else {
13876             SOKOL_LOG("sg_destroy_pipeline: active context mismatch (must be same as for creation)");
13877             _SG_TRACE_NOARGS(err_context_mismatch);
13878         }
13879     }
13880 }
13881 
sg_destroy_pass(sg_pass pass_id)13882 SOKOL_API_IMPL void sg_destroy_pass(sg_pass pass_id) {
13883     SOKOL_ASSERT(_sg.valid);
13884     _SG_TRACE_ARGS(destroy_pass, pass_id);
13885     _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
13886     if (pass) {
13887         if (pass->slot.ctx_id == _sg.active_context.id) {
13888             _sg_destroy_pass(pass);
13889             _sg_reset_pass(pass);
13890             _sg_pool_free_index(&_sg.pools.pass_pool, _sg_slot_index(pass_id.id));
13891         }
13892         else {
13893             SOKOL_LOG("sg_destroy_pass: active context mismatch (must be same as for creation)");
13894             _SG_TRACE_NOARGS(err_context_mismatch);
13895         }
13896     }
13897 }
13898 
/* begin rendering into the default framebuffer
   pass_action:   load/clear actions (canaries must be zero, i.e. struct
                  must have been zero-initialized)
   width, height: current default-framebuffer size in pixels */
SOKOL_API_IMPL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(pass_action);
    SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0));
    /* resolve default values for zero-initialized pass-action members */
    sg_pass_action pa;
    _sg_resolve_default_pass_action(pass_action, &pa);
    /* the default pass has no pass object: cur_pass is set to the invalid id,
       and a null pass pointer is handed to the backend */
    _sg.cur_pass.id = SG_INVALID_ID;
    _sg.pass_valid = true;
    _sg_begin_pass(0, &pa, width, height);
    _SG_TRACE_ARGS(begin_default_pass, pass_action, width, height);
}
13910 
sg_begin_pass(sg_pass pass_id,const sg_pass_action * pass_action)13911 SOKOL_API_IMPL void sg_begin_pass(sg_pass pass_id, const sg_pass_action* pass_action) {
13912     SOKOL_ASSERT(_sg.valid);
13913     SOKOL_ASSERT(pass_action);
13914     SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0));
13915     _sg.cur_pass = pass_id;
13916     _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
13917     if (pass && _sg_validate_begin_pass(pass)) {
13918         _sg.pass_valid = true;
13919         sg_pass_action pa;
13920         _sg_resolve_default_pass_action(pass_action, &pa);
13921         const _sg_image_t* img = _sg_pass_color_image(pass, 0);
13922         SOKOL_ASSERT(img);
13923         const int w = img->cmn.width;
13924         const int h = img->cmn.height;
13925         _sg_begin_pass(pass, &pa, w, h);
13926         _SG_TRACE_ARGS(begin_pass, pass_id, pass_action);
13927     }
13928     else {
13929         _sg.pass_valid = false;
13930         _SG_TRACE_NOARGS(err_pass_invalid);
13931     }
13932 }
13933 
sg_apply_viewport(int x,int y,int width,int height,bool origin_top_left)13934 SOKOL_API_IMPL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) {
13935     SOKOL_ASSERT(_sg.valid);
13936     if (!_sg.pass_valid) {
13937         _SG_TRACE_NOARGS(err_pass_invalid);
13938         return;
13939     }
13940     _sg_apply_viewport(x, y, width, height, origin_top_left);
13941     _SG_TRACE_ARGS(apply_viewport, x, y, width, height, origin_top_left);
13942 }
13943 
sg_apply_scissor_rect(int x,int y,int width,int height,bool origin_top_left)13944 SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) {
13945     SOKOL_ASSERT(_sg.valid);
13946     if (!_sg.pass_valid) {
13947         _SG_TRACE_NOARGS(err_pass_invalid);
13948         return;
13949     }
13950     _sg_apply_scissor_rect(x, y, width, height, origin_top_left);
13951     _SG_TRACE_ARGS(apply_scissor_rect, x, y, width, height, origin_top_left);
13952 }
13953 
/* make a pipeline the current pipeline for subsequent sg_apply_bindings /
   sg_draw calls; invalidates any previously applied bindings */
SOKOL_API_IMPL void sg_apply_pipeline(sg_pipeline pip_id) {
    SOKOL_ASSERT(_sg.valid);
    /* a new pipeline always requires fresh bindings before the next draw */
    _sg.bindings_valid = false;
    if (!_sg_validate_apply_pipeline(pip_id)) {
        _sg.next_draw_valid = false;
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    if (!_sg.pass_valid) {
        _SG_TRACE_NOARGS(err_pass_invalid);
        return;
    }
    _sg.cur_pipeline = pip_id;
    _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
    SOKOL_ASSERT(pip);
    /* the next draw only goes through if the pipeline is in VALID state */
    _sg.next_draw_valid = (SG_RESOURCESTATE_VALID == pip->slot.state);
    SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->cmn.shader_id.id));
    _sg_apply_pipeline(pip);
    _SG_TRACE_ARGS(apply_pipeline, pip_id);
}
13974 
/* apply vertex-/index-buffer and image bindings for the next draw call;
   resolves all resource ids to pool objects, tracks how many consecutive
   slots are used per category, and drops the draw (next_draw_valid=false)
   if any referenced resource is not in VALID state or has overflowed */
SOKOL_API_IMPL void sg_apply_bindings(const sg_bindings* bindings) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(bindings);
    SOKOL_ASSERT((bindings->_start_canary == 0) && (bindings->_end_canary==0));
    if (!_sg_validate_apply_bindings(bindings)) {
        _sg.next_draw_valid = false;
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    _sg.bindings_valid = true;

    _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id);
    SOKOL_ASSERT(pip);

    /* gather vertex buffers; bindings are expected in consecutive slots,
       the first zero id terminates the scan */
    _sg_buffer_t* vbs[SG_MAX_SHADERSTAGE_BUFFERS] = { 0 };
    int num_vbs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++, num_vbs++) {
        if (bindings->vertex_buffers[i].id) {
            vbs[i] = _sg_lookup_buffer(&_sg.pools, bindings->vertex_buffers[i].id);
            SOKOL_ASSERT(vbs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vbs[i]->slot.state);
            /* a buffer that overflowed in sg_append_buffer disables the draw */
            _sg.next_draw_valid &= !vbs[i]->cmn.append_overflow;
        }
        else {
            break;
        }
    }

    /* optional index buffer (zero id means non-indexed rendering) */
    _sg_buffer_t* ib = 0;
    if (bindings->index_buffer.id) {
        ib = _sg_lookup_buffer(&_sg.pools, bindings->index_buffer.id);
        SOKOL_ASSERT(ib);
        _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == ib->slot.state);
        _sg.next_draw_valid &= !ib->cmn.append_overflow;
    }

    /* gather vertex-stage images (consecutive slots, zero id terminates) */
    _sg_image_t* vs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 };
    int num_vs_imgs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_vs_imgs++) {
        if (bindings->vs_images[i].id) {
            vs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->vs_images[i].id);
            SOKOL_ASSERT(vs_imgs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vs_imgs[i]->slot.state);
        }
        else {
            break;
        }
    }

    /* gather fragment-stage images (consecutive slots, zero id terminates) */
    _sg_image_t* fs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 };
    int num_fs_imgs = 0;
    for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_fs_imgs++) {
        if (bindings->fs_images[i].id) {
            fs_imgs[i] = _sg_lookup_image(&_sg.pools, bindings->fs_images[i].id);
            SOKOL_ASSERT(fs_imgs[i]);
            _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == fs_imgs[i]->slot.state);
        }
        else {
            break;
        }
    }
    /* only forward the bindings to the backend if all resources checked out */
    if (_sg.next_draw_valid) {
        const int* vb_offsets = bindings->vertex_buffer_offsets;
        int ib_offset = bindings->index_buffer_offset;
        _sg_apply_bindings(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs);
        _SG_TRACE_ARGS(apply_bindings, bindings);
    }
    else {
        _SG_TRACE_NOARGS(err_draw_invalid);
    }
}
14046 
sg_apply_uniforms(sg_shader_stage stage,int ub_index,const void * data,int num_bytes)14047 SOKOL_API_IMPL void sg_apply_uniforms(sg_shader_stage stage, int ub_index, const void* data, int num_bytes) {
14048     SOKOL_ASSERT(_sg.valid);
14049     SOKOL_ASSERT((stage == SG_SHADERSTAGE_VS) || (stage == SG_SHADERSTAGE_FS));
14050     SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS));
14051     SOKOL_ASSERT(data && (num_bytes > 0));
14052     if (!_sg_validate_apply_uniforms(stage, ub_index, data, num_bytes)) {
14053         _sg.next_draw_valid = false;
14054         _SG_TRACE_NOARGS(err_draw_invalid);
14055         return;
14056     }
14057     if (!_sg.pass_valid) {
14058         _SG_TRACE_NOARGS(err_pass_invalid);
14059         return;
14060     }
14061     if (!_sg.next_draw_valid) {
14062         _SG_TRACE_NOARGS(err_draw_invalid);
14063     }
14064     _sg_apply_uniforms(stage, ub_index, data, num_bytes);
14065     _SG_TRACE_ARGS(apply_uniforms, stage, ub_index, data, num_bytes);
14066 }
14067 
/* issue a draw call with the currently applied pipeline, bindings and
   uniforms; silently dropped (with an error trace) if the pass, draw
   state or bindings are invalid */
SOKOL_API_IMPL void sg_draw(int base_element, int num_elements, int num_instances) {
    SOKOL_ASSERT(_sg.valid);
    #if defined(SOKOL_DEBUG)
        /* debug-only hint: drawing without a prior sg_apply_bindings call */
        if (!_sg.bindings_valid) {
            SOKOL_LOG("attempting to draw without resource bindings");
        }
    #endif
    if (!_sg.pass_valid) {
        _SG_TRACE_NOARGS(err_pass_invalid);
        return;
    }
    if (!_sg.next_draw_valid) {
        _SG_TRACE_NOARGS(err_draw_invalid);
        return;
    }
    if (!_sg.bindings_valid) {
        _SG_TRACE_NOARGS(err_bindings_invalid);
        return;
    }
    _sg_draw(base_element, num_elements, num_instances);
    _SG_TRACE_ARGS(draw, base_element, num_elements, num_instances);
}
14090 
sg_end_pass(void)14091 SOKOL_API_IMPL void sg_end_pass(void) {
14092     SOKOL_ASSERT(_sg.valid);
14093     if (!_sg.pass_valid) {
14094         _SG_TRACE_NOARGS(err_pass_invalid);
14095         return;
14096     }
14097     _sg_end_pass();
14098     _sg.cur_pass.id = SG_INVALID_ID;
14099     _sg.cur_pipeline.id = SG_INVALID_ID;
14100     _sg.pass_valid = false;
14101     _SG_TRACE_NOARGS(end_pass);
14102 }
14103 
/* finish the current frame: hand it to the backend and bump the frame
   index used by the per-frame update/append checks */
SOKOL_API_IMPL void sg_commit(void) {
    SOKOL_ASSERT(_sg.valid);
    _sg_commit();
    _SG_TRACE_NOARGS(commit);
    _sg.frame_index++;
}
14110 
/* reset the backend's internal state cache (useful after external code
   has touched the underlying 3D-API state behind sokol's back) */
SOKOL_API_IMPL void sg_reset_state_cache(void) {
    SOKOL_ASSERT(_sg.valid);
    _sg_reset_state_cache();
    _SG_TRACE_NOARGS(reset_state_cache);
}
14116 
/* overwrite the content of a dynamic/streaming buffer; silently ignored
   for unknown ids, non-VALID buffers, or num_bytes <= 0 */
SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const void* data, int num_bytes) {
    SOKOL_ASSERT(_sg.valid);
    _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
    if ((num_bytes > 0) && buf && (buf->slot.state == SG_RESOURCESTATE_VALID)) {
        if (_sg_validate_update_buffer(buf, data, num_bytes)) {
            SOKOL_ASSERT(num_bytes <= buf->cmn.size);
            /* only one update allowed per buffer and frame */
            SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index);
            /* update and append on same buffer in same frame not allowed */
            SOKOL_ASSERT(buf->cmn.append_frame_index != _sg.frame_index);
            _sg_update_buffer(buf, data, (uint32_t)num_bytes);
            buf->cmn.update_frame_index = _sg.frame_index;
        }
    }
    _SG_TRACE_ARGS(update_buffer, buf_id, data, num_bytes);
}
14133 
/* append data to a buffer within the current frame and return the byte
   offset at which the data was written (for use as a binding offset);
   the append cursor rewinds at the start of each new frame, and an
   overflowing append marks the buffer so draws using it are dropped */
SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const void* data, int num_bytes) {
    SOKOL_ASSERT(_sg.valid);
    _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
    int result;
    if (buf) {
        /* rewind append cursor in a new frame */
        if (buf->cmn.append_frame_index != _sg.frame_index) {
            buf->cmn.append_pos = 0;
            buf->cmn.append_overflow = false;
        }
        /* append size is rounded up to a multiple of 4 bytes for the
           overflow check */
        if ((buf->cmn.append_pos + _sg_roundup(num_bytes, 4)) > buf->cmn.size) {
            buf->cmn.append_overflow = true;
        }
        const int start_pos = buf->cmn.append_pos;
        if (buf->slot.state == SG_RESOURCESTATE_VALID) {
            if (_sg_validate_append_buffer(buf, data, num_bytes)) {
                if (!buf->cmn.append_overflow && (num_bytes > 0)) {
                    /* update and append on same buffer in same frame not allowed */
                    SOKOL_ASSERT(buf->cmn.update_frame_index != _sg.frame_index);
                    /* the last arg tells the backend whether this is the first
                       append of the frame (new_frame) */
                    uint32_t copied_num_bytes = _sg_append_buffer(buf, data, (uint32_t)num_bytes, buf->cmn.append_frame_index != _sg.frame_index);
                    buf->cmn.append_pos += copied_num_bytes;
                    buf->cmn.append_frame_index = _sg.frame_index;
                }
            }
        }
        result = start_pos;
    }
    else {
        /* FIXME: should we return -1 here? */
        result = 0;
    }
    _SG_TRACE_ARGS(append_buffer, buf_id, data, num_bytes, result);
    return result;
}
14168 
sg_query_buffer_overflow(sg_buffer buf_id)14169 SOKOL_API_IMPL bool sg_query_buffer_overflow(sg_buffer buf_id) {
14170     SOKOL_ASSERT(_sg.valid);
14171     _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
14172     bool result = buf ? buf->cmn.append_overflow : false;
14173     return result;
14174 }
14175 
sg_update_image(sg_image img_id,const sg_image_content * data)14176 SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_content* data) {
14177     SOKOL_ASSERT(_sg.valid);
14178     _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
14179     if (img && img->slot.state == SG_RESOURCESTATE_VALID) {
14180         if (_sg_validate_update_image(img, data)) {
14181             SOKOL_ASSERT(img->cmn.upd_frame_index != _sg.frame_index);
14182             _sg_update_image(img, data);
14183             img->cmn.upd_frame_index = _sg.frame_index;
14184         }
14185     }
14186     _SG_TRACE_ARGS(update_image, img_id, data);
14187 }
14188 
/* push a named debug group; only forwarded to trace hooks, the name is
   otherwise unused by the implementation */
SOKOL_API_IMPL void sg_push_debug_group(const char* name) {
    SOKOL_ASSERT(_sg.valid);
    SOKOL_ASSERT(name);
    _SOKOL_UNUSED(name);
    _SG_TRACE_ARGS(push_debug_group, name);
}
14195 
/* pop the current debug group; only forwarded to trace hooks */
SOKOL_API_IMPL void sg_pop_debug_group(void) {
    SOKOL_ASSERT(_sg.valid);
    _SG_TRACE_NOARGS(pop_debug_group);
}
14200 
sg_query_buffer_info(sg_buffer buf_id)14201 SOKOL_API_IMPL sg_buffer_info sg_query_buffer_info(sg_buffer buf_id) {
14202     SOKOL_ASSERT(_sg.valid);
14203     sg_buffer_info info;
14204     memset(&info, 0, sizeof(info));
14205     const _sg_buffer_t* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id);
14206     if (buf) {
14207         info.slot.state = buf->slot.state;
14208         info.slot.res_id = buf->slot.id;
14209         info.slot.ctx_id = buf->slot.ctx_id;
14210         info.update_frame_index = buf->cmn.update_frame_index;
14211         info.append_frame_index = buf->cmn.append_frame_index;
14212         info.append_pos = buf->cmn.append_pos;
14213         info.append_overflow = buf->cmn.append_overflow;
14214         #if defined(SOKOL_D3D11)
14215         info.num_slots = 1;
14216         info.active_slot = 0;
14217         #else
14218         info.num_slots = buf->cmn.num_slots;
14219         info.active_slot = buf->cmn.active_slot;
14220         #endif
14221     }
14222     return info;
14223 }
14224 
sg_query_image_info(sg_image img_id)14225 SOKOL_API_IMPL sg_image_info sg_query_image_info(sg_image img_id) {
14226     SOKOL_ASSERT(_sg.valid);
14227     sg_image_info info;
14228     memset(&info, 0, sizeof(info));
14229     const _sg_image_t* img = _sg_lookup_image(&_sg.pools, img_id.id);
14230     if (img) {
14231         info.slot.state = img->slot.state;
14232         info.slot.res_id = img->slot.id;
14233         info.slot.ctx_id = img->slot.ctx_id;
14234         #if defined(SOKOL_D3D11)
14235         info.num_slots = 1;
14236         info.active_slot = 0;
14237         #else
14238         info.num_slots = img->cmn.num_slots;
14239         info.active_slot = img->cmn.active_slot;
14240         #endif
14241         info.width = img->cmn.width;
14242         info.height = img->cmn.height;
14243     }
14244     return info;
14245 }
14246 
sg_query_shader_info(sg_shader shd_id)14247 SOKOL_API_IMPL sg_shader_info sg_query_shader_info(sg_shader shd_id) {
14248     SOKOL_ASSERT(_sg.valid);
14249     sg_shader_info info;
14250     memset(&info, 0, sizeof(info));
14251     const _sg_shader_t* shd = _sg_lookup_shader(&_sg.pools, shd_id.id);
14252     if (shd) {
14253         info.slot.state = shd->slot.state;
14254         info.slot.res_id = shd->slot.id;
14255         info.slot.ctx_id = shd->slot.ctx_id;
14256     }
14257     return info;
14258 }
14259 
sg_query_pipeline_info(sg_pipeline pip_id)14260 SOKOL_API_IMPL sg_pipeline_info sg_query_pipeline_info(sg_pipeline pip_id) {
14261     SOKOL_ASSERT(_sg.valid);
14262     sg_pipeline_info info;
14263     memset(&info, 0, sizeof(info));
14264     const _sg_pipeline_t* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id);
14265     if (pip) {
14266         info.slot.state = pip->slot.state;
14267         info.slot.res_id = pip->slot.id;
14268         info.slot.ctx_id = pip->slot.ctx_id;
14269     }
14270     return info;
14271 }
14272 
sg_query_pass_info(sg_pass pass_id)14273 SOKOL_API_IMPL sg_pass_info sg_query_pass_info(sg_pass pass_id) {
14274     SOKOL_ASSERT(_sg.valid);
14275     sg_pass_info info;
14276     memset(&info, 0, sizeof(info));
14277     const _sg_pass_t* pass = _sg_lookup_pass(&_sg.pools, pass_id.id);
14278     if (pass) {
14279         info.slot.state = pass->slot.state;
14280         info.slot.res_id = pass->slot.id;
14281         info.slot.ctx_id = pass->slot.ctx_id;
14282     }
14283     return info;
14284 }
14285 
/* return a copy of desc with all zero-initialized members replaced by
   their default values (same resolution as used by sg_make_buffer) */
SOKOL_API_IMPL sg_buffer_desc sg_query_buffer_defaults(const sg_buffer_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_buffer_desc_defaults(desc);
}
14290 
/* return a copy of desc with all zero-initialized members replaced by
   their default values (same resolution as used by sg_make_image) */
SOKOL_API_IMPL sg_image_desc sg_query_image_defaults(const sg_image_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_image_desc_defaults(desc);
}
14295 
/* return a copy of desc with all zero-initialized members replaced by
   their default values (same resolution as used by sg_make_shader) */
SOKOL_API_IMPL sg_shader_desc sg_query_shader_defaults(const sg_shader_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_shader_desc_defaults(desc);
}
14300 
/* return a copy of desc with all zero-initialized members replaced by
   their default values (same resolution as used by sg_make_pipeline) */
SOKOL_API_IMPL sg_pipeline_desc sg_query_pipeline_defaults(const sg_pipeline_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_pipeline_desc_defaults(desc);
}
14305 
/* return a copy of desc with all zero-initialized members replaced by
   their default values (same resolution as used by sg_make_pass) */
SOKOL_API_IMPL sg_pass_desc sg_query_pass_defaults(const sg_pass_desc* desc) {
    SOKOL_ASSERT(_sg.valid && desc);
    return _sg_pass_desc_defaults(desc);
}
14310 
/* Metal backend only: return the current MTLRenderCommandEncoder as an
   opaque pointer (via __bridge cast, no ownership transfer), or 0 when
   no encoder is active or a non-Metal backend is compiled in */
SOKOL_API_IMPL const void* sg_mtl_render_command_encoder(void) {
    #if defined(SOKOL_METAL)
        if (nil != _sg.mtl.cmd_encoder) {
            return (__bridge const void*) _sg.mtl.cmd_encoder;
        }
        else {
            return 0;
        }
    #else
        return 0;
    #endif
}
14323 
14324 #ifdef _MSC_VER
14325 #pragma warning(pop)
14326 #endif
14327 
14328 #endif /* SOKOL_IMPL */
14329 
14330