/*
 * Copyright 2015,2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef AMDKERNELCODET_H
#define AMDKERNELCODET_H

#include <stdint.h>

//---------------------------------------------------------------------------//
// AMD Kernel Code, and its dependencies                                     //
//---------------------------------------------------------------------------//

// Sets the bits of the field specified by mask in the packed instance dst
// to val.
#define AMD_HSA_BITS_SET(dst, mask, val)                                                           \
   do {                                                                                            \
      (dst) &= ~mask;                                                                              \
      (dst) |= (((val) << mask##_SHIFT) & mask);                                                   \
   } while (0)

// Gets the bits of the field specified by mask from the packed instance src.
#define AMD_HSA_BITS_GET(src, mask) (((src) & mask) >> mask##_SHIFT)

/* Every amd_*_code_t has the following properties, which are composed of
 * a number of bit fields. Every bit field has a mask (AMD_CODE_PROPERTY_*),
 * a bit width (AMD_CODE_PROPERTY_*_WIDTH), and a bit shift amount
 * (AMD_CODE_PROPERTY_*_SHIFT) for convenient access. Unused bits must be 0.
 *
 * (Note that C bit fields cannot be used, as their layout is
 * implementation-defined in the C standard and so cannot be used to
 * specify an ABI.)
 */
enum amd_code_property_mask_t
{

   /* Enable the setup of the SGPR user data registers
    * (AMD_CODE_PROPERTY_ENABLE_SGPR_*); see the documentation of
    * amd_kernel_code_t for the initial register state.
    *
    * The total number of SGPR user data registers requested must not
    * exceed 16. Any requests beyond 16 will be ignored.
    *
    * Used to set COMPUTE_PGM_RSRC2.USER_SGPR (set to the total count of
    * SGPR user data registers enabled, up to 16).
    */

   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT = 0,
   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT = 2,
   AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT = 3,
   AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT = 4,
   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT = 5,
   AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT = 6,
   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT = 7,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT = 8,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT,

   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT = 9,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z =
      ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT,

   AMD_CODE_PROPERTY_RESERVED1_SHIFT = 10,
   AMD_CODE_PROPERTY_RESERVED1_WIDTH = 6,
   AMD_CODE_PROPERTY_RESERVED1 = ((1 << AMD_CODE_PROPERTY_RESERVED1_WIDTH) - 1)
                                 << AMD_CODE_PROPERTY_RESERVED1_SHIFT,

   /* Control wave ID base counter for GDS ordered-append. Used to set
    * COMPUTE_DISPATCH_INITIATOR.ORDERED_APPEND_ENBL. (Not sure if
    * ORDERED_APPEND_MODE also needs to be settable.)
    */
   AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 16,
   AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH = 1,
   AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS =
      ((1 << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH) - 1)
      << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT,

   /* The interleave (swizzle) element size in bytes required by the
    * code for private memory. This must be 2, 4, 8 or 16. This value
    * is provided to the finalizer when it is invoked and is recorded
    * here. The hardware will interleave the memory requests of each
    * lane of a wavefront by this element size to ensure each
    * work-item gets a distinct memory location. Therefore, the
    * finalizer ensures that all load and store operations done to
    * private memory do not exceed this size. For example, if the
    * element size is 4 (32 bits, i.e. a dword) and a 64-bit value must
    * be loaded, the finalizer will generate two 32-bit loads. This
    * ensures that the interleaving will get the work-item
    * specific dword for both halves of the 64-bit value. If it just
    * did a 64-bit load then it would get one dword which belonged to
    * its own work-item, but the second dword would belong to the
    * adjacent lane work-item since the interleaving is in dwords.
    *
    * The value used must match the value that the runtime configures
    * the GPU flat scratch with (SH_STATIC_MEM_CONFIG.ELEMENT_SIZE). This
    * is generally DWORD.
    *
    * Use values from the amd_element_byte_size_t enum.
    */
   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 17,
   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH = 2,
   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE =
      ((1 << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH) - 1)
      << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT,
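
   /* As a sketch of why wide private-memory accesses must be split (the
    * exact layout is whatever SH_STATIC_MEM_CONFIG selects; treat this as
    * an illustration rather than a hardware specification): with element
    * size E and a 64-lane wavefront, byte offset O of lane L's private
    * memory lands at approximately
    *
    *    scratch_base + (O / E) * E * 64 + L * E + (O % E)
    *
    * so consecutive E-byte elements of one work-item are 64*E bytes apart,
    * and a single load wider than E bytes would straddle another lane's
    * element.
    */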

   /* Indicates whether global memory addresses are 64 bits. Must match
    * amd_kernel_code_t.hsail_machine_model ==
    * HSA_MACHINE_LARGE. Must also match
    * SH_MEM_CONFIG.PTR32 (GFX6 (SI)/GFX7 (CI)),
    * SH_MEM_CONFIG.ADDRESS_MODE (GFX8 (VI)+).
    */
   AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 19,
   AMD_CODE_PROPERTY_IS_PTR64_WIDTH = 1,
   AMD_CODE_PROPERTY_IS_PTR64 = ((1 << AMD_CODE_PROPERTY_IS_PTR64_WIDTH) - 1)
                                << AMD_CODE_PROPERTY_IS_PTR64_SHIFT,

   /* Indicates whether the generated ISA uses a dynamically sized call
    * stack. This can happen if calls are implemented using a call
    * stack and recursion, alloca or calls to indirect functions are
    * present. In these cases the Finalizer cannot compute the total
    * private segment size at compile time: the
    * workitem_private_segment_byte_size only specifies the statically
    * known private segment size, and additional space must be added
    * for the call stack.
    */
   AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 20,
   AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH = 1,
   AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK =
      ((1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1)
      << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT,

   /* Indicates whether the generated code supports debugging. */
   AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 21,
   AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH = 1,
   AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH) - 1)
                                          << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT,

   AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT = 22,
   AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH = 1,
   AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_WIDTH) - 1)
                                          << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT,

   AMD_CODE_PROPERTY_RESERVED2_SHIFT = 23,
   AMD_CODE_PROPERTY_RESERVED2_WIDTH = 9,
   AMD_CODE_PROPERTY_RESERVED2 = ((1 << AMD_CODE_PROPERTY_RESERVED2_WIDTH) - 1)
                                 << AMD_CODE_PROPERTY_RESERVED2_SHIFT
};
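
/* Example (illustrative only, not part of the ABI): reading and writing a
 * field of a packed code-properties word with the accessor macros defined
 * above. "props" is a hypothetical local variable, and the value 1 for the
 * private element size assumes the amd_element_byte_size_t encoding in
 * which 1 selects 4-byte elements.
 *
 *    uint32_t props = 0;
 *    AMD_HSA_BITS_SET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR, 1);
 *    AMD_HSA_BITS_SET(props, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE, 1);
 *    unsigned elem = AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);  // == 1
 */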

/* AMD Kernel Code Object (amd_kernel_code_t). The GPU CP uses the AMD Kernel
 * Code Object to set up the hardware to execute the kernel dispatch.
 *
 * Initial Kernel Register State.
 *
 * Initial kernel register state will be set up by CP/SPI prior to the start
 * of execution of every wavefront. This is limited by the constraints of the
 * current hardware.
 *
 * The order of the SGPR registers is defined, but the Finalizer can specify
 * which ones are actually set up in the amd_kernel_code_t object using the
 * enable_sgpr_* bit fields. The register numbers used for enabled registers
 * are dense starting at SGPR0: the first enabled register is SGPR0, the next
 * enabled register is SGPR1 etc.; disabled registers do not have an SGPR
 * number.
 *
 * The initial SGPRs comprise up to 16 User SGPRs that are set up by CP and
 * apply to all waves of the grid. It is possible to specify more than 16 User
 * SGPRs using the enable_sgpr_* bit fields, in which case only the first 16
 * are actually initialized. These are then immediately followed by the System
 * SGPRs that are set up by ADC/SPI and can have different values for each wave
 * of the grid dispatch.
 *
 * SGPR register initial state is defined as follows:
 *
 * Private Segment Buffer (enable_sgpr_private_segment_buffer):
 *   Number of User SGPR registers: 4. V# that can be used, together with
 *   Scratch Wave Offset as an offset, to access the Private/Spill/Arg
 *   segments using a segment address. It must be set as follows:
 *     - Base address: of the scratch memory area used by the dispatch. It
 *       does not include the scratch wave offset. It will be the per process
 *       SH_HIDDEN_PRIVATE_BASE_VMID plus any offset from this dispatch (for
 *       example there may be a per pipe offset, or per AQL Queue offset).
 *     - Stride + data_format: Element Size * Index Stride (???)
 *     - Cache swizzle: ???
 *     - Swizzle enable: SH_STATIC_MEM_CONFIG.SWIZZLE_ENABLE (must be 1 for
 *       scratch)
 *     - Num records: Flat Scratch Work Item Size / Element Size (???)
 *     - Dst_sel_*: ???
 *     - Num_format: ???
 *     - Element_size: SH_STATIC_MEM_CONFIG.ELEMENT_SIZE (will be DWORD, must
 *       agree with amd_kernel_code_t.privateElementSize)
 *     - Index_stride: SH_STATIC_MEM_CONFIG.INDEX_STRIDE (will be 64 as must
 *       be number of wavefront lanes for scratch, must agree with
 *       amd_kernel_code_t.wavefrontSize)
 *     - Add tid enable: 1
 *     - ATC: from SH_MEM_CONFIG.PRIVATE_ATC
 *     - Hash_enable: ???
 *     - Heap: ???
 *     - Mtype: from SH_STATIC_MEM_CONFIG.PRIVATE_MTYPE
 *     - Type: 0 (a buffer) (???)
 *
 * Dispatch Ptr (enable_sgpr_dispatch_ptr):
 *   Number of User SGPR registers: 2. 64 bit address of the AQL dispatch
 *   packet for the kernel actually executing.
 *
 * Queue Ptr (enable_sgpr_queue_ptr):
 *   Number of User SGPR registers: 2. 64 bit address of the AmdQueue object
 *   for the AQL queue on which the dispatch packet was queued.
 *
 * Kernarg Segment Ptr (enable_sgpr_kernarg_segment_ptr):
 *   Number of User SGPR registers: 2. 64 bit address of the Kernarg segment.
 *   This is directly copied from the kernargPtr in the dispatch packet.
 *   Having CP load it once avoids loading it at the beginning of every
 *   wavefront.
 *
 * Dispatch Id (enable_sgpr_dispatch_id):
 *   Number of User SGPR registers: 2. 64 bit Dispatch ID of the dispatch
 *   packet being executed.
 *
 * Flat Scratch Init (enable_sgpr_flat_scratch_init):
 *   Number of User SGPR registers: 2.
 *
 *   For CI/VI:
 *     The first SGPR is a 32 bit byte offset from SH_MEM_HIDDEN_PRIVATE_BASE
 *     to the base of memory for scratch for this dispatch. This is the same
 *     offset used in computing the Scratch Segment Buffer base address. The
 *     value of Scratch Wave Offset must be added by the kernel code and moved
 *     to SGPRn-4 for use as the FLAT SCRATCH BASE in flat memory
 *     instructions.
 *
 *     The second SGPR is the 32 bit byte size of a single work-item's scratch
 *     memory usage. This is directly loaded from the dispatch packet Private
 *     Segment Byte Size and rounded up to a multiple of DWORD.
 *
 *     \todo [Does CP need to round this to >4 byte alignment?]
 *
 *     The kernel code must move it to SGPRn-3 for use as the FLAT SCRATCH
 *     SIZE in flat memory instructions. Having CP load it once avoids loading
 *     it at the beginning of every wavefront.
 *
 * Private Segment Size (enable_sgpr_private_segment_size):
 *   Number of User SGPR registers: 1. The 32 bit byte size of a single
 *   work-item's scratch memory allocation. This is the dispatch packet
 *   Private Segment Byte Size rounded up by CP to a multiple of DWORD.
 *
 *   \todo [Does CP need to round this to >4 byte alignment?]
 *
 *   Having CP load it once avoids loading it at the beginning of every
 *   wavefront.
 *
 *   \todo [This will not be used for CI/VI since it is the same value as
 *   the second SGPR of Flat Scratch Init.]
 *
 * Grid Work-Group Count X (enable_sgpr_grid_workgroup_count_x):
 *   Number of User SGPR registers: 1. 32 bit count of the number of
 *   work-groups in the X dimension for the grid being executed. Computed from
 *   the fields in the HsaDispatchPacket as
 *   ((gridSize.x+workgroupSize.x-1)/workgroupSize.x).
 *
 * Grid Work-Group Count Y (enable_sgpr_grid_workgroup_count_y):
 *   Number of User SGPR registers: 1. 32 bit count of the number of
 *   work-groups in the Y dimension for the grid being executed. Computed from
 *   the fields in the HsaDispatchPacket as
 *   ((gridSize.y+workgroupSize.y-1)/workgroupSize.y).
 *
 *   Only initialized if <16 previous SGPRs initialized.
 *
 * Grid Work-Group Count Z (enable_sgpr_grid_workgroup_count_z):
 *   Number of User SGPR registers: 1. 32 bit count of the number of
 *   work-groups in the Z dimension for the grid being executed. Computed
 *   from the fields in the HsaDispatchPacket as
 *   ((gridSize.z+workgroupSize.z-1)/workgroupSize.z).
 *
 *   Only initialized if <16 previous SGPRs initialized.
 *
 * Work-Group Id X (enable_sgpr_workgroup_id_x):
 *   Number of System SGPR registers: 1. 32 bit work-group id in the X
 *   dimension of the grid for the wavefront. Always present.
 *
 * Work-Group Id Y (enable_sgpr_workgroup_id_y):
 *   Number of System SGPR registers: 1. 32 bit work-group id in the Y
 *   dimension of the grid for the wavefront.
 *
 * Work-Group Id Z (enable_sgpr_workgroup_id_z):
 *   Number of System SGPR registers: 1. 32 bit work-group id in the Z
 *   dimension of the grid for the wavefront. If present then Work-Group Id Y
 *   will also be present.
 *
 * Work-Group Info (enable_sgpr_workgroup_info):
 *   Number of System SGPR registers: 1. {first_wave, 14'b0000,
 *   ordered_append_term[10:0], threadgroup_size_in_waves[5:0]}
 *
 * Private Segment Wave Byte Offset
 * (enable_sgpr_private_segment_wave_byte_offset):
 *   Number of System SGPR registers: 1. 32 bit byte offset from the base of
 *   the dispatch scratch memory. Must be used as an offset with the
 *   Private/Spill/Arg segment address when using the Scratch Segment Buffer.
 *   It must be added to the Flat Scratch Offset if setting up FLAT SCRATCH
 *   for flat addressing.
 *
 *
 * The order of the VGPR registers is defined, but the Finalizer can specify
 * which ones are actually set up in the amd_kernel_code_t object using the
 * enable_vgpr_* bit fields. The register numbers used for enabled registers
 * are dense starting at VGPR0: the first enabled register is VGPR0, the next
 * enabled register is VGPR1 etc.; disabled registers do not have a VGPR
 * number.
 *
 * VGPR register initial state is defined as follows:
 *
 * Work-Item Id X (always initialized):
 *   Number of registers: 1. 32 bit work-item id in the X dimension of the
 *   work-group for the wavefront lane.
 *
 * Work-Item Id Y (enable_vgpr_workitem_id > 0):
 *   Number of registers: 1. 32 bit work-item id in the Y dimension of the
 *   work-group for the wavefront lane.
 *
 * Work-Item Id Z (enable_vgpr_workitem_id > 1):
 *   Number of registers: 1. 32 bit work-item id in the Z dimension of the
 *   work-group for the wavefront lane.
 *
 *
 * The setting of registers is done by existing GPU hardware as follows:
 *   1) SGPRs before the Work-Group Ids are set by CP using the 16 User Data
 *      registers.
 *   2) Work-group Id registers X, Y, Z are set by SPI which supports any
 *      combination including none.
 *   3) Scratch Wave Offset is also set by SPI which is why its value cannot
 *      be added into the value of Flat Scratch Offset (which would avoid the
 *      Finalizer-generated prolog having to do the add).
 *   4) The VGPRs are set by SPI which only supports specifying either (X),
 *      (X, Y) or (X, Y, Z).
 *
 * Flat Scratch Dispatch Offset and Flat Scratch Size are adjacent SGPRs so
 * they can be moved as a 64 bit value to the hardware required SGPRn-4 and
 * SGPRn-3 respectively for use as the FLAT_SCRATCH register pair.
 *
 * The global segment can be accessed either using flat operations or buffer
 * operations. If buffer operations are used then the Global Buffer used to
 * access the HSAIL Global/Readonly/Kernarg segments (which are combined)
 * using a segment address is not passed into the kernel code by CP since its
 * base address is always 0. Instead the Finalizer generates prolog code to
 * initialize 4 SGPRs with a V# that has the following properties, and then
 * uses that in the buffer instructions:
 *   - base address of 0
 *   - no swizzle
 *   - ATC=1
 *   - MTYPE set to support the memory coherence specified in
 *     amd_kernel_code_t.globalMemoryCoherence
 *
 * When the Global Buffer is used to access the Kernarg segment, the kernel
 * code must add the dispatch packet kernArgPtr to a kernarg segment address
 * before using this V#. Alternatively scalar loads can be used if the
 * kernarg offset is uniform, as the kernarg segment is constant for the
 * duration of the kernel execution.
 */
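
/* A minimal sketch (illustrative, not part of the ABI): deriving the user
 * SGPR count that, per the documentation above, CP programs into
 * COMPUTE_PGM_RSRC2.USER_SGPR. The per-feature register counts follow the
 * list above; the function name is hypothetical.
 */
static inline unsigned
amd_code_properties_user_sgpr_count(uint32_t props)
{
   unsigned count = 0;
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER))
      count += 4; /* V# used for scratch access */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR))
      count += 2; /* 64 bit dispatch packet address */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR))
      count += 2; /* 64 bit AmdQueue address */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR))
      count += 2; /* 64 bit kernarg segment address */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID))
      count += 2; /* 64 bit dispatch id */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT))
      count += 2; /* flat scratch offset and size */
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE))
      count += 1;
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X))
      count += 1;
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y))
      count += 1;
   if (AMD_HSA_BITS_GET(props, AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z))
      count += 1;
   /* Requests beyond 16 user data registers are ignored by CP. */
   return count > 16 ? 16 : count;
}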

typedef struct amd_kernel_code_s {
   /* Version of the amd_kernel_code_t format. */
   uint32_t amd_kernel_code_version_major;
   uint32_t amd_kernel_code_version_minor;

   /* Target machine kind and GFX version (major.minor.stepping). */
   uint16_t amd_machine_kind;
   uint16_t amd_machine_version_major;
   uint16_t amd_machine_version_minor;
   uint16_t amd_machine_version_stepping;

   /* Byte offset (possibly negative) from the start of the
    * amd_kernel_code_t object to the kernel's entry point instruction. The
    * actual code for the kernel is required to be 256 byte aligned to match
    * hardware requirements (the SQ cache line is 16 bytes). The code must be
    * position independent code (PIC) for AMD devices to give the runtime the
    * option of copying code to discrete GPU memory or APU L2
    * cache. The Finalizer should endeavour to allocate all kernel
    * machine code in contiguous memory pages so that a device
    * pre-fetcher will tend to only pre-fetch Kernel Code objects,
    * improving cache performance.
    */
   int64_t kernel_code_entry_byte_offset;
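
   /* For example (illustrative; "code" is a hypothetical pointer to a
    * loaded, mapped code object):
    *
    *    const amd_kernel_code_t *code = ...;
    *    const void *entry =
    *       (const uint8_t *)code + code->kernel_code_entry_byte_offset;
    */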

   /* Range of bytes to consider prefetching, expressed as an offset
    * and size. The offset is from the start (possibly negative) of the
    * amd_kernel_code_t object. Set both to 0 if no prefetch
    * information is available.
    */
   int64_t kernel_code_prefetch_byte_offset;
   uint64_t kernel_code_prefetch_byte_size;

   /* Number of bytes of scratch backing memory required for full
    * occupancy of the target chip. This takes into account the number of
    * bytes of scratch per work-item, the wavefront size, the maximum
    * number of wavefronts per CU, and the number of CUs. This is an
    * upper limit on scratch. If the grid being dispatched is small it
    * may need less than this. If the kernel uses no scratch, or
    * the Finalizer has not computed this value, it must be 0.
    */
   uint64_t max_scratch_backing_memory_byte_size;

   /* Shader program settings for CS. Contains the COMPUTE_PGM_RSRC1 and
    * COMPUTE_PGM_RSRC2 registers.
    */
   uint64_t compute_pgm_resource_registers;
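
   /* For example (assuming, as in the HSA code object definition, that
    * COMPUTE_PGM_RSRC1 occupies the low dword and COMPUTE_PGM_RSRC2 the
    * high dword):
    *
    *    uint32_t rsrc1 = (uint32_t)code->compute_pgm_resource_registers;
    *    uint32_t rsrc2 = (uint32_t)(code->compute_pgm_resource_registers >> 32);
    */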

   /* Code properties. See amd_code_property_mask_t for a full list of
    * properties.
    */
   uint32_t code_properties;

   /* The amount of memory required for the combined private, spill
    * and arg segments for a work-item in bytes. If
    * is_dynamic_callstack is 1 then additional space must be added to
    * this value for the call stack.
    */
   uint32_t workitem_private_segment_byte_size;

   /* The amount of group segment memory required by a work-group in
    * bytes. This does not include any dynamically allocated group
    * segment memory that may be added when the kernel is
    * dispatched.
    */
   uint32_t workgroup_group_segment_byte_size;

   /* Number of bytes of GDS required by the kernel dispatch. Must be 0 if
    * not using GDS.
    */
   uint32_t gds_segment_byte_size;

   /* The size in bytes of the kernarg segment that holds the values
    * of the arguments to the kernel. This could be used by CP to
    * prefetch the kernarg segment pointed to by the dispatch packet.
    */
   uint64_t kernarg_segment_byte_size;

   /* Number of fbarriers used in the kernel and all functions it
    * calls. If the implementation uses group memory to allocate the
    * fbarriers then that amount must already be included in the
    * workgroup_group_segment_byte_size total.
    */
   uint32_t workgroup_fbarrier_count;

   /* Number of scalar registers used by a wavefront. This includes
    * the special SGPRs for VCC, Flat Scratch Base, Flat Scratch Size
    * and XNACK (for GFX8 (VI)). It does not include the 16 SGPRs added if a
    * trap handler is enabled. Used to set COMPUTE_PGM_RSRC1.SGPRS.
    */
   uint16_t wavefront_sgpr_count;

   /* Number of vector registers used by each work-item. Used to set
    * COMPUTE_PGM_RSRC1.VGPRS.
    */
   uint16_t workitem_vgpr_count;
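
   /* As a sketch (an assumption about the GFX6-GFX8 encoding, not stated in
    * this header): RSRC1 stores these counts in allocation granules rather
    * than raw register numbers, e.g.
    *
    *    rsrc1.SGPRS = (wavefront_sgpr_count - 1) / 8;
    *    rsrc1.VGPRS = (workitem_vgpr_count - 1) / 4;
    */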

   /* If reserved_vgpr_count is 0 then this must be 0. Otherwise, this is the
    * first fixed VGPR number reserved.
    */
   uint16_t reserved_vgpr_first;

   /* The number of consecutive VGPRs reserved by the client. If
    * is_debug_supported then this count includes VGPRs reserved
    * for debugger use.
    */
   uint16_t reserved_vgpr_count;

   /* If reserved_sgpr_count is 0 then this must be 0. Otherwise, this is the
    * first fixed SGPR number reserved.
    */
   uint16_t reserved_sgpr_first;

   /* The number of consecutive SGPRs reserved by the client. If
    * is_debug_supported then this count includes SGPRs reserved
    * for debugger use.
    */
   uint16_t reserved_sgpr_count;

   /* If is_debug_supported is 0 then this must be 0. Otherwise, this is the
    * fixed SGPR number used to hold the wave scratch offset for the
    * entire kernel execution, or uint16_t(-1) if the register is not
    * used or not known.
    */
   uint16_t debug_wavefront_private_segment_offset_sgpr;

   /* If is_debug_supported is 0 then this must be 0. Otherwise, this is the
    * fixed SGPR number of the first of 4 SGPRs used to hold the
    * scratch V# used for the entire kernel execution, or uint16_t(-1)
    * if the registers are not used or not known.
    */
   uint16_t debug_private_segment_buffer_sgpr;

   /* The maximum byte alignment of variables used by the kernel in
    * the specified memory segment. Expressed as a power of two. Must
    * be at least HSA_POWERTWO_16.
    */
   uint8_t kernarg_segment_alignment;
   uint8_t group_segment_alignment;
   uint8_t private_segment_alignment;
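
   /* For example (illustrative): since the fields hold log2 values, the
    * alignment in bytes is recovered as
    *
    *    size_t kernarg_align = (size_t)1 << code->kernarg_segment_alignment;
    */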

   /* Wavefront size expressed as a power of two, i.e. this field holds
    * log2 of the wavefront size (matching the alignment fields above).
    * The wavefront size must be a power of 2 in the range 1..64
    * inclusive. Used to support a runtime query that obtains the
    * wavefront size, which may be used by an application to allocate
    * dynamic group memory and set the dispatch work-group size.
    */
   uint8_t wavefront_size;

   /* The call convention this kernel was finalized for; treat the exact
    * encoding as implementation-defined.
    */
   int32_t call_convention;

   /* Reserved; must be 0. */
   uint8_t reserved3[12];

   /* Reserved for the runtime loader (per the HSA code object definition;
    * the Finalizer sets it to 0).
    */
   uint64_t runtime_loader_kernel_symbol;

   /* Record of the control directives (128 bytes) that this code was
    * generated with.
    */
   uint64_t control_directives[16];
} amd_kernel_code_t;
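
/* With the layout above and natural alignment, the structure occupies 256
 * bytes. A build-time check (C11; a sketch, not part of the original
 * header) could be:
 *
 *    _Static_assert(sizeof(amd_kernel_code_t) == 256,
 *                   "amd_kernel_code_t must be 256 bytes");
 */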

#endif // AMDKERNELCODET_H