1 /*
2  * Copyright © 2021 Valve Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <inttypes.h>
25 
26 #include "radv_cs.h"
27 #include "radv_private.h"
28 #include "sid.h"
29 
30 #define SPM_RING_BASE_ALIGN 32
31 
/* Allocates, pins and CPU-maps the VRAM ring buffer that the RLC streams SPM
 * samples into, and records the trace parameters in device->spm_trace.
 *
 * Returns true on success. On failure the partially-initialized state
 * (device->spm_trace.bo) is left in place; radv_spm_finish() is responsible
 * for tearing it down.
 */
static bool
radv_spm_init_bo(struct radv_device *device)
{
   struct radeon_winsys *ws = device->ws;
   uint64_t size = 32 * 1024 * 1024; /* Default to 32MB. */
   uint16_t sample_interval = 4096; /* Default to 4096 clk. */
   VkResult result;

   device->spm_trace.buffer_size = size;
   device->spm_trace.sample_interval = sample_interval;

   /* CPU-visible VRAM, zeroed so stale data is never read back as samples;
    * 4096-byte alignment also satisfies SPM_RING_BASE_ALIGN (32).
    */
   struct radeon_winsys_bo *bo = NULL;
   result = ws->buffer_create(
      ws, size, 4096, RADEON_DOMAIN_VRAM,
      RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
      RADV_BO_PRIORITY_SCRATCH, 0, &bo);
   /* Store the BO before checking the result so radv_spm_finish() can clean
    * up whatever was created.
    */
   device->spm_trace.bo = bo;
   if (result != VK_SUCCESS)
      return false;

   /* Keep the ring resident for the lifetime of the trace. */
   result = ws->buffer_make_resident(ws, device->spm_trace.bo, true);
   if (result != VK_SUCCESS)
      return false;

   /* Persistent CPU mapping used to read samples out of the ring. */
   device->spm_trace.ptr = ws->buffer_map(device->spm_trace.bo);
   if (!device->spm_trace.ptr)
      return false;

   return true;
}
62 
63 static void
radv_emit_spm_counters(struct radv_device * device,struct radeon_cmdbuf * cs)64 radv_emit_spm_counters(struct radv_device *device, struct radeon_cmdbuf *cs)
65 {
66    struct ac_spm_trace_data *spm_trace = &device->spm_trace;
67 
68    for (uint32_t b = 0; b < spm_trace->num_used_sq_block_sel; b++) {
69       struct ac_spm_block_select *sq_block_sel = &spm_trace->sq_block_sel[b];
70       const struct ac_spm_counter_select *cntr_sel = &sq_block_sel->counters[0];
71       uint32_t reg_base = R_036700_SQ_PERFCOUNTER0_SELECT;
72 
73       radeon_set_uconfig_reg_seq(cs, reg_base + b * 4, 1);
74       radeon_emit(cs, cntr_sel->sel0 | S_036700_SQC_BANK_MASK(0xf)); /* SQC_BANK_MASK only gfx10 */
75    }
76 
77    for (uint32_t b = 0; b < spm_trace->num_block_sel; b++) {
78       struct ac_spm_block_select *block_sel = &spm_trace->block_sel[b];
79       struct ac_pc_block_base *regs = block_sel->b->b->b;
80 
81       radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, block_sel->grbm_gfx_index);
82 
83       for (unsigned c = 0; c < block_sel->num_counters; c++) {
84          const struct ac_spm_counter_select *cntr_sel = &block_sel->counters[c];
85 
86          if (!cntr_sel->active)
87             continue;
88 
89          radeon_set_uconfig_reg_seq(cs, regs->select0[c], 1);
90          radeon_emit(cs, cntr_sel->sel0);
91 
92          radeon_set_uconfig_reg_seq(cs, regs->select1[c], 1);
93          radeon_emit(cs, cntr_sel->sel1);
94       }
95    }
96 
97    /* Restore global broadcasting. */
98    radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
99                               S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
100                               S_030800_INSTANCE_BROADCAST_WRITES(1));
101 }
102 
/* Emits the full SPM configuration into a command buffer: ring buffer
 * location/size, per-SE and global segment sizes, the muxsel RAM contents
 * for each segment, and finally the counter SELECT registers.
 *
 * Must be emitted before the perfmon state machine is started; the order
 * (ring setup -> segment sizes -> muxsel upload -> counter selects) follows
 * the RLC SPM programming sequence.
 */
void
radv_emit_spm_setup(struct radv_device *device, struct radeon_cmdbuf *cs)
{
   struct ac_spm_trace_data *spm_trace = &device->spm_trace;
   uint64_t va = radv_buffer_get_va(spm_trace->bo);
   uint64_t ring_size = spm_trace->buffer_size;

   /* It's required that the ring VA and the size are correctly aligned. */
   assert(!(va & (SPM_RING_BASE_ALIGN - 1)));
   assert(!(ring_size & (SPM_RING_BASE_ALIGN - 1)));
   assert(spm_trace->sample_interval >= 32);

   /* Configure the SPM ring buffer. */
   radeon_set_uconfig_reg(cs, R_037200_RLC_SPM_PERFMON_CNTL,
                              S_037200_PERFMON_RING_MODE(0) | /* no stall and no interrupt on overflow */
                              S_037200_PERFMON_SAMPLE_INTERVAL(spm_trace->sample_interval)); /* in sclk */
   radeon_set_uconfig_reg(cs, R_037204_RLC_SPM_PERFMON_RING_BASE_LO, va);
   radeon_set_uconfig_reg(cs, R_037208_RLC_SPM_PERFMON_RING_BASE_HI,
                              S_037208_RING_BASE_HI(va >> 32));
   radeon_set_uconfig_reg(cs, R_03720C_RLC_SPM_PERFMON_RING_SIZE, ring_size);

   /* Configure the muxsel. */
   /* Total line count across all segments feeds PERFMON_SEGMENT_SIZE below. */
   uint32_t total_muxsel_lines = 0;
   for (unsigned s = 0; s < AC_SPM_SEGMENT_TYPE_COUNT; s++) {
      total_muxsel_lines += spm_trace->num_muxsel_lines[s];
   }

   radeon_set_uconfig_reg(cs, R_03726C_RLC_SPM_ACCUM_MODE, 0);
   /* Legacy segment-size register is zeroed; the SE3TO0/GLB registers below
    * carry the per-segment line counts instead.
    */
   radeon_set_uconfig_reg(cs, R_037210_RLC_SPM_PERFMON_SEGMENT_SIZE, 0);
   /* Indices 0-3 are the per-SE segments, index 4 (used below) is global. */
   radeon_set_uconfig_reg(cs, R_03727C_RLC_SPM_PERFMON_SE3TO0_SEGMENT_SIZE,
                              S_03727C_SE0_NUM_LINE(spm_trace->num_muxsel_lines[0]) |
                              S_03727C_SE1_NUM_LINE(spm_trace->num_muxsel_lines[1]) |
                              S_03727C_SE2_NUM_LINE(spm_trace->num_muxsel_lines[2]) |
                              S_03727C_SE3_NUM_LINE(spm_trace->num_muxsel_lines[3]));
   radeon_set_uconfig_reg(cs, R_037280_RLC_SPM_PERFMON_GLB_SEGMENT_SIZE,
                              S_037280_PERFMON_SEGMENT_SIZE(total_muxsel_lines) |
                              S_037280_GLOBAL_NUM_LINE(spm_trace->num_muxsel_lines[4]));

   /* Upload each muxsel ram to the RLC. */
   for (unsigned s = 0; s < AC_SPM_SEGMENT_TYPE_COUNT; s++) {
      unsigned rlc_muxsel_addr, rlc_muxsel_data;
      unsigned grbm_gfx_index = S_030800_SH_BROADCAST_WRITES(1) |
                                S_030800_INSTANCE_BROADCAST_WRITES(1);

      if (!spm_trace->num_muxsel_lines[s])
         continue;

      if (s == AC_SPM_SEGMENT_TYPE_GLOBAL) {
         /* The global muxsel RAM is broadcast to all SEs through its own
          * ADDR/DATA register pair.
          */
         grbm_gfx_index |= S_030800_SE_BROADCAST_WRITES(1);

         rlc_muxsel_addr = R_037224_RLC_SPM_GLOBAL_MUXSEL_ADDR;
         rlc_muxsel_data = R_037228_RLC_SPM_GLOBAL_MUXSEL_DATA;
      } else {
         /* Per-SE segments index the target SE explicitly and share the
          * SE_MUXSEL ADDR/DATA pair.
          */
         grbm_gfx_index |= S_030800_SE_INDEX(s);

         rlc_muxsel_addr = R_03721C_RLC_SPM_SE_MUXSEL_ADDR;
         rlc_muxsel_data = R_037220_RLC_SPM_SE_MUXSEL_DATA;
      }

      radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, grbm_gfx_index);

      for (unsigned l = 0; l < spm_trace->num_muxsel_lines[s]; l++) {
         uint32_t *data = (uint32_t *)spm_trace->muxsel_lines[s][l].muxsel;

         /* Select MUXSEL_ADDR to point to the next muxsel. */
         radeon_set_uconfig_reg(cs, rlc_muxsel_addr, l * AC_SPM_MUXSEL_LINE_SIZE);

         /* Write the muxsel line configuration with MUXSEL_DATA. WR_ONE_ADDR
          * makes every dword of the payload land on the same MUXSEL_DATA
          * register, which the RLC auto-increments into its muxsel RAM.
          */
         radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + AC_SPM_MUXSEL_LINE_SIZE, 0));
         radeon_emit(cs, S_370_DST_SEL(V_370_MEM_MAPPED_REGISTER) |
                         S_370_WR_CONFIRM(1) |
                         S_370_ENGINE_SEL(V_370_ME) |
                         S_370_WR_ONE_ADDR(1));
         radeon_emit(cs, rlc_muxsel_data >> 2); /* register offset in dwords */
         radeon_emit(cs, 0);                    /* DST_ADDR_HI, unused for registers */
         radeon_emit_array(cs, data, AC_SPM_MUXSEL_LINE_SIZE);
      }
   }

   /* Select SPM counters. */
   radv_emit_spm_counters(device, cs);
}
185 
186 bool
radv_spm_init(struct radv_device * device)187 radv_spm_init(struct radv_device *device)
188 {
189    const struct radeon_info *info = &device->physical_device->rad_info;
190    struct ac_perfcounters *pc = &device->perfcounters;
191    struct ac_spm_counter_create_info spm_counters[] = {
192       {TCP, 0, 0x9},    /* Number of L2 requests. */
193       {TCP, 0, 0x12},   /* Number of L2 misses. */
194       {SQ, 0, 0x14f},   /* Number of SCACHE hits. */
195       {SQ, 0, 0x150},   /* Number of SCACHE misses. */
196       {SQ, 0, 0x151},   /* Number of SCACHE misses duplicate. */
197       {SQ, 0, 0x12c},   /* Number of ICACHE hits. */
198       {SQ, 0, 0x12d},   /* Number of ICACHE misses. */
199       {SQ, 0, 0x12e},   /* Number of ICACHE misses duplicate. */
200       {GL1C, 0, 0xe},   /* Number of GL1C requests. */
201       {GL1C, 0, 0x12},  /* Number of GL1C misses. */
202       {GL2C, 0, 0x3},   /* Number of GL2C requests. */
203       {GL2C, 0, info->chip_class >= GFX10_3 ? 0x2b : 0x23},  /* Number of GL2C misses. */
204    };
205 
206    if (!ac_init_perfcounters(info, false, false, pc))
207       return false;
208 
209    if (!ac_init_spm(info, pc, ARRAY_SIZE(spm_counters), spm_counters, &device->spm_trace))
210       return false;
211 
212    if (!radv_spm_init_bo(device))
213       return false;
214 
215    return true;
216 }
217 
218 void
radv_spm_finish(struct radv_device * device)219 radv_spm_finish(struct radv_device *device)
220 {
221    struct radeon_winsys *ws = device->ws;
222 
223    if (device->spm_trace.bo) {
224       ws->buffer_make_resident(ws, device->spm_trace.bo, false);
225       ws->buffer_destroy(ws, device->spm_trace.bo);
226    }
227 
228    ac_destroy_spm(&device->spm_trace);
229    ac_destroy_perfcounters(&device->perfcounters);
230 }
231