/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "soc15.h"

#include "soc15_common.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"

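/* Mask of 'num_inst' consecutive instances owned by partition 'xcp_id',
 * assuming instances are distributed evenly across partitions.
 */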
#define XCP_INST_MASK(num_inst, xcp_id) \
	(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)

#define AMDGPU_XCP_OPS_KFD	(1 << 0)

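/* Set up the LAYOUT1 doorbell index map: KIQ, MEC ring, user queues,
 * per-XCC doorbell ranges, SDMA engines, IH and VCN. SDMA engine bases
 * are spaced half of sdma_doorbell_range apart.
 */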
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
	int i;

	adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;

	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;

	adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
	adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;

	adev->doorbell_index.sdma_doorbell_range = 20;
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->doorbell_index.sdma_engine[i] =
			AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
			i * (adev->doorbell_index.sdma_doorbell_range >> 1);

	adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;

	adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}

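/* VCN instances are shared between partitions when there are more
 * partitions than VCN instances.
 */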
static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
{
	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
}

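/* Assign a ring to the partition owning its HW instance: derive the XCP
 * IP block from the ring type, then pick the first partition whose
 * instance mask contains this instance.
 */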
static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
				     uint32_t inst_idx, struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		if (aqua_vanjaram_xcp_vcn_shared(adev))
			inst_mask = 1 << (inst_idx * 2);
		break;
	default:
		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			break;
		}
	}
}

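/* Append the ring's scheduler to the chosen partition's list for this
 * ring type and priority.
 */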
static void aqua_vanjaram_xcp_gpu_sched_update(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring,
					       unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
			.sched[(*num_gpu_sched)++] = &ring->sched;
	DRM_DEBUG("%s: [%d] gpu_sched[%d][%d] = %d", ring->name,
		  sel_xcp_id, ring->funcs->type,
		  ring->hw_prio, *num_gpu_sched);
}

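/* Rebuild all partitions' scheduler lists from the rings that are
 * currently ready to be scheduled.
 */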
static int aqua_vanjaram_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX mode in
		 * certain configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    aqua_vanjaram_xcp_vcn_shared(adev))
			aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

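/* Assign every ring to a partition (compute/KIQ rings by XCC id, others
 * by ME instance), then rebuild the per-partition scheduler lists.
 */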
static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
		else
			aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
	}

	return aqua_vanjaram_xcp_sched_list_update(adev);
}

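/* Select the scheduler list for a context. A process that has no
 * partition assigned yet is placed on the least referenced partition.
 */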
static int aqua_vanjaram_select_scheds(struct amdgpu_device *adev,
				       u32 hw_ip, u32 hw_prio,
				       struct amdgpu_fpriv *fpriv,
				       unsigned int *num_scheds,
				       struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds = adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		DRM_DEBUG("Selected partition #%d", sel_xcp_id);
	} else {
		DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

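/* Translate a logical instance number into the physical (device)
 * instance for IPs that are renumbered per configuration; all other IPs
 * map one-to-one.
 */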
static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
						enum amd_hw_ip_block_type block,
						int8_t inst)
{
	int8_t dev_inst;

	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	/* Covers both JPEG and VCN, as JPEG is only an alias of VCN */
	case VCN_HWIP:
		dev_inst = adev->ip_map.dev_inst[block][inst];
		break;
	default:
		/* For the rest of the IPs, no lookup is required.
		 * Assume 'logical instance == physical instance' for all configs.
		 */
		dev_inst = inst;
		break;
	}

	return dev_inst;
}

static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
						  enum amd_hw_ip_block_type block,
						  uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
		dev_mask |= (1 << dev_inst);
		mask &= ~(1 << log_inst);
	}

	return dev_mask;
}

static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
					  enum amd_hw_ip_block_type ip_block,
					  uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1 << i);
	}
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}

void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
{
	u32 ip_map[][2] = {
		{ GC_HWIP, adev->gfx.xcc_mask },
		{ SDMA0_HWIP, adev->sdma.sdma_mask },
		{ VCN_HWIP, adev->vcn.inst_mask },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
		aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);

	adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
	adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
}

/* Fixed pattern for SMN addressing on different AIDs:
 * bit[34]: indicates cross-AID access
 * bit[33:32]: indicates the target AID id
 * The AID id range is 0 ~ 3, as the maximum AID number is 4.
 */
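/* For example, ext_id 2 encodes to (1ULL << 34) | (2ULL << 32), i.e. 0x600000000. */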
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
{
	u64 ext_offset;

	/* local routing, so bit[34:32] will be zeros */
	if (ext_id == 0)
		return 0;

	/* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
	ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);

	return ext_offset;
}

static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
	struct amdgpu_device *adev = xcp_mgr->adev;

	if (adev->nbio.funcs->get_compute_partition_mode)
		mode = adev->nbio.funcs->get_compute_partition_mode(adev);

	return mode;
}

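/* Number of XCCs grouped into each partition for a given mode: all of
 * them under SPX, down to one each under CPX.
 */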
static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	int num_xcc, num_xcc_per_xcp = 0;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_xcc_per_xcp = num_xcc / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_xcc_per_xcp = 1;
		break;
	}

	return num_xcc_per_xcp;
}

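/* Fill in the instance mask and IP callbacks that partition 'xcp_id'
 * owns for the given XCP IP block under the current partition mode.
 */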
static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					   enum AMDGPU_XCP_IP_BLOCK ip_id,
					   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
	int num_sdma, num_vcn;

	num_sdma = adev->sdma.num_instances;
	num_vcn = adev->vcn.num_vcn_inst;

	switch (xcp_mgr->mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma;
		num_vcn_xcp = num_vcn;
		break;
	case AMDGPU_DPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 2;
		num_vcn_xcp = num_vcn / 2;
		break;
	case AMDGPU_TPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 3;
		num_vcn_xcp = num_vcn / 3;
		break;
	case AMDGPU_QPX_PARTITION_MODE:
		num_sdma_xcp = num_sdma / 4;
		num_vcn_xcp = num_vcn / 4;
		break;
	case AMDGPU_CPX_PARTITION_MODE:
		num_sdma_xcp = 2;
		num_vcn_xcp = num_vcn ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}

	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;

	switch (ip_id) {
	case AMDGPU_XCP_GFXHUB:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
		break;
	case AMDGPU_XCP_GFX:
		ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
		ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
		break;
	case AMDGPU_XCP_SDMA:
		ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
		ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
		break;
	case AMDGPU_XCP_VCN:
		ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
		/* TODO : Assign IP funcs */
		break;
	default:
		return -EINVAL;
	}

	ip->ip_id = ip_id;

	return 0;
}

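/* Pick a compute partition mode matching the current number of memory
 * partitions and XCCs.
 */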
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc;

	num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);

	if (adev->gmc.num_mem_partitions == 1)
		return AMDGPU_SPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc)
		return AMDGPU_CPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == num_xcc / 2)
		return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
						    AMDGPU_QPX_PARTITION_MODE;

	if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
		return AMDGPU_DPX_PARTITION_MODE;

	return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
}

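/* A compute partition mode is valid only if it divides the XCCs evenly
 * and is compatible with the current memory partitioning.
 */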
static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					  enum amdgpu_gfx_partition mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	int num_xcc, num_xccs_per_xcp;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (mode) {
	case AMDGPU_SPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
	case AMDGPU_DPX_PARTITION_MODE:
		return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
	case AMDGPU_TPX_PARTITION_MODE:
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 3) &&
		       ((num_xcc % 3) == 0);
	case AMDGPU_QPX_PARTITION_MODE:
		num_xccs_per_xcp = num_xcc / 4;
		return (adev->gmc.num_mem_partitions == 1 ||
			adev->gmc.num_mem_partitions == 4) &&
		       (num_xccs_per_xcp >= 2);
	case AMDGPU_CPX_PARTITION_MODE:
		return ((num_xcc > 1) &&
			(adev->gmc.num_mem_partitions == 1 ||
			 adev->gmc.num_mem_partitions == 4) &&
			(num_xcc % adev->gmc.num_mem_partitions) == 0);
	default:
		return false;
	}

	return false;
}

static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

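/* Switch the compute partition mode: validate (or auto-select) the mode,
 * tear down KFD if it is initialized, reprogram the GFX partition
 * registers, reinitialize XCP bookkeeping and bring KFD back up.
 */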
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					       int mode, int *num_xcps)
{
	int num_xcc_per_xcp, num_xcc, ret;
	struct amdgpu_device *adev;
	u32 flags = 0;

	adev = xcp_mgr->adev;
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
		mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
			dev_err(adev->dev,
				"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
				adev->gmc.num_mem_partitions);
			return -EINVAL;
		}
	} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
		dev_err(adev->dev,
			"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
			amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
		return -EINVAL;
	}

	if (adev->kfd.init_complete)
		flags |= AMDGPU_XCP_OPS_KFD;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
		if (ret)
			goto out;
	}

	ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
	if (ret)
		goto unlock;

	num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
	if (adev->gfx.funcs->switch_partition_mode)
		adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
						       num_xcc_per_xcp);

	/* Init info about new xcps */
	*num_xcps = num_xcc / num_xcc_per_xcp;
	amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);

	ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
unlock:
	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_unlock_kfd(adev);
out:
	return ret;
}

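/* Derive the memory partition backing an XCC from the spatial layout;
 * consecutive XCCs map onto consecutive memory partitions.
 */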
static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
					  int xcc_id, uint8_t *mem_id)
{
	/* memory/spatial modes validation check is already done */
	*mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
	*mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;

	return 0;
}

static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
					struct amdgpu_xcp *xcp, uint8_t *mem_id)
{
	struct amdgpu_numa_info numa_info;
	struct amdgpu_device *adev;
	uint32_t xcc_mask;
	int r, i, xcc_id;

	adev = xcp_mgr->adev;
	/* TODO: BIOS is not returning the right info now
	 * Check on this later
	 */
	/*
	if (adev->gmc.gmc_funcs->query_mem_partition_mode)
		mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
	*/
	if (adev->gmc.num_mem_partitions == 1) {
		/* Only one range */
		*mem_id = 0;
		return 0;
	}

	r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
	if (r || !xcc_mask)
		return -EINVAL;

	xcc_id = ffs(xcc_mask) - 1;
	if (!adev->gmc.is_app_apu)
		return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);

	r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);

	if (r)
		return r;

	r = -EINVAL;
	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
		if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
			*mem_id = i;
			r = 0;
			break;
		}
	}

	return r;
}

static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
					    enum AMDGPU_XCP_IP_BLOCK ip_id,
					    struct amdgpu_xcp_ip *ip)
{
	if (!ip)
		return -EINVAL;

	return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
}

struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
	.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
	.query_partition_mode = &aqua_vanjaram_query_partition_mode,
	.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
	.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
	.select_scheds = &aqua_vanjaram_select_scheds,
	.update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
};

static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
				  &aqua_vanjaram_xcp_funcs);
	if (ret)
		return ret;

	/* TODO: Default memory node affinity init */

	return ret;
}

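/* Derive SDMA/VCN/JPEG instance counts and the AID mask from the
 * enabled-instance masks, then set up the partition manager and the
 * logical-to-physical IP map.
 */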
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
{
	u32 mask, inst_mask = adev->sdma.sdma_mask;
	int ret, i;

	/* generally 1 AID supports 4 SDMA instances */
	adev->sdma.num_inst_per_aid = 4;
	adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);

	adev->aid_mask = i = 1;
	inst_mask >>= adev->sdma.num_inst_per_aid;

	for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
	     inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
		if ((inst_mask & mask) == mask)
			adev->aid_mask |= (1 << i);
	}

	/* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
	 * addressed based on logical instance ids.
	 */
	adev->vcn.harvest_config = 0;
	adev->vcn.num_inst_per_aid = 1;
	adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
	adev->jpeg.harvest_config = 0;
	adev->jpeg.num_inst_per_aid = 1;
	adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);

	ret = aqua_vanjaram_xcp_mgr_init(adev);
	if (ret)
		return ret;

	aqua_vanjaram_ip_map_init(adev);

	return 0;
}