/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef AMDGPU_XCP_H
#define AMDGPU_XCP_H

#include <linux/pci.h>
#include <linux/xarray.h>

#include "amdgpu_ctx.h"

#define MAX_XCP 8

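/*
 * Partition mode sentinels: AMDGPU_XCP_MODE_NONE means no partition mode is
 * currently configured, AMDGPU_XCP_MODE_TRANS marks a mode switch in
 * progress.  The AMDGPU_XCP_FL_* values are flags for
 * amdgpu_xcp_query_partition_mode(); AMDGPU_XCP_FL_LOCKED indicates that the
 * caller already holds the partition manager lock.
 */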
#define AMDGPU_XCP_MODE_NONE -1
#define AMDGPU_XCP_MODE_TRANS -2

#define AMDGPU_XCP_FL_NONE 0
#define AMDGPU_XCP_FL_LOCKED (1 << 0)

#define AMDGPU_XCP_NO_PARTITION (~0)

struct amdgpu_fpriv;

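/* IP blocks whose instances can be assigned to a compute partition (XCP). */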
enum AMDGPU_XCP_IP_BLOCK {
	AMDGPU_XCP_GFXHUB,
	AMDGPU_XCP_GFX,
	AMDGPU_XCP_SDMA,
	AMDGPU_XCP_VCN,
	AMDGPU_XCP_MAX_BLOCKS
};

enum AMDGPU_XCP_STATE {
	AMDGPU_XCP_PREPARE_SUSPEND,
	AMDGPU_XCP_SUSPEND,
	AMDGPU_XCP_PREPARE_RESUME,
	AMDGPU_XCP_RESUME,
};

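/*
 * Per-IP hooks invoked when a partition is suspended/resumed; each callback
 * receives the instance mask of that IP within the partition.  A minimal
 * sketch of how an IP block might provide these (the foo_* names below are
 * illustrative only, not part of this header):
 *
 *	static int foo_xcp_suspend(void *handle, uint32_t inst_mask)
 *	{
 *		// quiesce only the instances set in inst_mask
 *		return 0;
 *	}
 *
 *	static struct amdgpu_xcp_ip_funcs foo_xcp_funcs = {
 *		.suspend = foo_xcp_suspend,
 *		// .prepare_suspend / .prepare_resume / .resume as needed
 *	};
 */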
struct amdgpu_xcp_ip_funcs {
	int (*prepare_suspend)(void *handle, uint32_t inst_mask);
	int (*suspend)(void *handle, uint32_t inst_mask);
	int (*prepare_resume)(void *handle, uint32_t inst_mask);
	int (*resume)(void *handle, uint32_t inst_mask);
};

struct amdgpu_xcp_ip {
	struct amdgpu_xcp_ip_funcs *ip_funcs;
	uint32_t inst_mask;

	enum AMDGPU_XCP_IP_BLOCK ip_id;
	bool valid;
};

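/*
 * One accelerated compute partition (XCP): the IP instances assigned to it,
 * its memory partition id, a reference count, and the per-partition DRM
 * device/scheduler state exposed to user space.
 */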
struct amdgpu_xcp {
	struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS];

	uint8_t id;
	uint8_t mem_id;
	bool valid;
	atomic_t ref_cnt;
	struct drm_device *ddev;
	struct drm_device *rdev;
	struct drm_device *pdev;
	struct drm_driver *driver;
	struct drm_vma_offset_manager *vma_offset_manager;
	struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
};

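/*
 * Per-device partition manager: tracks the current partition mode and the
 * array of up to MAX_XCP partitions, protected by xcp_lock.
 */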
struct amdgpu_xcp_mgr {
	struct amdgpu_device *adev;
	struct rwlock xcp_lock;
	struct amdgpu_xcp_mgr_funcs *funcs;

	struct amdgpu_xcp xcp[MAX_XCP];
	uint8_t num_xcps;
	int8_t mode;

	/* Used to determine KFD memory size limits per XCP */
	unsigned int num_xcp_per_mem_partition;
};

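/*
 * ASIC-specific backend for the partition manager.  The core XCP code calls
 * these to query or switch the partition mode, to discover which IP
 * instances belong to each partition, and to pick GPU schedulers for a
 * client's context.
 */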
struct amdgpu_xcp_mgr_funcs {
	int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode,
				     int *num_xcps);
	int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr);
	int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
			      enum AMDGPU_XCP_IP_BLOCK ip_id,
			      struct amdgpu_xcp_ip *ip);
	int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
			      struct amdgpu_xcp *xcp, uint8_t *mem_id);

	int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
	int (*select_scheds)(struct amdgpu_device *adev,
			     u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
			     unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
	int (*update_partition_sched_list)(struct amdgpu_device *adev);
};

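/* Suspend/resume the IP instances belonging to one partition (xcp_id). */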
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);

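/*
 * Partition manager setup and mode control.  A rough sketch of how an ASIC
 * init path might use these (error handling trimmed; my_xcp_funcs and
 * desired_mode are illustrative names, not defined in this header):
 *
 *	r = amdgpu_xcp_mgr_init(adev, AMDGPU_XCP_MODE_NONE, 1, &my_xcp_funcs);
 *	if (!r)
 *		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
 *						     desired_mode);
 */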
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs);
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode);
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode);
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance);

int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask);

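/*
 * Per-partition DRM device plumbing: registration/unplug of the partition
 * device nodes and routing of a DRM client (fpriv) to its partition.
 */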
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent);
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev);
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity);

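/*
 * Wrappers that dispatch to the xcp_mgr backend when one is registered;
 * amdgpu_xcp_select_scheds() evaluates to -ENOENT when no select_scheds hook
 * is present.  Sketch of a call site (local variable names are illustrative):
 *
 *	r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
 *				     &num_scheds, &scheds);
 *	if (r)
 *		return r;
 */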
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
	((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
	(adev)->xcp_mgr->funcs->select_scheds ? \
	(adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
#define amdgpu_xcp_update_partition_sched_list(adev) \
	((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
	(adev)->xcp_mgr->funcs->update_partition_sched_list ? \
	(adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)

static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr)
		return 1;
	else
		return xcp_mgr->num_xcps;
}

static inline struct amdgpu_xcp *
amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
{
	if (!xcp_mgr)
		return NULL;

	while (*from < MAX_XCP) {
		if (xcp_mgr->xcp[*from].valid)
			return &xcp_mgr->xcp[*from];
		++(*from);
	}

	return NULL;
}

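/*
 * Iterate over every valid partition managed by xcp_mgr.  Minimal usage
 * sketch (assumes an adev whose xcp_mgr has been initialized):
 *
 *	struct amdgpu_xcp *xcp;
 *	int i;
 *
 *	for_each_xcp(adev->xcp_mgr, xcp, i)
 *		dev_info(adev->dev, "xcp %u uses mem partition %u\n",
 *			 xcp->id, xcp->mem_id);
 */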
#define for_each_xcp(xcp_mgr, xcp, i)                            \
	for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
	     ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))

#endif