xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/amdgpu_xgmi.c (revision 5a38ef86)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static DRM_LIST_HEAD(xgmi_hive_list);

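/*
 * Per-link PCS error-status registers.  Judging by the offsets below,
 * consecutive links appear to be spaced 0x100000 apart in SMN address
 * space; each array entry names the status register of one link.
 */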
static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
 *
 * The XGMI memory space is built by stacking each node's VRAM, padded
 * to a power of two, contiguously after the previous node's segment.
 *
 */
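
/*
 * As an illustration only (card and node numbers are hypothetical), the
 * hive topology described above can be inspected from userspace on Linux
 * like so:
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_device_id
 *   <64-bit node ID>
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   <64-bit hive ID>
 *   $ ls /sys/class/drm/card0/device/xgmi_hive_info/
 *   node1  node2  node3  node4  xgmi_hive_id
 *
 * Note that the sysfs attributes are not wired up in this port yet (see
 * the "notyet" blocks and the stubbed sysfs helpers below).
 */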

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
#ifdef notyet
	.mode = S_IRUGO
#endif
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

#ifdef notyet
static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};
#endif

struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
#ifdef notyet
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_attrs = amdgpu_xgmi_hive_attrs,
#endif
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
}

#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
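/*
 * FICAA ("Fabric Indirect Config Access Address") is the data fabric's
 * indirect register access mechanism, driven here through the df funcs
 * get_fica()/set_fica().  The 0x456801 constant ORed in above carries the
 * DF access-control bits; their exact meaning is fabric-internal, so treat
 * this description as informational only.
 */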
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
}

static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	snprintf(node, sizeof(node), "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	/* remove the link by the name it was created with above */
	sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
#endif
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
#ifdef __linux__
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	snprintf(node, sizeof(node), "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);
#endif
}

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	STUB();
	return NULL;
#ifdef notyet

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	if (!list_empty(&xgmi_hive_list)) {
		list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
			if (hive->hive_id == adev->gmc.xgmi.hive_id)
				goto pro_end;
		}
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		hive = NULL;
		goto pro_end;
	}

	/* initialize a new hive if one does not exist yet */
	ret = kobject_init_and_add(&hive->kobj,
			&amdgpu_xgmi_hive_type,
			&adev->dev->kobj,
			"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		/*
		 * kobject_put() ends up in amdgpu_xgmi_hive_release(), which
		 * already frees the hive; a second kfree() here would be a
		 * double free.
		 */
		kobject_put(&hive->kobj);
		hive = NULL;
		goto pro_end;
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	rw_init(&hive->hive_lock, "aghive");
	atomic_set(&hive->in_reset, 0);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	/*
	 * The hive pstate on boot is high in vega20, so we have to go to
	 * low pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
#endif
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	/*
	 * amdgpu_get_xgmi_hive() may return NULL; check before the hive
	 * dereferences below (amdgpu_put_xgmi_hive() is NULL-safe).
	 */
	if (!hive || adev->asic_type != CHIP_VEGA20) {
		amdgpu_put_xgmi_hive(hive);
		return 0;
	}

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;

	amdgpu_put_xgmi_hive(hive);
	/* fw bug: pstate switching is temporarily disabled */
	return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the
	 * hive to go high, but all peers must request pstate low for the
	 * hive to go low.
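	 *
	 * For example, in a four-GPU hive a single high request raises the
	 * hive pstate, and it only drops back to low once hi_req_count has
	 * returned to zero.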
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each PSP needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

/*
 * NOTE: psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
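 *
 * For example, a (hypothetical) value of 0x42 decodes as link type 1
 * (xGMI3) in bits [7:6] and 2 hops in bits [2:0].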
 */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return -EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi	*entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get the latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* TODO: continue with some nodes failed, or disable the whole hive */
				goto exit_unlock;
			}
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from the global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		/* drop the reference held on behalf of the hive list */
		amdgpu_put_xgmi_hive(hive);
	}

	return psp_xgmi_terminate(&adev->psp);
}

int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_xgmi_reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strlcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl", sizeof(adev->gmc.xgmi.ras_if->name));
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}
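
/*
 * Illustrative only: with a hypothetical node_segment_size of 64 GiB
 * (0x1000000000) and physical_node_id 2, an address of 0x1000 maps to
 * 0x1000 + 2 * 0x1000000000 = 0x2000001000 in the hive-wide space.
 */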

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	/* write all ones, then zero, to clear the error status bits */
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status; only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status; only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	amdgpu_xgmi_reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}
824