1 /*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "amdgpu_ras.h"
24 #include "amdgpu.h"
25 #include "amdgpu_mca.h"
26
27 #include "umc/umc_6_7_0_offset.h"
28 #include "umc/umc_6_7_0_sh_mask.h"
29
amdgpu_mca_query_correctable_error_count(struct amdgpu_device * adev,uint64_t mc_status_addr,unsigned long * error_count)30 void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
31 uint64_t mc_status_addr,
32 unsigned long *error_count)
33 {
34 uint64_t mc_status = RREG64_PCIE(mc_status_addr);
35
36 if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
37 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
38 *error_count += 1;
39 }
40
amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device * adev,uint64_t mc_status_addr,unsigned long * error_count)41 void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
42 uint64_t mc_status_addr,
43 unsigned long *error_count)
44 {
45 uint64_t mc_status = RREG64_PCIE(mc_status_addr);
46
47 if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
48 (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
49 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
50 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
51 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
52 REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
53 *error_count += 1;
54 }
55
/**
 * amdgpu_mca_reset_error_count - clear a latched MCA status entry
 * @adev: amdgpu device handle
 * @mc_status_addr: PCIe-indirect address of the MCA UMC status register
 *
 * Writing zero to MCUMC_STATUS rearms the entry for the next error.
 */
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0ULL);
}
61
amdgpu_mca_query_ras_error_count(struct amdgpu_device * adev,uint64_t mc_status_addr,void * ras_error_status)62 void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
63 uint64_t mc_status_addr,
64 void *ras_error_status)
65 {
66 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
67
68 amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
69 amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
70
71 amdgpu_mca_reset_error_count(adev, mc_status_addr);
72 }
73
amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device * adev)74 int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
75 {
76 int err;
77 struct amdgpu_mca_ras_block *ras;
78
79 if (!adev->mca.mp0.ras)
80 return 0;
81
82 ras = adev->mca.mp0.ras;
83
84 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
85 if (err) {
86 dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
87 return err;
88 }
89
90 strlcpy(ras->ras_block.ras_comm.name, "mca.mp0",
91 sizeof(ras->ras_block.ras_comm.name));
92 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
93 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
94 adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
95
96 return 0;
97 }
98
amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device * adev)99 int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
100 {
101 int err;
102 struct amdgpu_mca_ras_block *ras;
103
104 if (!adev->mca.mp1.ras)
105 return 0;
106
107 ras = adev->mca.mp1.ras;
108
109 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
110 if (err) {
111 dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
112 return err;
113 }
114
115 strlcpy(ras->ras_block.ras_comm.name, "mca.mp1",
116 sizeof(ras->ras_block.ras_comm.name));
117 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
118 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
119 adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
120
121 return 0;
122 }
123
amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device * adev)124 int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
125 {
126 int err;
127 struct amdgpu_mca_ras_block *ras;
128
129 if (!adev->mca.mpio.ras)
130 return 0;
131
132 ras = adev->mca.mpio.ras;
133
134 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
135 if (err) {
136 dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
137 return err;
138 }
139
140 strlcpy(ras->ras_block.ras_comm.name, "mca.mpio",
141 sizeof(ras->ras_block.ras_comm.name));
142 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
143 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
144 adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
145
146 return 0;
147 }
148