/* $NetBSD: amdgpu_sdma.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sdma.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"

#define AMDGPU_CSA_SDMA_SIZE 64
/* The SDMA CSA resides in the 3rd page of the CSA. */
#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)

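/*
 * Layout implied by the constants above: each SDMA instance gets a
 * 64-byte save area at AMDGPU_CSA_SDMA_OFFSET + index *
 * AMDGPU_CSA_SDMA_SIZE, so the 32 instance indices accepted by
 * amdgpu_sdma_get_csa_mc_addr() together cover at most 2 KiB of the
 * third 4 KiB CSA page.
 */
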
/*
 * GPU SDMA IP block helper functions.
 */

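/*
 * Look up the SDMA instance that owns @ring.  A ring belongs to an
 * instance if it is either that instance's main ring or its page ring.
 * Returns NULL if @ring is not an SDMA ring.
 */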
struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page)
			return &adev->sdma.instance[i];

	return NULL;
}

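/*
 * Like amdgpu_sdma_get_instance_from_ring(), but store the instance's
 * index in @index instead of returning a pointer.  Returns 0 on
 * success, -EINVAL if @ring is not an SDMA ring.
 */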
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring ||
		    ring == &adev->sdma.instance[i].page) {
			*index = i;
			return 0;
		}
	}

	return -EINVAL;
}

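/*
 * Compute the MC address of the SDMA context save area (CSA) used to
 * preempt @ring under @vmid.  Returns 0 when no CSA applies: VMID 0,
 * MCBP disabled, or an instance index outside the 32 CSA slots.
 */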
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
				     unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t csa_mc_addr;
	uint32_t index = 0;
	int r;

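	/*
	 * The CSA is only used for mid-command-buffer preemption
	 * (amdgpu_mcbp), and kernel submissions on VMID 0 are never
	 * preempted, so there is no CSA address to report in either
	 * case.
	 */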
	if (vmid == 0 || !amdgpu_mcbp)
		return 0;

	r = amdgpu_sdma_get_index_from_ring(ring, &index);

	if (r || index > 31)
		csa_mc_addr = 0;
	else
		csa_mc_addr = amdgpu_csa_vaddr(adev) +
			AMDGPU_CSA_SDMA_OFFSET +
			index * AMDGPU_CSA_SDMA_SIZE;

	return csa_mc_addr;
}

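/*
 * Late RAS initialization for the SDMA block: allocate and register the
 * "sdma" RAS node (with "sdma_err_count" in sysfs and "sdma_err_inject"
 * in debugfs) and, if RAS is supported on this block, enable the ECC
 * interrupt on every SDMA instance.  If RAS is unsupported or anything
 * fails, the partially initialized state is released again.
 */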
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
			      void *ras_ih_info)
{
	int r, i;
	struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
	struct ras_fs_if fs_info = {
		.sysfs_name = "sdma_err_count",
		.debugfs_name = "sdma_err_inject",
	};

	if (!ih_info)
		return -EINVAL;

	if (!adev->sdma.ras_if) {
		adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->sdma.ras_if)
			return -ENOMEM;
		adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
		adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->sdma.ras_if->sub_block_index = 0;
		strcpy(adev->sdma.ras_if->name, "sdma");
	}
	fs_info.head = ih_info->head = *adev->sdma.ras_if;

	r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
				 &fs_info, ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				goto late_fini;
		}
	} else {
		r = 0;
		goto free;
	}

	return 0;

late_fini:
	amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
free:
	kfree(adev->sdma.ras_if);
	adev->sdma.ras_if = NULL;
	return r;
}

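/*
 * Tear down the SDMA RAS state set up by amdgpu_sdma_ras_late_init():
 * unregister the RAS node and free adev->sdma.ras_if.
 */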
void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
			adev->sdma.ras_if) {
		struct ras_common_if *ras_if = adev->sdma.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			/* the cb member won't be used by
			 * amdgpu_ras_interrupt_remove_handler; initialize
			 * it only to satisfy the check in ras_late_fini
			 */
			.cb = amdgpu_sdma_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

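/*
 * RAS error-data callback for SDMA: an uncorrectable SDMA error cannot
 * be recovered in place, so flag the SRAM ECC condition to KFD and
 * schedule a GPU reset.
 */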
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev);

	return AMDGPU_RAS_SUCCESS;
}

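/*
 * ECC interrupt handler for SDMA: forward the IV entry to the RAS
 * interrupt dispatcher, which invokes the registered callback
 * (amdgpu_sdma_process_ras_data_cb).  A NULL ras_if means RAS was never
 * initialized for SDMA, so the interrupt is ignored.
 */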
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->sdma.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}