1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/bsearch.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include "kfd_priv.h"
28 #include "kfd_device_queue_manager.h"
29 #include "kfd_pm4_headers_vi.h"
30 #include "kfd_pm4_headers_aldebaran.h"
31 #include "cwsr_trap_handler.h"
32 #include "amdgpu_amdkfd.h"
33 #include "kfd_smi_events.h"
34 #include "kfd_svm.h"
35 #include "kfd_migrate.h"
36 #include "amdgpu.h"
37 #include "amdgpu_xcp.h"
38
39 #define MQD_SIZE_ALIGNED 768
40
41 /*
42 * kfd_locked is used to lock the kfd driver during suspend or reset.
43 * Once locked, the kfd driver will stop any further GPU execution.
44 * Create process (open) will return -EAGAIN.
45 */
46 static int kfd_locked;
47
48 #ifdef CONFIG_DRM_AMDGPU_CIK
49 extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
50 #endif
51 extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
52 extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
53 extern const struct kfd2kgd_calls arcturus_kfd2kgd;
54 extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
55 extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
56 extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
57 extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
58 extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
59
60 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
61 unsigned int chunk_size);
62 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
63
64 static int kfd_resume(struct kfd_node *kfd);
65
66 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
67 {
68 uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);
69
70 switch (sdma_version) {
71 case IP_VERSION(4, 0, 0):/* VEGA10 */
72 case IP_VERSION(4, 0, 1):/* VEGA12 */
73 case IP_VERSION(4, 1, 0):/* RAVEN */
74 case IP_VERSION(4, 1, 1):/* RAVEN */
75 case IP_VERSION(4, 1, 2):/* RENOIR */
76 case IP_VERSION(5, 2, 1):/* VANGOGH */
77 case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
78 case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
79 case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
80 kfd->device_info.num_sdma_queues_per_engine = 2;
81 break;
82 case IP_VERSION(4, 2, 0):/* VEGA20 */
83 case IP_VERSION(4, 2, 2):/* ARCTURUS */
84 case IP_VERSION(4, 4, 0):/* ALDEBARAN */
85 case IP_VERSION(4, 4, 2):
86 case IP_VERSION(5, 0, 0):/* NAVI10 */
87 case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
88 case IP_VERSION(5, 0, 2):/* NAVI14 */
89 case IP_VERSION(5, 0, 5):/* NAVI12 */
90 case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
91 case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
92 case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
93 case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
94 case IP_VERSION(6, 0, 0):
95 case IP_VERSION(6, 0, 1):
96 case IP_VERSION(6, 0, 2):
97 case IP_VERSION(6, 0, 3):
98 case IP_VERSION(6, 1, 0):
99 case IP_VERSION(6, 1, 1):
100 kfd->device_info.num_sdma_queues_per_engine = 8;
101 break;
102 default:
103 dev_warn(kfd_device,
104 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
105 sdma_version);
106 kfd->device_info.num_sdma_queues_per_engine = 8;
107 }
108
109 bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
110
111 switch (sdma_version) {
112 case IP_VERSION(6, 0, 0):
113 case IP_VERSION(6, 0, 1):
114 case IP_VERSION(6, 0, 2):
115 case IP_VERSION(6, 0, 3):
116 case IP_VERSION(6, 1, 0):
117 case IP_VERSION(6, 1, 1):
118 /* Reserve 1 for paging and 1 for gfx */
119 kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
120 /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
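/*
 * Example (illustrative values): with 2 SDMA engines and 2 reserved
 * queues per engine, bitmap_set() below marks bits 0-3, i.e. queue 0
 * and queue 1 on both engines, following the interleaved layout above.
 */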
121 bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
122 kfd->adev->sdma.num_instances *
123 kfd->device_info.num_reserved_sdma_queues_per_engine);
124 break;
125 default:
126 break;
127 }
128 }
129
130 static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
131 {
132 uint32_t gc_version = KFD_GC_VERSION(kfd);
133
134 switch (gc_version) {
135 case IP_VERSION(9, 0, 1): /* VEGA10 */
136 case IP_VERSION(9, 1, 0): /* RAVEN */
137 case IP_VERSION(9, 2, 1): /* VEGA12 */
138 case IP_VERSION(9, 2, 2): /* RAVEN */
139 case IP_VERSION(9, 3, 0): /* RENOIR */
140 case IP_VERSION(9, 4, 0): /* VEGA20 */
141 case IP_VERSION(9, 4, 1): /* ARCTURUS */
142 case IP_VERSION(9, 4, 2): /* ALDEBARAN */
143 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
144 break;
145 case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
146 kfd->device_info.event_interrupt_class =
147 &event_interrupt_class_v9_4_3;
148 break;
149 case IP_VERSION(10, 3, 1): /* VANGOGH */
150 case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
151 case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
152 case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
153 case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
154 case IP_VERSION(10, 1, 4):
155 case IP_VERSION(10, 1, 10): /* NAVI10 */
156 case IP_VERSION(10, 1, 2): /* NAVI12 */
157 case IP_VERSION(10, 1, 1): /* NAVI14 */
158 case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
159 case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
160 case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
161 case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
162 kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
163 break;
164 case IP_VERSION(11, 0, 0):
165 case IP_VERSION(11, 0, 1):
166 case IP_VERSION(11, 0, 2):
167 case IP_VERSION(11, 0, 3):
168 case IP_VERSION(11, 0, 4):
169 case IP_VERSION(11, 5, 0):
170 case IP_VERSION(11, 5, 1):
171 kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
172 break;
173 default:
174 dev_warn(kfd_device, "v9 event interrupt handler is set due to "
175 "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
176 kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
177 }
178 }
179
180 static void kfd_device_info_init(struct kfd_dev *kfd,
181 bool vf, uint32_t gfx_target_version)
182 {
183 uint32_t gc_version = KFD_GC_VERSION(kfd);
184 uint32_t asic_type = kfd->adev->asic_type;
185
186 kfd->device_info.max_pasid_bits = 16;
187 kfd->device_info.max_no_of_hqd = 24;
188 kfd->device_info.num_of_watch_points = 4;
189 kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
190 kfd->device_info.gfx_target_version = gfx_target_version;
191
192 if (KFD_IS_SOC15(kfd)) {
193 kfd->device_info.doorbell_size = 8;
194 kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
195 kfd->device_info.supports_cwsr = true;
196
197 kfd_device_info_set_sdma_info(kfd);
198
199 kfd_device_info_set_event_interrupt_class(kfd);
200
201 if (gc_version < IP_VERSION(11, 0, 0)) {
202 /* Navi2x+, Navi1x+ */
203 if (gc_version == IP_VERSION(10, 3, 6))
204 kfd->device_info.no_atomic_fw_version = 14;
205 else if (gc_version == IP_VERSION(10, 3, 7))
206 kfd->device_info.no_atomic_fw_version = 3;
207 else if (gc_version >= IP_VERSION(10, 3, 0))
208 kfd->device_info.no_atomic_fw_version = 92;
209 else if (gc_version >= IP_VERSION(10, 1, 1))
210 kfd->device_info.no_atomic_fw_version = 145;
211
212 /* Navi1x+ */
213 if (gc_version >= IP_VERSION(10, 1, 1))
214 kfd->device_info.needs_pci_atomics = true;
215 } else if (gc_version < IP_VERSION(12, 0, 0)) {
216 /*
217 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
218 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
219 * PCIe atomics support.
220 */
221 kfd->device_info.needs_pci_atomics = true;
222 kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
223 }
224 } else {
225 kfd->device_info.doorbell_size = 4;
226 kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
227 kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
228 kfd->device_info.num_sdma_queues_per_engine = 2;
229
230 if (asic_type != CHIP_KAVERI &&
231 asic_type != CHIP_HAWAII &&
232 asic_type != CHIP_TONGA)
233 kfd->device_info.supports_cwsr = true;
234
235 if (asic_type != CHIP_HAWAII && !vf)
236 kfd->device_info.needs_pci_atomics = true;
237 }
238 }
239
240 struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
241 {
242 struct kfd_dev *kfd = NULL;
243 const struct kfd2kgd_calls *f2g = NULL;
244 uint32_t gfx_target_version = 0;
245
246 switch (adev->asic_type) {
247 #ifdef CONFIG_DRM_AMDGPU_CIK
248 case CHIP_KAVERI:
249 gfx_target_version = 70000;
250 if (!vf)
251 f2g = &gfx_v7_kfd2kgd;
252 break;
253 #endif
254 case CHIP_CARRIZO:
255 gfx_target_version = 80001;
256 if (!vf)
257 f2g = &gfx_v8_kfd2kgd;
258 break;
259 #ifdef CONFIG_DRM_AMDGPU_CIK
260 case CHIP_HAWAII:
261 gfx_target_version = 70001;
262 if (!amdgpu_exp_hw_support)
263 pr_info(
264 "KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
265 );
266 else if (!vf)
267 f2g = &gfx_v7_kfd2kgd;
268 break;
269 #endif
270 case CHIP_TONGA:
271 gfx_target_version = 80002;
272 if (!vf)
273 f2g = &gfx_v8_kfd2kgd;
274 break;
275 case CHIP_FIJI:
276 case CHIP_POLARIS10:
277 gfx_target_version = 80003;
278 f2g = &gfx_v8_kfd2kgd;
279 break;
280 case CHIP_POLARIS11:
281 case CHIP_POLARIS12:
282 case CHIP_VEGAM:
283 gfx_target_version = 80003;
284 if (!vf)
285 f2g = &gfx_v8_kfd2kgd;
286 break;
287 default:
288 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
289 /* Vega 10 */
290 case IP_VERSION(9, 0, 1):
291 gfx_target_version = 90000;
292 f2g = &gfx_v9_kfd2kgd;
293 break;
294 /* Raven */
295 case IP_VERSION(9, 1, 0):
296 case IP_VERSION(9, 2, 2):
297 gfx_target_version = 90002;
298 if (!vf)
299 f2g = &gfx_v9_kfd2kgd;
300 break;
301 /* Vega12 */
302 case IP_VERSION(9, 2, 1):
303 gfx_target_version = 90004;
304 if (!vf)
305 f2g = &gfx_v9_kfd2kgd;
306 break;
307 /* Renoir */
308 case IP_VERSION(9, 3, 0):
309 gfx_target_version = 90012;
310 if (!vf)
311 f2g = &gfx_v9_kfd2kgd;
312 break;
313 /* Vega20 */
314 case IP_VERSION(9, 4, 0):
315 gfx_target_version = 90006;
316 if (!vf)
317 f2g = &gfx_v9_kfd2kgd;
318 break;
319 /* Arcturus */
320 case IP_VERSION(9, 4, 1):
321 gfx_target_version = 90008;
322 f2g = &arcturus_kfd2kgd;
323 break;
324 /* Aldebaran */
325 case IP_VERSION(9, 4, 2):
326 gfx_target_version = 90010;
327 f2g = &aldebaran_kfd2kgd;
328 break;
329 case IP_VERSION(9, 4, 3):
330 gfx_target_version = adev->rev_id >= 1 ? 90402
331 : adev->flags & AMD_IS_APU ? 90400
332 : 90401;
333 f2g = &gc_9_4_3_kfd2kgd;
334 break;
335 /* Navi10 */
336 case IP_VERSION(10, 1, 10):
337 gfx_target_version = 100100;
338 if (!vf)
339 f2g = &gfx_v10_kfd2kgd;
340 break;
341 /* Navi12 */
342 case IP_VERSION(10, 1, 2):
343 gfx_target_version = 100101;
344 f2g = &gfx_v10_kfd2kgd;
345 break;
346 /* Navi14 */
347 case IP_VERSION(10, 1, 1):
348 gfx_target_version = 100102;
349 if (!vf)
350 f2g = &gfx_v10_kfd2kgd;
351 break;
352 /* Cyan Skillfish */
353 case IP_VERSION(10, 1, 3):
354 case IP_VERSION(10, 1, 4):
355 gfx_target_version = 100103;
356 if (!vf)
357 f2g = &gfx_v10_kfd2kgd;
358 break;
359 /* Sienna Cichlid */
360 case IP_VERSION(10, 3, 0):
361 gfx_target_version = 100300;
362 f2g = &gfx_v10_3_kfd2kgd;
363 break;
364 /* Navy Flounder */
365 case IP_VERSION(10, 3, 2):
366 gfx_target_version = 100301;
367 f2g = &gfx_v10_3_kfd2kgd;
368 break;
369 /* Van Gogh */
370 case IP_VERSION(10, 3, 1):
371 gfx_target_version = 100303;
372 if (!vf)
373 f2g = &gfx_v10_3_kfd2kgd;
374 break;
375 /* Dimgrey Cavefish */
376 case IP_VERSION(10, 3, 4):
377 gfx_target_version = 100302;
378 f2g = &gfx_v10_3_kfd2kgd;
379 break;
380 /* Beige Goby */
381 case IP_VERSION(10, 3, 5):
382 gfx_target_version = 100304;
383 f2g = &gfx_v10_3_kfd2kgd;
384 break;
385 /* Yellow Carp */
386 case IP_VERSION(10, 3, 3):
387 gfx_target_version = 100305;
388 if (!vf)
389 f2g = &gfx_v10_3_kfd2kgd;
390 break;
391 case IP_VERSION(10, 3, 6):
392 case IP_VERSION(10, 3, 7):
393 gfx_target_version = 100306;
394 if (!vf)
395 f2g = &gfx_v10_3_kfd2kgd;
396 break;
397 case IP_VERSION(11, 0, 0):
398 gfx_target_version = 110000;
399 f2g = &gfx_v11_kfd2kgd;
400 break;
401 case IP_VERSION(11, 0, 1):
402 case IP_VERSION(11, 0, 4):
403 gfx_target_version = 110003;
404 f2g = &gfx_v11_kfd2kgd;
405 break;
406 case IP_VERSION(11, 0, 2):
407 gfx_target_version = 110002;
408 f2g = &gfx_v11_kfd2kgd;
409 break;
410 case IP_VERSION(11, 0, 3):
411 /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
412 gfx_target_version = 110001;
413 f2g = &gfx_v11_kfd2kgd;
414 break;
415 case IP_VERSION(11, 5, 0):
416 gfx_target_version = 110500;
417 f2g = &gfx_v11_kfd2kgd;
418 break;
419 case IP_VERSION(11, 5, 1):
420 gfx_target_version = 110501;
421 f2g = &gfx_v11_kfd2kgd;
422 break;
423 default:
424 break;
425 }
426 break;
427 }
428
429 if (!f2g) {
430 if (amdgpu_ip_version(adev, GC_HWIP, 0))
431 dev_info(kfd_device,
432 "GC IP %06x %s not supported in kfd\n",
433 amdgpu_ip_version(adev, GC_HWIP, 0),
434 vf ? "VF" : "");
435 else
436 dev_info(kfd_device, "%s %s not supported in kfd\n",
437 amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
438 return NULL;
439 }
440
441 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
442 if (!kfd)
443 return NULL;
444
445 kfd->adev = adev;
446 kfd_device_info_init(kfd, vf, gfx_target_version);
447 kfd->init_complete = false;
448 kfd->kfd2kgd = f2g;
449 atomic_set(&kfd->compute_profile, 0);
450
451 mutex_init(&kfd->doorbell_mutex);
452
453 ida_init(&kfd->doorbell_ida);
454
455 return kfd;
456 }
457
458 static void kfd_cwsr_init(struct kfd_dev *kfd)
459 {
460 if (cwsr_enable && kfd->device_info.supports_cwsr) {
461 if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
462 BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex)
463 > KFD_CWSR_TMA_OFFSET);
464 kfd->cwsr_isa = cwsr_trap_gfx8_hex;
465 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
466 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
467 BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex)
468 > KFD_CWSR_TMA_OFFSET);
469 kfd->cwsr_isa = cwsr_trap_arcturus_hex;
470 kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
471 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
472 BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex)
473 > KFD_CWSR_TMA_OFFSET);
474 kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
475 kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
476 } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
477 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex)
478 > KFD_CWSR_TMA_OFFSET);
479 kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
480 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
481 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
482 BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
483 > KFD_CWSR_TMA_OFFSET);
484 kfd->cwsr_isa = cwsr_trap_gfx9_hex;
485 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
486 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
487 BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex)
488 > KFD_CWSR_TMA_OFFSET);
489 kfd->cwsr_isa = cwsr_trap_nv1x_hex;
490 kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
491 } else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
492 BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex)
493 > KFD_CWSR_TMA_OFFSET);
494 kfd->cwsr_isa = cwsr_trap_gfx10_hex;
495 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
496 } else {
497 /* The gfx11 cwsr trap handler must fit inside a single
498 page. */
499 BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
500 kfd->cwsr_isa = cwsr_trap_gfx11_hex;
501 kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
502 }
503
504 kfd->cwsr_enabled = true;
505 }
506 }
507
508 static int kfd_gws_init(struct kfd_node *node)
509 {
510 int ret = 0;
511 struct kfd_dev *kfd = node->kfd;
512 uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
513
514 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
515 return 0;
516
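/*
 * GWS can only be allocated when HWS is in use and either the
 * hws_gws_support module parameter forces it on or the MEC2/MES
 * firmware for this GC version is new enough, per the checks below.
 */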
517 if (hws_gws_support || (KFD_IS_SOC15(node) &&
518 ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
519 && kfd->mec2_fw_version >= 0x81b3) ||
520 (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
521 && kfd->mec2_fw_version >= 0x1b3) ||
522 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
523 && kfd->mec2_fw_version >= 0x30) ||
524 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
525 && kfd->mec2_fw_version >= 0x28) ||
526 (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
527 (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
528 && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
529 && kfd->mec2_fw_version >= 0x6b) ||
530 (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
531 && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
532 && mes_rev >= 68))))
533 ret = amdgpu_amdkfd_alloc_gws(node->adev,
534 node->adev->gds.gws_size, &node->gws);
535
536 return ret;
537 }
538
539 static void kfd_smi_init(struct kfd_node *dev)
540 {
541 INIT_LIST_HEAD(&dev->smi_clients);
542 spin_lock_init(&dev->smi_lock);
543 }
544
545 static int kfd_init_node(struct kfd_node *node)
546 {
547 int err = -1;
548
549 if (kfd_interrupt_init(node)) {
550 dev_err(kfd_device, "Error initializing interrupts\n");
551 goto kfd_interrupt_error;
552 }
553
554 node->dqm = device_queue_manager_init(node);
555 if (!node->dqm) {
556 dev_err(kfd_device, "Error initializing queue manager\n");
557 goto device_queue_manager_error;
558 }
559
560 if (kfd_gws_init(node)) {
561 dev_err(kfd_device, "Could not allocate %d gws\n",
562 node->adev->gds.gws_size);
563 goto gws_error;
564 }
565
566 if (kfd_resume(node))
567 goto kfd_resume_error;
568
569 if (kfd_topology_add_device(node)) {
570 dev_err(kfd_device, "Error adding device to topology\n");
571 goto kfd_topology_add_device_error;
572 }
573
574 kfd_smi_init(node);
575
576 return 0;
577
578 kfd_topology_add_device_error:
579 kfd_resume_error:
580 gws_error:
581 device_queue_manager_uninit(node->dqm);
582 device_queue_manager_error:
583 kfd_interrupt_exit(node);
584 kfd_interrupt_error:
585 if (node->gws)
586 amdgpu_amdkfd_free_gws(node->adev, node->gws);
587
588 /* Cleanup the node memory here */
589 kfree(node);
590 return err;
591 }
592
593 static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
594 {
595 struct kfd_node *knode;
596 unsigned int i;
597
598 for (i = 0; i < num_nodes; i++) {
599 knode = kfd->nodes[i];
600 device_queue_manager_uninit(knode->dqm);
601 kfd_interrupt_exit(knode);
602 kfd_topology_remove_device(knode);
603 if (knode->gws)
604 amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
605 kfree(knode);
606 kfd->nodes[i] = NULL;
607 }
608 }
609
610 static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
611 unsigned int kfd_node_idx)
612 {
613 struct amdgpu_device *adev = node->adev;
614 uint32_t xcc_mask = node->xcc_mask;
615 uint32_t xcc, mapped_xcc;
616 /*
617 * Interrupt bitmap is setup for processing interrupts from
618 * different XCDs and AIDs.
619 * Interrupt bitmap is defined as follows:
620 * 1. Bits 0-15 - correspond to the NodeId field.
621 * Each bit corresponds to NodeId number. For example, if
622 * a KFD node has interrupt bitmap set to 0x7, then this
623 * KFD node will process interrupts with NodeId = 0, 1 and 2
624 * in the IH cookie.
625 * 2. Bits 16-31 - unused.
626 *
627 * Please note that the kfd_node_idx argument passed to this
628 * function is not related to NodeId field received in the
629 * IH cookie.
630 *
631 * In CPX mode, a KFD node will process an interrupt if:
632 * - the Node Id matches the corresponding bit set in
633 * Bits 0-15.
634 * - AND VMID reported in the interrupt lies within the
635 * VMID range of the node.
636 */
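/*
 * Worked example (illustrative GET_INST mapping): if the node's XCCs
 * map to physical instances 0 and 1, the loop below ORs in
 * (3 << 0) and (5 << 0), giving interrupt_bitmap = 0x7, i.e. this
 * node accepts interrupts with NodeId 0, 1 and 2.
 */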
637 for_each_inst(xcc, xcc_mask) {
638 mapped_xcc = GET_INST(GC, xcc);
639 node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
640 }
641 dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
642 node->interrupt_bitmap);
643 }
644
645 bool kgd2kfd_device_init(struct kfd_dev *kfd,
646 const struct kgd2kfd_shared_resources *gpu_resources)
647 {
648 unsigned int size, map_process_packet_size, i;
649 struct kfd_node *node;
650 uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
651 unsigned int max_proc_per_quantum;
652 int partition_mode;
653 int xcp_idx;
654
655 kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
656 KGD_ENGINE_MEC1);
657 kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
658 KGD_ENGINE_MEC2);
659 kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
660 KGD_ENGINE_SDMA1);
661 kfd->shared_resources = *gpu_resources;
662
663 kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
664
665 if (kfd->num_nodes == 0) {
666 dev_err(kfd_device,
667 "KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
668 kfd->adev->gfx.num_xcc_per_xcp);
669 goto out;
670 }
671
672 /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
673 * 32 and 64-bit requests are possible and must be
674 * supported.
675 */
676 kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
677 if (!kfd->pci_atomic_requested &&
678 kfd->device_info.needs_pci_atomics &&
679 (!kfd->device_info.no_atomic_fw_version ||
680 kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
681 dev_info(kfd_device,
682 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
683 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
684 kfd->mec_fw_version,
685 kfd->device_info.no_atomic_fw_version);
686 return false;
687 }
688
689 first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
690 last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
691 vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
692
693 /* For GFX9.4.3, we need special handling for VMIDs depending on
694 * partition mode.
695 * In CPX mode, the VMID range needs to be shared between XCDs.
696 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
697 * divide them equally, we change starting VMID to 4 and not use
698 * VMID 3.
699 * If the VMID range changes for GFX9.4.3, then this code MUST be
700 * revisited.
701 */
702 if (kfd->adev->xcp_mgr) {
703 partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
704 AMDGPU_XCP_FL_LOCKED);
705 if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
706 kfd->num_nodes != 1) {
707 vmid_num_kfd /= 2;
708 first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
709 }
710 }
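/*
 * Example (values assumed for illustration): compute_vmid_bitmap 0xFFF8
 * yields VMIDs 3-15, so vmid_num_kfd = 13. In CPX mode this becomes
 * vmid_num_kfd = 6 and first_vmid_kfd = 15 + 1 - 12 = 4, so the
 * per-node split further below assigns VMIDs 4-9 and 10-15.
 */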
711
712 /* Verify module parameters regarding mapped process number */
713 if (hws_max_conc_proc >= 0)
714 max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
715 else
716 max_proc_per_quantum = vmid_num_kfd;
717
718 /* calculate max size of mqds needed for queues */
719 size = max_num_of_queues_per_device *
720 kfd->device_info.mqd_size_aligned;
721
722 /*
723 * calculate max size of runlist packet.
724 * There can be only 2 packets at once
725 */
726 map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
727 sizeof(struct pm4_mes_map_process_aldebaran) :
728 sizeof(struct pm4_mes_map_process);
729 size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
730 max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
731 + sizeof(struct pm4_mes_runlist)) * 2;
732
733 /* Add size of HIQ & DIQ */
734 size += KFD_KERNEL_QUEUE_SIZE * 2;
735
736 /* add another 512KB for all other allocations on gart (HPD, fences) */
737 size += 512 * 1024;
738
739 if (amdgpu_amdkfd_alloc_gtt_mem(
740 kfd->adev, size, &kfd->gtt_mem,
741 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
742 false)) {
743 dev_err(kfd_device, "Could not allocate %d bytes\n", size);
744 goto alloc_gtt_mem_failure;
745 }
746
747 dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
748
749 /* Initialize GTT sa with 512 byte chunk size */
750 if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
751 dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
752 goto kfd_gtt_sa_init_error;
753 }
754
755 if (kfd_doorbell_init(kfd)) {
756 dev_err(kfd_device,
757 "Error initializing doorbell aperture\n");
758 goto kfd_doorbell_error;
759 }
760
761 if (amdgpu_use_xgmi_p2p)
762 kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
763
764 /*
765 * For GFX9.4.3, the KFD abstracts all partitions within a socket as
766 * xGMI connected in the topology so assign a unique hive id per
767 * device based on the pci device location if device is in PCIe mode.
768 */
769 if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
770 kfd->hive_id = pci_dev_id(kfd->adev->pdev);
771
772 kfd->noretry = kfd->adev->gmc.noretry;
773
774 kfd_cwsr_init(kfd);
775
776 dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
777 kfd->num_nodes);
778
779 /* Allocate the KFD nodes */
780 for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
781 node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
782 if (!node)
783 goto node_alloc_error;
784
785 node->node_id = i;
786 node->adev = kfd->adev;
787 node->kfd = kfd;
788 node->kfd2kgd = kfd->kfd2kgd;
789 node->vm_info.vmid_num_kfd = vmid_num_kfd;
790 node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
791 /* TODO : Check if error handling is needed */
792 if (node->xcp) {
793 amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
794 &node->xcc_mask);
795 ++xcp_idx;
796 } else {
797 node->xcc_mask =
798 (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
799 }
800
801 if (node->xcp) {
802 dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
803 node->node_id, node->xcp->mem_id,
804 KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
805 }
806
807 if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
808 partition_mode == AMDGPU_CPX_PARTITION_MODE &&
809 kfd->num_nodes != 1) {
810 /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
811 * 4-9 and second XCD gets VMID range 10-15.
812 */
813
814 node->vm_info.first_vmid_kfd = (i%2 == 0) ?
815 first_vmid_kfd :
816 first_vmid_kfd+vmid_num_kfd;
817 node->vm_info.last_vmid_kfd = (i%2 == 0) ?
818 last_vmid_kfd-vmid_num_kfd :
819 last_vmid_kfd;
820 node->compute_vmid_bitmap =
821 ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
822 ((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
823 } else {
824 node->vm_info.first_vmid_kfd = first_vmid_kfd;
825 node->vm_info.last_vmid_kfd = last_vmid_kfd;
826 node->compute_vmid_bitmap =
827 gpu_resources->compute_vmid_bitmap;
828 }
829 node->max_proc_per_quantum = max_proc_per_quantum;
830 atomic_set(&node->sram_ecc_flag, 0);
831
832 amdgpu_amdkfd_get_local_mem_info(kfd->adev,
833 &node->local_mem_info, node->xcp);
834
835 if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3))
836 kfd_setup_interrupt_bitmap(node, i);
837
838 /* Initialize the KFD node */
839 if (kfd_init_node(node)) {
840 dev_err(kfd_device, "Error initializing KFD node\n");
841 goto node_init_error;
842 }
843 kfd->nodes[i] = node;
844 }
845
846 svm_range_set_max_pages(kfd->adev);
847
848 spin_lock_init(&kfd->watch_points_lock);
849
850 kfd->init_complete = true;
851 dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
852 kfd->adev->pdev->device);
853
854 pr_debug("Starting kfd with the following scheduling policy %d\n",
855 node->dqm->sched_policy);
856
857 goto out;
858
859 node_init_error:
860 node_alloc_error:
861 kfd_cleanup_nodes(kfd, i);
862 kfd_doorbell_fini(kfd);
863 kfd_doorbell_error:
864 kfd_gtt_sa_fini(kfd);
865 kfd_gtt_sa_init_error:
866 amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
867 alloc_gtt_mem_failure:
868 dev_err(kfd_device,
869 "device %x:%x NOT added due to errors\n",
870 kfd->adev->pdev->vendor, kfd->adev->pdev->device);
871 out:
872 return kfd->init_complete;
873 }
874
875 void kgd2kfd_device_exit(struct kfd_dev *kfd)
876 {
877 if (kfd->init_complete) {
878 /* Cleanup KFD nodes */
879 kfd_cleanup_nodes(kfd, kfd->num_nodes);
880 /* Cleanup common/shared resources */
881 kfd_doorbell_fini(kfd);
882 ida_destroy(&kfd->doorbell_ida);
883 kfd_gtt_sa_fini(kfd);
884 amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
885 }
886
887 kfree(kfd);
888 }
889
890 int kgd2kfd_pre_reset(struct kfd_dev *kfd)
891 {
892 struct kfd_node *node;
893 int i;
894
895 if (!kfd->init_complete)
896 return 0;
897
898 for (i = 0; i < kfd->num_nodes; i++) {
899 node = kfd->nodes[i];
900 kfd_smi_event_update_gpu_reset(node, false);
901 node->dqm->ops.pre_reset(node->dqm);
902 }
903
904 kgd2kfd_suspend(kfd, false);
905
906 for (i = 0; i < kfd->num_nodes; i++)
907 kfd_signal_reset_event(kfd->nodes[i]);
908
909 return 0;
910 }
911
912 /*
913 * Fix me. KFD won't be able to resume existing processes for now.
914 * We will keep all existing processes in an evicted state and
915 * wait for them to be terminated.
916 */
917
918 int kgd2kfd_post_reset(struct kfd_dev *kfd)
919 {
920 int ret;
921 struct kfd_node *node;
922 int i;
923
924 if (!kfd->init_complete)
925 return 0;
926
927 for (i = 0; i < kfd->num_nodes; i++) {
928 ret = kfd_resume(kfd->nodes[i]);
929 if (ret)
930 return ret;
931 }
932
933 mutex_lock(&kfd_processes_mutex);
934 --kfd_locked;
935 mutex_unlock(&kfd_processes_mutex);
936
937 for (i = 0; i < kfd->num_nodes; i++) {
938 node = kfd->nodes[i];
939 atomic_set(&node->sram_ecc_flag, 0);
940 kfd_smi_event_update_gpu_reset(node, true);
941 }
942
943 return 0;
944 }
945
946 bool kfd_is_locked(void)
947 {
948 lockdep_assert_held(&kfd_processes_mutex);
949 return (kfd_locked > 0);
950 }
951
952 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
953 {
954 struct kfd_node *node;
955 int i;
956
957 if (!kfd->init_complete)
958 return;
959
960 /* for runtime suspend, skip locking kfd */
961 if (!run_pm) {
962 mutex_lock(&kfd_processes_mutex);
963 /* For first KFD device suspend all the KFD processes */
964 if (++kfd_locked == 1)
965 kfd_suspend_all_processes();
966 mutex_unlock(&kfd_processes_mutex);
967 }
968
969 for (i = 0; i < kfd->num_nodes; i++) {
970 node = kfd->nodes[i];
971 node->dqm->ops.stop(node->dqm);
972 }
973 }
974
975 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
976 {
977 int ret, i;
978
979 if (!kfd->init_complete)
980 return 0;
981
982 for (i = 0; i < kfd->num_nodes; i++) {
983 ret = kfd_resume(kfd->nodes[i]);
984 if (ret)
985 return ret;
986 }
987
988 /* for runtime resume, skip unlocking kfd */
989 if (!run_pm) {
990 mutex_lock(&kfd_processes_mutex);
991 if (--kfd_locked == 0)
992 ret = kfd_resume_all_processes();
993 WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
994 mutex_unlock(&kfd_processes_mutex);
995 }
996
997 return ret;
998 }
999
1000 static int kfd_resume(struct kfd_node *node)
1001 {
1002 int err = 0;
1003
1004 err = node->dqm->ops.start(node->dqm);
1005 if (err)
1006 dev_err(kfd_device,
1007 "Error starting queue manager for device %x:%x\n",
1008 node->adev->pdev->vendor, node->adev->pdev->device);
1009
1010 return err;
1011 }
1012
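/*
 * Pick an online CPU on the current NUMA node to run the interrupt
 * work; if the search wraps back to the starting CPU without finding
 * one, the work is queued on that starting CPU.
 */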
1013 static inline void kfd_queue_work(struct workqueue_struct *wq,
1014 struct work_struct *work)
1015 {
1016 int cpu, new_cpu;
1017
1018 cpu = new_cpu = smp_processor_id();
1019 do {
1020 new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
1021 if (cpu_to_node(new_cpu) == numa_node_id())
1022 break;
1023 } while (cpu != new_cpu);
1024
1025 queue_work_on(new_cpu, wq, work);
1026 }
1027
1028 /* This is called directly from KGD at ISR. */
1029 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
1030 {
1031 uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
1032 bool is_patched = false;
1033 unsigned long flags;
1034 struct kfd_node *node;
1035
1036 if (!kfd->init_complete)
1037 return;
1038
1039 if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
1040 dev_err_once(kfd_device, "Ring entry too small\n");
1041 return;
1042 }
1043
1044 for (i = 0; i < kfd->num_nodes; i++) {
1045 node = kfd->nodes[i];
1046 spin_lock_irqsave(&node->interrupt_lock, flags);
1047
1048 if (node->interrupts_active
1049 && interrupt_is_wanted(node, ih_ring_entry,
1050 patched_ihre, &is_patched)
1051 && enqueue_ih_ring_entry(node,
1052 is_patched ? patched_ihre : ih_ring_entry)) {
1053 kfd_queue_work(node->ih_wq, &node->interrupt_work);
1054 spin_unlock_irqrestore(&node->interrupt_lock, flags);
1055 return;
1056 }
1057 spin_unlock_irqrestore(&node->interrupt_lock, flags);
1058 }
1059
1060 }
1061
1062 int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
1063 {
1064 struct kfd_process *p;
1065 int r;
1066
1067 /* Because we are called from arbitrary context (workqueue) as opposed
1068 * to process context, kfd_process could attempt to exit while we are
1069 * running so the lookup function increments the process ref count.
1070 */
1071 p = kfd_lookup_process_by_mm(mm);
1072 if (!p)
1073 return -ESRCH;
1074
1075 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
1076 r = kfd_process_evict_queues(p, trigger);
1077
1078 kfd_unref_process(p);
1079 return r;
1080 }
1081
1082 int kgd2kfd_resume_mm(struct mm_struct *mm)
1083 {
1084 struct kfd_process *p;
1085 int r;
1086
1087 /* Because we are called from arbitrary context (workqueue) as opposed
1088 * to process context, kfd_process could attempt to exit while we are
1089 * running so the lookup function increments the process ref count.
1090 */
1091 p = kfd_lookup_process_by_mm(mm);
1092 if (!p)
1093 return -ESRCH;
1094
1095 r = kfd_process_restore_queues(p);
1096
1097 kfd_unref_process(p);
1098 return r;
1099 }
1100
1101 /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
1102 * prepare for safe eviction of KFD BOs that belong to the specified
1103 * process.
1104 *
1105 * @mm: mm_struct that identifies the specified KFD process
1106 * @fence: eviction fence attached to KFD process BOs
1107 *
1108 */
1109 int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
1110 struct dma_fence *fence)
1111 {
1112 struct kfd_process *p;
1113 unsigned long active_time;
1114 unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
1115
1116 if (!fence)
1117 return -EINVAL;
1118
1119 if (dma_fence_is_signaled(fence))
1120 return 0;
1121
1122 p = kfd_lookup_process_by_mm(mm);
1123 if (!p)
1124 return -ENODEV;
1125
1126 if (fence->seqno == p->last_eviction_seqno)
1127 goto out;
1128
1129 p->last_eviction_seqno = fence->seqno;
1130
1131 /* Avoid KFD process starvation. Wait for at least
1132 * PROCESS_ACTIVE_TIME_MS before evicting the process again
1133 */
1134 active_time = get_jiffies_64() - p->last_restore_timestamp;
1135 if (delay_jiffies > active_time)
1136 delay_jiffies -= active_time;
1137 else
1138 delay_jiffies = 0;
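/*
 * For example (illustrative numbers), if the process has been active
 * for 7 ms and PROCESS_ACTIVE_TIME_MS is 10 ms, the eviction work is
 * delayed by the remaining 3 ms; a process active longer than that is
 * evicted without extra delay.
 */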
1139
1140 /* During process initialization eviction_work.dwork is initialized
1141 * to kfd_evict_bo_worker
1142 */
1143 WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
1144 p->lead_thread->pid, delay_jiffies);
1145 schedule_delayed_work(&p->eviction_work, delay_jiffies);
1146 out:
1147 kfd_unref_process(p);
1148 return 0;
1149 }
1150
1151 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
1152 unsigned int chunk_size)
1153 {
1154 if (WARN_ON(buf_size < chunk_size))
1155 return -EINVAL;
1156 if (WARN_ON(buf_size == 0))
1157 return -EINVAL;
1158 if (WARN_ON(chunk_size == 0))
1159 return -EINVAL;
1160
1161 kfd->gtt_sa_chunk_size = chunk_size;
1162 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
1163
1164 kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
1165 GFP_KERNEL);
1166 if (!kfd->gtt_sa_bitmap)
1167 return -ENOMEM;
1168
1169 pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
1170 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
1171
1172 mutex_init(&kfd->gtt_sa_lock);
1173
1174 return 0;
1175 }
1176
1177 static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
1178 {
1179 mutex_destroy(&kfd->gtt_sa_lock);
1180 bitmap_free(kfd->gtt_sa_bitmap);
1181 }
1182
1183 static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
1184 unsigned int bit_num,
1185 unsigned int chunk_size)
1186 {
1187 return start_addr + bit_num * chunk_size;
1188 }
1189
1190 static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
1191 unsigned int bit_num,
1192 unsigned int chunk_size)
1193 {
1194 return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
1195 }
1196
1197 int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
1198 struct kfd_mem_obj **mem_obj)
1199 {
1200 unsigned int found, start_search, cur_size;
1201 struct kfd_dev *kfd = node->kfd;
1202
1203 if (size == 0)
1204 return -EINVAL;
1205
1206 if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
1207 return -ENOMEM;
1208
1209 *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
1210 if (!(*mem_obj))
1211 return -ENOMEM;
1212
1213 pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
1214
1215 start_search = 0;
1216
1217 mutex_lock(&kfd->gtt_sa_lock);
1218
1219 kfd_gtt_restart_search:
1220 /* Find the first chunk that is free */
1221 found = find_next_zero_bit(kfd->gtt_sa_bitmap,
1222 kfd->gtt_sa_num_of_chunks,
1223 start_search);
1224
1225 pr_debug("Found = %d\n", found);
1226
1227 /* If there wasn't any free chunk, bail out */
1228 if (found == kfd->gtt_sa_num_of_chunks)
1229 goto kfd_gtt_no_free_chunk;
1230
1231 /* Update fields of mem_obj */
1232 (*mem_obj)->range_start = found;
1233 (*mem_obj)->range_end = found;
1234 (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
1235 kfd->gtt_start_gpu_addr,
1236 found,
1237 kfd->gtt_sa_chunk_size);
1238 (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
1239 kfd->gtt_start_cpu_ptr,
1240 found,
1241 kfd->gtt_sa_chunk_size);
1242
1243 pr_debug("gpu_addr = %p, cpu_addr = %p\n",
1244 (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
1245
1246 /* If we need only one chunk, mark it as allocated and get out */
1247 if (size <= kfd->gtt_sa_chunk_size) {
1248 pr_debug("Single bit\n");
1249 __set_bit(found, kfd->gtt_sa_bitmap);
1250 goto kfd_gtt_out;
1251 }
1252
1253 /* Otherwise, try to see if we have enough contiguous chunks */
1254 cur_size = size - kfd->gtt_sa_chunk_size;
1255 do {
1256 (*mem_obj)->range_end =
1257 find_next_zero_bit(kfd->gtt_sa_bitmap,
1258 kfd->gtt_sa_num_of_chunks, ++found);
1259 /*
1260 * If the next free chunk is not contiguous, then we need to
1261 * restart our search from the last free chunk we found (which
1262 * wasn't contiguous to the previous ones).
1263 */
1264 if ((*mem_obj)->range_end != found) {
1265 start_search = found;
1266 goto kfd_gtt_restart_search;
1267 }
1268
1269 /*
1270 * If we reached end of buffer, bail out with error
1271 */
1272 if (found == kfd->gtt_sa_num_of_chunks)
1273 goto kfd_gtt_no_free_chunk;
1274
1275 /* Check if we don't need another chunk */
1276 if (cur_size <= kfd->gtt_sa_chunk_size)
1277 cur_size = 0;
1278 else
1279 cur_size -= kfd->gtt_sa_chunk_size;
1280
1281 } while (cur_size > 0);
1282
1283 pr_debug("range_start = %d, range_end = %d\n",
1284 (*mem_obj)->range_start, (*mem_obj)->range_end);
1285
1286 /* Mark the chunks as allocated */
1287 bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
1288 (*mem_obj)->range_end - (*mem_obj)->range_start + 1);
1289
1290 kfd_gtt_out:
1291 mutex_unlock(&kfd->gtt_sa_lock);
1292 return 0;
1293
1294 kfd_gtt_no_free_chunk:
1295 pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
1296 mutex_unlock(&kfd->gtt_sa_lock);
1297 kfree(*mem_obj);
1298 return -ENOMEM;
1299 }
1300
1301 int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
1302 {
1303 struct kfd_dev *kfd = node->kfd;
1304
1305 /* Act like kfree when trying to free a NULL object */
1306 if (!mem_obj)
1307 return 0;
1308
1309 pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
1310 mem_obj, mem_obj->range_start, mem_obj->range_end);
1311
1312 mutex_lock(&kfd->gtt_sa_lock);
1313
1314 /* Mark the chunks as free */
1315 bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
1316 mem_obj->range_end - mem_obj->range_start + 1);
1317
1318 mutex_unlock(&kfd->gtt_sa_lock);
1319
1320 kfree(mem_obj);
1321 return 0;
1322 }
1323
1324 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
1325 {
1326 /*
1327 * TODO: Currently update SRAM ECC flag for first node.
1328 * This needs to be updated later when we can
1329 * identify SRAM ECC error on other nodes also.
1330 */
1331 if (kfd)
1332 atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
1333 }
1334
1335 void kfd_inc_compute_active(struct kfd_node *node)
1336 {
1337 if (atomic_inc_return(&node->kfd->compute_profile) == 1)
1338 amdgpu_amdkfd_set_compute_idle(node->adev, false);
1339 }
1340
1341 void kfd_dec_compute_active(struct kfd_node *node)
1342 {
1343 int count = atomic_dec_return(&node->kfd->compute_profile);
1344
1345 if (count == 0)
1346 amdgpu_amdkfd_set_compute_idle(node->adev, true);
1347 WARN_ONCE(count < 0, "Compute profile ref. count error");
1348 }
1349
1350 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
1351 {
1352 /*
1353 * TODO: For now, raise the throttling event only on first node.
1354 * This will need to change after we are able to determine
1355 * which node raised the throttling event.
1356 */
1357 if (kfd && kfd->init_complete)
1358 kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
1359 throttle_bitmask);
1360 }
1361
1362 /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
1363 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
1364 * When the device has more than two engines, we reserve two for PCIe to enable
1365 * full-duplex and the rest are used as XGMI.
1366 */
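/*
 * Example: an XGMI-capable device with 8 SDMA instances and a single
 * KFD node reports 2 PCIe-optimized engines and 6 XGMI engines; if
 * XGMI is not supported, all 8 count as PCIe-optimized.
 */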
1367 unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
1368 {
1369 /* If XGMI is not supported, all SDMA engines are PCIe */
1370 if (!node->adev->gmc.xgmi.supported)
1371 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
1372
1373 return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
1374 }
1375
1376 unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
1377 {
1378 /* After reserved for PCIe, the rest of engines are XGMI */
1379 return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
1380 kfd_get_num_sdma_engines(node);
1381 }
1382
1383 int kgd2kfd_check_and_lock_kfd(void)
1384 {
1385 mutex_lock(&kfd_processes_mutex);
1386 if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
1387 mutex_unlock(&kfd_processes_mutex);
1388 return -EBUSY;
1389 }
1390
1391 ++kfd_locked;
1392 mutex_unlock(&kfd_processes_mutex);
1393
1394 return 0;
1395 }
1396
1397 void kgd2kfd_unlock_kfd(void)
1398 {
1399 mutex_lock(&kfd_processes_mutex);
1400 --kfd_locked;
1401 mutex_unlock(&kfd_processes_mutex);
1402 }
1403
1404 #if defined(CONFIG_DEBUG_FS)
1405
1406 /* This function will send a package to HIQ to hang the HWS
1407 * which will trigger a GPU reset and bring the HWS back to normal state
1408 */
1409 int kfd_debugfs_hang_hws(struct kfd_node *dev)
1410 {
1411 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
1412 pr_err("HWS is not enabled");
1413 return -EINVAL;
1414 }
1415
1416 return dqm_debugfs_hang_hws(dev->dqm);
1417 }
1418
1419 #endif
1420