/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the
 * next GPU
 * @total_cu_count - Total CUs present in the GPU including ones
 *		     masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}
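
/*
 * Illustrative example (hypothetical values): the first dGPU probed with
 * 64 total CUs is assigned processor_id_low = 0x80001000 and the base
 * advances to 0x80001040, so the next dGPU's SIMD/CU IDs start there and
 * never overlap the first GPU's range.
 */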

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t	cache_size;
	uint32_t	cache_level;
	uint32_t	flags;
	/* Indicates how many Compute Units share this cache
	 * Value = 1 indicates the cache is not shared
	 */
	uint32_t	num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},

	/* TODO: Add L2 Cache information */
};


static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank. */
		.cache_size = 4,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},

	/* TODO: Add L2 Cache information */
};

/* NOTE: In future if more information is added to struct kfd_gpu_cache_info
 * the following ASICs may need a separate table.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
#define vangogh_cache_info carrizo_cache_info

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}
/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
				struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on a GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
							mem->length_low;
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
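			/* E.g. (illustrative): two 8 GB banks with the same
			 * heap type, flags and width are exposed to user
			 * mode as a single 16 GB heap by the size
			 * accumulation below.
			 */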
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
				struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't have proximity_domain
		 * information as it is associated with a CPU core or GPU
		 * Compute Unit. So map the cache using the CPU core ID or
		 * SIMD (GPU) ID.
		 * TODO: This works because currently we can safely assume
		 * that Compute Units are parsed before caches are parsed. In
		 * future, remove this dependency.
		 */
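		/* Illustrative example (hypothetical IDs): with
		 * cpu_core_id_base = 0 and cpu_cores_count = 8, cache
		 * entries with processor_id 0..8 match this node; with
		 * simd_id_base = 0x80001000 and total_num_of_cu = 64, GPU
		 * cache entries with IDs 0x80001000..0x8000103f match.
		 */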
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->cache_count++;
			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
					struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU has been detected and we are adding the GPU->CPU link
	 * to the topology. At this time, also add the corresponding CPU->GPU
	 * link if the GPU has a large BAR.
	 * For xGMI, only one direction of the link is present in the CRAT
	 * table; add the corresponding reversed-direction link now.
	 */
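	/* E.g. (illustrative): a large-BAR dGPU on PCIe reports a
	 * bi-directional link; the GPU->CPU direction was added above and
	 * the matching CPU->GPU direction is duplicated below with the
	 * endpoints swapped.
	 */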
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;
		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 * @sub_type_hdr - subtype section of crat_image
 * @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
				struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}

/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
 * create a kfd_topology_device and add it to device_list. Also parse
 * CRAT subtypes and attach them to the appropriate kfd_topology_device
 * @crat_image - input image containing CRAT
 * @device_list - [OUT] list of kfd_topology_device generated after
 *		  parsing crat_image
 * @proximity_domain - Proximity domain of the first device in the table
 *
 * Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
			 uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cu_bitmask,
				int cache_type, unsigned int cu_processor_id,
				int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. In case of a shared cache, find the first
	 * active CU; in case of a non-shared cache, check whether the CU is
	 * inactive and, if so, skip it.
	 */
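	/* Worked example (hypothetical bitmap): with cu_bitmask = 0xF7,
	 * num_cu_shared = 4 and cu_block = 4, the mask becomes
	 * (0xF7 >> 4) & 0xF = 0xF; ffs() returns 1, so processor_id_low is
	 * cu_processor_id + 0 and sibling_map[0] = 0x0F.
	 */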
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					+ (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CU
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}

/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
 * tables
 *
 * @kdev - [IN] GPU device
 * @gpu_processor_id - [IN] GPU processor ID with which these caches
 *		       are associated
 * @available_size - [IN] Amount of memory available in pcache
 * @cu_info - [IN] Compute Unit info obtained from KGD
 * @pcache - [OUT] memory into which cache data is to be filled in.
 * @size_filled - [OUT] amount of data used up in pcache.
 * @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
			int gpu_processor_id,
			int available_size,
			struct kfd_cu_info *cu_info,
			struct crat_subtype_cache *pcache,
			int *size_filled,
			int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;

	switch (kdev->device_info->asic_family) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		pcache_info = vega10_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
		break;
	case CHIP_RAVEN:
		pcache_info = raven_cache_info;
		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
		break;
	case CHIP_RENOIR:
		pcache_info = renoir_cache_info;
		num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
		pcache_info = navi10_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
		break;
	case CHIP_VANGOGH:
		pcache_info = vangogh_cache_info;
		num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
		break;
	default:
		return -EINVAL;
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each type of cache listed in the kfd_gpu_cache_info table,
	 * go through all available Compute Units.
	 * The [i,j,k] loop:
	 *   if kfd_gpu_cache_info.num_cu_shared == 1, parses through all
	 *   available CUs;
	 *   if kfd_gpu_cache_info.num_cu_shared != 1, considers only one
	 *   CU from each shared unit.
	 */
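
	/* Illustrative walk (hypothetical config): with 4 shader engines,
	 * 1 shader array per engine, 10 CUs per array and num_cu_shared = 4,
	 * the innermost loop visits k = 0, 4, 8 in each array, i.e. one
	 * cache entry per group of up to 4 CUs sharing an SQC bank.
	 */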

	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				for (k = 0; k < cu_info->num_cu_per_sh;
					k += pcache_info[ct].num_cu_shared) {

					ret = fill_in_pcache(pcache,
						pcache_info,
						cu_info,
						mem_available,
						cu_info->cu_bitmap[i % 4][j + i / 4],
						ct,
						cu_processor_id,
						k);

					if (ret < 0)
						break;

					if (!ret) {
						pcache++;
						(*num_of_entries)++;
						mem_available -=
							sizeof(*pcache);
						(*size_filled) +=
							sizeof(*pcache);
					}

					/* Move to next CU block */
					cu_processor_id +=
						pcache_info[ct].num_cu_shared;
				}
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}

static bool kfd_ignore_crat(void)
{
	bool ret;

	if (ignore_crat)
		return true;

#ifndef KFD_SUPPORT_IOMMU_V2
	ret = true;
#else
	ret = false;
#endif

	return ret;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *	       crat_image will be NULL
 * @size: [OUT] size of crat_image
 *
 * Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;
	int rc = 0;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	if (kfd_ignore_crat()) {
		pr_info("CRAT table disabled by module option\n");
		return -ENODATA;
	}

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_warn("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}

	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
	if (!pcrat_image) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(pcrat_image, crat_table, crat_table->length);
	*crat_image = pcrat_image;
	*size = crat_table->length;
out:
	acpi_put_table(crat_table);
	return rc;
}

/* Memory required to create Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for GPU Virtual CRAT. This is
 * expected to cover all known conditions. To be safe, an additional check
 * is put in the code to ensure we don't overwrite.
 */
#define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which compute info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
				int proximity_domain,
				struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which memory info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from the si_meminfo_node
	 * function.
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif

/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 * @pcrat_image: Fill in VCRAT for CPU
 * @size: [IN] allocated size of crat_image.
 *	  [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
		acpi_put_table(acpi_table);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;

		if (entries) {
			crat_table->length += (sub_type_hdr->length * entries);
			crat_table->total_entries += entries;

			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
					sub_type_hdr->length * entries);
		}
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported and to get the same information the code needs to be
	 * duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}

/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
 * to its NUMA node
 * @avail_size: Available size in the memory
 * @kdev - [IN] GPU device
 * @sub_type_hdr: Memory into which io link info will be filled in
 * @proximity_domain - proximity domain of the GPU node
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
			struct kfd_dev *kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;

	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
	if (kfd_dev_is_large_bar(kdev))
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	/* Fill in IOLINK subtype.
	 * TODO: Fill-in other fields of iolink subtype
	 */
	if (adev->gmc.xgmi.connected_to_cpu) {
		/*
		 * With a host-GPU xGMI link, the host can access GPU memory
		 * whether or not the PCIe BAR type is large, so always create
		 * a bidirectional io link.
		 */
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	} else {
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
	}

	sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		sub_type_hdr->proximity_domain_to = 0;
	else
		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
	sub_type_hdr->proximity_domain_to = 0;
#endif
	return 0;
}

static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
			struct kfd_dev *kdev,
			struct kfd_dev *peer_kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain_from,
			uint32_t proximity_domain_to)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	sub_type_hdr->proximity_domain_from = proximity_domain_from;
	sub_type_hdr->proximity_domain_to = proximity_domain_to;
	sub_type_hdr->num_hops_xgmi =
		amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
	return 0;
}

/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 * @pcrat_image: Fill in VCRAT for GPU
 * @size: [IN] allocated size of crat_image.
 *	  [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
				      size_t *size, struct kfd_dev *kdev,
				      uint32_t proximity_domain)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct crat_subtype_generic *sub_type_hdr;
	struct kfd_local_mem_info local_mem_info;
	struct kfd_topology_device *peer_dev;
	struct crat_subtype_computeunit *cu;
	struct kfd_cu_info cu_info;
	int avail_size = *size;
	uint32_t total_num_of_cu;
	int num_of_cache_entries = 0;
	int cache_mem_filled = 0;
	uint32_t nid = 0;
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
		return -EINVAL;

	/* Fill the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));

	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	/* Change length as we add more subtypes */
	crat_table->length = sizeof(struct crat_header);
	crat_table->num_domains = 1;
	crat_table->total_entries = 0;

	/* Fill in Subtype: Compute Unit
	 * First fill in the sub type header and then sub type data
	 */
	avail_size -= sizeof(struct crat_subtype_computeunit);
	if (avail_size < 0)
		return -ENOMEM;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill CU subtype data */
	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
	cu->proximity_domain = proximity_domain;

	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
	cu->num_simd_per_cu = cu_info.simd_per_cu;
	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
	cu->max_waves_simd = cu_info.max_waves_per_simd;

	cu->wave_front_size = cu_info.wave_front_size;
	cu->array_count = cu_info.num_shader_arrays_per_engine *
		cu_info.num_shader_engines;
	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
	cu->num_cu_per_array = cu_info.num_cu_per_sh;
	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
	cu->num_banks = cu_info.num_shader_engines;
	cu->lds_size_in_kb = cu_info.lds_size;

	cu->hsa_capability = 0;

	/* Check if this node supports IOMMU. During parsing this flag will
	 * translate to HSA_CAP_ATS_PRESENT
	 */
	if (!kfd_iommu_check_device(kdev))
		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: Memory. Only on systems with large BAR (no
	 * private FB), report memory as public. On other systems
	 * report the total FB size (public+private) as a single
	 * private heap.
	 */
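	/* E.g. (illustrative): a 16 GB large-BAR board with no private FB is
	 * reported as one 16 GB FB_PUBLIC heap, while a small-BAR board is
	 * reported as a single FB_PRIVATE heap covering its whole VRAM.
	 */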
	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

	if (debug_largebar)
		local_mem_info.local_mem_size_private = 0;

	if (local_mem_info.local_mem_size_private == 0)
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
				local_mem_info.local_mem_size_public,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	else
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
				local_mem_info.local_mem_size_public +
				local_mem_info.local_mem_size_private,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	if (ret < 0)
		return ret;

	crat_table->length += sizeof(struct crat_subtype_memory);
	crat_table->total_entries++;

	/* TODO: Fill in cache information. This information is NOT readily
	 * available in KGD
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);
	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
				avail_size,
				&cu_info,
				(struct crat_subtype_cache *)sub_type_hdr,
				&cache_mem_filled,
				&num_of_cache_entries);

	if (ret < 0)
		return ret;

	crat_table->length += cache_mem_filled;
	crat_table->total_entries += num_of_cache_entries;
	avail_size -= cache_mem_filled;
	/* Fill in Subtype: IO_LINKS
	 * Only direct links are added here, i.e. the link from the GPU
	 * to its NUMA node. Indirect links are added by userspace.
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			cache_mem_filled);
	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

	if (ret < 0)
		return ret;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;


	/* Fill in Subtype: IO_LINKS
	 * Direct links from this GPU to other GPUs through xGMI.
	 * We loop over GPUs that have already been processed (with a lower
	 * value of proximity_domain) and add the link for GPUs in the same
	 * hive (from this GPU to the other GPU). The reversed iolink
	 * (from the other GPU to this GPU) will be added
	 * in kfd_parse_subtype_iolink.
	 */
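	/* E.g. (illustrative): in a 4-GPU hive whose nodes occupy
	 * consecutive proximity domains, the third GPU adds xGMI links to
	 * the first two here; the reversed directions are filled in later
	 * by kfd_parse_subtype_iolink.
	 */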
	if (kdev->hive_id) {
		for (nid = 0; nid < proximity_domain; ++nid) {
			peer_dev = kfd_topology_device_by_proximity_domain(nid);
			if (!peer_dev->gpu)
				continue;
			if (peer_dev->gpu->hive_id != kdev->hive_id)
				continue;
			sub_type_hdr = (typeof(sub_type_hdr))(
				(char *)sub_type_hdr +
				sizeof(struct crat_subtype_iolink));
			ret = kfd_fill_gpu_xgmi_link_to_gpu(
				&avail_size, kdev, peer_dev->gpu,
				(struct crat_subtype_iolink *)sub_type_hdr,
				proximity_domain, nid);
			if (ret < 0)
				return ret;
			crat_table->length += sub_type_hdr->length;
			crat_table->total_entries++;
		}
	}
	*size = crat_table->length;
	pr_info("Virtual CRAT table created for GPU\n");

	return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 * creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: VCRAT image created because ACPI does not have a
 *	       CRAT for this device
 * @size: [OUT] size of virtual crat_image
 * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *	   COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *	   (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *			-- this option is not currently implemented.
 *			The assumption is that all AMD APUs will have CRAT
 * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 * Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
				  int flags, struct kfd_dev *kdev,
				  uint32_t proximity_domain)
{
	void *pcrat_image = NULL;
	int ret = 0, num_nodes;
	size_t dyn_size;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Allocate the CPU Virtual CRAT size based on the number of online
	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image.
	 * This should cover all the current conditions. A check is in place
	 * so that we don't write beyond the allocated size for GPUs.
	 */
	switch (flags) {
	case COMPUTE_UNIT_CPU:
		num_nodes = num_online_nodes();
		dyn_size = sizeof(struct crat_header) +
			num_nodes * (sizeof(struct crat_subtype_computeunit) +
			sizeof(struct crat_subtype_memory) +
			(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
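		/* E.g. (illustrative): with 2 online NUMA nodes this is one
		 * header plus 2 * (CU + memory subtype) plus 2 * 1 iolink
		 * subtype, since each node links to every other node.
		 */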
		pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = dyn_size;
		pr_debug("CRAT size is %zu\n", dyn_size);
		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
		break;
	case COMPUTE_UNIT_GPU:
		if (!kdev)
			return -EINVAL;
		pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_GPU;
		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
						 proximity_domain);
		break;
	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
		/* TODO: */
		ret = -EINVAL;
		pr_err("VCRAT not implemented for APU\n");
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		*crat_image = pcrat_image;
	else
		kvfree(pcrat_image);

	return ret;
}


/* kfd_destroy_crat_image
 *
 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 *
 */
void kfd_destroy_crat_image(void *crat_image)
{
	kvfree(crat_image);
}