drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/pci.h>
27 #include <linux/errno.h>
28 #include <linux/acpi.h>
29 #include <linux/hash.h>
30 #include <linux/cpufreq.h>
31 #include <linux/log2.h>
32 #include <linux/dmi.h>
33 #include <linux/atomic.h>
34
35 #include "kfd_priv.h"
36 #include "kfd_crat.h"
37 #include "kfd_topology.h"
38 #include "kfd_device_queue_manager.h"
39 #include "kfd_svm.h"
40 #include "kfd_debug.h"
41 #include "amdgpu_amdkfd.h"
42 #include "amdgpu_ras.h"
43 #include "amdgpu.h"
44
45 /* topology_device_list - Master list of all topology devices */
46 static struct list_head topology_device_list;
47 static struct kfd_system_properties sys_props;
48
49 static DECLARE_RWSEM(topology_lock);
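/* Highest proximity domain assigned while parsing the (virtual) CRAT tables */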
50 static uint32_t topology_crat_proximity_domain;
51
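/* Look up a topology device by proximity domain; caller must hold topology_lock */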
52 struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
53                                                 uint32_t proximity_domain)
54 {
55         struct kfd_topology_device *top_dev;
56         struct kfd_topology_device *device = NULL;
57
58         list_for_each_entry(top_dev, &topology_device_list, list)
59                 if (top_dev->proximity_domain == proximity_domain) {
60                         device = top_dev;
61                         break;
62                 }
63
64         return device;
65 }
66
67 struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
68                                                 uint32_t proximity_domain)
69 {
70         struct kfd_topology_device *device = NULL;
71
72         down_read(&topology_lock);
73
74         device = kfd_topology_device_by_proximity_domain_no_lock(
75                                                         proximity_domain);
76         up_read(&topology_lock);
77
78         return device;
79 }
80
81 struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
82 {
83         struct kfd_topology_device *top_dev = NULL;
84         struct kfd_topology_device *ret = NULL;
85
86         down_read(&topology_lock);
87
88         list_for_each_entry(top_dev, &topology_device_list, list)
89                 if (top_dev->gpu_id == gpu_id) {
90                         ret = top_dev;
91                         break;
92                 }
93
94         up_read(&topology_lock);
95
96         return ret;
97 }
98
99 struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
100 {
101         struct kfd_topology_device *top_dev;
102
103         top_dev = kfd_topology_device_by_id(gpu_id);
104         if (!top_dev)
105                 return NULL;
106
107         return top_dev->gpu;
108 }
109
110 struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
111 {
112         struct kfd_topology_device *top_dev;
113         struct kfd_node *device = NULL;
114
115         down_read(&topology_lock);
116
117         list_for_each_entry(top_dev, &topology_device_list, list)
118                 if (top_dev->gpu && top_dev->gpu->adev->pdev == pdev) {
119                         device = top_dev->gpu;
120                         break;
121                 }
122
123         up_read(&topology_lock);
124
125         return device;
126 }
127
128 /* Called with write topology_lock acquired */
129 static void kfd_release_topology_device(struct kfd_topology_device *dev)
130 {
131         struct kfd_mem_properties *mem;
132         struct kfd_cache_properties *cache;
133         struct kfd_iolink_properties *iolink;
134         struct kfd_iolink_properties *p2plink;
135         struct kfd_perf_properties *perf;
136
137         list_del(&dev->list);
138
139         while (dev->mem_props.next != &dev->mem_props) {
140                 mem = container_of(dev->mem_props.next,
141                                 struct kfd_mem_properties, list);
142                 list_del(&mem->list);
143                 kfree(mem);
144         }
145
146         while (dev->cache_props.next != &dev->cache_props) {
147                 cache = container_of(dev->cache_props.next,
148                                 struct kfd_cache_properties, list);
149                 list_del(&cache->list);
150                 kfree(cache);
151         }
152
153         while (dev->io_link_props.next != &dev->io_link_props) {
154                 iolink = container_of(dev->io_link_props.next,
155                                 struct kfd_iolink_properties, list);
156                 list_del(&iolink->list);
157                 kfree(iolink);
158         }
159
160         while (dev->p2p_link_props.next != &dev->p2p_link_props) {
161                 p2plink = container_of(dev->p2p_link_props.next,
162                                 struct kfd_iolink_properties, list);
163                 list_del(&p2plink->list);
164                 kfree(p2plink);
165         }
166
167         while (dev->perf_props.next != &dev->perf_props) {
168                 perf = container_of(dev->perf_props.next,
169                                 struct kfd_perf_properties, list);
170                 list_del(&perf->list);
171                 kfree(perf);
172         }
173
174         kfree(dev);
175 }
176
177 void kfd_release_topology_device_list(struct list_head *device_list)
178 {
179         struct kfd_topology_device *dev;
180
181         while (!list_empty(device_list)) {
182                 dev = list_first_entry(device_list,
183                                        struct kfd_topology_device, list);
184                 kfd_release_topology_device(dev);
185         }
186 }
187
188 static void kfd_release_live_view(void)
189 {
190         kfd_release_topology_device_list(&topology_device_list);
191         memset(&sys_props, 0, sizeof(sys_props));
192 }
193
194 struct kfd_topology_device *kfd_create_topology_device(
195                                 struct list_head *device_list)
196 {
197         struct kfd_topology_device *dev;
198
199         dev = kfd_alloc_struct(dev);
200         if (!dev) {
201                 pr_err("No memory to allocate a topology device\n");
202                 return NULL;
203         }
204
205         INIT_LIST_HEAD(&dev->mem_props);
206         INIT_LIST_HEAD(&dev->cache_props);
207         INIT_LIST_HEAD(&dev->io_link_props);
208         INIT_LIST_HEAD(&dev->p2p_link_props);
209         INIT_LIST_HEAD(&dev->perf_props);
210
211         list_add_tail(&dev->list, device_list);
212
213         return dev;
214 }
215
216
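/* Helpers for sysfs show handlers: append a formatted property line to the
 * output buffer and advance the running offset, never writing past PAGE_SIZE.
 */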
217 #define sysfs_show_gen_prop(buffer, offs, fmt, ...)             \
218                 (offs += snprintf(buffer+offs, PAGE_SIZE-offs,  \
219                                   fmt, __VA_ARGS__))
220 #define sysfs_show_32bit_prop(buffer, offs, name, value) \
221                 sysfs_show_gen_prop(buffer, offs, "%s %u\n", name, value)
222 #define sysfs_show_64bit_prop(buffer, offs, name, value) \
223                 sysfs_show_gen_prop(buffer, offs, "%s %llu\n", name, value)
224 #define sysfs_show_32bit_val(buffer, offs, value) \
225                 sysfs_show_gen_prop(buffer, offs, "%u\n", value)
226 #define sysfs_show_str_val(buffer, offs, value) \
227                 sysfs_show_gen_prop(buffer, offs, "%s\n", value)
228
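/* Show handler for the topology "generation_id" and "system_properties" files */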
229 static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
230                 char *buffer)
231 {
232         int offs = 0;
233
234         /* Making sure that the buffer is an empty string */
235         buffer[0] = 0;
236
237         if (attr == &sys_props.attr_genid) {
238                 sysfs_show_32bit_val(buffer, offs,
239                                      sys_props.generation_count);
240         } else if (attr == &sys_props.attr_props) {
241                 sysfs_show_64bit_prop(buffer, offs, "platform_oem",
242                                       sys_props.platform_oem);
243                 sysfs_show_64bit_prop(buffer, offs, "platform_id",
244                                       sys_props.platform_id);
245                 sysfs_show_64bit_prop(buffer, offs, "platform_rev",
246                                       sys_props.platform_rev);
247         } else {
248                 offs = -EINVAL;
249         }
250
251         return offs;
252 }
253
254 static void kfd_topology_kobj_release(struct kobject *kobj)
255 {
256         kfree(kobj);
257 }
258
259 static const struct sysfs_ops sysprops_ops = {
260         .show = sysprops_show,
261 };
262
263 static const struct kobj_type sysprops_type = {
264         .release = kfd_topology_kobj_release,
265         .sysfs_ops = &sysprops_ops,
266 };
267
268 static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
269                 char *buffer)
270 {
271         int offs = 0;
272         struct kfd_iolink_properties *iolink;
273
274         /* Making sure that the buffer is an empty string */
275         buffer[0] = 0;
276
277         iolink = container_of(attr, struct kfd_iolink_properties, attr);
278         if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
279                 return -EPERM;
280         sysfs_show_32bit_prop(buffer, offs, "type", iolink->iolink_type);
281         sysfs_show_32bit_prop(buffer, offs, "version_major", iolink->ver_maj);
282         sysfs_show_32bit_prop(buffer, offs, "version_minor", iolink->ver_min);
283         sysfs_show_32bit_prop(buffer, offs, "node_from", iolink->node_from);
284         sysfs_show_32bit_prop(buffer, offs, "node_to", iolink->node_to);
285         sysfs_show_32bit_prop(buffer, offs, "weight", iolink->weight);
286         sysfs_show_32bit_prop(buffer, offs, "min_latency", iolink->min_latency);
287         sysfs_show_32bit_prop(buffer, offs, "max_latency", iolink->max_latency);
288         sysfs_show_32bit_prop(buffer, offs, "min_bandwidth",
289                               iolink->min_bandwidth);
290         sysfs_show_32bit_prop(buffer, offs, "max_bandwidth",
291                               iolink->max_bandwidth);
292         sysfs_show_32bit_prop(buffer, offs, "recommended_transfer_size",
293                               iolink->rec_transfer_size);
294         sysfs_show_32bit_prop(buffer, offs, "flags", iolink->flags);
295
296         return offs;
297 }
298
299 static const struct sysfs_ops iolink_ops = {
300         .show = iolink_show,
301 };
302
303 static const struct kobj_type iolink_type = {
304         .release = kfd_topology_kobj_release,
305         .sysfs_ops = &iolink_ops,
306 };
307
308 static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
309                 char *buffer)
310 {
311         int offs = 0;
312         struct kfd_mem_properties *mem;
313
314         /* Making sure that the buffer is an empty string */
315         buffer[0] = 0;
316
317         mem = container_of(attr, struct kfd_mem_properties, attr);
318         if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
319                 return -EPERM;
320         sysfs_show_32bit_prop(buffer, offs, "heap_type", mem->heap_type);
321         sysfs_show_64bit_prop(buffer, offs, "size_in_bytes",
322                               mem->size_in_bytes);
323         sysfs_show_32bit_prop(buffer, offs, "flags", mem->flags);
324         sysfs_show_32bit_prop(buffer, offs, "width", mem->width);
325         sysfs_show_32bit_prop(buffer, offs, "mem_clk_max",
326                               mem->mem_clk_max);
327
328         return offs;
329 }
330
331 static const struct sysfs_ops mem_ops = {
332         .show = mem_show,
333 };
334
335 static const struct kobj_type mem_type = {
336         .release = kfd_topology_kobj_release,
337         .sysfs_ops = &mem_ops,
338 };
339
340 static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
341                 char *buffer)
342 {
343         int offs = 0;
344         uint32_t i, j;
345         struct kfd_cache_properties *cache;
346
347         /* Making sure that the buffer is an empty string */
348         buffer[0] = 0;
349         cache = container_of(attr, struct kfd_cache_properties, attr);
350         if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
351                 return -EPERM;
352         sysfs_show_32bit_prop(buffer, offs, "processor_id_low",
353                         cache->processor_id_low);
354         sysfs_show_32bit_prop(buffer, offs, "level", cache->cache_level);
355         sysfs_show_32bit_prop(buffer, offs, "size", cache->cache_size);
356         sysfs_show_32bit_prop(buffer, offs, "cache_line_size",
357                               cache->cacheline_size);
358         sysfs_show_32bit_prop(buffer, offs, "cache_lines_per_tag",
359                               cache->cachelines_per_tag);
360         sysfs_show_32bit_prop(buffer, offs, "association", cache->cache_assoc);
361         sysfs_show_32bit_prop(buffer, offs, "latency", cache->cache_latency);
362         sysfs_show_32bit_prop(buffer, offs, "type", cache->cache_type);
363
364         offs += snprintf(buffer+offs, PAGE_SIZE-offs, "sibling_map ");
365         for (i = 0; i < cache->sibling_map_size; i++)
366                 for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++)
367                         /* Check each bit */
368                         offs += snprintf(buffer+offs, PAGE_SIZE-offs, "%d,",
369                                                 (cache->sibling_map[i] >> j) & 1);
370
371         /* Replace the last "," with end of line */
372         buffer[offs-1] = '\n';
373         return offs;
374 }
375
376 static const struct sysfs_ops cache_ops = {
377         .show = kfd_cache_show,
378 };
379
380 static const struct kobj_type cache_type = {
381         .release = kfd_topology_kobj_release,
382         .sysfs_ops = &cache_ops,
383 };
384
385 /****** Sysfs of Performance Counters ******/
386
387 struct kfd_perf_attr {
388         struct kobj_attribute attr;
389         uint32_t data;
390 };
391
392 static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
393                         char *buf)
394 {
395         int offs = 0;
396         struct kfd_perf_attr *attr;
397
398         buf[0] = 0;
399         attr = container_of(attrs, struct kfd_perf_attr, attr);
400         if (!attr->data) /* invalid data for PMC */
401                 return 0;
402         else
403                 return sysfs_show_32bit_val(buf, offs, attr->data);
404 }
405
406 #define KFD_PERF_DESC(_name, _data)                     \
407 {                                                       \
408         .attr  = __ATTR(_name, 0444, perf_show, NULL),  \
409         .data = _data,                                  \
410 }
411
412 static struct kfd_perf_attr perf_attr_iommu[] = {
413         KFD_PERF_DESC(max_concurrent, 0),
414         KFD_PERF_DESC(num_counters, 0),
415         KFD_PERF_DESC(counter_ids, 0),
416 };
417 /****************************************/
418
419 static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
420                 char *buffer)
421 {
422         int offs = 0;
423         struct kfd_topology_device *dev;
424         uint32_t log_max_watch_addr;
425
426         /* Making sure that the buffer is an empty string */
427         buffer[0] = 0;
428
429         if (strcmp(attr->name, "gpu_id") == 0) {
430                 dev = container_of(attr, struct kfd_topology_device,
431                                 attr_gpuid);
432                 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
433                         return -EPERM;
434                 return sysfs_show_32bit_val(buffer, offs, dev->gpu_id);
435         }
436
437         if (strcmp(attr->name, "name") == 0) {
438                 dev = container_of(attr, struct kfd_topology_device,
439                                 attr_name);
440
441                 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
442                         return -EPERM;
443                 return sysfs_show_str_val(buffer, offs, dev->node_props.name);
444         }
445
446         dev = container_of(attr, struct kfd_topology_device,
447                         attr_props);
448         if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
449                 return -EPERM;
450         sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
451                               dev->node_props.cpu_cores_count);
452         sysfs_show_32bit_prop(buffer, offs, "simd_count",
453                               dev->gpu ? (dev->node_props.simd_count *
454                                           NUM_XCC(dev->gpu->xcc_mask)) : 0);
455         sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
456                               dev->node_props.mem_banks_count);
457         sysfs_show_32bit_prop(buffer, offs, "caches_count",
458                               dev->node_props.caches_count);
459         sysfs_show_32bit_prop(buffer, offs, "io_links_count",
460                               dev->node_props.io_links_count);
461         sysfs_show_32bit_prop(buffer, offs, "p2p_links_count",
462                               dev->node_props.p2p_links_count);
463         sysfs_show_32bit_prop(buffer, offs, "cpu_core_id_base",
464                               dev->node_props.cpu_core_id_base);
465         sysfs_show_32bit_prop(buffer, offs, "simd_id_base",
466                               dev->node_props.simd_id_base);
467         sysfs_show_32bit_prop(buffer, offs, "max_waves_per_simd",
468                               dev->node_props.max_waves_per_simd);
469         sysfs_show_32bit_prop(buffer, offs, "lds_size_in_kb",
470                               dev->node_props.lds_size_in_kb);
471         sysfs_show_32bit_prop(buffer, offs, "gds_size_in_kb",
472                               dev->node_props.gds_size_in_kb);
473         sysfs_show_32bit_prop(buffer, offs, "num_gws",
474                               dev->node_props.num_gws);
475         sysfs_show_32bit_prop(buffer, offs, "wave_front_size",
476                               dev->node_props.wave_front_size);
477         sysfs_show_32bit_prop(buffer, offs, "array_count",
478                               dev->gpu ? (dev->node_props.array_count *
479                                           NUM_XCC(dev->gpu->xcc_mask)) : 0);
480         sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine",
481                               dev->node_props.simd_arrays_per_engine);
482         sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array",
483                               dev->node_props.cu_per_simd_array);
484         sysfs_show_32bit_prop(buffer, offs, "simd_per_cu",
485                               dev->node_props.simd_per_cu);
486         sysfs_show_32bit_prop(buffer, offs, "max_slots_scratch_cu",
487                               dev->node_props.max_slots_scratch_cu);
488         sysfs_show_32bit_prop(buffer, offs, "gfx_target_version",
489                               dev->node_props.gfx_target_version);
490         sysfs_show_32bit_prop(buffer, offs, "vendor_id",
491                               dev->node_props.vendor_id);
492         sysfs_show_32bit_prop(buffer, offs, "device_id",
493                               dev->node_props.device_id);
494         sysfs_show_32bit_prop(buffer, offs, "location_id",
495                               dev->node_props.location_id);
496         sysfs_show_32bit_prop(buffer, offs, "domain",
497                               dev->node_props.domain);
498         sysfs_show_32bit_prop(buffer, offs, "drm_render_minor",
499                               dev->node_props.drm_render_minor);
500         sysfs_show_64bit_prop(buffer, offs, "hive_id",
501                               dev->node_props.hive_id);
502         sysfs_show_32bit_prop(buffer, offs, "num_sdma_engines",
503                               dev->node_props.num_sdma_engines);
504         sysfs_show_32bit_prop(buffer, offs, "num_sdma_xgmi_engines",
505                               dev->node_props.num_sdma_xgmi_engines);
506         sysfs_show_32bit_prop(buffer, offs, "num_sdma_queues_per_engine",
507                               dev->node_props.num_sdma_queues_per_engine);
508         sysfs_show_32bit_prop(buffer, offs, "num_cp_queues",
509                               dev->node_props.num_cp_queues);
510
511         if (dev->gpu) {
512                 log_max_watch_addr =
513                         __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);
514
515                 if (log_max_watch_addr) {
516                         dev->node_props.capability |=
517                                         HSA_CAP_WATCH_POINTS_SUPPORTED;
518
519                         dev->node_props.capability |=
520                                 ((log_max_watch_addr <<
521                                         HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
522                                 HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
523                 }
524
525                 if (dev->gpu->adev->asic_type == CHIP_TONGA)
526                         dev->node_props.capability |=
527                                         HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
528
529                 sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
530                         dev->node_props.max_engine_clk_fcompute);
531
532                 sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL);
533
534                 sysfs_show_32bit_prop(buffer, offs, "fw_version",
535                                       dev->gpu->kfd->mec_fw_version);
536                 sysfs_show_32bit_prop(buffer, offs, "capability",
537                                       dev->node_props.capability);
538                 sysfs_show_64bit_prop(buffer, offs, "debug_prop",
539                                       dev->node_props.debug_prop);
540                 sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
541                                       dev->gpu->kfd->sdma_fw_version);
542                 sysfs_show_64bit_prop(buffer, offs, "unique_id",
543                                       dev->gpu->adev->unique_id);
544                 sysfs_show_32bit_prop(buffer, offs, "num_xcc",
545                                       NUM_XCC(dev->gpu->xcc_mask));
546         }
547
548         return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
549                                      cpufreq_quick_get_max(0)/1000);
550 }
551
552 static const struct sysfs_ops node_ops = {
553         .show = node_show,
554 };
555
556 static const struct kobj_type node_type = {
557         .release = kfd_topology_kobj_release,
558         .sysfs_ops = &node_ops,
559 };
560
561 static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
562 {
563         sysfs_remove_file(kobj, attr);
564         kobject_del(kobj);
565         kobject_put(kobj);
566 }
567
568 static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
569 {
570         struct kfd_iolink_properties *p2plink;
571         struct kfd_iolink_properties *iolink;
572         struct kfd_cache_properties *cache;
573         struct kfd_mem_properties *mem;
574         struct kfd_perf_properties *perf;
575
576         if (dev->kobj_iolink) {
577                 list_for_each_entry(iolink, &dev->io_link_props, list)
578                         if (iolink->kobj) {
579                                 kfd_remove_sysfs_file(iolink->kobj,
580                                                         &iolink->attr);
581                                 iolink->kobj = NULL;
582                         }
583                 kobject_del(dev->kobj_iolink);
584                 kobject_put(dev->kobj_iolink);
585                 dev->kobj_iolink = NULL;
586         }
587
588         if (dev->kobj_p2plink) {
589                 list_for_each_entry(p2plink, &dev->p2p_link_props, list)
590                         if (p2plink->kobj) {
591                                 kfd_remove_sysfs_file(p2plink->kobj,
592                                                         &p2plink->attr);
593                                 p2plink->kobj = NULL;
594                         }
595                 kobject_del(dev->kobj_p2plink);
596                 kobject_put(dev->kobj_p2plink);
597                 dev->kobj_p2plink = NULL;
598         }
599
600         if (dev->kobj_cache) {
601                 list_for_each_entry(cache, &dev->cache_props, list)
602                         if (cache->kobj) {
603                                 kfd_remove_sysfs_file(cache->kobj,
604                                                         &cache->attr);
605                                 cache->kobj = NULL;
606                         }
607                 kobject_del(dev->kobj_cache);
608                 kobject_put(dev->kobj_cache);
609                 dev->kobj_cache = NULL;
610         }
611
612         if (dev->kobj_mem) {
613                 list_for_each_entry(mem, &dev->mem_props, list)
614                         if (mem->kobj) {
615                                 kfd_remove_sysfs_file(mem->kobj, &mem->attr);
616                                 mem->kobj = NULL;
617                         }
618                 kobject_del(dev->kobj_mem);
619                 kobject_put(dev->kobj_mem);
620                 dev->kobj_mem = NULL;
621         }
622
623         if (dev->kobj_perf) {
624                 list_for_each_entry(perf, &dev->perf_props, list) {
625                         kfree(perf->attr_group);
626                         perf->attr_group = NULL;
627                 }
628                 kobject_del(dev->kobj_perf);
629                 kobject_put(dev->kobj_perf);
630                 dev->kobj_perf = NULL;
631         }
632
633         if (dev->kobj_node) {
634                 sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
635                 sysfs_remove_file(dev->kobj_node, &dev->attr_name);
636                 sysfs_remove_file(dev->kobj_node, &dev->attr_props);
637                 kobject_del(dev->kobj_node);
638                 kobject_put(dev->kobj_node);
639                 dev->kobj_node = NULL;
640         }
641 }
642
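/* Create the sysfs directory for one topology node, its gpu_id, name and
 * properties files, and the mem_banks, caches, io_links, p2p_links and perf
 * subdirectories with their per-entry properties files.
 */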
643 static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
644                 uint32_t id)
645 {
646         struct kfd_iolink_properties *p2plink;
647         struct kfd_iolink_properties *iolink;
648         struct kfd_cache_properties *cache;
649         struct kfd_mem_properties *mem;
650         struct kfd_perf_properties *perf;
651         int ret;
652         uint32_t i, num_attrs;
653         struct attribute **attrs;
654
655         if (WARN_ON(dev->kobj_node))
656                 return -EEXIST;
657
658         /*
659          * Creating the sysfs folders
660          */
661         dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
662         if (!dev->kobj_node)
663                 return -ENOMEM;
664
665         ret = kobject_init_and_add(dev->kobj_node, &node_type,
666                         sys_props.kobj_nodes, "%d", id);
667         if (ret < 0) {
668                 kobject_put(dev->kobj_node);
669                 return ret;
670         }
671
672         dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
673         if (!dev->kobj_mem)
674                 return -ENOMEM;
675
676         dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
677         if (!dev->kobj_cache)
678                 return -ENOMEM;
679
680         dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
681         if (!dev->kobj_iolink)
682                 return -ENOMEM;
683
684         dev->kobj_p2plink = kobject_create_and_add("p2p_links", dev->kobj_node);
685         if (!dev->kobj_p2plink)
686                 return -ENOMEM;
687
688         dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
689         if (!dev->kobj_perf)
690                 return -ENOMEM;
691
692         /*
693          * Creating sysfs files for node properties
694          */
695         dev->attr_gpuid.name = "gpu_id";
696         dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
697         sysfs_attr_init(&dev->attr_gpuid);
698         dev->attr_name.name = "name";
699         dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
700         sysfs_attr_init(&dev->attr_name);
701         dev->attr_props.name = "properties";
702         dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
703         sysfs_attr_init(&dev->attr_props);
704         ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
705         if (ret < 0)
706                 return ret;
707         ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
708         if (ret < 0)
709                 return ret;
710         ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
711         if (ret < 0)
712                 return ret;
713
714         i = 0;
715         list_for_each_entry(mem, &dev->mem_props, list) {
716                 mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
717                 if (!mem->kobj)
718                         return -ENOMEM;
719                 ret = kobject_init_and_add(mem->kobj, &mem_type,
720                                 dev->kobj_mem, "%d", i);
721                 if (ret < 0) {
722                         kobject_put(mem->kobj);
723                         return ret;
724                 }
725
726                 mem->attr.name = "properties";
727                 mem->attr.mode = KFD_SYSFS_FILE_MODE;
728                 sysfs_attr_init(&mem->attr);
729                 ret = sysfs_create_file(mem->kobj, &mem->attr);
730                 if (ret < 0)
731                         return ret;
732                 i++;
733         }
734
735         i = 0;
736         list_for_each_entry(cache, &dev->cache_props, list) {
737                 cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
738                 if (!cache->kobj)
739                         return -ENOMEM;
740                 ret = kobject_init_and_add(cache->kobj, &cache_type,
741                                 dev->kobj_cache, "%d", i);
742                 if (ret < 0) {
743                         kobject_put(cache->kobj);
744                         return ret;
745                 }
746
747                 cache->attr.name = "properties";
748                 cache->attr.mode = KFD_SYSFS_FILE_MODE;
749                 sysfs_attr_init(&cache->attr);
750                 ret = sysfs_create_file(cache->kobj, &cache->attr);
751                 if (ret < 0)
752                         return ret;
753                 i++;
754         }
755
756         i = 0;
757         list_for_each_entry(iolink, &dev->io_link_props, list) {
758                 iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
759                 if (!iolink->kobj)
760                         return -ENOMEM;
761                 ret = kobject_init_and_add(iolink->kobj, &iolink_type,
762                                 dev->kobj_iolink, "%d", i);
763                 if (ret < 0) {
764                         kobject_put(iolink->kobj);
765                         return ret;
766                 }
767
768                 iolink->attr.name = "properties";
769                 iolink->attr.mode = KFD_SYSFS_FILE_MODE;
770                 sysfs_attr_init(&iolink->attr);
771                 ret = sysfs_create_file(iolink->kobj, &iolink->attr);
772                 if (ret < 0)
773                         return ret;
774                 i++;
775         }
776
777         i = 0;
778         list_for_each_entry(p2plink, &dev->p2p_link_props, list) {
779                 p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
780                 if (!p2plink->kobj)
781                         return -ENOMEM;
782                 ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
783                                 dev->kobj_p2plink, "%d", i);
784                 if (ret < 0) {
785                         kobject_put(p2plink->kobj);
786                         return ret;
787                 }
788
789                 p2plink->attr.name = "properties";
790                 p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
791                 sysfs_attr_init(&p2plink->attr);
792                 ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
793                 if (ret < 0)
794                         return ret;
795                 i++;
796         }
797
798         /* All hardware blocks have the same number of attributes. */
799         num_attrs = ARRAY_SIZE(perf_attr_iommu);
800         list_for_each_entry(perf, &dev->perf_props, list) {
801                 perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
802                         * num_attrs + sizeof(struct attribute_group),
803                         GFP_KERNEL);
804                 if (!perf->attr_group)
805                         return -ENOMEM;
806
807                 attrs = (struct attribute **)(perf->attr_group + 1);
808                 if (!strcmp(perf->block_name, "iommu")) {
809                         /* Information on the IOMMU's num_counters and counter_ids is
810                          * shown under /sys/bus/event_source/devices/amd_iommu.
811                          * We don't duplicate it here.
812                          */
813                         perf_attr_iommu[0].data = perf->max_concurrent;
814                         for (i = 0; i < num_attrs; i++)
815                                 attrs[i] = &perf_attr_iommu[i].attr.attr;
816                 }
817                 perf->attr_group->name = perf->block_name;
818                 perf->attr_group->attrs = attrs;
819                 ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
820                 if (ret < 0)
821                         return ret;
822         }
823
824         return 0;
825 }
826
827 /* Called with write topology lock acquired */
828 static int kfd_build_sysfs_node_tree(void)
829 {
830         struct kfd_topology_device *dev;
831         int ret;
832         uint32_t i = 0;
833
834         list_for_each_entry(dev, &topology_device_list, list) {
835                 ret = kfd_build_sysfs_node_entry(dev, i);
836                 if (ret < 0)
837                         return ret;
838                 i++;
839         }
840
841         return 0;
842 }
843
844 /* Called with write topology lock acquired */
845 static void kfd_remove_sysfs_node_tree(void)
846 {
847         struct kfd_topology_device *dev;
848
849         list_for_each_entry(dev, &topology_device_list, list)
850                 kfd_remove_sysfs_node_entry(dev);
851 }
852
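/* Create the topology sysfs root and "nodes" directory on first use, then
 * rebuild the per-node sysfs tree. Called with write topology_lock acquired.
 */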
853 static int kfd_topology_update_sysfs(void)
854 {
855         int ret;
856
857         if (!sys_props.kobj_topology) {
858                 sys_props.kobj_topology =
859                                 kfd_alloc_struct(sys_props.kobj_topology);
860                 if (!sys_props.kobj_topology)
861                         return -ENOMEM;
862
863                 ret = kobject_init_and_add(sys_props.kobj_topology,
864                                 &sysprops_type,  &kfd_device->kobj,
865                                 "topology");
866                 if (ret < 0) {
867                         kobject_put(sys_props.kobj_topology);
868                         return ret;
869                 }
870
871                 sys_props.kobj_nodes = kobject_create_and_add("nodes",
872                                 sys_props.kobj_topology);
873                 if (!sys_props.kobj_nodes)
874                         return -ENOMEM;
875
876                 sys_props.attr_genid.name = "generation_id";
877                 sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
878                 sysfs_attr_init(&sys_props.attr_genid);
879                 ret = sysfs_create_file(sys_props.kobj_topology,
880                                 &sys_props.attr_genid);
881                 if (ret < 0)
882                         return ret;
883
884                 sys_props.attr_props.name = "system_properties";
885                 sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
886                 sysfs_attr_init(&sys_props.attr_props);
887                 ret = sysfs_create_file(sys_props.kobj_topology,
888                                 &sys_props.attr_props);
889                 if (ret < 0)
890                         return ret;
891         }
892
893         kfd_remove_sysfs_node_tree();
894
895         return kfd_build_sysfs_node_tree();
896 }
897
898 static void kfd_topology_release_sysfs(void)
899 {
900         kfd_remove_sysfs_node_tree();
901         if (sys_props.kobj_topology) {
902                 sysfs_remove_file(sys_props.kobj_topology,
903                                 &sys_props.attr_genid);
904                 sysfs_remove_file(sys_props.kobj_topology,
905                                 &sys_props.attr_props);
906                 if (sys_props.kobj_nodes) {
907                         kobject_del(sys_props.kobj_nodes);
908                         kobject_put(sys_props.kobj_nodes);
909                         sys_props.kobj_nodes = NULL;
910                 }
911                 kobject_del(sys_props.kobj_topology);
912                 kobject_put(sys_props.kobj_topology);
913                 sys_props.kobj_topology = NULL;
914         }
915 }
916
917 /* Called with write topology_lock acquired */
918 static void kfd_topology_update_device_list(struct list_head *temp_list,
919                                         struct list_head *master_list)
920 {
921         while (!list_empty(temp_list)) {
922                 list_move_tail(temp_list->next, master_list);
923                 sys_props.num_devices++;
924         }
925 }
926
927 static void kfd_debug_print_topology(void)
928 {
929         struct kfd_topology_device *dev;
930
931         down_read(&topology_lock);
932
933         dev = list_last_entry(&topology_device_list,
934                         struct kfd_topology_device, list);
935         if (dev) {
936                 if (dev->node_props.cpu_cores_count &&
937                                 dev->node_props.simd_count) {
938                         pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
939                                 dev->node_props.device_id,
940                                 dev->node_props.vendor_id);
941                 } else if (dev->node_props.cpu_cores_count)
942                         pr_info("Topology: Add CPU node\n");
943                 else if (dev->node_props.simd_count)
944                         pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
945                                 dev->node_props.device_id,
946                                 dev->node_props.vendor_id);
947         }
948         up_read(&topology_lock);
949 }
950
951 /* Helper function for initializing platform_xx members of
952  * kfd_system_properties. Uses OEM info from the last CPU/APU node.
953  */
954 static void kfd_update_system_properties(void)
955 {
956         struct kfd_topology_device *dev;
957
958         down_read(&topology_lock);
959         dev = list_last_entry(&topology_device_list,
960                         struct kfd_topology_device, list);
961         if (dev) {
962                 sys_props.platform_id =
963                         (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
964                 sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
965                 sys_props.platform_rev = dev->oem_revision;
966         }
967         up_read(&topology_lock);
968 }
969
970 static void find_system_memory(const struct dmi_header *dm,
971         void *private)
972 {
973         struct kfd_mem_properties *mem;
974         u16 mem_width, mem_clock;
975         struct kfd_topology_device *kdev =
976                 (struct kfd_topology_device *)private;
977         const u8 *dmi_data = (const u8 *)(dm + 1);
978
979         if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
980                 mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
981                 mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
982                 list_for_each_entry(mem, &kdev->mem_props, list) {
983                         if (mem_width != 0xFFFF && mem_width != 0)
984                                 mem->width = mem_width;
985                         if (mem_clock != 0)
986                                 mem->mem_clk_max = mem_clock;
987                 }
988         }
989 }
990
991 /* kfd_add_non_crat_information - Add information that is not currently
992  *      defined in CRAT but is necessary for KFD topology
993  * @kdev - topology device to which additional info is added
994  */
995 static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
996 {
997         /* Check if CPU only node. */
998         if (!kdev->gpu) {
999                 /* Add system memory information */
1000                 dmi_walk(find_system_memory, kdev);
1001         }
1002         /* TODO: For GPU node, rearrange code from kfd_topology_add_device */
1003 }
1004
1005 int kfd_topology_init(void)
1006 {
1007         void *crat_image = NULL;
1008         size_t image_size = 0;
1009         int ret;
1010         struct list_head temp_topology_device_list;
1011         int cpu_only_node = 0;
1012         struct kfd_topology_device *kdev;
1013         int proximity_domain;
1014
1015         /* topology_device_list - Master list of all topology devices
1016          * temp_topology_device_list - temporary list created while parsing CRAT
1017          * or VCRAT. Once parsing is complete, the contents of the list are moved to
1018          * topology_device_list
1019          */
1020
1021         /* Initialize the heads of both lists */
1022         INIT_LIST_HEAD(&topology_device_list);
1023         INIT_LIST_HEAD(&temp_topology_device_list);
1024         init_rwsem(&topology_lock);
1025
1026         memset(&sys_props, 0, sizeof(sys_props));
1027
1028         /* Proximity domains in ACPI CRAT tables start counting at
1029          * 0. The same should be true for virtual CRAT tables created
1030          * at this stage. GPUs added later in kfd_topology_add_device
1031          * use a counter.
1032          */
1033         proximity_domain = 0;
1034
1035         ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
1036                                             COMPUTE_UNIT_CPU, NULL,
1037                                             proximity_domain);
1038         cpu_only_node = 1;
1039         if (ret) {
1040                 pr_err("Error creating VCRAT table for CPU\n");
1041                 return ret;
1042         }
1043
1044         ret = kfd_parse_crat_table(crat_image,
1045                                    &temp_topology_device_list,
1046                                    proximity_domain);
1047         if (ret) {
1048                 pr_err("Error parsing VCRAT table for CPU\n");
1049                 goto err;
1050         }
1051
1052         kdev = list_first_entry(&temp_topology_device_list,
1053                                 struct kfd_topology_device, list);
1054
1055         down_write(&topology_lock);
1056         kfd_topology_update_device_list(&temp_topology_device_list,
1057                                         &topology_device_list);
1058         topology_crat_proximity_domain = sys_props.num_devices-1;
1059         ret = kfd_topology_update_sysfs();
1060         up_write(&topology_lock);
1061
1062         if (!ret) {
1063                 sys_props.generation_count++;
1064                 kfd_update_system_properties();
1065                 kfd_debug_print_topology();
1066         } else
1067                 pr_err("Failed to update topology in sysfs ret=%d\n", ret);
1068
1069         /* For nodes with GPU, this information gets added
1070          * when GPU is detected (kfd_topology_add_device).
1071          */
1072         if (cpu_only_node) {
1073                 /* Add additional information to CPU only node created above */
1074                 down_write(&topology_lock);
1075                 kdev = list_first_entry(&topology_device_list,
1076                                 struct kfd_topology_device, list);
1077                 up_write(&topology_lock);
1078                 kfd_add_non_crat_information(kdev);
1079         }
1080
1081 err:
1082         kfd_destroy_crat_image(crat_image);
1083         return ret;
1084 }
1085
1086 void kfd_topology_shutdown(void)
1087 {
1088         down_write(&topology_lock);
1089         kfd_topology_release_sysfs();
1090         kfd_release_live_view();
1091         up_write(&topology_lock);
1092 }
1093
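/* Generate a gpu_id by hashing the GPU's PCI location, device IDs, local
 * memory size and XCC configuration.
 */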
1094 static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu)
1095 {
1096         uint32_t hashout;
1097         uint32_t buf[8];
1098         uint64_t local_mem_size;
1099         int i;
1100
1101         if (!gpu)
1102                 return 0;
1103
1104         local_mem_size = gpu->local_mem_info.local_mem_size_private +
1105                         gpu->local_mem_info.local_mem_size_public;
1106         buf[0] = gpu->adev->pdev->devfn;
1107         buf[1] = gpu->adev->pdev->subsystem_vendor |
1108                 (gpu->adev->pdev->subsystem_device << 16);
1109         buf[2] = pci_domain_nr(gpu->adev->pdev->bus);
1110         buf[3] = gpu->adev->pdev->device;
1111         buf[4] = gpu->adev->pdev->bus->number;
1112         buf[5] = lower_32_bits(local_mem_size);
1113         buf[6] = upper_32_bits(local_mem_size);
1114         buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16);
1115
1116         for (i = 0, hashout = 0; i < 8; i++)
1117                 hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
1118
1119         return hashout;
1120 }
1121 /* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
1122  *              the GPU device is not already present in the topology device
1123  *              list then return NULL. This means a new topology device has to
1124  *              be created for this GPU.
1125  */
1126 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu)
1127 {
1128         struct kfd_topology_device *dev;
1129         struct kfd_topology_device *out_dev = NULL;
1130         struct kfd_mem_properties *mem;
1131         struct kfd_cache_properties *cache;
1132         struct kfd_iolink_properties *iolink;
1133         struct kfd_iolink_properties *p2plink;
1134
1135         list_for_each_entry(dev, &topology_device_list, list) {
1136                 /* Discrete GPUs need their own topology device list
1137                  * entries. Don't assign them to CPU/APU nodes.
1138                  */
1139                 if (dev->node_props.cpu_cores_count)
1140                         continue;
1141
1142                 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
1143                         dev->gpu = gpu;
1144                         out_dev = dev;
1145
1146                         list_for_each_entry(mem, &dev->mem_props, list)
1147                                 mem->gpu = dev->gpu;
1148                         list_for_each_entry(cache, &dev->cache_props, list)
1149                                 cache->gpu = dev->gpu;
1150                         list_for_each_entry(iolink, &dev->io_link_props, list)
1151                                 iolink->gpu = dev->gpu;
1152                         list_for_each_entry(p2plink, &dev->p2p_link_props, list)
1153                                 p2plink->gpu = dev->gpu;
1154                         break;
1155                 }
1156         }
1157         return out_dev;
1158 }
1159
1160 static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
1161 {
1162         /*
1163          * TODO: Generate an event for thunk about the arrival/removal
1164          * of the GPU
1165          */
1166 }
1167
1168 /* kfd_fill_mem_clk_max_info - Since CRAT doesn't have memory clock info,
1169  *              patch this after CRAT parsing.
1170  */
1171 static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
1172 {
1173         struct kfd_mem_properties *mem;
1174         struct kfd_local_mem_info local_mem_info;
1175
1176         if (!dev)
1177                 return;
1178
1179         /* Currently, the amdgpu driver (amdgpu_mc) deals only with GPUs that
1180          * have a single bank of VRAM local memory.
1181          * For dGPUs - VCRAT reports only one bank of local memory.
1182          * For APUs - if CRAT from ACPI reports more than one bank, then
1183          *      all the banks will report the same mem_clk_max information.
1184          */
1185         amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info,
1186                                          dev->gpu->xcp);
1187
1188         list_for_each_entry(mem, &dev->mem_props, list)
1189                 mem->mem_clk_max = local_mem_info.mem_clk_max;
1190 }
1191
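/* Set the NO_ATOMICS flags on a non-xGMI IO link when PCIe atomics are not
 * supported on the path between the two devices.
 */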
1192 static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
1193                                         struct kfd_topology_device *target_gpu_dev,
1194                                         struct kfd_iolink_properties *link)
1195 {
1196         /* xgmi always supports atomics between links. */
1197         if (link->iolink_type == CRAT_IOLINK_TYPE_XGMI)
1198                 return;
1199
1200         /* check pcie support to set cpu(dev) flags for target_gpu_dev link. */
1201         if (target_gpu_dev) {
1202                 uint32_t cap;
1203
1204                 pcie_capability_read_dword(target_gpu_dev->gpu->adev->pdev,
1205                                 PCI_EXP_DEVCAP2, &cap);
1206
1207                 if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
1208                              PCI_EXP_DEVCAP2_ATOMIC_COMP64)))
1209                         link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
1210                                 CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
1211         /* set gpu (dev) flags. */
1212         } else {
1213                 if (!dev->gpu->kfd->pci_atomic_requested ||
1214                                 dev->gpu->adev->asic_type == CHIP_HAWAII)
1215                         link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
1216                                 CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
1217         }
1218 }
1219
1220 static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
1221                 struct kfd_iolink_properties *outbound_link,
1222                 struct kfd_iolink_properties *inbound_link)
1223 {
1224         /* CPU -> GPU with PCIe */
1225         if (!to_dev->gpu &&
1226             inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
1227                 inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
1228
1229         if (to_dev->gpu) {
1230                 /* GPU <-> GPU with PCIe and
1231                  * Vega20 with XGMI
1232                  */
1233                 if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
1234                     (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
1235                     KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
1236                         outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
1237                         inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
1238                 }
1239         }
1240 }
1241
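/* Fill in IO link and p2p link information that CRAT does not provide:
 * enabled/atomics/coherence flags and, for xGMI-attached CPUs, hive IDs.
 */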
1242 static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
1243 {
1244         struct kfd_iolink_properties *link, *inbound_link;
1245         struct kfd_topology_device *peer_dev;
1246
1247         if (!dev || !dev->gpu)
1248                 return;
1249
1250         /* A GPU only creates direct links, so apply the flag settings to all of them */
1251         list_for_each_entry(link, &dev->io_link_props, list) {
1252                 link->flags = CRAT_IOLINK_FLAGS_ENABLED;
1253                 kfd_set_iolink_no_atomics(dev, NULL, link);
1254                 peer_dev = kfd_topology_device_by_proximity_domain(
1255                                 link->node_to);
1256
1257                 if (!peer_dev)
1258                         continue;
1259
1260                 /* Include the CPU peer in GPU hive if connected over xGMI. */
1261                 if (!peer_dev->gpu &&
1262                     link->iolink_type == CRAT_IOLINK_TYPE_XGMI) {
1263                         /*
1264                          * If the GPU is not part of a GPU hive, use its pci
1265                          * device location as the hive ID to bind with the CPU.
1266                          */
1267                         if (!dev->node_props.hive_id)
1268                                 dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev);
1269                         peer_dev->node_props.hive_id = dev->node_props.hive_id;
1270                 }
1271
1272                 list_for_each_entry(inbound_link, &peer_dev->io_link_props,
1273                                                                         list) {
1274                         if (inbound_link->node_to != link->node_from)
1275                                 continue;
1276
1277                         inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
1278                         kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
1279                         kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
1280                 }
1281         }
1282
1283         /* Apply the same flag settings to all p2p (indirect) links */
1284         list_for_each_entry(link, &dev->p2p_link_props, list) {
1285                 link->flags = CRAT_IOLINK_FLAGS_ENABLED;
1286                 kfd_set_iolink_no_atomics(dev, NULL, link);
1287                 peer_dev = kfd_topology_device_by_proximity_domain(
1288                                 link->node_to);
1289
1290                 if (!peer_dev)
1291                         continue;
1292
1293                 list_for_each_entry(inbound_link, &peer_dev->p2p_link_props,
1294                                                                         list) {
1295                         if (inbound_link->node_to != link->node_from)
1296                                 continue;
1297
1298                         inbound_link->flags = CRAT_IOLINK_FLAGS_ENABLED;
1299                         kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link);
1300                         kfd_set_iolink_non_coherent(peer_dev, link, inbound_link);
1301                 }
1302         }
1303 }
1304
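/* Create the sysfs kobject and "properties" file for a single p2p link */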
1305 static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,
1306                                 struct kfd_iolink_properties *p2plink)
1307 {
1308         int ret;
1309
1310         p2plink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
1311         if (!p2plink->kobj)
1312                 return -ENOMEM;
1313
1314         ret = kobject_init_and_add(p2plink->kobj, &iolink_type,
1315                         dev->kobj_p2plink, "%d", dev->node_props.p2p_links_count - 1);
1316         if (ret < 0) {
1317                 kobject_put(p2plink->kobj);
1318                 return ret;
1319         }
1320
1321         p2plink->attr.name = "properties";
1322         p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
1323         sysfs_attr_init(&p2plink->attr);
1324         ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
1325         if (ret < 0)
1326                 return ret;
1327
1328         return 0;
1329 }
1330
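/* For every CPU node without a direct IO link to this GPU, create an indirect
 * CPU <--> CPU <--> GPU p2p link by combining the GPU's direct link with the
 * matching CPU <--> CPU link.
 */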
1331 static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
1332 {
1333         struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
1334         struct kfd_iolink_properties *props = NULL, *props2 = NULL;
1335         struct kfd_topology_device *cpu_dev;
1336         int ret = 0;
1337         int i, num_cpu;
1338
1339         num_cpu = 0;
1340         list_for_each_entry(cpu_dev, &topology_device_list, list) {
1341                 if (cpu_dev->gpu)
1342                         break;
1343                 num_cpu++;
1344         }
1345
1346         gpu_link = list_first_entry(&kdev->io_link_props,
1347                                         struct kfd_iolink_properties, list);
1348         if (!gpu_link)
1349                 return -ENOMEM;
1350
1351         for (i = 0; i < num_cpu; i++) {
1352                 /* CPU <--> GPU */
1353                 if (gpu_link->node_to == i)
1354                         continue;
1355
1356                 /* find CPU <--> CPU links */
1357                 cpu_link = NULL;
1358                 cpu_dev = kfd_topology_device_by_proximity_domain(i);
1359                 if (cpu_dev) {
1360                         list_for_each_entry(tmp_link,
1361                                         &cpu_dev->io_link_props, list) {
1362                                 if (tmp_link->node_to == gpu_link->node_to) {
1363                                         cpu_link = tmp_link;
1364                                         break;
1365                                 }
1366                         }
1367                 }
1368
1369                 if (!cpu_link)
1370                         return -ENOMEM;
1371
1372                 /* CPU <--> CPU <--> GPU, GPU node */
1373                 props = kfd_alloc_struct(props);
1374                 if (!props)
1375                         return -ENOMEM;
1376
1377                 memcpy(props, gpu_link, sizeof(struct kfd_iolink_properties));
1378                 props->weight = gpu_link->weight + cpu_link->weight;
1379                 props->min_latency = gpu_link->min_latency + cpu_link->min_latency;
1380                 props->max_latency = gpu_link->max_latency + cpu_link->max_latency;
1381                 props->min_bandwidth = min(gpu_link->min_bandwidth, cpu_link->min_bandwidth);
1382                 props->max_bandwidth = min(gpu_link->max_bandwidth, cpu_link->max_bandwidth);
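                /*
                 * Illustrative (hypothetical) numbers: a GPU link of weight 20
                 * combined with a CPU link of weight 40 yields an indirect
                 * weight of 60, while the bandwidth of the combined link is
                 * capped by the slower hop via min().
                 */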
1383
1384                 props->node_from = gpu_node;
1385                 props->node_to = i;
1386                 kdev->node_props.p2p_links_count++;
1387                 list_add_tail(&props->list, &kdev->p2p_link_props);
1388                 ret = kfd_build_p2p_node_entry(kdev, props);
1389                 if (ret < 0)
1390                         return ret;
1391
1392                 /* for small BAR, no CPU --> GPU indirect links */
1393                 if (kfd_dev_is_large_bar(kdev->gpu)) {
1394                         /* CPU <--> CPU <--> GPU, CPU node */
1395                         props2 = kfd_alloc_struct(props2);
1396                         if (!props2)
1397                                 return -ENOMEM;
1398
1399                         memcpy(props2, props, sizeof(struct kfd_iolink_properties));
1400                         props2->node_from = i;
1401                         props2->node_to = gpu_node;
1402                         props2->kobj = NULL;
1403                         cpu_dev->node_props.p2p_links_count++;
1404                         list_add_tail(&props2->list, &cpu_dev->p2p_link_props);
1405                         ret = kfd_build_p2p_node_entry(cpu_dev, props2);
1406                         if (ret < 0)
1407                                 return ret;
1408                 }
1409         }
1410         return ret;
1411 }
1412
1413 #if defined(CONFIG_HSA_AMD_P2P)
1414 static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
1415                 struct kfd_topology_device *peer, int from, int to)
1416 {
1417         struct kfd_iolink_properties *props = NULL;
1418         struct kfd_iolink_properties *iolink1, *iolink2, *iolink3;
1419         struct kfd_topology_device *cpu_dev;
1420         int ret = 0;
1421
1422         if (!amdgpu_device_is_peer_accessible(
1423                                 kdev->gpu->adev,
1424                                 peer->gpu->adev))
1425                 return ret;
1426
1427         iolink1 = list_first_entry(&kdev->io_link_props,
1428                                                         struct kfd_iolink_properties, list);
1429         if (!iolink1)
1430                 return -ENOMEM;
1431
1432         iolink2 = list_first_entry(&peer->io_link_props,
1433                                                         struct kfd_iolink_properties, list);
1434         if (!iolink2)
1435                 return -ENOMEM;
1436
1437         props = kfd_alloc_struct(props);
1438         if (!props)
1439                 return -ENOMEM;
1440
1441         memcpy(props, iolink1, sizeof(struct kfd_iolink_properties));
1442
1443         props->weight = iolink1->weight + iolink2->weight;
1444         props->min_latency = iolink1->min_latency + iolink2->min_latency;
1445         props->max_latency = iolink1->max_latency + iolink2->max_latency;
1446         props->min_bandwidth = min(iolink1->min_bandwidth, iolink2->min_bandwidth);
1447         props->max_bandwidth = min(iolink1->max_bandwidth, iolink2->max_bandwidth);
1448
1449         if (iolink1->node_to != iolink2->node_to) {
1450                 /* CPU->CPU link; only add the hop if a matching io link exists */
1451                 cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
1452                 if (cpu_dev) {
1453                         list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
1454                                 if (iolink3->node_to != iolink2->node_to)
1455                                         continue;
1456
1457                                 props->weight += iolink3->weight;
1458                                 props->min_latency += iolink3->min_latency;
1459                                 props->max_latency += iolink3->max_latency;
1460                                 props->min_bandwidth = min(props->min_bandwidth, iolink3->min_bandwidth);
1461                                 props->max_bandwidth = min(props->max_bandwidth, iolink3->max_bandwidth);
1462                                 break;
1463                         }
1464                 } else {
1465                         WARN(1, "CPU node not found");
1466                 }
1467         }
1468
1469         props->node_from = from;
1470         props->node_to = to;
1471         peer->node_props.p2p_links_count++;
1472         list_add_tail(&props->list, &peer->p2p_link_props);
1473         ret = kfd_build_p2p_node_entry(peer, props);
1474
1475         return ret;
1476 }
1477 #endif
1478
1479 static int kfd_dev_create_p2p_links(void)
1480 {
1481         struct kfd_topology_device *dev;
1482         struct kfd_topology_device *new_dev;
1483 #if defined(CONFIG_HSA_AMD_P2P)
1484         uint32_t i;
1485 #endif
1486         uint32_t k;
1487         int ret = 0;
1488
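        /* Count all topology nodes; the GPU that was just added is the last entry. */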
1489         k = 0;
1490         list_for_each_entry(dev, &topology_device_list, list)
1491                 k++;
1492         if (k < 2)
1493                 return 0;
1494
1495         new_dev = list_last_entry(&topology_device_list, struct kfd_topology_device, list);
1496         if (WARN_ON(!new_dev->gpu))
1497                 return 0;
1498
1499         k--;
1500
1501         /* create indirect links */
1502         ret = kfd_create_indirect_link_prop(new_dev, k);
1503         if (ret < 0)
1504                 goto out;
1505
1506         /* create p2p links */
1507 #if defined(CONFIG_HSA_AMD_P2P)
1508         i = 0;
1509         list_for_each_entry(dev, &topology_device_list, list) {
1510                 if (dev == new_dev)
1511                         break;
1512                 if (!dev->gpu || !dev->gpu->adev ||
1513                     (dev->gpu->kfd->hive_id &&
1514                      dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
1515                         goto next;
1516
1517                 /* check whether the nodes are peer accessible in one or both directions */
1518                 ret = kfd_add_peer_prop(new_dev, dev, i, k);
1519                 if (ret < 0)
1520                         goto out;
1521
1522                 ret = kfd_add_peer_prop(dev, new_dev, k, i);
1523                 if (ret < 0)
1524                         goto out;
1525 next:
1526                 i++;
1527         }
1528 #endif
1529
1530 out:
1531         return ret;
1532 }
1533
1534 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1535 static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
1536                                 struct kfd_gpu_cache_info *pcache_info,
1537                                 struct kfd_cu_info *cu_info,
1538                                 int cu_bitmask,
1539                                 int cache_type, unsigned int cu_processor_id,
1540                                 int cu_block)
1541 {
1542         unsigned int cu_sibling_map_mask;
1543         int first_active_cu;
1544         struct kfd_cache_properties *pcache = NULL;
1545
1546         cu_sibling_map_mask = cu_bitmask;
1547         cu_sibling_map_mask >>= cu_block;
1548         cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1549         first_active_cu = ffs(cu_sibling_map_mask);
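        /*
         * Worked example with hypothetical values: cu_bitmask = 0x3c,
         * cu_block = 2 and num_cu_shared = 4 give a shifted mask of 0xf,
         * so ffs() returns 1 and the cache below is reported as shared by
         * the four CUs starting at cu_processor_id.
         */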
1550
1551         /* CUs could be inactive. In the case of a shared cache, find the first
1552          * active CU; in the case of a non-shared cache, check whether the CU is
1553          * inactive and, if so, skip it.
1554          */
1555         if (first_active_cu) {
1556                 pcache = kfd_alloc_struct(pcache);
1557                 if (!pcache)
1558                         return -ENOMEM;
1559
1560                 memset(pcache, 0, sizeof(struct kfd_cache_properties));
1561                 pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
1562                 pcache->cache_level = pcache_info[cache_type].cache_level;
1563                 pcache->cache_size = pcache_info[cache_type].cache_size;
1564
1565                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
1566                         pcache->cache_type |= HSA_CACHE_TYPE_DATA;
1567                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
1568                         pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
1569                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
1570                         pcache->cache_type |= HSA_CACHE_TYPE_CPU;
1571                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
1572                         pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
1573
1574                 /* Sibling map is w.r.t processor_id_low, so shift out
1575                  * inactive CU
1576                  */
1577                 cu_sibling_map_mask =
1578                         cu_sibling_map_mask >> (first_active_cu - 1);
1579
1580                 pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1581                 pcache->sibling_map[1] =
1582                                 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1583                 pcache->sibling_map[2] =
1584                                 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1585                 pcache->sibling_map[3] =
1586                                 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1587
1588                 pcache->sibling_map_size = 4;
1589                 *props_ext = pcache;
1590
1591                 return 0;
1592         }
1593         return 1;
1594 }
1595
1596 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1597 static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
1598                                 struct kfd_gpu_cache_info *pcache_info,
1599                                 struct kfd_cu_info *cu_info,
1600                                 int cache_type, unsigned int cu_processor_id)
1601 {
1602         unsigned int cu_sibling_map_mask;
1603         int first_active_cu;
1604         int i, j, k;
1605         struct kfd_cache_properties *pcache = NULL;
1606
1607         cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1608         cu_sibling_map_mask &=
1609                 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1610         first_active_cu = ffs(cu_sibling_map_mask);
1611
1612         /* CUs could be inactive. In the case of a shared cache, find the first
1613          * active CU; in the case of a non-shared cache, check whether the CU is
1614          * inactive and, if so, skip it.
1615          */
1616         if (first_active_cu) {
1617                 pcache = kfd_alloc_struct(pcache);
1618                 if (!pcache)
1619                         return -ENOMEM;
1620
1621                 memset(pcache, 0, sizeof(struct kfd_cache_properties));
1622                 pcache->processor_id_low = cu_processor_id
1623                                         + (first_active_cu - 1);
1624                 pcache->cache_level = pcache_info[cache_type].cache_level;
1625                 pcache->cache_size = pcache_info[cache_type].cache_size;
1626
1627                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
1628                         pcache->cache_type |= HSA_CACHE_TYPE_DATA;
1629                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_INST_CACHE)
1630                         pcache->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
1631                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_CPU_CACHE)
1632                         pcache->cache_type |= HSA_CACHE_TYPE_CPU;
1633                 if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
1634                         pcache->cache_type |= HSA_CACHE_TYPE_HSACU;
1635
1636                 /* Sibling map is w.r.t processor_id_low, so shift out
1637                  * inactive CU
1638                  */
1639                 cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
1640                 k = 0;
1641
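                /*
                 * cu_info->cu_bitmap is a 4x4 array of 32-bit CU masks, one per
                 * (shader engine, shader array) pair; engines beyond the fourth
                 * wrap into the higher columns, hence the [i % 4][j + i / 4]
                 * indexing in the loop below (assumed layout of the bitmap).
                 */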
1642                 for (i = 0; i < cu_info->num_shader_engines; i++) {
1643                         for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1644                                 pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1645                                 pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1646                                 pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1647                                 pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1648                                 k += 4;
1649
1650                                 cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
1651                                 cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1652                         }
1653                 }
1654                 pcache->sibling_map_size = k;
1655                 *props_ext = pcache;
1656                 return 0;
1657         }
1658         return 1;
1659 }
1660
1661 #define KFD_MAX_CACHE_TYPES 6
1662
1663 /* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
1664  * tables
1665  */
1666 static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
1667 {
1668         struct kfd_gpu_cache_info *pcache_info = NULL;
1669         int i, j, k;
1670         int ct = 0;
1671         unsigned int cu_processor_id;
1672         int ret;
1673         unsigned int num_cu_shared;
1674         struct kfd_cu_info cu_info;
1675         struct kfd_cu_info *pcu_info;
1676         int gpu_processor_id;
1677         struct kfd_cache_properties *props_ext;
1678         int num_of_entries = 0;
1679         int num_of_cache_types = 0;
1680         struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
1681
1682         amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
1683         pcu_info = &cu_info;
1684
1685         gpu_processor_id = dev->node_props.simd_id_base;
1686
1687         pcache_info = cache_info;
1688         num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
1689         if (!num_of_cache_types) {
1690                 pr_warn("no cache info found\n");
1691                 return;
1692         }
1693
1694         /* For each type of cache listed in the kfd_gpu_cache_info table,
1695          * go through all available Compute Units.
1696          * The [i,j,k] loop
1697          *              parses through all available CUs when
1698          *              kfd_gpu_cache_info.num_cu_shared == 1, and
1699          *              considers only one CU from each group of
1700          *              shared CUs when
1701          *              kfd_gpu_cache_info.num_cu_shared != 1.
1702          */
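        /*
         * Example with hypothetical numbers: num_cu_shared = 2 and
         * num_cu_per_sh = 10 make the innermost loop below visit CU blocks
         * 0, 2, 4, 6 and 8, advancing cu_processor_id by two CUs per block.
         */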
1703         for (ct = 0; ct < num_of_cache_types; ct++) {
1704                 cu_processor_id = gpu_processor_id;
1705                 if (pcache_info[ct].cache_level == 1) {
1706                         for (i = 0; i < pcu_info->num_shader_engines; i++) {
1707                                 for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
1708                                         for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
1709
1710                                                 ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
1711                                                                                 pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
1712                                                                                 cu_processor_id, k);
1713
1714                                                 if (ret < 0)
1715                                                         break;
1716
1717                                                 if (!ret) {
1718                                                         num_of_entries++;
1719                                                         list_add_tail(&props_ext->list, &dev->cache_props);
1720                                                 }
1721
1722                                                 /* Move to next CU block */
1723                                                 num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1724                                                         pcu_info->num_cu_per_sh) ?
1725                                                         pcache_info[ct].num_cu_shared :
1726                                                         (pcu_info->num_cu_per_sh - k);
1727                                                 cu_processor_id += num_cu_shared;
1728                                         }
1729                                 }
1730                         }
1731                 } else {
1732                         ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
1733                                                                 pcu_info, ct, cu_processor_id);
1734
1735                         if (ret < 0)
1736                                 break;
1737
1738                         if (!ret) {
1739                                 num_of_entries++;
1740                                 list_add_tail(&props_ext->list, &dev->cache_props);
1741                         }
1742                 }
1743         }
1744         dev->node_props.caches_count += num_of_entries;
1745         pr_debug("Added [%d] GPU cache entries\n", num_of_entries);
1746 }
1747
1748 static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id,
1749                                           struct kfd_topology_device **dev)
1750 {
1751         int proximity_domain = ++topology_crat_proximity_domain;
1752         struct list_head temp_topology_device_list;
1753         void *crat_image = NULL;
1754         size_t image_size = 0;
1755         int res;
1756
1757         res = kfd_create_crat_image_virtual(&crat_image, &image_size,
1758                                             COMPUTE_UNIT_GPU, gpu,
1759                                             proximity_domain);
1760         if (res) {
1761                 pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
1762                        gpu_id);
1763                 topology_crat_proximity_domain--;
1764                 goto err;
1765         }
1766
1767         INIT_LIST_HEAD(&temp_topology_device_list);
1768
1769         res = kfd_parse_crat_table(crat_image,
1770                                    &temp_topology_device_list,
1771                                    proximity_domain);
1772         if (res) {
1773                 pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
1774                        gpu_id);
1775                 topology_crat_proximity_domain--;
1776                 goto err;
1777         }
1778
1779         kfd_topology_update_device_list(&temp_topology_device_list,
1780                                         &topology_device_list);
1781
1782         *dev = kfd_assign_gpu(gpu);
1783         if (WARN_ON(!*dev)) {
1784                 res = -ENODEV;
1785                 goto err;
1786         }
1787
1788         /* Fill the cache affinity information here for the GPUs
1789          * using VCRAT
1790          */
1791         kfd_fill_cache_non_crat_info(*dev, gpu);
1792
1793         /* Update the SYSFS tree, since we added another topology
1794          * device
1795          */
1796         res = kfd_topology_update_sysfs();
1797         if (!res)
1798                 sys_props.generation_count++;
1799         else
1800                 pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
1801                        gpu_id, res);
1802
1803 err:
1804         kfd_destroy_crat_image(crat_image);
1805         return res;
1806 }
1807
1808 static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev)
1809 {
1810         bool firmware_supported = true;
1811
1812         if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) &&
1813                         KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) {
1814                 uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version &
1815                                                 AMDGPU_MES_API_VERSION_MASK) >>
1816                                                 AMDGPU_MES_API_VERSION_SHIFT;
1817                 uint32_t mes_rev = dev->gpu->adev->mes.sched_version &
1818                                                 AMDGPU_MES_VERSION_MASK;
1819
1820                 firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64);
1821                 goto out;
1822         }
1823
1824         /*
1825          * Note: Any unlisted devices here are assumed to support exception handling.
1826          * Add additional checks here as needed.
1827          */
1828         switch (KFD_GC_VERSION(dev->gpu)) {
1829         case IP_VERSION(9, 0, 1):
1830                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768;
1831                 break;
1832         case IP_VERSION(9, 1, 0):
1833         case IP_VERSION(9, 2, 1):
1834         case IP_VERSION(9, 2, 2):
1835         case IP_VERSION(9, 3, 0):
1836         case IP_VERSION(9, 4, 0):
1837                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 459;
1838                 break;
1839         case IP_VERSION(9, 4, 1):
1840                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;
1841                 break;
1842         case IP_VERSION(9, 4, 2):
1843                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 51;
1844                 break;
1845         case IP_VERSION(10, 1, 10):
1846         case IP_VERSION(10, 1, 2):
1847         case IP_VERSION(10, 1, 1):
1848                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 144;
1849                 break;
1850         case IP_VERSION(10, 3, 0):
1851         case IP_VERSION(10, 3, 2):
1852         case IP_VERSION(10, 3, 1):
1853         case IP_VERSION(10, 3, 4):
1854         case IP_VERSION(10, 3, 5):
1855                 firmware_supported = dev->gpu->kfd->mec_fw_version >= 89;
1856                 break;
1857         case IP_VERSION(10, 1, 3):
1858         case IP_VERSION(10, 3, 3):
1859                 firmware_supported = false;
1860                 break;
1861         default:
1862                 break;
1863         }
1864
1865 out:
1866         if (firmware_supported)
1867                 dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED;
1868 }
1869
1870 static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
1871 {
1872         dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
1873                                 HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
1874                                 HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
1875
1876         dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT |
1877                         HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
1878                         HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
1879
1880         if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
1881                 dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
1882
1883         if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
1884                 if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3))
1885                         dev->node_props.debug_prop |=
1886                                 HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 |
1887                                 HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3;
1888                 else
1889                         dev->node_props.debug_prop |=
1890                                 HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
1891                                 HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
1892
1893                 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2))
1894                         dev->node_props.capability |=
1895                                 HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
1896         } else {
1897                 dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
1898                                         HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
1899
1900                 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
1901                         dev->node_props.capability |=
1902                                 HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
1903         }
1904
1905         kfd_topology_set_dbg_firmware_support(dev);
1906 }
1907
1908 int kfd_topology_add_device(struct kfd_node *gpu)
1909 {
1910         uint32_t gpu_id;
1911         struct kfd_topology_device *dev;
1912         struct kfd_cu_info cu_info;
1913         int res = 0;
1914         int i;
1915         const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
1916
1917         gpu_id = kfd_generate_gpu_id(gpu);
1918         if (gpu->xcp && !gpu->xcp->ddev) {
1919                 dev_warn(gpu->adev->dev,
1920                 "Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
1921                 gpu_id);
1922                 return 0;
1923         } else {
1924                 pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
1925         }
1926
1927         /* Check to see if this gpu device exists in the topology_device_list.
1928          * If so, assign the gpu to that device,
1929          * else create a Virtual CRAT for this gpu device and then parse that
1930          * CRAT to create a new topology device. Once created assign the gpu to
1931          * that topology device
1932          */
1933         down_write(&topology_lock);
1934         dev = kfd_assign_gpu(gpu);
1935         if (!dev)
1936                 res = kfd_topology_add_device_locked(gpu, gpu_id, &dev);
1937         up_write(&topology_lock);
1938         if (res)
1939                 return res;
1940
1941         dev->gpu_id = gpu_id;
1942         gpu->id = gpu_id;
1943
1944         kfd_dev_create_p2p_links();
1945
1946         /* TODO: Move the following lines to function
1947          *      kfd_add_non_crat_information
1948          */
1949
1950         /* Fill-in additional information that is not available in CRAT but
1951          * needed for the topology
1952          */
1953
1954         amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info);
1955
1956         for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) {
1957                 dev->node_props.name[i] = __tolower(asic_name[i]);
1958                 if (asic_name[i] == '\0')
1959                         break;
1960         }
1961         dev->node_props.name[i] = '\0';
1962
1963         dev->node_props.simd_arrays_per_engine =
1964                 cu_info.num_shader_arrays_per_engine;
1965
1966         dev->node_props.gfx_target_version =
1967                                 gpu->kfd->device_info.gfx_target_version;
1968         dev->node_props.vendor_id = gpu->adev->pdev->vendor;
1969         dev->node_props.device_id = gpu->adev->pdev->device;
1970         dev->node_props.capability |=
1971                 ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
1972                         HSA_CAP_ASIC_REVISION_MASK);
1973
1974         dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
1975         if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
1976                 dev->node_props.location_id |= dev->gpu->node_id;
1977
1978         dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
1979         dev->node_props.max_engine_clk_fcompute =
1980                 amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
1981         dev->node_props.max_engine_clk_ccompute =
1982                 cpufreq_quick_get_max(0) / 1000;
1983
1984         if (gpu->xcp)
1985                 dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index;
1986         else
1987                 dev->node_props.drm_render_minor =
1988                                 gpu->kfd->shared_resources.drm_render_minor;
1989
1990         dev->node_props.hive_id = gpu->kfd->hive_id;
1991         dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
1992         dev->node_props.num_sdma_xgmi_engines =
1993                                         kfd_get_num_xgmi_sdma_engines(gpu);
1994         dev->node_props.num_sdma_queues_per_engine =
1995                                 gpu->kfd->device_info.num_sdma_queues_per_engine -
1996                                 gpu->kfd->device_info.num_reserved_sdma_queues_per_engine;
1997         dev->node_props.num_gws = (dev->gpu->gws &&
1998                 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
1999                 dev->gpu->adev->gds.gws_size : 0;
2000         dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
2001
2002         kfd_fill_mem_clk_max_info(dev);
2003         kfd_fill_iolink_non_crat_info(dev);
2004
2005         switch (dev->gpu->adev->asic_type) {
2006         case CHIP_KAVERI:
2007         case CHIP_HAWAII:
2008         case CHIP_TONGA:
2009                 dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
2010                         HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
2011                         HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
2012                 break;
2013         case CHIP_CARRIZO:
2014         case CHIP_FIJI:
2015         case CHIP_POLARIS10:
2016         case CHIP_POLARIS11:
2017         case CHIP_POLARIS12:
2018         case CHIP_VEGAM:
2019                 pr_debug("Adding doorbell packet type capability\n");
2020                 dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
2021                         HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
2022                         HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
2023                 break;
2024         default:
2025                 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1))
2026                         WARN(1, "Unexpected ASIC family %u",
2027                              dev->gpu->adev->asic_type);
2028                 else
2029                         kfd_topology_set_capabilities(dev);
2030         }
2031
2032         /*
2033          * Overwrite the ATS capability according to needs_iommu_device to fix
2034          * a potentially missing corresponding bit in the CRAT provided by the BIOS.
2035          */
2036         dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
2037
2038         /* Fix errors in CZ CRAT.
2039          * simd_count: Carrizo CRAT reports wrong simd_count, probably
2040          *              because it doesn't consider masked out CUs
2041          * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
2042          */
2043         if (dev->gpu->adev->asic_type == CHIP_CARRIZO) {
2044                 dev->node_props.simd_count =
2045                         cu_info.simd_per_cu * cu_info.cu_active_number;
2046                 dev->node_props.max_waves_per_simd = 10;
2047         }
2048
2049         /* kfd is only concerned with SRAM ECC on GFX and HBM ECC on UMC */
2050         dev->node_props.capability |=
2051                 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
2052                 HSA_CAP_SRAM_EDCSUPPORTED : 0;
2053         dev->node_props.capability |=
2054                 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
2055                 HSA_CAP_MEM_EDCSUPPORTED : 0;
2056
2057         if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1))
2058                 dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
2059                         HSA_CAP_RASEVENTNOTIFY : 0;
2060
2061         if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
2062                 dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
2063
2064         if (dev->gpu->adev->gmc.is_app_apu ||
2065                 dev->gpu->adev->gmc.xgmi.connected_to_cpu)
2066                 dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
2067
2068         kfd_debug_print_topology();
2069
2070         kfd_notify_gpu_change(gpu_id, 1);
2071
2072         return 0;
2073 }
2074
2075 /**
2076  * kfd_topology_update_io_links() - Update IO links after device removal.
2077  * @proximity_domain: Proximity domain value of the dev being removed.
2078  *
2079  * The topology list currently is arranged in increasing order of
2080  * proximity domain.
2081  *
2082  * Two things need to be done when a device is removed:
2083  * 1. All the IO links to this device need to be removed.
2084  * 2. All nodes after the current device node need to move
2085  *    up once this device node is removed from the topology
2086  *    list. As a result, the proximity domain values for
2087  *    all nodes after the node being deleted reduce by 1.
2088  *    This would also cause the proximity domain values for
2089  *    io links to be updated based on new proximity domain
2090  *    values.
2091  *
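 * For example (illustrative): removing the node with proximity domain 2 from
 * nodes 0..4 turns nodes 3 and 4 into domains 2 and 3; io links pointing to
 * domain 2 are deleted, and any node_from/node_to greater than 2 is
 * decremented by one.
 *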
2092  * Context: The caller must hold write topology_lock.
2093  */
2094 static void kfd_topology_update_io_links(int proximity_domain)
2095 {
2096         struct kfd_topology_device *dev;
2097         struct kfd_iolink_properties *iolink, *p2plink, *tmp;
2098
2099         list_for_each_entry(dev, &topology_device_list, list) {
2100                 if (dev->proximity_domain > proximity_domain)
2101                         dev->proximity_domain--;
2102
2103                 list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
2104                         /*
2105                          * If there is an io link to the dev being deleted
2106                          * then remove that IO link also.
2107                          */
2108                         if (iolink->node_to == proximity_domain) {
2109                                 list_del(&iolink->list);
2110                                 dev->node_props.io_links_count--;
2111                         } else {
2112                                 if (iolink->node_from > proximity_domain)
2113                                         iolink->node_from--;
2114                                 if (iolink->node_to > proximity_domain)
2115                                         iolink->node_to--;
2116                         }
2117                 }
2118
2119                 list_for_each_entry_safe(p2plink, tmp, &dev->p2p_link_props, list) {
2120                         /*
2121                          * If there is a p2p link to the dev being deleted
2122                          * then remove that p2p link also.
2123                          */
2124                         if (p2plink->node_to == proximity_domain) {
2125                                 list_del(&p2plink->list);
2126                                 dev->node_props.p2p_links_count--;
2127                         } else {
2128                                 if (p2plink->node_from > proximity_domain)
2129                                         p2plink->node_from--;
2130                                 if (p2plink->node_to > proximity_domain)
2131                                         p2plink->node_to--;
2132                         }
2133                 }
2134         }
2135 }
2136
2137 int kfd_topology_remove_device(struct kfd_node *gpu)
2138 {
2139         struct kfd_topology_device *dev, *tmp;
2140         uint32_t gpu_id;
2141         int res = -ENODEV;
2142         int i = 0;
2143
2144         down_write(&topology_lock);
2145
2146         list_for_each_entry_safe(dev, tmp, &topology_device_list, list) {
2147                 if (dev->gpu == gpu) {
2148                         gpu_id = dev->gpu_id;
2149                         kfd_remove_sysfs_node_entry(dev);
2150                         kfd_release_topology_device(dev);
2151                         sys_props.num_devices--;
2152                         kfd_topology_update_io_links(i);
2153                         topology_crat_proximity_domain = sys_props.num_devices-1;
2154                         sys_props.generation_count++;
2155                         res = 0;
2156                         if (kfd_topology_update_sysfs() < 0)
2157                                 kfd_topology_release_sysfs();
2158                         break;
2159                 }
2160                 i++;
2161         }
2162
2163         up_write(&topology_lock);
2164
2165         if (!res)
2166                 kfd_notify_gpu_change(gpu_id, 0);
2167
2168         return res;
2169 }
2170
2171 /* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
2172  *      topology. If a GPU device is found at @idx, then a valid kfd_node
2173  *      pointer is returned through @kdev
2174  * Return -     0: On success (@kdev will be NULL for non GPU nodes)
2175  *              -1: If end of list
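 *
 * Typical usage (illustrative sketch):
 *      for (i = 0; kfd_topology_enum_kfd_devices(i, &kdev) == 0; i++)
 *              if (kdev) { ...handle the GPU node... }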
2176  */
2177 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev)
2178 {
2179
2180         struct kfd_topology_device *top_dev;
2181         uint8_t device_idx = 0;
2182
2183         *kdev = NULL;
2184         down_read(&topology_lock);
2185
2186         list_for_each_entry(top_dev, &topology_device_list, list) {
2187                 if (device_idx == idx) {
2188                         *kdev = top_dev->gpu;
2189                         up_read(&topology_lock);
2190                         return 0;
2191                 }
2192
2193                 device_idx++;
2194         }
2195
2196         up_read(&topology_lock);
2197
2198         return -1;
2199
2200 }
2201
2202 static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
2203 {
2204         int first_cpu_of_numa_node;
2205
2206         if (!cpumask || cpumask == cpu_none_mask)
2207                 return -1;
2208         first_cpu_of_numa_node = cpumask_first(cpumask);
2209         if (first_cpu_of_numa_node >= nr_cpu_ids)
2210                 return -1;
2211 #ifdef CONFIG_X86_64
2212         return cpu_data(first_cpu_of_numa_node).apicid;
2213 #else
2214         return first_cpu_of_numa_node;
2215 #endif
2216 }
2217
2218 /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
2219  *      of the given NUMA node (numa_node_id)
2220  * Return -1 on failure
2221  */
2222 int kfd_numa_node_to_apic_id(int numa_node_id)
2223 {
2224         if (numa_node_id == -1) {
2225                 pr_warn("Invalid NUMA node, using online CPU mask\n");
2226                 return kfd_cpumask_to_apic_id(cpu_online_mask);
2227         }
2228         return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
2229 }
2230
2231 #if defined(CONFIG_DEBUG_FS)
2232
2233 int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
2234 {
2235         struct kfd_topology_device *dev;
2236         unsigned int i = 0;
2237         int r = 0;
2238
2239         down_read(&topology_lock);
2240
2241         list_for_each_entry(dev, &topology_device_list, list) {
2242                 if (!dev->gpu) {
2243                         i++;
2244                         continue;
2245                 }
2246
2247                 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
2248                 r = dqm_debugfs_hqds(m, dev->gpu->dqm);
2249                 if (r)
2250                         break;
2251         }
2252
2253         up_read(&topology_lock);
2254
2255         return r;
2256 }
2257
2258 int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
2259 {
2260         struct kfd_topology_device *dev;
2261         unsigned int i = 0;
2262         int r = 0;
2263
2264         down_read(&topology_lock);
2265
2266         list_for_each_entry(dev, &topology_device_list, list) {
2267                 if (!dev->gpu) {
2268                         i++;
2269                         continue;
2270                 }
2271
2272                 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
2273                 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);
2274                 if (r)
2275                         break;
2276         }
2277
2278         up_read(&topology_lock);
2279
2280         return r;
2281 }
2282
2283 #endif