/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#define MQD_SIZE_ALIGNED 768
/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the driver stops any further GPU execution and
 * process creation (open) returns -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);
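
/*
 * Usage sketch: kfd_locked is a counter, not a mutex. Each
 * kgd2kfd_suspend() or pre-reset path increments it and the matching
 * resume/post-reset path decrements it, so the driver stays locked
 * until the last suspended device resumes; kfd_is_locked() below is
 * what the open path checks before admitting a new process.
 */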
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
        .asic_family = CHIP_KAVERI,
        .max_pasid_bits = 16,
        /* max num of queues for KV. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info carrizo_device_info = {
        .asic_family = CHIP_CARRIZO,
        .max_pasid_bits = 16,
        /* max num of queues for CZ. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info raven_device_info = {
        .asic_family = CHIP_RAVEN,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = true,
        .num_sdma_engines = 1,
        .num_sdma_queues_per_engine = 2,
};
#endif
static const struct kfd_device_info hawaii_device_info = {
        .asic_family = CHIP_HAWAII,
        .max_pasid_bits = 16,
        /* max num of queues for Hawaii. TODO: should be a dynamic value */
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info tonga_device_info = {
        .asic_family = CHIP_TONGA,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info fiji_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
        .asic_family = CHIP_FIJI,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris10_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
        .asic_family = CHIP_POLARIS10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris11_device_info = {
        .asic_family = CHIP_POLARIS11,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info polaris12_device_info = {
        .asic_family = CHIP_POLARIS12,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 4,
        .ih_ring_entry_size = 4 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_cik,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega10_device_info = {
        .asic_family = CHIP_VEGA10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
        .asic_family = CHIP_VEGA10,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega12_device_info = {
        .asic_family = CHIP_VEGA12,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 2,
};
static const struct kfd_device_info vega20_device_info = {
        .asic_family = CHIP_VEGA20,
        .max_pasid_bits = 16,
        .max_no_of_hqd = 24,
        .doorbell_size = 8,
        .ih_ring_entry_size = 8 * sizeof(uint32_t),
        .event_interrupt_class = &event_interrupt_class_v9,
        .num_of_watch_points = 4,
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
        .num_sdma_engines = 2,
        .num_sdma_queues_per_engine = 8,
};
struct kfd_deviceid {
        unsigned short did;
        const struct kfd_device_info *device_info;
};
static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
        { 0x1304, &kaveri_device_info },        /* Kaveri */
        { 0x1305, &kaveri_device_info },        /* Kaveri */
        { 0x1306, &kaveri_device_info },        /* Kaveri */
        { 0x1307, &kaveri_device_info },        /* Kaveri */
        { 0x1309, &kaveri_device_info },        /* Kaveri */
        { 0x130A, &kaveri_device_info },        /* Kaveri */
        { 0x130B, &kaveri_device_info },        /* Kaveri */
        { 0x130C, &kaveri_device_info },        /* Kaveri */
        { 0x130D, &kaveri_device_info },        /* Kaveri */
        { 0x130E, &kaveri_device_info },        /* Kaveri */
        { 0x130F, &kaveri_device_info },        /* Kaveri */
        { 0x1310, &kaveri_device_info },        /* Kaveri */
        { 0x1311, &kaveri_device_info },        /* Kaveri */
        { 0x1312, &kaveri_device_info },        /* Kaveri */
        { 0x1313, &kaveri_device_info },        /* Kaveri */
        { 0x1315, &kaveri_device_info },        /* Kaveri */
        { 0x1316, &kaveri_device_info },        /* Kaveri */
        { 0x1317, &kaveri_device_info },        /* Kaveri */
        { 0x1318, &kaveri_device_info },        /* Kaveri */
        { 0x131B, &kaveri_device_info },        /* Kaveri */
        { 0x131C, &kaveri_device_info },        /* Kaveri */
        { 0x131D, &kaveri_device_info },        /* Kaveri */
        { 0x9870, &carrizo_device_info },       /* Carrizo */
        { 0x9874, &carrizo_device_info },       /* Carrizo */
        { 0x9875, &carrizo_device_info },       /* Carrizo */
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
        { 0x15DD, &raven_device_info },         /* Raven */
        { 0x15D8, &raven_device_info },         /* Raven */
#endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
        { 0x67A2, &hawaii_device_info },        /* Hawaii */
        { 0x67A8, &hawaii_device_info },        /* Hawaii */
        { 0x67A9, &hawaii_device_info },        /* Hawaii */
        { 0x67AA, &hawaii_device_info },        /* Hawaii */
        { 0x67B0, &hawaii_device_info },        /* Hawaii */
        { 0x67B1, &hawaii_device_info },        /* Hawaii */
        { 0x67B8, &hawaii_device_info },        /* Hawaii */
        { 0x67B9, &hawaii_device_info },        /* Hawaii */
        { 0x67BA, &hawaii_device_info },        /* Hawaii */
        { 0x67BE, &hawaii_device_info },        /* Hawaii */
        { 0x6920, &tonga_device_info },         /* Tonga */
        { 0x6921, &tonga_device_info },         /* Tonga */
        { 0x6928, &tonga_device_info },         /* Tonga */
        { 0x6929, &tonga_device_info },         /* Tonga */
        { 0x692B, &tonga_device_info },         /* Tonga */
        { 0x6938, &tonga_device_info },         /* Tonga */
        { 0x6939, &tonga_device_info },         /* Tonga */
        { 0x7300, &fiji_device_info },          /* Fiji */
        { 0x730F, &fiji_vf_device_info },       /* Fiji vf */
        { 0x67C0, &polaris10_device_info },     /* Polaris10 */
        { 0x67C1, &polaris10_device_info },     /* Polaris10 */
        { 0x67C2, &polaris10_device_info },     /* Polaris10 */
        { 0x67C4, &polaris10_device_info },     /* Polaris10 */
        { 0x67C7, &polaris10_device_info },     /* Polaris10 */
        { 0x67C8, &polaris10_device_info },     /* Polaris10 */
        { 0x67C9, &polaris10_device_info },     /* Polaris10 */
        { 0x67CA, &polaris10_device_info },     /* Polaris10 */
        { 0x67CC, &polaris10_device_info },     /* Polaris10 */
        { 0x67CF, &polaris10_device_info },     /* Polaris10 */
        { 0x67D0, &polaris10_vf_device_info },  /* Polaris10 vf */
        { 0x67DF, &polaris10_device_info },     /* Polaris10 */
        { 0x67E0, &polaris11_device_info },     /* Polaris11 */
        { 0x67E1, &polaris11_device_info },     /* Polaris11 */
        { 0x67E3, &polaris11_device_info },     /* Polaris11 */
        { 0x67E7, &polaris11_device_info },     /* Polaris11 */
        { 0x67E8, &polaris11_device_info },     /* Polaris11 */
        { 0x67E9, &polaris11_device_info },     /* Polaris11 */
        { 0x67EB, &polaris11_device_info },     /* Polaris11 */
        { 0x67EF, &polaris11_device_info },     /* Polaris11 */
        { 0x67FF, &polaris11_device_info },     /* Polaris11 */
        { 0x6980, &polaris12_device_info },     /* Polaris12 */
        { 0x6981, &polaris12_device_info },     /* Polaris12 */
        { 0x6985, &polaris12_device_info },     /* Polaris12 */
        { 0x6986, &polaris12_device_info },     /* Polaris12 */
        { 0x6987, &polaris12_device_info },     /* Polaris12 */
        { 0x6995, &polaris12_device_info },     /* Polaris12 */
        { 0x6997, &polaris12_device_info },     /* Polaris12 */
        { 0x699F, &polaris12_device_info },     /* Polaris12 */
        { 0x6860, &vega10_device_info },        /* Vega10 */
        { 0x6861, &vega10_device_info },        /* Vega10 */
        { 0x6862, &vega10_device_info },        /* Vega10 */
        { 0x6863, &vega10_device_info },        /* Vega10 */
        { 0x6864, &vega10_device_info },        /* Vega10 */
        { 0x6867, &vega10_device_info },        /* Vega10 */
        { 0x6868, &vega10_device_info },        /* Vega10 */
        { 0x6869, &vega10_device_info },        /* Vega10 */
        { 0x686A, &vega10_device_info },        /* Vega10 */
        { 0x686B, &vega10_device_info },        /* Vega10 */
        { 0x686C, &vega10_vf_device_info },     /* Vega10 vf */
        { 0x686D, &vega10_device_info },        /* Vega10 */
        { 0x686E, &vega10_device_info },        /* Vega10 */
        { 0x686F, &vega10_device_info },        /* Vega10 */
        { 0x687F, &vega10_device_info },        /* Vega10 */
        { 0x69A0, &vega12_device_info },        /* Vega12 */
        { 0x69A1, &vega12_device_info },        /* Vega12 */
        { 0x69A2, &vega12_device_info },        /* Vega12 */
        { 0x69A3, &vega12_device_info },        /* Vega12 */
        { 0x69AF, &vega12_device_info },        /* Vega12 */
        { 0x66a0, &vega20_device_info },        /* Vega20 */
        { 0x66a1, &vega20_device_info },        /* Vega20 */
        { 0x66a2, &vega20_device_info },        /* Vega20 */
        { 0x66a3, &vega20_device_info },        /* Vega20 */
        { 0x66a4, &vega20_device_info },        /* Vega20 */
        { 0x66a7, &vega20_device_info },        /* Vega20 */
        { 0x66af, &vega20_device_info }         /* Vega20 */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
                if (supported_devices[i].did == did) {
                        WARN_ON(!supported_devices[i].device_info);
                        return supported_devices[i].device_info;
                }
        }

        dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
                 did);

        return NULL;
}
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
        struct kfd_dev *kfd;
        int ret;
        const struct kfd_device_info *device_info =
                                        lookup_device_info(pdev->device);

        if (!device_info) {
                dev_err(kfd_device, "kgd2kfd_probe failed\n");
                return NULL;
        }

        kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
        if (!kfd)
                return NULL;

        /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
         * 32 and 64-bit requests are possible and must be
         * supported.
         */
        ret = pci_enable_atomic_ops_to_root(pdev,
                        PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                        PCI_EXP_DEVCAP2_ATOMIC_COMP64);
        if (device_info->needs_pci_atomics && ret < 0) {
                dev_info(kfd_device,
                         "skipped device %x:%x, PCI rejects atomics\n",
                         pdev->vendor, pdev->device);
                kfree(kfd);
                return NULL;
        } else if (!ret)
                kfd->pci_atomic_requested = true;

        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
        kfd->init_complete = false;
        kfd->kfd2kgd = f2g;

        mutex_init(&kfd->doorbell_mutex);
        memset(&kfd->doorbell_available_index, 0,
               sizeof(kfd->doorbell_available_index));

        atomic_set(&kfd->sram_ecc_flag, 0);

        return kfd;
}
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
        if (cwsr_enable && kfd->device_info->supports_cwsr) {
                if (kfd->device_info->asic_family < CHIP_VEGA10) {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx8_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
                } else {
                        BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
                        kfd->cwsr_isa = cwsr_trap_gfx9_hex;
                        kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
                }

                kfd->cwsr_enabled = true;
        }
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources)
{
        unsigned int size;

        kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
                        KGD_ENGINE_MEC1);
        kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
                        KGD_ENGINE_SDMA1);
        kfd->shared_resources = *gpu_resources;

        kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;
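
        /*
         * Worked example (illustrative): compute_vmid_bitmap = 0xFF00
         * gives first_vmid_kfd = ffs(0xFF00) - 1 = 8 and last_vmid_kfd =
         * fls(0xFF00) - 1 = 15, so KFD owns the eight VMIDs 8..15.
         */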
        /* Verify module parameters regarding mapped process number */
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
                dev_err(kfd_device,
                        "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
                        hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
                        kfd->vm_info.vmid_num_kfd);
                kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
        } else
                kfd->max_proc_per_quantum = hws_max_conc_proc;

        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
                        kfd->device_info->mqd_size_aligned;

        /*
         * calculate max size of runlist packet.
         * There can be only 2 packets at once
         */
        size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
                max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
                + sizeof(struct pm4_mes_runlist)) * 2;
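
        /*
         * Illustrative sizing: each runlist packet is one pm4_mes_runlist
         * header, one pm4_mes_map_process per process (up to
         * KFD_MAX_NUM_OF_PROCESSES) and one pm4_mes_map_queues per queue
         * (up to max_num_of_queues_per_device); the "* 2" above reserves
         * room for the two packets that can be in flight at once.
         */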
        /* Add size of HIQ & DIQ */
        size += KFD_KERNEL_QUEUE_SIZE * 2;

        /* add another 512KB for all other allocations on gart (HPD, fences) */
        size += 512 * 1024;

        if (amdgpu_amdkfd_alloc_gtt_mem(
                        kfd->kgd, size, &kfd->gtt_mem,
                        &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
                        false)) {
                dev_err(kfd_device, "Could not allocate %d bytes\n", size);
                goto out;
        }

        dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

        /* Initialize GTT sa with 512 byte chunk size */
        if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
                dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
                goto kfd_gtt_sa_init_error;
        }

        if (kfd_doorbell_init(kfd)) {
                dev_err(kfd_device,
                        "Error initializing doorbell aperture\n");
                goto kfd_doorbell_error;
        }

        if (kfd->kfd2kgd->get_hive_id)
                kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

        if (kfd_topology_add_device(kfd)) {
                dev_err(kfd_device, "Error adding device to topology\n");
                goto kfd_topology_add_device_error;
        }

        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device, "Error initializing interrupts\n");
                goto kfd_interrupt_error;
        }

        kfd->dqm = device_queue_manager_init(kfd);
        if (!kfd->dqm) {
                dev_err(kfd_device, "Error initializing queue manager\n");
                goto device_queue_manager_error;
        }

        if (kfd_iommu_device_init(kfd)) {
                dev_err(kfd_device, "Error initializing iommuv2\n");
                goto device_iommu_error;
        }

        kfd_cwsr_init(kfd);

        if (kfd_resume(kfd))
                goto kfd_resume_error;

        kfd->dbgmgr = NULL;

        kfd->init_complete = true;
        dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
                 kfd->pdev->device);

        pr_debug("Starting kfd with the following scheduling policy %d\n",
                kfd->dqm->sched_policy);

        goto out;

kfd_resume_error:
device_iommu_error:
        device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
        kfd_interrupt_exit(kfd);
kfd_interrupt_error:
        kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
        kfd_doorbell_fini(kfd);
kfd_doorbell_error:
        kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
        amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        dev_err(kfd_device,
                "device %x:%x NOT added due to errors\n",
                kfd->pdev->vendor, kfd->pdev->device);
out:
        return kfd->init_complete;
}
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
        if (kfd->init_complete) {
                kgd2kfd_suspend(kfd);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
                kfd_gtt_sa_fini(kfd);
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
        }

        kfree(kfd);
}
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
        if (!kfd->init_complete)
                return 0;
        kgd2kfd_suspend(kfd);

        /* hold dqm->lock to prevent further execution */
        dqm_lock(kfd->dqm);

        kfd_signal_reset_event(kfd);
        return 0;
}
/*
 * FIXME: KFD cannot resume existing processes for now.
 * We keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
        int ret, count;

        if (!kfd->init_complete)
                return 0;

        dqm_unlock(kfd->dqm);

        ret = kfd_resume(kfd);
        if (ret)
                return ret;
        count = atomic_dec_return(&kfd_locked);
        WARN_ONCE(count != 0, "KFD reset ref. error");

        atomic_set(&kfd->sram_ecc_flag, 0);

        return 0;
}
bool kfd_is_locked(void)
{
        return (atomic_read(&kfd_locked) > 0);
}
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
        if (!kfd->init_complete)
                return;

        /* For first KFD device suspend all the KFD processes */
        if (atomic_inc_return(&kfd_locked) == 1)
                kfd_suspend_all_processes();

        kfd->dqm->ops.stop(kfd->dqm);

        kfd_iommu_suspend(kfd);
}
int kgd2kfd_resume(struct kfd_dev *kfd)
{
        int ret, count;

        if (!kfd->init_complete)
                return 0;

        ret = kfd_resume(kfd);
        if (ret)
                return ret;

        count = atomic_dec_return(&kfd_locked);
        WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
        if (count == 0)
                ret = kfd_resume_all_processes();

        return ret;
}
static int kfd_resume(struct kfd_dev *kfd)
{
        int err = 0;

        err = kfd_iommu_resume(kfd);
        if (err) {
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                return err;
        }

        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
                dev_err(kfd_device,
                        "Error starting queue manager for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
                goto dqm_start_error;
        }

        return err;

dqm_start_error:
        kfd_iommu_suspend(kfd);
        return err;
}
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
        uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
        bool is_patched = false;
        unsigned long flags;

        if (!kfd->init_complete)
                return;

        if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
                dev_err_once(kfd_device, "Ring entry too small\n");
                return;
        }

        spin_lock_irqsave(&kfd->interrupt_lock, flags);

        if (kfd->interrupts_active
            && interrupt_is_wanted(kfd, ih_ring_entry,
                                   patched_ihre, &is_patched)
            && enqueue_ih_ring_entry(kfd,
                                     is_patched ? patched_ihre : ih_ring_entry))
                queue_work(kfd->ih_wq, &kfd->interrupt_work);

        spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_evict_queues(p);

        kfd_unref_process(p);
        return r;
}
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
        struct kfd_process *p;
        int r;

        /* Because we are called from arbitrary context (workqueue) as opposed
         * to process context, kfd_process could attempt to exit while we are
         * running so the lookup function increments the process ref count.
         */
        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ESRCH;

        r = kfd_process_restore_queues(p);

        kfd_unref_process(p);
        return r;
}
/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
                                               struct dma_fence *fence)
{
        struct kfd_process *p;
        unsigned long active_time;
        unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

        if (!fence)
                return -EINVAL;

        if (dma_fence_is_signaled(fence))
                return 0;

        p = kfd_lookup_process_by_mm(mm);
        if (!p)
                return -ENODEV;

        if (fence->seqno == p->last_eviction_seqno)
                goto out;

        p->last_eviction_seqno = fence->seqno;

        /* Avoid KFD process starvation. Wait for at least
         * PROCESS_ACTIVE_TIME_MS before evicting the process again
         */
        active_time = get_jiffies_64() - p->last_restore_timestamp;
        if (delay_jiffies > active_time)
                delay_jiffies -= active_time;
        else
                delay_jiffies = 0;
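
        /*
         * Example (illustrative): if PROCESS_ACTIVE_TIME_MS were 10 ms,
         * a process restored 4 ms ago would only be evicted after the
         * remaining 6 ms, while one restored more than 10 ms ago is
         * evicted immediately (delay_jiffies = 0).
         */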
        /* During process initialization eviction_work.dwork is initialized
         * to kfd_evict_bo_worker
         */
        schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
        kfd_unref_process(p);
        return 0;
}
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size)
{
        unsigned int num_of_longs;

        if (WARN_ON(buf_size < chunk_size))
                return -EINVAL;
        if (WARN_ON(buf_size == 0))
                return -EINVAL;
        if (WARN_ON(chunk_size == 0))
                return -EINVAL;

        kfd->gtt_sa_chunk_size = chunk_size;
        kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

        num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
                BITS_PER_LONG;

        kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

        if (!kfd->gtt_sa_bitmap)
                return -ENOMEM;

        pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
                        kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

        mutex_init(&kfd->gtt_sa_lock);

        return 0;
}
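
/*
 * Sizing example (illustrative): kgd2kfd_device_init() above calls
 * kfd_gtt_sa_init() with 512-byte chunks, so a 1 MiB buffer yields
 * 2048 chunks and, on a 64-bit kernel, a bitmap of
 * (2048 + 63) / 64 = 32 longs, the same value BITS_TO_LONGS(2048)
 * would produce.
 */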
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
        mutex_destroy(&kfd->gtt_sa_lock);
        kfree(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
                                                unsigned int bit_num,
                                                unsigned int chunk_size)
{
        return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
                                                 unsigned int bit_num,
                                                 unsigned int chunk_size)
{
        return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
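
/*
 * Offset example (illustrative): with the 512-byte chunks used above,
 * bit_num = 3 makes both helpers return start_addr + 3 * 512 =
 * start_addr + 1536, i.e. the base of the fourth chunk.
 */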
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                        struct kfd_mem_obj **mem_obj)
{
        unsigned int found, start_search, cur_size;

        if (size == 0)
                return -EINVAL;

        if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
                return -ENOMEM;

        *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
        if (!(*mem_obj))
                return -ENOMEM;

        pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

        start_search = 0;

        mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
        /* Find the first chunk that is free */
        found = find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks,
                                        start_search);

        pr_debug("Found = %d\n", found);

        /* If there wasn't any free chunk, bail out */
        if (found == kfd->gtt_sa_num_of_chunks)
                goto kfd_gtt_no_free_chunk;

        /* Update fields of mem_obj */
        (*mem_obj)->range_start = found;
        (*mem_obj)->range_end = found;
        (*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
                                        kfd->gtt_start_gpu_addr,
                                        found,
                                        kfd->gtt_sa_chunk_size);
        (*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
                                        kfd->gtt_start_cpu_ptr,
                                        found,
                                        kfd->gtt_sa_chunk_size);

        pr_debug("gpu_addr = %p, cpu_addr = %p\n",
                        (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

        /* If we need only one chunk, mark it as allocated and get out */
        if (size <= kfd->gtt_sa_chunk_size) {
                pr_debug("Single bit\n");
                set_bit(found, kfd->gtt_sa_bitmap);
                goto kfd_gtt_out;
        }

        /* Otherwise, try to see if we have enough contiguous chunks */
        cur_size = size - kfd->gtt_sa_chunk_size;
        do {
                (*mem_obj)->range_end =
                        find_next_zero_bit(kfd->gtt_sa_bitmap,
                                        kfd->gtt_sa_num_of_chunks, ++found);
                /*
                 * If the next free chunk is not contiguous then we need to
                 * restart our search from the last free chunk we found
                 * (which wasn't contiguous with the previous ones).
                 */
                if ((*mem_obj)->range_end != found) {
                        start_search = found;
                        goto kfd_gtt_restart_search;
                }

                /*
                 * If we reached end of buffer, bail out with error
                 */
                if (found == kfd->gtt_sa_num_of_chunks)
                        goto kfd_gtt_no_free_chunk;

                /* Check if we don't need another chunk */
                if (cur_size <= kfd->gtt_sa_chunk_size)
                        cur_size = 0;
                else
                        cur_size -= kfd->gtt_sa_chunk_size;

        } while (cur_size > 0);

        pr_debug("range_start = %d, range_end = %d\n",
                (*mem_obj)->range_start, (*mem_obj)->range_end);

        /* Mark the chunks as allocated */
        for (found = (*mem_obj)->range_start;
                found <= (*mem_obj)->range_end;
                found++)
                set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
        mutex_unlock(&kfd->gtt_sa_lock);
        return 0;

kfd_gtt_no_free_chunk:
        pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
        mutex_unlock(&kfd->gtt_sa_lock);
        kfree(*mem_obj);
        return -ENOMEM;
}
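
/*
 * First-fit walk-through (illustrative): for a 3-chunk request over a
 * 7-chunk bitmap with chunks 0, 1 and 3 allocated, the scan above
 * first lands on chunk 2, hits allocated chunk 3, restarts the search
 * and finds chunk 4 as the new range start, finally succeeding with
 * range_start..range_end = 4..6.
 */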
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
        unsigned int bit;

        /* Act like kfree when trying to free a NULL object */
        if (!mem_obj)
                return 0;

        pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
                        mem_obj, mem_obj->range_start, mem_obj->range_end);

        mutex_lock(&kfd->gtt_sa_lock);

        /* Mark the chunks as free */
        for (bit = mem_obj->range_start;
                bit <= mem_obj->range_end;
                bit++)
                clear_bit(bit, kfd->gtt_sa_bitmap);

        mutex_unlock(&kfd->gtt_sa_lock);

        kfree(mem_obj);
        return 0;
}
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
        if (kfd)
                atomic_inc(&kfd->sram_ecc_flag);
}
#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
        int r = 0;

        if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
                pr_err("HWS is not enabled");
                return -EINVAL;
        }

        r = pm_debugfs_hang_hws(&dev->dqm->packets);
        if (!r)
                r = dqm_debugfs_execute_queues(dev->dqm);

        return r;
}

#endif