/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

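/* Dequeue request types written to CP_HQD_DEQUEUE_REQUEST when an active
 * hardware queue is preempted (see kgd_hqd_destroy()).
 */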
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

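/* Bit layout of the TCP_WATCHn_CNTL registers programmed through the
 * watchRegs table above.
 */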
union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_pasid_alloc,
	.free_pasid = amdgpu_pasid_free,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.get_cu_info = get_cu_info,
	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
	.gpu_recover = amdgpu_amdkfd_gpu_reset,
	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

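/* SRBM registers are banked per MEC/pipe/queue/VMID. lock_srbm() selects
 * the bank through SRBM_GFX_CNTL and holds srbm_mutex until unlock_srbm()
 * restores the default selection.
 */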
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

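/* Program a compute queue's HQD over MMIO. The MQD fields starting at
 * cp_mqd_base_addr_lo map one-to-one onto the CP_MQD_BASE_ADDR..
 * CP_MQD_CONTROL register range, so they are copied in a single loop.
 */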
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

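/* Load an SDMA RLC queue from its MQD. The ring buffer is disabled and
 * the engine is drained to idle before the queue registers are
 * reprogrammed and the ring is re-enabled.
 */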
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

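/* A compute queue is considered occupied if its HQD is active and the
 * programmed PQ base matches the given queue address.
 */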
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

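/* Preempt or reset an active HQD and wait up to utimeout ms for the
 * queue to drain, working around unsafe dequeue windows while the IQ
 * timer is running.
 */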
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");
		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

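/* Disable all four TCP watch points by clearing the VALID bit in each
 * TCP_WATCHn_CNTL register.
 */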
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

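/* SH_HIDDEN_PRIVATE_BASE_VMID holds the per-VMID base address of the
 * scratch (private) memory backing store.
 */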
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.pfp_fw->data;
		break;
	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.me_fw->data;
		break;
	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.ce_fw->data;
		break;
	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.mec_fw->data;
		break;
	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;
	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.rlc_fw->data;
		break;
	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;
	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;
	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}

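/* VMIDs 8-15 are reserved for KFD; their page table base registers are
 * contiguous starting at VM_CONTEXT8_PAGE_TABLE_BASE_ADDR.
 */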
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
			uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
		lower_32_bits(page_table_base));
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

	if (adev->in_gpu_reset)
		return -EIO;

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid\n");
		return 0;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);

	return 0;
}

/**
 * read_vmid_from_vmfault_reg - read the VMID from the VM fault status register
 *
 * @kgd: kgd device pointer
 *
 * Reads the VMID field of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}