/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v10_structs.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
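
/*
 * MQD (memory queue descriptor) manager for GFX v10 (Navi1x) hardware
 * queues. An MQD holds the hardware state of one compute or SDMA queue;
 * it is written to the HQD registers when the queue is mapped and saved
 * back when it is unmapped. The callbacks below allocate, initialize,
 * update, load and destroy MQDs on behalf of the KFD queue manager.
 */
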
static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}
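
/*
 * Translate the queue's CU mask into the four per-shader-engine
 * COMPUTE_STATIC_THREAD_MGMT_SE fields of the MQD. The helper
 * distributes the enabled CUs symmetrically across shader engines.
 */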
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct v10_compute_mqd *m;
	uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */

	if (q->cu_mask_count == 0)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		q->cu_mask, q->cu_mask_count, se_mask);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];

	pr_debug("update cu mask to %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3);
}
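
/*
 * Allocate backing memory for one compute MQD. With CWSR the MQD and
 * the control stack must sit on separate page boundaries, so a full
 * GTT allocation is used; otherwise the MQD comes from the GTT
 * sub-allocator.
 */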
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
		struct queue_properties *q)
{
	int retval;
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	/* From v9 on, with CWSR, the control stack is located on the next
	 * page boundary after the MQD, so use the GTT allocation function
	 * instead of the sub-allocation function.
	 */
	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
			ALIGN(q->ctl_stack_size, PAGE_SIZE) +
			ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
			&(mqd_mem_obj->gtt_mem),
			&(mqd_mem_obj->gpu_addr),
			(void *)&(mqd_mem_obj->cpu_ptr), true);
	} else {
		retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
				&mqd_mem_obj);
	}
	if (retval) {
		kfree(mqd_mem_obj);
		return NULL;
	}
	return mqd_mem_obj;
}
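
/*
 * One-time initialization of a compute MQD: clear it, program the
 * static CP_HQD_* defaults (priorities, quantum, MQD base address)
 * and, when CWSR is enabled, the context save/restore area, then
 * apply the queue-dependent fields via update_mqd().
 */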
static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v10_compute_mqd *m;

	m = (struct v10_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, sizeof(struct v10_compute_mqd));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	m->cp_hqd_pipe_priority = 1;
	m->cp_hqd_queue_priority = 15;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q);
}
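
/*
 * Hand the MQD to the CP so the HQD (hardware queue descriptor)
 * registers are programmed from it and the queue is mapped.
 */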
static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
			(uint32_t __user *)p->write_ptr,
			wptr_shift, 0, mms);
	return r;
}
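
/*
 * Refresh the queue-dependent MQD fields (ring buffer address and
 * size, read/write pointer addresses, doorbell, EOP buffer) from the
 * current queue_properties and recompute whether the queue is active.
 */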
static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct v10_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA (for a 0x2000-byte
	 * = 0x800-dword EOP buffer, ffs(0x800) - 1 - 1 = 12 - 2 = 0xA).
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
	m->cp_hqd_eop_base_addr_lo =
		lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
		upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, q);

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0 &&
			!q->is_evicted);
}
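
/* Preempt or reset the HQD that was loaded from this MQD. */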
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy
		(mm->dev->kgd, mqd, type, timeout,
		 pipe_id, queue_id);
}

static void free_mqd(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	struct kfd_dev *kfd = mm->dev;

	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(
		mm->dev->kgd, queue_address,
		pipe_id, queue_id);
}
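
/*
 * Copy the saved control stack to user space and report how much of
 * the control stack and the wave save area the last CWSR save used.
 */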
static int get_wave_state(struct mqd_manager *mm, void *mqd,
			void __user *ctl_stack,
			u32 *ctl_stack_used_size,
			u32 *save_area_used_size)
{
	struct v10_compute_mqd *m;

	/* Control stack is located one page after MQD. */
	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
		return -EFAULT;

	return 0;
}
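
/*
 * HIQ (HSA interface queue) variants: the HIQ is a kernel-owned queue,
 * so its MQD is initialized like a user compute queue and then marked
 * privileged (PRIV_STATE and KMD_QUEUE) in CP_HQD_PQ_CONTROL.
 */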
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v10_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct v10_compute_mqd *m;

	update_mqd(mm, mqd, q);

	/* TODO: what's the point? update_mqd already does this. */
	m = get_mqd(mqd);
	m->cp_hqd_vmid = q->vmid;
}
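
/* SDMA queues use the RLC-managed v10_sdma_mqd layout instead. */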
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v10_sdma_mqd *m;

	m = (struct v10_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v10_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
			(uint32_t __user *)p->write_ptr,
			mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf
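
/*
 * RB_SIZE in SDMA0_RLC0_RB_CNTL is the log2 of the ring size in
 * dwords, hence the ffs(queue_size / sizeof(unsigned int)) - 1
 * encoding below.
 */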
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct v10_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0 &&
			!q->is_evicted);
}

/*
 * The preempt type here is ignored because there is only one way
 * to preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v10_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v10_sdma_mqd), false);
	return 0;
}
#endif
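
/*
 * Construct the mqd_manager for one MQD type, wiring up the
 * type-specific callbacks. Returns NULL on an unknown type or on
 * allocation failure.
 */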
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		/* fall through to the compute queue setup */
	case KFD_MQD_TYPE_COMPUTE:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = free_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->mqd_size = sizeof(struct v10_compute_mqd);
		mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd_hiq;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->mqd_size = sizeof(struct v10_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd_hiq;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->mqd_size = sizeof(struct v10_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = destroy_mqd_sdma;
		mqd->is_occupied = is_occupied_sdma;
		mqd->mqd_size = sizeof(struct v10_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}