/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
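
/*
 * Advance the runlist IB write pointer, which is kept in dwords, by a
 * packet size given in bytes; BUG_ON guards against overrunning the IB.
 */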
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
	*wptr = temp;
}
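
/*
 * Build a PM4 type-3 packet header. The count field holds the packet
 * length in dwords, excluding the header dword and biased by one, per
 * the PM4 convention.
 */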
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32all = 0;
	header.opcode = opcode;
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32all;
}
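
/*
 * Size the runlist IB: one MAP_PROCESS packet per process plus one
 * MAP_QUEUES packet per queue (the VI-format packet on Carrizo), with
 * room for a chaining RUN_LIST packet when the runlist is over-subscribed.
 */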
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;
	unsigned int map_queue_size;

	BUG_ON(!pm || !rlib_size || !over_subscription);

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription */
	*over_subscription = false;
	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("kfd: over subscribed runlist\n");
	}

	map_queue_size =
		(pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
		sizeof(struct pm4_mes_map_queues) :
		sizeof(struct pm4_map_queues);
	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_map_process) +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_runlist);

	pr_debug("kfd: runlist ib size %d\n", *rlib_size);
}
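
/*
 * Allocate and zero the runlist IB from the device's GTT sub-allocator,
 * returning its CPU and GPU addresses through rl_buffer/rl_gpu_buffer.
 */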
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	BUG_ON(!pm);
	BUG_ON(pm->allocated);
	BUG_ON(is_over_subscription == NULL);

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);
	if (retval != 0) {
		pr_err("kfd: failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
	return retval;
}
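
/*
 * Write a RUN_LIST packet pointing the scheduler at an IB of
 * ib_size_in_dwords; with chain set, the packet links to a further
 * runlist instead of terminating.
 */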
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_runlist *packet;

	BUG_ON(!pm || !buffer || !ib);

	packet = (struct pm4_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_runlist));
	packet->header.u32all = build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}
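
/*
 * Write a MAP_PROCESS packet describing a process to the scheduler:
 * PASID, page table base, shader memory apertures, GDS allocation and
 * the number of queues (reported as 0 for debug processes).
 */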
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	BUG_ON(!pm || !buffer || !qpd);

	packet = (struct pm4_map_process *)buffer;

	pr_debug("kfd: In func %s\n", __func__);

	memset(buffer, 0, sizeof(struct pm4_map_process));

	packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;

	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}
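
/*
 * Write a VI-format (Carrizo) MAP_QUEUES packet for a single queue,
 * selecting engine and queue type from the queue's properties.
 */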
static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	BUG_ON(!pm || !buffer || !q);

	pr_debug("kfd: In func %s\n", __func__);

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_map_queues));
	packet->bitfields2.alloc_format =
		alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		pr_err("kfd: in %s queue type %d\n", __func__,
				q->properties.type);
		BUG();
		break;
	}

	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
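
/*
 * Write a pre-VI MAP_QUEUES packet for a single queue; per-queue
 * parameters are carried in the packet's first ordinal.
 */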
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
				struct queue *q, bool is_static)
{
	struct pm4_map_queues *packet;
	bool use_static = is_static;

	BUG_ON(!pm || !buffer || !q);

	pr_debug("kfd: In func %s\n", __func__);

	packet = (struct pm4_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_map_queues));
	packet->bitfields2.alloc_format =
				alloc_format__mes_map_queues__one_per_pipe;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;

	packet->bitfields2.vidmem = (q->properties.is_interop) ?
			vidmem__mes_map_queues__uses_video_memory :
			vidmem__mes_map_queues__uses_no_video_memory;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		BUG();
		break;
	}

	packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mes_map_queues_ordinals[0].bitfields3.is_static =
			(use_static) ? 1 : 0;

	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->mes_map_queues_ordinals[0].wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
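
/*
 * Build the complete runlist IB: for each process, a MAP_PROCESS packet
 * followed by MAP_QUEUES packets for its active kernel and user queues.
 * An over-subscribed runlist is terminated by a RUN_LIST packet chaining
 * back to the start of the IB.
 */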
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval != 0)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("kfd: In func %s\n", __func__);
	pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("kfd: not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval != 0)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			if (pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO)
				retval = pm_create_map_queue_vi(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			else
				retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval != 0)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_map_queues),
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			if (pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO)
				retval = pm_create_map_queue_vi(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			else
				retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval != 0)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_map_queues),
				alloc_size_bytes);
		}
	}

	pr_debug("kfd: finished map process and queues to runlist\n");

	if (is_over_subscription)
		pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
				alloc_size_bytes / sizeof(uint32_t), true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return 0;
}
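
/* Initialize the packet manager and its HIQ-backed kernel queue. */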
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (pm->priv_queue == NULL) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}
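
/* Tear down the packet manager's lock and kernel queue. */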
void pm_uninit(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}
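
/*
 * Send a SET_RESOURCES packet over the HIQ, handing the scheduler the
 * VMID, queue, GWS and OAC masks and the GDS heap it may use.
 */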
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct pm4_set_resources *packet;

	BUG_ON(!pm || !res);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (packet == NULL) {
		mutex_unlock(&pm->lock);
		pr_err("kfd: failed to allocate buffer on kernel queue\n");
		return -ENOMEM;
	}

	memset(packet, 0, sizeof(struct pm4_set_resources));
	packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return 0;
}
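
/*
 * Build a runlist IB for dqm_queues and submit a RUN_LIST packet pointing
 * at it over the HIQ; on failure, any allocated runlist IB is released.
 */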
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	BUG_ON(!pm || !dqm_queues);

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval != 0)
		goto fail_create_runlist_ib;

	pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval != 0)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	if (pm->allocated)
		pm_release_ib(pm);
	return retval;
}
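
/*
 * Send a QUERY_STATUS packet asking the scheduler to write fence_value to
 * fence_address once the preceding work has completed.
 */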
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_query_status *packet;

	BUG_ON(!pm || !fence_address);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval != 0)
		goto fail_acquire_packet_buffer;

	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);
	mutex_unlock(&pm->lock);

	return 0;

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}
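
/*
 * Send an UNMAP_QUEUES packet that preempts (or, with reset, resets)
 * queues selected by the filter mode: a single doorbell, a PASID, all
 * active queues, or dynamic queues only.
 */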
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_unmap_queues *packet;

	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval != 0)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_unmap_queues));
	pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
		mode, reset, type);
	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		BUG();
		break;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
		break;
	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
		break;
	default:
		BUG();
		break;
	}

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);
	return 0;

err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}
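
/* Free the runlist IB, if one is currently allocated. */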
void pm_release_ib(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}