// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);

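/*
 * dma_fence callbacks for the fence that is attached to every command
 * submission (CS). The fence is signaled from cs_do_release(), i.e. when
 * the CS refcount drops to zero after all its external jobs completed.
 */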
static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
	return "HabanaLabs";
}

static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
	struct hl_dma_fence *hl_fence =
		container_of(fence, struct hl_dma_fence, base_fence);

	return dev_name(hl_fence->hdev->dev);
}

static bool hl_fence_enable_signaling(struct dma_fence *fence)
{
	/* The driver signals the fence itself, so signaling is always enabled */
	return true;
}

static void hl_fence_release(struct dma_fence *fence)
{
	struct hl_dma_fence *hl_fence =
		container_of(fence, struct hl_dma_fence, base_fence);

	kfree_rcu(hl_fence, base_fence.rcu);
}

static const struct dma_fence_ops hl_fence_ops = {
	.get_driver_name = hl_fence_get_driver_name,
	.get_timeline_name = hl_fence_get_timeline_name,
	.enable_signaling = hl_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = hl_fence_release
};

static void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.ext_queue = job->ext_queue;
	job->patched_cb = NULL;
	parser.use_virt_addr = hdev->mmu_enable;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (job->ext_queue) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;

			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt++;
			spin_unlock(&job->patched_cb->lock);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		spin_lock(&job->user_cb->lock);
		job->user_cb->cs_cnt--;
		spin_unlock(&job->user_cb->lock);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	}

	return rc;
}

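/*
 * free_job - release everything a single job holds: for a job on an
 * external queue, its userptr list and the reference on the patched CB
 * (if one was created); then the job's entry in the CS job list and, for
 * external-queue jobs, the CS reference that was taken at submission.
 */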
static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (job->ext_queue) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			spin_lock(&job->patched_cb->lock);
			job->patched_cb->cs_cnt--;
			spin_unlock(&job->patched_cb->lock);

			hl_cb_put(job->patched_cb);
		}
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	if (job->ext_queue)
		cs_put(cs);

	kfree(job);
}

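/*
 * cs_do_release - kref release callback of a CS. Frees the remaining
 * (internal-queue) jobs, updates the CI of internal queues, removes the
 * CS from the H/W queues mirror list and re-arms the TDR for the next
 * pending CS, and finally signals and releases the CS fence so waiters
 * in hl_cs_wait_ioctl() are woken.
 */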
static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs,
						refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;

	cs->completed = true;

	/*
	 * Although if we reached here it means that all external jobs have
	 * finished, because each one of them took refcnt to CS, we still
	 * need to go over the internal jobs and free them. Otherwise, we
	 * will have leaked memory and what's worse, the CS object (and
	 * potentially the CTX object) could be released, while the JOB
	 * still holds a pointer to them (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);

	/* We also need to update CI for internal queues */
	if (cs->submitted) {
		int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);

		WARN_ONCE((cs_cnt < 0),
			"hl%d: error in CS active cnt %d\n",
			hdev->id, cs_cnt);

		hl_int_hw_queue_update_ci(cs);

		spin_lock(&hdev->hw_queues_mirror_lock);
		/* remove CS from hw_queues mirror list */
		list_del_init(&cs->mirror_node);
		spin_unlock(&hdev->hw_queues_mirror_lock);

		/*
		 * Don't cancel TDR in case this CS was timedout because we
		 * might be running from the TDR context
		 */
		if ((!cs->timedout) &&
			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
			struct hl_cs *next;

			if (cs->tdr_active)
				cancel_delayed_work_sync(&cs->work_tdr);

			spin_lock(&hdev->hw_queues_mirror_lock);

			/* queue TDR for next CS */
			next = list_first_entry_or_null(
					&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node);

			if ((next) && (!next->tdr_active)) {
				next->tdr_active = true;
				schedule_delayed_work(&next->work_tdr,
							hdev->timeout_jiffies);
			}

			spin_unlock(&hdev->hw_queues_mirror_lock);
		}
	}

	/*
	 * Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	if (cs->timedout)
		dma_fence_set_error(cs->fence, -ETIMEDOUT);
	else if (cs->aborted)
		dma_fence_set_error(cs->fence, -EIO);

	dma_fence_signal(cs->fence);
	dma_fence_put(cs->fence);

	kfree(cs);
}

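/*
 * cs_timedout - TDR (timeout detection and recovery) work. Marks the CS
 * as timed out and, if reset_on_lockup is set, triggers a device reset.
 */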
static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int ctx_asid, rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);
	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS is timed out so we won't try to cancel its TDR */
	cs->timedout = true;

	hdev = cs->ctx->hdev;
	ctx_asid = cs->ctx->asid;

	/* TODO: add information about last signaled seq and last emitted seq */
	dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);

	cs_put(cs);

	if (hdev->reset_on_lockup)
		hl_device_reset(hdev, false, false);
}

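/*
 * allocate_cs - allocate a new CS object and its dma_fence, and install
 * the fence in the context's cs_pending array under the next sequence
 * number. Fails if the slot for that sequence number still holds an
 * unsignaled fence, i.e. the pending-CS window is full.
 */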
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			struct hl_cs **cs_new)
{
	struct hl_dma_fence *fence;
	struct dma_fence *other = NULL;
	struct hl_cs *cs;
	int rc;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		return -ENOMEM;

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
	if (!fence) {
		rc = -ENOMEM;
		goto free_cs;
	}

	fence->hdev = hdev;
	spin_lock_init(&fence->lock);
	cs->fence = &fence->base_fence;

	spin_lock(&ctx->cs_lock);

	fence->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
	if ((other) && (!dma_fence_is_signaled(other))) {
		spin_unlock(&ctx->cs_lock);
		rc = -EAGAIN;
		goto free_fence;
	}

	dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
			ctx->asid, ctx->cs_sequence);

	cs->sequence = fence->cs_seq;

	ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
							&fence->base_fence;
	ctx->cs_sequence++;

	/* fence is referenced both by the CS and by the cs_pending entry */
	dma_fence_get(&fence->base_fence);

	dma_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	kfree(fence);
free_cs:
	kfree(cs);
	return rc;
}

static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		free_job(hdev, job);
}

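/*
 * hl_cs_rollback_all - called when the device goes down to abort every
 * CS that is still on the H/W queues mirror list and free its jobs.
 */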
void hl_cs_rollback_all(struct hl_device *hdev)
{
	struct hl_cs *cs, *tmp;

	/* flush all completions */
	flush_workqueue(hdev->cq_wq);

	/* Make sure we don't have leftovers in the H/W queues mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
				mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	free_job(hdev, job);
}

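/*
 * validate_queue_index - validate the queue index of a CS chunk and, for
 * an external queue, look up and take a reference on the user's CB.
 * Returns the CB on success (for internal queues the raw handle is passed
 * through) or NULL on error. *ext_queue tells the caller the queue type.
 */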
static struct hl_cb *validate_queue_index(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk,
					bool *ext_queue)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;
	u32 cb_handle;
	struct hl_cb *cb;

	/* Assume external queue */
	*ext_queue = true;

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if ((chunk->queue_index >= HL_MAX_QUEUES) ||
			(hw_queue_prop->type == QUEUE_TYPE_NA)) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return NULL;
	}

	if (hw_queue_prop->kmd_only) {
		dev_err(hdev->dev, "Queue index %d is restricted for KMD\n",
			chunk->queue_index);
		return NULL;
	} else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
		*ext_queue = false;
		return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
	}

	/* Retrieve CB object */
	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	spin_lock(&cb->lock);
	cb->cs_cnt++;
	spin_unlock(&cb->lock);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return NULL;

	job->ext_queue = ext_queue;

	if (job->ext_queue) {
		INIT_LIST_HEAD(&job->userptr_list);
		INIT_WORK(&job->finish_work, job_wq_completion);
	}

	return job;
}

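/*
 * _hl_cs_ioctl - build a CS from the user's chunk array: validate each
 * chunk, allocate and parse a job per chunk, and schedule the CS on the
 * H/W queues. On any failure the CS is rolled back and its reference is
 * dropped.
 */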
static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
			u32 num_chunks, u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	bool ext_queue_present = false;
	u32 size_to_copy;
	int rc, i, parse_cnt;

	*cs_seq = ULLONG_MAX;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		rc = -EINVAL;
		goto out;
	}

	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
					GFP_ATOMIC);
	if (!cs_chunk_array) {
		rc = -ENOMEM;
		goto out;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		rc = -EFAULT;
		goto free_cs_chunk_array;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, hpriv->ctx);

	rc = allocate_cs(hdev, hpriv->ctx, &cs);
	if (rc) {
		hl_ctx_put(hpriv->ctx);
		goto free_cs_chunk_array;
	}

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		bool ext_queue;

		cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
					&ext_queue);
		if (ext_queue) {
			ext_queue_present = true;
			if (!cb) {
				rc = -EINVAL;
				goto free_cs_object;
			}
		}

		job = hl_cs_allocate_job(hdev, ext_queue);
		if (!job) {
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (ext_queue)
				goto release_cb;
			else
				goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		if (job->ext_queue)
			job->job_cb_size = cb->size;
		else
			job->job_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external queues, because only
		 * for those JOBs we get completion
		 */
		if (job->ext_queue)
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	if (!ext_queue_present) {
		dev_err(hdev->dev,
			"Reject CS %d.%llu because no external queues jobs\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to submit CS %d.%llu to H/W queues, error %d\n",
			cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	spin_lock(&cb->lock);
	cb->cs_cnt--;
	spin_unlock(&cb->lock);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}

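/*
 * hl_cs_ioctl - top-level CS ioctl. Runs the context-switch / restore
 * phase when required (or waits for another thread to finish it), then
 * submits the execute-phase CS and reports its sequence number back to
 * the user.
 */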
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_cs_args *args = data;
	struct hl_ctx *ctx = hpriv->ctx;
	void __user *chunks;
	u32 num_chunks;
	u64 cs_seq = ULONG_MAX;
	int rc, do_restore;
	bool need_soft_reset = false;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		rc = -EBUSY;
		goto out;
	}

	do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);

	if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		long ret;

		chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_restore) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		if (num_chunks == 0) {
			dev_dbg(hdev->dev,
			"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = _hl_cs_ioctl(hpriv, chunks, num_chunks,
						&cs_seq);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks > 0) {
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					cs_seq);
			if (ret <= 0) {
				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %ld\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_restore_wait_token = 1;
	} else if (!ctx->thread_restore_wait_token) {
		u32 tmp;

		rc = hl_poll_timeout_memory(hdev,
			(u64) (uintptr_t) &ctx->thread_restore_wait_token,
			jiffies_to_usecs(hdev->timeout_jiffies),
			&tmp);

		if (rc || !tmp) {
			dev_err(hdev->dev,
				"restore phase hasn't finished in time\n");
			rc = -ETIMEDOUT;
			goto out;
		}
	}

	chunks = (void __user *)(uintptr_t)args->in.chunks_execute;
	num_chunks = args->in.num_chunks_execute;

	if (num_chunks == 0) {
		dev_err(hdev->dev,
			"Got execute CS with 0 chunks, context %d\n",
			ctx->asid);
		rc = -EINVAL;
		goto out;
	}

	rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq);

out:
	if (rc != -EAGAIN) {
		memset(args, 0, sizeof(*args));
		args->out.status = rc;
		args->out.seq = cs_seq;
	}

	if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
		hl_device_reset(hdev, false, false);

	return rc;
}

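/*
 * _hl_cs_wait_ioctl - wait for the fence of the given CS sequence number.
 * Returns a positive value if the CS completed, 0 if the timeout expired
 * while the CS is still in flight, or a negative error code (e.g. if the
 * CS itself timed out or was aborted).
 */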
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
	struct dma_fence *fence;
	unsigned long timeout;
	long rc;

	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
		timeout = timeout_us;
	else
		timeout = usecs_to_jiffies(timeout_us);

	hl_ctx_get(hdev, ctx);

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence)) {
		rc = PTR_ERR(fence);
	} else if (fence) {
		rc = dma_fence_wait_timeout(fence, true, timeout);
		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;
		dma_fence_put(fence);
	} else {
		/* fence already retired from the pending array, i.e. completed */
		rc = 1;
	}

	hl_ctx_put(ctx);

	return rc;
}

int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_device *hdev = hpriv->hdev;
	union hl_wait_cs_args *args = data;
	u64 seq = args->in.seq;
	long rc;

	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);

	memset(args, 0, sizeof(*args));

	if (rc < 0) {
		dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n",
			rc, seq);
		if (rc == -ERESTARTSYS) {
			args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
			rc = -EINTR;
		} else if (rc == -ETIMEDOUT) {
			args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
		} else if (rc == -EIO) {
			args->out.status = HL_WAIT_CS_STATUS_ABORTED;
		}
		return rc;
	}

	if (rc == 0)
		args->out.status = HL_WAIT_CS_STATUS_BUSY;
	else
		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;

	return 0;
}