1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */
8 #include "habanalabs.h"
10 #include <linux/slab.h>
12 static void hl_ctx_fini(struct hl_ctx *ctx)
14 struct hl_device *hdev = ctx->hdev;
18 * If we arrived here, there are no jobs waiting for this context
19 * on its queues so we can safely remove it.
20 * This is because for each CS, we increment the ref count and for
21 * every CS that was finished we decrement it and we won't arrive
22 * to this function unless the ref count is 0
25 for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
26 dma_fence_put(ctx->cs_pending[i]);
28 if (ctx->asid != HL_KERNEL_ASID_ID) {
30 hl_asid_free(hdev, ctx->asid);
34 void hl_ctx_do_release(struct kref *ref)
38 ctx = container_of(ref, struct hl_ctx, refcount);
43 hl_hpriv_put(ctx->hpriv);
48 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
50 struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
54 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
60 rc = hl_ctx_init(hdev, ctx, false);
67 /* TODO: remove for multiple contexts */
71 mutex_lock(&mgr->ctx_lock);
72 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
73 mutex_unlock(&mgr->ctx_lock);
76 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
77 hl_ctx_free(hdev, ctx);
89 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
91 if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
95 "Context %d closed or terminated but its CS are executing\n",
99 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
105 kref_init(&ctx->refcount);
107 ctx->cs_sequence = 1;
108 spin_lock_init(&ctx->cs_lock);
109 atomic_set(&ctx->thread_restore_token, 1);
110 ctx->thread_restore_wait_token = 0;
113 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
115 ctx->asid = hl_asid_alloc(hdev);
117 dev_err(hdev->dev, "No free ASID, failed to create context\n");
121 rc = hl_vm_ctx_init(ctx);
123 dev_err(hdev->dev, "Failed to init mem ctx module\n");
132 if (ctx->asid != HL_KERNEL_ASID_ID)
133 hl_asid_free(hdev, ctx->asid);
138 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
140 kref_get(&ctx->refcount);
143 int hl_ctx_put(struct hl_ctx *ctx)
145 return kref_put(&ctx->refcount, hl_ctx_do_release);
148 struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
150 struct hl_device *hdev = ctx->hdev;
151 struct dma_fence *fence;
153 spin_lock(&ctx->cs_lock);
155 if (seq >= ctx->cs_sequence) {
156 dev_notice(hdev->dev,
157 "Can't wait on seq %llu because current CS is at seq %llu\n",
158 seq, ctx->cs_sequence);
159 spin_unlock(&ctx->cs_lock);
160 return ERR_PTR(-EINVAL);
164 if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
166 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
167 seq, ctx->cs_sequence);
168 spin_unlock(&ctx->cs_lock);
172 fence = dma_fence_get(
173 ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
174 spin_unlock(&ctx->cs_lock);
180 * hl_ctx_mgr_init - initialize the context manager
182 * @mgr: pointer to context manager structure
184 * This manager is an object inside the hpriv object of the user process.
185 * The function is called when a user process opens the FD.
187 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
189 mutex_init(&mgr->ctx_lock);
190 idr_init(&mgr->ctx_handles);
194 * hl_ctx_mgr_fini - finalize the context manager
196 * @hdev: pointer to device structure
197 * @mgr: pointer to context manager structure
199 * This function goes over all the contexts in the manager and frees them.
200 * It is called when a process closes the FD.
202 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
208 idp = &mgr->ctx_handles;
210 idr_for_each_entry(idp, ctx, id)
211 hl_ctx_free(hdev, ctx);
213 idr_destroy(&mgr->ctx_handles);
214 mutex_destroy(&mgr->ctx_lock);