// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};
static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally, if there's no contention in secure world, the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tries to
	 * complete someone waiting.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}
static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}
static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other caller.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed, we've got a completion from another task
	 * that was just done with its call to secure world. Since yet
	 * another thread is now available in secure world, wake up
	 * another eventual waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
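/*
 * The helpers above are always used in the same pattern, sketched here in
 * pseudo-code for orientation only (see optee_do_call_with_arg() and
 * optee_enable_shm_cache() below for the real thing):
 *
 *	optee_cq_wait_init(cq, &w);
 *	while (true) {
 *		issue SMC;
 *		if (secure world is out of threads)
 *			optee_cq_wait_for_completion(cq, &w);
 *		else
 *			break;
 *	}
 *	optee_cq_wait_final(cq, &w);
 */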
/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}
/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world; if there are any
	 * waiters, wake one up.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}
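/*
 * get_msg_arg() - allocate a struct optee_msg_arg in shared memory
 * @ctx:	calling context
 * @num_params:	number of parameters the message needs room for
 * @msg_arg:	returned virtual address of the zeroed message
 * @msg_parg:	returned physical address of the message
 *
 * Returns the backing tee_shm on success or an ERR_PTR on failure. The
 * caller is responsible for freeing the tee_shm with tee_shm_free() once
 * the call to secure world is done.
 */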
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);

	ma = tee_shm_get_va(shm, 0);

	rc = tee_shm_get_pa(shm, 0, msg_parg);

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
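/**
 * optee_open_session() - Open a session towards a Trusted Application
 * @ctx:	calling context
 * @arg:	open session arguments
 * @param:	parameters to pass to the Trusted Application
 *
 * Builds an OPTEE_MSG_CMD_OPEN_SESSION message carrying the TA and client
 * UUIDs as meta parameters, calls into secure world and, on success, adds
 * the new session to the context's session list.
 */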
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid,
	       sizeof(arg->clnt_uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}
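/**
 * optee_close_session() - Close a session towards a Trusted Application
 * @ctx:	calling context
 * @session:	session id of the session to close
 *
 * Removes the session from the context's session list and sends an
 * OPTEE_MSG_CMD_CLOSE_SESSION message telling secure world to close it.
 */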
int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}
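/**
 * optee_invoke_func() - Invoke a function in a Trusted Application
 * @ctx:	calling context
 * @arg:	invoke arguments, including session id and function to invoke
 * @param:	parameters to pass to the Trusted Application
 *
 * Checks that the session is valid, builds an OPTEE_MSG_CMD_INVOKE_COMMAND
 * message and passes the result and origin from secure world back in @arg.
 */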
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}
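/**
 * optee_cancel_req() - Request cancellation of an ongoing operation
 * @ctx:	calling context
 * @cancel_id:	cancellation id of the operation to cancel
 * @session:	session id the operation belongs to
 *
 * Sends an OPTEE_MSG_CMD_CANCEL message to secure world. Cancellation is
 * best effort; secure world decides whether the operation can actually be
 * interrupted.
 */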
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}
/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocation in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
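/*
 * optee_disable_shm_cache() below is the counterpart, typically used when
 * the driver is torn down; it keeps asking secure world for cached shared
 * memory objects and frees them until OP-TEE reports that none remain.
 */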
/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *			       allocation in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All SHMs freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
#define PAGELIST_ENTRIES_PER_PAGE \
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
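/*
 * For orientation: OPTEE_MSG_NONCONTIG_PAGE_SIZE is 4 KiB, so with 8-byte
 * (u64) entries each pagelist page holds 4096 / 8 - 1 = 511 page
 * addresses; the final slot is reserved for the link to the next pagelist
 * page that optee_fill_pages_list() writes below.
 */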
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 * links to the next pages of buffer.
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses a 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are no
	 * known ARM architectures with a page size smaller than 4k. Thus
	 * the build assert below looks redundant, but the following code
	 * heavily relies on this assumption, so it is better to be safe
	 * than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the Linux page is bigger than 4k, and the user buffer offset
	 * is larger than 4k/8k/12k/etc., this skips the first 4k chunks,
	 * because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}
void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
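/*
 * Only buffers backed by normal, cached memory can be registered as shared
 * memory; the check below rejects device and uncached mappings, presumably
 * because they would give normal world and secure world mismatched memory
 * attributes for the same buffer.
 */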
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}
static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}
static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	down_read(&mm->mmap_sem);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	up_read(&mm->mmap_sem);

	return rc;
}
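/**
 * optee_shm_register() - Register shared memory with OP-TEE
 * @ctx:	calling context
 * @shm:	shared memory object to register
 * @pages:	pages backing @shm
 * @num_pages:	number of entries in @pages
 * @start:	start of the user space address range backing @shm
 *
 * Builds a non-contiguous page list describing @pages and hands it to
 * secure world in an OPTEE_MSG_CMD_REGISTER_SHM message.
 */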
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}
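/**
 * optee_shm_unregister() - Unregister shared memory from OP-TEE
 * @ctx:	calling context
 * @shm:	shared memory object to unregister
 *
 * Sends an OPTEE_MSG_CMD_UNREGISTER_SHM message so that secure world
 * drops its reference to @shm.
 */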
int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
	return rc;
}
int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}
int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}