// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally, if there's no contention in secure world, the call
	 * will complete and we can clean up directly with
	 * optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread that just exited tries to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	/* Wait until a task leaving secure world completes us */
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	/* Complete the first waiter that hasn't been completed yet */
	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread now is available in secure world, wake up another eventual
	 * waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
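
/*
 * Illustrative use of the call-queue waiter API above (a minimal sketch
 * of the pattern used by optee_do_call_with_arg() and the shm-cache
 * helpers below; do_smc() stands in for an actual invoke_fn call):
 *
 *	struct optee_call_waiter w;
 *
 *	optee_cq_wait_init(&optee->call_queue, &w);
 *	while (do_smc() == OPTEE_SMC_RETURN_ETHREAD_LIMIT)
 *		optee_cq_wait_for_completion(&optee->call_queue, &w);
 *	optee_cq_wait_final(&optee->call_queue, &w);
 */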

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}
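
/*
 * Informal summary of the dispatch loop above; the left column is what
 * secure world returned in a0, the right column how it is handled:
 *
 *	OPTEE_SMC_RETURN_ETHREAD_LIMIT	wait in the call queue, retry
 *	OPTEE_SMC_RETURN_IS_RPC(a0)	service the RPC, resume the call
 *	anything else			final return code, loop exits
 */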

static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED | TEE_SHM_PRIV);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}
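
/*
 * The entry points below all follow the same shape, shown here only as
 * an informal sketch with error handling elided:
 *
 *	shm = get_msg_arg(ctx, num_params, &msg_arg, &msg_parg);
 *	msg_arg->cmd = OPTEE_MSG_CMD_...;
 *	...fill in msg_arg->params...
 *	optee_do_call_with_arg(ctx, msg_parg);
 *	...read results back out of msg_arg...
 *	tee_shm_free(shm);
 */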

int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);
	return rc;
}
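
/*
 * For reference, the msg_arg->params layout built by optee_open_session()
 * above:
 *
 *	params[0]	meta value: UUID of the TA to open a session to
 *	params[1]	meta value: client identity (UUID octets written by
 *			export_uuid(), login method in u.value.c)
 *	params[2..]	the caller-supplied parameters
 */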

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocations in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	__optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	__optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
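
/*
 * With OPTEE_MSG_NONCONTIG_PAGE_SIZE being 4k and 8-byte (u64) entries,
 * each pagelist page has 4096 / 8 = 512 slots: 511 page addresses plus
 * one trailing link to the next pagelist page, hence the "- 1" above.
 */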

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the build assert below looks redundant, but the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k and the user buffer offset is
	 * larger than 4k/8k/12k/etc., this skips the leading 4k chunks,
	 * because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
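
/*
 * Worked example of the sizing above: registering a 1000-page buffer
 * takes DIV_ROUND_UP(1000, 511) = 2 pagelist pages, i.e. 8192 bytes,
 * chained through their final next_page_data entries.
 */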

static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to register with OP-TEE as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the lowest bits of msg_arg->params->u.tmem.buf_ptr we store
	 * the buffer offset from the 4k page, as described in the OP-TEE
	 * ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}
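
/*
 * Example of the buf_ptr encoding above (addresses made up): a pagelist
 * at physical address 0x80000000 describing a buffer that starts 0x30
 * bytes into its first 4k page yields buf_ptr == 0x80000030; secure
 * world splits the value at the 4k boundary to recover both pieces.
 */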

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}