From bfcb795efc2d07bf99fd6c6e4f43951bc7354d5e Mon Sep 17 00:00:00 2001
From: Jens Wiklander <jens.wiklander@linaro.org>
Date: Wed, 21 Jul 2021 16:30:28 +0200
Subject: [PATCH 11/40] optee: isolate smc abi

Isolate the ABI based on raw SMCs. Code specific to the raw SMC ABI is
moved into smc_abi.c. This makes room for other ABIs with a clear
separation.

The driver changes to use module_init()/module_exit() instead of
module_platform_driver(). The platform_driver_register() and
platform_driver_unregister() functions are called directly to keep the
same behavior. This is needed because module_platform_driver() is based
on module_driver(), which can only be used once in a module.

A function optee_rpc_cmd() is factored out from the function
handle_rpc_func_cmd() to handle the ABI-independent part of RPC
processing.

This patch is not supposed to change the driver behavior; it's only a
matter of reorganizing the code.

Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Upstream-Status: Pending [Not submitted to upstream yet]
Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com>
---
 drivers/tee/optee/Makefile        |    4 +-
 drivers/tee/optee/call.c          |  369 +-------
 drivers/tee/optee/core.c          |  721 ++-------------
 drivers/tee/optee/optee_private.h |  106 ++-
 drivers/tee/optee/rpc.c           |  218 +----
 drivers/tee/optee/shm_pool.h      |   14 -
 drivers/tee/optee/smc_abi.c       | 1361 +++++++++++++++++++++++++++++
 7 files changed, 1506 insertions(+), 1287 deletions(-)
 delete mode 100644 drivers/tee/optee/shm_pool.h
 create mode 100644 drivers/tee/optee/smc_abi.c
39diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
40index 3aa33ea9e6a6..97ac3ab3e1c0 100644
41--- a/drivers/tee/optee/Makefile
42+++ b/drivers/tee/optee/Makefile
43@@ -4,8 +4,8 @@ optee-objs += core.o
44 optee-objs += call.o
45 optee-objs += rpc.o
46 optee-objs += supp.o
47-optee-objs += shm_pool.o
48 optee-objs += device.o
49+optee-objs += smc_abi.o
50
51 # for tracing framework to find optee_trace.h
52-CFLAGS_call.o := -I$(src)
53+CFLAGS_smc_abi.o := -I$(src)
54diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
55index ddedde45f1ee..9ff4f0812825 100644
56--- a/drivers/tee/optee/call.c
57+++ b/drivers/tee/optee/call.c
58@@ -2,28 +2,17 @@
59 /*
60 * Copyright (c) 2015-2021, Linaro Limited
61 */
62-#include <linux/arm-smccc.h>
63 #include <linux/device.h>
64 #include <linux/err.h>
65 #include <linux/errno.h>
66 #include <linux/mm.h>
67-#include <linux/sched.h>
68 #include <linux/slab.h>
69 #include <linux/tee_drv.h>
70 #include <linux/types.h>
71-#include <linux/uaccess.h>
72 #include "optee_private.h"
73-#include "optee_smc.h"
74-#define CREATE_TRACE_POINTS
75-#include "optee_trace.h"
76
77-struct optee_call_waiter {
78- struct list_head list_node;
79- struct completion c;
80-};
81-
82-static void optee_cq_wait_init(struct optee_call_queue *cq,
83- struct optee_call_waiter *w)
84+void optee_cq_wait_init(struct optee_call_queue *cq,
85+ struct optee_call_waiter *w)
86 {
87 /*
88 * We're preparing to make a call to secure world. In case we can't
89@@ -47,8 +36,8 @@ static void optee_cq_wait_init(struct optee_call_queue *cq,
90 mutex_unlock(&cq->mutex);
91 }
92
93-static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
94- struct optee_call_waiter *w)
95+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
96+ struct optee_call_waiter *w)
97 {
98 wait_for_completion(&w->c);
99
100@@ -74,8 +63,8 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
101 }
102 }
103
104-static void optee_cq_wait_final(struct optee_call_queue *cq,
105- struct optee_call_waiter *w)
106+void optee_cq_wait_final(struct optee_call_queue *cq,
107+ struct optee_call_waiter *w)
108 {
109 /*
110 * We're done with the call to secure world. The thread in secure
111@@ -115,73 +104,8 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata,
112 return NULL;
113 }
114
115-/**
116- * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
117- * @ctx: calling context
118- * @arg: shared memory holding the message to pass to secure world
119- *
120- * Does and SMC to OP-TEE in secure world and handles eventual resulting
121- * Remote Procedure Calls (RPC) from OP-TEE.
122- *
123- * Returns return code from secure world, 0 is OK
124- */
125-int optee_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg)
126-{
127- struct optee *optee = tee_get_drvdata(ctx->teedev);
128- struct optee_call_waiter w;
129- struct optee_rpc_param param = { };
130- struct optee_call_ctx call_ctx = { };
131- phys_addr_t parg;
132- int rc;
133-
134- rc = tee_shm_get_pa(arg, 0, &parg);
135- if (rc)
136- return rc;
137-
138- param.a0 = OPTEE_SMC_CALL_WITH_ARG;
139- reg_pair_from_64(&param.a1, &param.a2, parg);
140- /* Initialize waiter */
141- optee_cq_wait_init(&optee->call_queue, &w);
142- while (true) {
143- struct arm_smccc_res res;
144-
145- trace_optee_invoke_fn_begin(&param);
146- optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
147- param.a4, param.a5, param.a6, param.a7,
148- &res);
149- trace_optee_invoke_fn_end(&param, &res);
150-
151- if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
152- /*
153- * Out of threads in secure world, wait for a thread
154- * become available.
155- */
156- optee_cq_wait_for_completion(&optee->call_queue, &w);
157- } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
158- cond_resched();
159- param.a0 = res.a0;
160- param.a1 = res.a1;
161- param.a2 = res.a2;
162- param.a3 = res.a3;
163- optee_handle_rpc(ctx, &param, &call_ctx);
164- } else {
165- rc = res.a0;
166- break;
167- }
168- }
169-
170- optee_rpc_finalize_call(&call_ctx);
171- /*
172- * We're done with our thread in secure world, if there's any
173- * thread waiters wake up one.
174- */
175- optee_cq_wait_final(&optee->call_queue, &w);
176-
177- return rc;
178-}
179-
180-static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
181- struct optee_msg_arg **msg_arg)
182+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
183+ struct optee_msg_arg **msg_arg)
184 {
185 struct tee_shm *shm;
186 struct optee_msg_arg *ma;
187@@ -217,7 +141,7 @@ int optee_open_session(struct tee_context *ctx,
188 uuid_t client_uuid;
189
190 /* +2 for the meta parameters added below */
191- shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
192+ shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
193 if (IS_ERR(shm))
194 return PTR_ERR(shm);
195
196@@ -290,7 +214,7 @@ int optee_close_session_helper(struct tee_context *ctx, u32 session)
197 struct optee *optee = tee_get_drvdata(ctx->teedev);
198 struct optee_msg_arg *msg_arg;
199
200- shm = get_msg_arg(ctx, 0, &msg_arg);
201+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
202 if (IS_ERR(shm))
203 return PTR_ERR(shm);
204
205@@ -338,7 +262,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
206 if (!sess)
207 return -EINVAL;
208
209- shm = get_msg_arg(ctx, arg->num_params, &msg_arg);
210+ shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
211 if (IS_ERR(shm))
212 return PTR_ERR(shm);
213 msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
214@@ -384,7 +308,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
215 if (!sess)
216 return -EINVAL;
217
218- shm = get_msg_arg(ctx, 0, &msg_arg);
219+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
220 if (IS_ERR(shm))
221 return PTR_ERR(shm);
222
223@@ -397,182 +321,6 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
224 return 0;
225 }
226
227-/**
228- * optee_enable_shm_cache() - Enables caching of some shared memory allocation
229- * in OP-TEE
230- * @optee: main service struct
231- */
232-void optee_enable_shm_cache(struct optee *optee)
233-{
234- struct optee_call_waiter w;
235-
236- /* We need to retry until secure world isn't busy. */
237- optee_cq_wait_init(&optee->call_queue, &w);
238- while (true) {
239- struct arm_smccc_res res;
240-
241- optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
242- 0, &res);
243- if (res.a0 == OPTEE_SMC_RETURN_OK)
244- break;
245- optee_cq_wait_for_completion(&optee->call_queue, &w);
246- }
247- optee_cq_wait_final(&optee->call_queue, &w);
248-}
249-
250-/**
251- * __optee_disable_shm_cache() - Disables caching of some shared memory
252- * allocation in OP-TEE
253- * @optee: main service struct
254- * @is_mapped: true if the cached shared memory addresses were mapped by this
255- * kernel, are safe to dereference, and should be freed
256- */
257-static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
258-{
259- struct optee_call_waiter w;
260-
261- /* We need to retry until secure world isn't busy. */
262- optee_cq_wait_init(&optee->call_queue, &w);
263- while (true) {
264- union {
265- struct arm_smccc_res smccc;
266- struct optee_smc_disable_shm_cache_result result;
267- } res;
268-
269- optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
270- 0, &res.smccc);
271- if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
272- break; /* All shm's freed */
273- if (res.result.status == OPTEE_SMC_RETURN_OK) {
274- struct tee_shm *shm;
275-
276- /*
277- * Shared memory references that were not mapped by
278- * this kernel must be ignored to prevent a crash.
279- */
280- if (!is_mapped)
281- continue;
282-
283- shm = reg_pair_to_ptr(res.result.shm_upper32,
284- res.result.shm_lower32);
285- tee_shm_free(shm);
286- } else {
287- optee_cq_wait_for_completion(&optee->call_queue, &w);
288- }
289- }
290- optee_cq_wait_final(&optee->call_queue, &w);
291-}
292-
293-/**
294- * optee_disable_shm_cache() - Disables caching of mapped shared memory
295- * allocations in OP-TEE
296- * @optee: main service struct
297- */
298-void optee_disable_shm_cache(struct optee *optee)
299-{
300- return __optee_disable_shm_cache(optee, true);
301-}
302-
303-/**
304- * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
305- * allocations in OP-TEE which are not
306- * currently mapped
307- * @optee: main service struct
308- */
309-void optee_disable_unmapped_shm_cache(struct optee *optee)
310-{
311- return __optee_disable_shm_cache(optee, false);
312-}
313-
314-#define PAGELIST_ENTRIES_PER_PAGE \
315- ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
316-
317-/**
318- * optee_fill_pages_list() - write list of user pages to given shared
319- * buffer.
320- *
321- * @dst: page-aligned buffer where list of pages will be stored
322- * @pages: array of pages that represents shared buffer
323- * @num_pages: number of entries in @pages
324- * @page_offset: offset of user buffer from page start
325- *
326- * @dst should be big enough to hold list of user page addresses and
327- * links to the next pages of buffer
328- */
329-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
330- size_t page_offset)
331-{
332- int n = 0;
333- phys_addr_t optee_page;
334- /*
335- * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
336- * for details.
337- */
338- struct {
339- u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
340- u64 next_page_data;
341- } *pages_data;
342-
343- /*
344- * Currently OP-TEE uses 4k page size and it does not looks
345- * like this will change in the future. On other hand, there are
346- * no know ARM architectures with page size < 4k.
347- * Thus the next built assert looks redundant. But the following
348- * code heavily relies on this assumption, so it is better be
349- * safe than sorry.
350- */
351- BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
352-
353- pages_data = (void *)dst;
354- /*
355- * If linux page is bigger than 4k, and user buffer offset is
356- * larger than 4k/8k/12k/etc this will skip first 4k pages,
357- * because they bear no value data for OP-TEE.
358- */
359- optee_page = page_to_phys(*pages) +
360- round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
361-
362- while (true) {
363- pages_data->pages_list[n++] = optee_page;
364-
365- if (n == PAGELIST_ENTRIES_PER_PAGE) {
366- pages_data->next_page_data =
367- virt_to_phys(pages_data + 1);
368- pages_data++;
369- n = 0;
370- }
371-
372- optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
373- if (!(optee_page & ~PAGE_MASK)) {
374- if (!--num_pages)
375- break;
376- pages++;
377- optee_page = page_to_phys(*pages);
378- }
379- }
380-}
381-
382-/*
383- * The final entry in each pagelist page is a pointer to the next
384- * pagelist page.
385- */
386-static size_t get_pages_list_size(size_t num_entries)
387-{
388- int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
389-
390- return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
391-}
392-
393-u64 *optee_allocate_pages_list(size_t num_entries)
394-{
395- return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
396-}
397-
398-void optee_free_pages_list(void *list, size_t num_entries)
399-{
400- free_pages_exact(list, get_pages_list_size(num_entries));
401-}
402-
403 static bool is_normal_memory(pgprot_t p)
404 {
405 #if defined(CONFIG_ARM)
406@@ -596,7 +344,7 @@ static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
407 return -EINVAL;
408 }
409
410-static int check_mem_type(unsigned long start, size_t num_pages)
411+int optee_check_mem_type(unsigned long start, size_t num_pages)
412 {
413 struct mm_struct *mm = current->mm;
414 int rc;
415@@ -615,94 +363,3 @@ static int check_mem_type(unsigned long start, size_t num_pages)
416
417 return rc;
418 }
419-
420-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
421- struct page **pages, size_t num_pages,
422- unsigned long start)
423-{
424- struct optee *optee = tee_get_drvdata(ctx->teedev);
425- struct optee_msg_arg *msg_arg;
426- struct tee_shm *shm_arg;
427- u64 *pages_list;
428- int rc;
429-
430- if (!num_pages)
431- return -EINVAL;
432-
433- rc = check_mem_type(start, num_pages);
434- if (rc)
435- return rc;
436-
437- pages_list = optee_allocate_pages_list(num_pages);
438- if (!pages_list)
439- return -ENOMEM;
440-
441- shm_arg = get_msg_arg(ctx, 1, &msg_arg);
442- if (IS_ERR(shm_arg)) {
443- rc = PTR_ERR(shm_arg);
444- goto out;
445- }
446-
447- optee_fill_pages_list(pages_list, pages, num_pages,
448- tee_shm_get_page_offset(shm));
449-
450- msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
451- msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
452- OPTEE_MSG_ATTR_NONCONTIG;
453- msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
454- msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
455- /*
456- * In the least bits of msg_arg->params->u.tmem.buf_ptr we
457- * store buffer offset from 4k page, as described in OP-TEE ABI.
458- */
459- msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
460- (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
461-
462- if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
463- msg_arg->ret != TEEC_SUCCESS)
464- rc = -EINVAL;
465-
466- tee_shm_free(shm_arg);
467-out:
468- optee_free_pages_list(pages_list, num_pages);
469- return rc;
470-}
471-
472-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
473-{
474- struct optee *optee = tee_get_drvdata(ctx->teedev);
475- struct optee_msg_arg *msg_arg;
476- struct tee_shm *shm_arg;
477- int rc = 0;
478-
479- shm_arg = get_msg_arg(ctx, 1, &msg_arg);
480- if (IS_ERR(shm_arg))
481- return PTR_ERR(shm_arg);
482-
483- msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
484-
485- msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
486- msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
487-
488- if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
489- msg_arg->ret != TEEC_SUCCESS)
490- rc = -EINVAL;
491- tee_shm_free(shm_arg);
492- return rc;
493-}
494-
495-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
496- struct page **pages, size_t num_pages,
497- unsigned long start)
498-{
499- /*
500- * We don't want to register supplicant memory in OP-TEE.
501- * Instead information about it will be passed in RPC code.
502- */
503- return check_mem_type(start, num_pages);
504-}
505-
506-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
507-{
508- return 0;
509-}
510diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
511index 26492d3115f5..27b855325b33 100644
512--- a/drivers/tee/optee/core.c
513+++ b/drivers/tee/optee/core.c
514@@ -1,260 +1,71 @@
515 // SPDX-License-Identifier: GPL-2.0-only
516 /*
517 * Copyright (c) 2015-2021, Linaro Limited
518+ * Copyright (c) 2016, EPAM Systems
519 */
520
521 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
522
523-#include <linux/arm-smccc.h>
524 #include <linux/crash_dump.h>
525 #include <linux/errno.h>
526 #include <linux/io.h>
527+#include <linux/mm.h>
528 #include <linux/module.h>
529-#include <linux/of.h>
530-#include <linux/of_platform.h>
531-#include <linux/platform_device.h>
532 #include <linux/slab.h>
533 #include <linux/string.h>
534 #include <linux/tee_drv.h>
535 #include <linux/types.h>
536-#include <linux/uaccess.h>
537 #include <linux/workqueue.h>
538 #include "optee_private.h"
539-#include "optee_smc.h"
540-#include "shm_pool.h"
541
542-#define DRIVER_NAME "optee"
543-
544-#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
545-
546-static void from_msg_param_value(struct tee_param *p, u32 attr,
547- const struct optee_msg_param *mp)
548-{
549- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
550- attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
551- p->u.value.a = mp->u.value.a;
552- p->u.value.b = mp->u.value.b;
553- p->u.value.c = mp->u.value.c;
554-}
555-
556-static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
557- const struct optee_msg_param *mp)
558-{
559- struct tee_shm *shm;
560- phys_addr_t pa;
561- int rc;
562-
563- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
564- attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
565- p->u.memref.size = mp->u.tmem.size;
566- shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
567- if (!shm) {
568- p->u.memref.shm_offs = 0;
569- p->u.memref.shm = NULL;
570- return 0;
571- }
572-
573- rc = tee_shm_get_pa(shm, 0, &pa);
574- if (rc)
575- return rc;
576-
577- p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
578- p->u.memref.shm = shm;
579-
580- /* Check that the memref is covered by the shm object */
581- if (p->u.memref.size) {
582- size_t o = p->u.memref.shm_offs +
583- p->u.memref.size - 1;
584-
585- rc = tee_shm_get_pa(shm, o, NULL);
586- if (rc)
587- return rc;
588- }
589-
590- return 0;
591-}
592-
593-static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
594- const struct optee_msg_param *mp)
595+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
596+ struct tee_shm *shm, size_t size,
597+ int (*shm_register)(struct tee_context *ctx,
598+ struct tee_shm *shm,
599+ struct page **pages,
600+ size_t num_pages,
601+ unsigned long start))
602 {
603- struct tee_shm *shm;
604+ unsigned int order = get_order(size);
605+ struct page *page;
606+ int rc = 0;
607
608- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
609- attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
610- p->u.memref.size = mp->u.rmem.size;
611- shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
612-
613- if (shm) {
614- p->u.memref.shm_offs = mp->u.rmem.offs;
615- p->u.memref.shm = shm;
616- } else {
617- p->u.memref.shm_offs = 0;
618- p->u.memref.shm = NULL;
619- }
620-}
621-
622-/**
623- * optee_from_msg_param() - convert from OPTEE_MSG parameters to
624- * struct tee_param
625- * @optee: main service struct
626- * @params: subsystem internal parameter representation
627- * @num_params: number of elements in the parameter arrays
628- * @msg_params: OPTEE_MSG parameters
629- * Returns 0 on success or <0 on failure
630- */
631-static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
632- size_t num_params,
633- const struct optee_msg_param *msg_params)
634-{
635- int rc;
636- size_t n;
637+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
638+ if (!page)
639+ return -ENOMEM;
640
641- for (n = 0; n < num_params; n++) {
642- struct tee_param *p = params + n;
643- const struct optee_msg_param *mp = msg_params + n;
644- u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
645+ shm->kaddr = page_address(page);
646+ shm->paddr = page_to_phys(page);
647+ shm->size = PAGE_SIZE << order;
648
649- switch (attr) {
650- case OPTEE_MSG_ATTR_TYPE_NONE:
651- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
652- memset(&p->u, 0, sizeof(p->u));
653- break;
654- case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
655- case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
656- case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
657- from_msg_param_value(p, attr, mp);
658- break;
659- case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
660- case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
661- case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
662- rc = from_msg_param_tmp_mem(p, attr, mp);
663- if (rc)
664- return rc;
665- break;
666- case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
667- case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
668- case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
669- from_msg_param_reg_mem(p, attr, mp);
670- break;
671+ if (shm_register) {
672+ unsigned int nr_pages = 1 << order, i;
673+ struct page **pages;
674
675- default:
676- return -EINVAL;
677+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
678+ if (!pages) {
679+ rc = -ENOMEM;
680+ goto err;
681 }
682- }
683- return 0;
684-}
685-
686-static void to_msg_param_value(struct optee_msg_param *mp,
687- const struct tee_param *p)
688-{
689- mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
690- TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
691- mp->u.value.a = p->u.value.a;
692- mp->u.value.b = p->u.value.b;
693- mp->u.value.c = p->u.value.c;
694-}
695-
696-static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
697- const struct tee_param *p)
698-{
699- int rc;
700- phys_addr_t pa;
701
702- mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
703- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
704-
705- mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
706- mp->u.tmem.size = p->u.memref.size;
707+ for (i = 0; i < nr_pages; i++) {
708+ pages[i] = page;
709+ page++;
710+ }
711
712- if (!p->u.memref.shm) {
713- mp->u.tmem.buf_ptr = 0;
714- return 0;
715+ shm->flags |= TEE_SHM_REGISTER;
716+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
717+ (unsigned long)shm->kaddr);
718+ kfree(pages);
719+ if (rc)
720+ goto err;
721 }
722
723- rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
724- if (rc)
725- return rc;
726-
727- mp->u.tmem.buf_ptr = pa;
728- mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
729- OPTEE_MSG_ATTR_CACHE_SHIFT;
730-
731- return 0;
732-}
733-
734-static int to_msg_param_reg_mem(struct optee_msg_param *mp,
735- const struct tee_param *p)
736-{
737- mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
738- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
739-
740- mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
741- mp->u.rmem.size = p->u.memref.size;
742- mp->u.rmem.offs = p->u.memref.shm_offs;
743- return 0;
744-}
745-
746-/**
747- * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
748- * @optee: main service struct
749- * @msg_params: OPTEE_MSG parameters
750- * @num_params: number of elements in the parameter arrays
751- * @params: subsystem itnernal parameter representation
752- * Returns 0 on success or <0 on failure
753- */
754-static int optee_to_msg_param(struct optee *optee,
755- struct optee_msg_param *msg_params,
756- size_t num_params, const struct tee_param *params)
757-{
758- int rc;
759- size_t n;
760-
761- for (n = 0; n < num_params; n++) {
762- const struct tee_param *p = params + n;
763- struct optee_msg_param *mp = msg_params + n;
764-
765- switch (p->attr) {
766- case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
767- mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
768- memset(&mp->u, 0, sizeof(mp->u));
769- break;
770- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
771- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
772- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
773- to_msg_param_value(mp, p);
774- break;
775- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
776- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
777- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
778- if (tee_shm_is_registered(p->u.memref.shm))
779- rc = to_msg_param_reg_mem(mp, p);
780- else
781- rc = to_msg_param_tmp_mem(mp, p);
782- if (rc)
783- return rc;
784- break;
785- default:
786- return -EINVAL;
787- }
788- }
789 return 0;
790-}
791
792-static void optee_get_version(struct tee_device *teedev,
793- struct tee_ioctl_version_data *vers)
794-{
795- struct tee_ioctl_version_data v = {
796- .impl_id = TEE_IMPL_ID_OPTEE,
797- .impl_caps = TEE_OPTEE_CAP_TZ,
798- .gen_caps = TEE_GEN_CAP_GP,
799- };
800- struct optee *optee = tee_get_drvdata(teedev);
801-
802- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
803- v.gen_caps |= TEE_GEN_CAP_REG_MEM;
804- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
805- v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
806- *vers = v;
807+err:
808+ __free_pages(page, order);
809+ return rc;
810 }
811
812 static void optee_bus_scan(struct work_struct *work)
813@@ -262,7 +73,7 @@ static void optee_bus_scan(struct work_struct *work)
814 WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
815 }
816
817-static int optee_open(struct tee_context *ctx)
818+int optee_open(struct tee_context *ctx, bool cap_memref_null)
819 {
820 struct optee_context_data *ctxdata;
821 struct tee_device *teedev = ctx->teedev;
822@@ -300,11 +111,7 @@ static int optee_open(struct tee_context *ctx)
823 mutex_init(&ctxdata->mutex);
824 INIT_LIST_HEAD(&ctxdata->sess_list);
825
826- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
827- ctx->cap_memref_null = true;
828- else
829- ctx->cap_memref_null = false;
830-
831+ ctx->cap_memref_null = cap_memref_null;
832 ctx->data = ctxdata;
833 return 0;
834 }
835@@ -330,12 +137,12 @@ static void optee_release_helper(struct tee_context *ctx,
836 ctx->data = NULL;
837 }
838
839-static void optee_release(struct tee_context *ctx)
840+void optee_release(struct tee_context *ctx)
841 {
842 optee_release_helper(ctx, optee_close_session_helper);
843 }
844
845-static void optee_release_supp(struct tee_context *ctx)
846+void optee_release_supp(struct tee_context *ctx)
847 {
848 struct optee *optee = tee_get_drvdata(ctx->teedev);
849
850@@ -347,287 +154,11 @@ static void optee_release_supp(struct tee_context *ctx)
851 optee_supp_release(&optee->supp);
852 }
853
854-static const struct tee_driver_ops optee_clnt_ops = {
855- .get_version = optee_get_version,
856- .open = optee_open,
857- .release = optee_release,
858- .open_session = optee_open_session,
859- .close_session = optee_close_session,
860- .invoke_func = optee_invoke_func,
861- .cancel_req = optee_cancel_req,
862- .shm_register = optee_shm_register,
863- .shm_unregister = optee_shm_unregister,
864-};
865-
866-static const struct tee_desc optee_clnt_desc = {
867- .name = DRIVER_NAME "-clnt",
868- .ops = &optee_clnt_ops,
869- .owner = THIS_MODULE,
870-};
871-
872-static const struct tee_driver_ops optee_supp_ops = {
873- .get_version = optee_get_version,
874- .open = optee_open,
875- .release = optee_release_supp,
876- .supp_recv = optee_supp_recv,
877- .supp_send = optee_supp_send,
878- .shm_register = optee_shm_register_supp,
879- .shm_unregister = optee_shm_unregister_supp,
880-};
881-
882-static const struct tee_desc optee_supp_desc = {
883- .name = DRIVER_NAME "-supp",
884- .ops = &optee_supp_ops,
885- .owner = THIS_MODULE,
886- .flags = TEE_DESC_PRIVILEGED,
887-};
888-
889-static const struct optee_ops optee_ops = {
890- .do_call_with_arg = optee_do_call_with_arg,
891- .to_msg_param = optee_to_msg_param,
892- .from_msg_param = optee_from_msg_param,
893-};
894-
895-static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
896-{
897- struct arm_smccc_res res;
898-
899- invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
900-
901- if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
902- res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
903- return true;
904- return false;
905-}
906-
907-static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
908-{
909- union {
910- struct arm_smccc_res smccc;
911- struct optee_smc_call_get_os_revision_result result;
912- } res = {
913- .result = {
914- .build_id = 0
915- }
916- };
917-
918- invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
919- &res.smccc);
920-
921- if (res.result.build_id)
922- pr_info("revision %lu.%lu (%08lx)", res.result.major,
923- res.result.minor, res.result.build_id);
924- else
925- pr_info("revision %lu.%lu", res.result.major, res.result.minor);
926-}
927-
928-static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
929-{
930- union {
931- struct arm_smccc_res smccc;
932- struct optee_smc_calls_revision_result result;
933- } res;
934-
935- invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
936-
937- if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
938- (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
939- return true;
940- return false;
941-}
942-
943-static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
944- u32 *sec_caps)
945-{
946- union {
947- struct arm_smccc_res smccc;
948- struct optee_smc_exchange_capabilities_result result;
949- } res;
950- u32 a1 = 0;
951-
952- /*
953- * TODO This isn't enough to tell if it's UP system (from kernel
954- * point of view) or not, is_smp() returns the the information
955- * needed, but can't be called directly from here.
956- */
957- if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
958- a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
959-
960- invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
961- &res.smccc);
962-
963- if (res.result.status != OPTEE_SMC_RETURN_OK)
964- return false;
965-
966- *sec_caps = res.result.capabilities;
967- return true;
968-}
969-
970-static struct tee_shm_pool *optee_config_dyn_shm(void)
971-{
972- struct tee_shm_pool_mgr *priv_mgr;
973- struct tee_shm_pool_mgr *dmabuf_mgr;
974- void *rc;
975-
976- rc = optee_shm_pool_alloc_pages();
977- if (IS_ERR(rc))
978- return rc;
979- priv_mgr = rc;
980-
981- rc = optee_shm_pool_alloc_pages();
982- if (IS_ERR(rc)) {
983- tee_shm_pool_mgr_destroy(priv_mgr);
984- return rc;
985- }
986- dmabuf_mgr = rc;
987-
988- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
989- if (IS_ERR(rc)) {
990- tee_shm_pool_mgr_destroy(priv_mgr);
991- tee_shm_pool_mgr_destroy(dmabuf_mgr);
992- }
993-
994- return rc;
995-}
996-
997-static struct tee_shm_pool *
998-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
999+void optee_remove_common(struct optee *optee)
1000 {
1001- union {
1002- struct arm_smccc_res smccc;
1003- struct optee_smc_get_shm_config_result result;
1004- } res;
1005- unsigned long vaddr;
1006- phys_addr_t paddr;
1007- size_t size;
1008- phys_addr_t begin;
1009- phys_addr_t end;
1010- void *va;
1011- struct tee_shm_pool_mgr *priv_mgr;
1012- struct tee_shm_pool_mgr *dmabuf_mgr;
1013- void *rc;
1014- const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
1015-
1016- invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1017- if (res.result.status != OPTEE_SMC_RETURN_OK) {
1018- pr_err("static shm service not available\n");
1019- return ERR_PTR(-ENOENT);
1020- }
1021-
1022- if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
1023- pr_err("only normal cached shared memory supported\n");
1024- return ERR_PTR(-EINVAL);
1025- }
1026-
1027- begin = roundup(res.result.start, PAGE_SIZE);
1028- end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
1029- paddr = begin;
1030- size = end - begin;
1031-
1032- if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
1033- pr_err("too small shared memory area\n");
1034- return ERR_PTR(-EINVAL);
1035- }
1036-
1037- va = memremap(paddr, size, MEMREMAP_WB);
1038- if (!va) {
1039- pr_err("shared memory ioremap failed\n");
1040- return ERR_PTR(-EINVAL);
1041- }
1042- vaddr = (unsigned long)va;
1043-
1044- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
1045- 3 /* 8 bytes aligned */);
1046- if (IS_ERR(rc))
1047- goto err_memunmap;
1048- priv_mgr = rc;
1049-
1050- vaddr += sz;
1051- paddr += sz;
1052- size -= sz;
1053-
1054- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
1055- if (IS_ERR(rc))
1056- goto err_free_priv_mgr;
1057- dmabuf_mgr = rc;
1058-
1059- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
1060- if (IS_ERR(rc))
1061- goto err_free_dmabuf_mgr;
1062-
1063- *memremaped_shm = va;
1064-
1065- return rc;
1066-
1067-err_free_dmabuf_mgr:
1068- tee_shm_pool_mgr_destroy(dmabuf_mgr);
1069-err_free_priv_mgr:
1070- tee_shm_pool_mgr_destroy(priv_mgr);
1071-err_memunmap:
1072- memunmap(va);
1073- return rc;
1074-}
1075-
1076-/* Simple wrapper functions to be able to use a function pointer */
1077-static void optee_smccc_smc(unsigned long a0, unsigned long a1,
1078- unsigned long a2, unsigned long a3,
1079- unsigned long a4, unsigned long a5,
1080- unsigned long a6, unsigned long a7,
1081- struct arm_smccc_res *res)
1082-{
1083- arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1084-}
1085-
1086-static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
1087- unsigned long a2, unsigned long a3,
1088- unsigned long a4, unsigned long a5,
1089- unsigned long a6, unsigned long a7,
1090- struct arm_smccc_res *res)
1091-{
1092- arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1093-}
1094-
1095-static optee_invoke_fn *get_invoke_func(struct device *dev)
1096-{
1097- const char *method;
1098-
1099- pr_info("probing for conduit method.\n");
1100-
1101- if (device_property_read_string(dev, "method", &method)) {
1102- pr_warn("missing \"method\" property\n");
1103- return ERR_PTR(-ENXIO);
1104- }
1105-
1106- if (!strcmp("hvc", method))
1107- return optee_smccc_hvc;
1108- else if (!strcmp("smc", method))
1109- return optee_smccc_smc;
1110-
1111- pr_warn("invalid \"method\" property: %s\n", method);
1112- return ERR_PTR(-EINVAL);
1113-}
1114-
1115-/* optee_remove - Device Removal Routine
1116- * @pdev: platform device information struct
1117- *
1118- * optee_remove is called by platform subsystem to alert the driver
1119- * that it should release the device
1120- */
1121-
1122-static int optee_remove(struct platform_device *pdev)
1123-{
1124- struct optee *optee = platform_get_drvdata(pdev);
1125-
1126 /* Unregister OP-TEE specific client devices on TEE bus */
1127 optee_unregister_devices();
1128
1129- /*
1130- * Ask OP-TEE to free all cached shared memory objects to decrease
1131- * reference counters and also avoid wild pointers in secure world
1132- * into the old shared memory range.
1133- */
1134- optee_disable_shm_cache(optee);
1135-
1136 /*
1137 * The two devices have to be unregistered before we can free the
1138 * other resources.
1139@@ -636,39 +167,13 @@ static int optee_remove(struct platform_device *pdev)
1140 tee_device_unregister(optee->teedev);
1141
1142 tee_shm_pool_free(optee->pool);
1143- if (optee->memremaped_shm)
1144- memunmap(optee->memremaped_shm);
1145 optee_wait_queue_exit(&optee->wait_queue);
1146 optee_supp_uninit(&optee->supp);
1147 mutex_destroy(&optee->call_queue.mutex);
1148-
1149- kfree(optee);
1150-
1151- return 0;
1152-}
1153-
1154-/* optee_shutdown - Device Removal Routine
1155- * @pdev: platform device information struct
1156- *
1157- * platform_shutdown is called by the platform subsystem to alert
1158- * the driver that a shutdown, reboot, or kexec is happening and
1159- * device must be disabled.
1160- */
1161-static void optee_shutdown(struct platform_device *pdev)
1162-{
1163- optee_disable_shm_cache(platform_get_drvdata(pdev));
1164 }
1165
1166-static int optee_probe(struct platform_device *pdev)
1167+static int optee_core_init(void)
1168 {
1169- optee_invoke_fn *invoke_fn;
1170- struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
1171- struct optee *optee = NULL;
1172- void *memremaped_shm = NULL;
1173- struct tee_device *teedev;
1174- u32 sec_caps;
1175- int rc;
1176-
1177 /*
1178 * The kernel may have crashed at the same time that all available
1179 * secure world threads were suspended and we cannot reschedule the
1180@@ -679,139 +184,15 @@ static int optee_probe(struct platform_device *pdev)
1181 if (is_kdump_kernel())
1182 return -ENODEV;
1183
1184- invoke_fn = get_invoke_func(&pdev->dev);
1185- if (IS_ERR(invoke_fn))
1186- return PTR_ERR(invoke_fn);
1187-
1188- if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
1189- pr_warn("api uid mismatch\n");
1190- return -EINVAL;
1191- }
1192-
1193- optee_msg_get_os_revision(invoke_fn);
1194-
1195- if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
1196- pr_warn("api revision mismatch\n");
1197- return -EINVAL;
1198- }
1199-
1200- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
1201- pr_warn("capabilities mismatch\n");
1202- return -EINVAL;
1203- }
1204-
1205- /*
1206- * Try to use dynamic shared memory if possible
1207- */
1208- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1209- pool = optee_config_dyn_shm();
1210-
1211- /*
1212- * If dynamic shared memory is not available or failed - try static one
1213- */
1214- if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
1215- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
1216-
1217- if (IS_ERR(pool))
1218- return PTR_ERR(pool);
1219-
1220- optee = kzalloc(sizeof(*optee), GFP_KERNEL);
1221- if (!optee) {
1222- rc = -ENOMEM;
1223- goto err;
1224- }
1225-
1226- optee->ops = &optee_ops;
1227- optee->invoke_fn = invoke_fn;
1228- optee->sec_caps = sec_caps;
1229-
1230- teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
1231- if (IS_ERR(teedev)) {
1232- rc = PTR_ERR(teedev);
1233- goto err;
1234- }
1235- optee->teedev = teedev;
1236-
1237- teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
1238- if (IS_ERR(teedev)) {
1239- rc = PTR_ERR(teedev);
1240- goto err;
1241- }
1242- optee->supp_teedev = teedev;
1243-
1244- rc = tee_device_register(optee->teedev);
1245- if (rc)
1246- goto err;
1247-
1248- rc = tee_device_register(optee->supp_teedev);
1249- if (rc)
1250- goto err;
1251-
1252- mutex_init(&optee->call_queue.mutex);
1253- INIT_LIST_HEAD(&optee->call_queue.waiters);
1254- optee_wait_queue_init(&optee->wait_queue);
1255- optee_supp_init(&optee->supp);
1256- optee->memremaped_shm = memremaped_shm;
1257- optee->pool = pool;
1258-
1259- /*
1260- * Ensure that there are no pre-existing shm objects before enabling
1261- * the shm cache so that there's no chance of receiving an invalid
1262- * address during shutdown. This could occur, for example, if we're
1263- * kexec booting from an older kernel that did not properly cleanup the
1264- * shm cache.
1265- */
1266- optee_disable_unmapped_shm_cache(optee);
1267-
1268- optee_enable_shm_cache(optee);
1269-
1270- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1271- pr_info("dynamic shared memory is enabled\n");
1272-
1273- platform_set_drvdata(pdev, optee);
1274-
1275- rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
1276- if (rc) {
1277- optee_remove(pdev);
1278- return rc;
1279- }
1280-
1281- pr_info("initialized driver\n");
1282- return 0;
1283-err:
1284- if (optee) {
1285- /*
1286- * tee_device_unregister() is safe to call even if the
1287- * devices hasn't been registered with
1288- * tee_device_register() yet.
1289- */
1290- tee_device_unregister(optee->supp_teedev);
1291- tee_device_unregister(optee->teedev);
1292- kfree(optee);
1293- }
1294- if (pool)
1295- tee_shm_pool_free(pool);
1296- if (memremaped_shm)
1297- memunmap(memremaped_shm);
1298- return rc;
1299+ return optee_smc_abi_register();
1300 }
1301+module_init(optee_core_init);
1302
1303-static const struct of_device_id optee_dt_match[] = {
1304- { .compatible = "linaro,optee-tz" },
1305- {},
1306-};
1307-MODULE_DEVICE_TABLE(of, optee_dt_match);
1308-
1309-static struct platform_driver optee_driver = {
1310- .probe = optee_probe,
1311- .remove = optee_remove,
1312- .shutdown = optee_shutdown,
1313- .driver = {
1314- .name = "optee",
1315- .of_match_table = optee_dt_match,
1316- },
1317-};
1318-module_platform_driver(optee_driver);
1319+static void optee_core_exit(void)
1320+{
1321+ optee_smc_abi_unregister();
1322+}
1323+module_exit(optee_core_exit);
1324
1325 MODULE_AUTHOR("Linaro");
1326 MODULE_DESCRIPTION("OP-TEE driver");
1327diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
1328index beca97017996..40af6b059b20 100644
1329--- a/drivers/tee/optee/optee_private.h
1330+++ b/drivers/tee/optee/optee_private.h
1331@@ -12,6 +12,8 @@
1332 #include <linux/types.h>
1333 #include "optee_msg.h"
1334
1335+#define DRIVER_NAME "optee"
1336+
1337 #define OPTEE_MAX_ARG_SIZE 1024
1338
1339 /* Some Global Platform error codes used in this driver */
1340@@ -29,6 +31,11 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
1341 unsigned long, unsigned long,
1342 struct arm_smccc_res *);
1343
1344+struct optee_call_waiter {
1345+ struct list_head list_node;
1346+ struct completion c;
1347+};
1348+
1349 struct optee_call_queue {
1350 /* Serializes access to this struct */
1351 struct mutex mutex;
1352@@ -66,6 +73,19 @@ struct optee_supp {
1353 struct completion reqs_c;
1354 };
1355
1356+/**
1357+ * struct optee_smc - SMC ABI specifics
1358+ * @invoke_fn: function to issue smc or hvc
1359+ * @memremaped_shm virtual address of memory in shared memory pool
1360+ * @sec_caps: secure world capabilities defined by
1361+ * OPTEE_SMC_SEC_CAP_* in optee_smc.h
1362+ */
1363+struct optee_smc {
1364+ optee_invoke_fn *invoke_fn;
1365+ void *memremaped_shm;
1366+ u32 sec_caps;
1367+};
1368+
1369 struct optee;
1370
1371 /**
1372@@ -95,15 +115,12 @@ struct optee_ops {
1373 * @ops: internal callbacks for different ways to reach secure
1374 * world
1375 * @teedev: client device
1376- * @invoke_fn: function to issue smc or hvc
1377+ * @smc: specific to SMC ABI
1378 * @call_queue: queue of threads waiting to call @invoke_fn
1379 * @wait_queue: queue of threads from secure world waiting for a
1380 * secure world sync object
1381 * @supp: supplicant synchronization struct for RPC to supplicant
1382 * @pool: shared memory pool
1383- * @memremaped_shm virtual address of memory in shared memory pool
1384- * @sec_caps: secure world capabilities defined by
1385- * OPTEE_SMC_SEC_CAP_* in optee_smc.h
1386 * @scan_bus_done flag if device registation was already done.
1387 * @scan_bus_wq workqueue to scan optee bus and register optee drivers
1388 * @scan_bus_work workq to scan optee bus and register optee drivers
1389@@ -112,13 +129,11 @@ struct optee {
1390 struct tee_device *supp_teedev;
1391 struct tee_device *teedev;
1392 const struct optee_ops *ops;
1393- optee_invoke_fn *invoke_fn;
1394+ struct optee_smc smc;
1395 struct optee_call_queue call_queue;
1396 struct optee_wait_queue wait_queue;
1397 struct optee_supp supp;
1398 struct tee_shm_pool *pool;
1399- void *memremaped_shm;
1400- u32 sec_caps;
1401 bool scan_bus_done;
1402 struct workqueue_struct *scan_bus_wq;
1403 struct work_struct scan_bus_work;
1404@@ -153,10 +168,6 @@ struct optee_call_ctx {
1405 size_t num_entries;
1406 };
1407
1408-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
1409- struct optee_call_ctx *call_ctx);
1410-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
1411-
1412 void optee_wait_queue_init(struct optee_wait_queue *wq);
1413 void optee_wait_queue_exit(struct optee_wait_queue *wq);
1414
1415@@ -174,7 +185,6 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
1416 int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
1417 struct tee_param *param);
1418
1419-int optee_do_call_with_arg(struct tee_context *ctx, struct tee_shm *arg);
1420 int optee_open_session(struct tee_context *ctx,
1421 struct tee_ioctl_open_session_arg *arg,
1422 struct tee_param *param);
1423@@ -184,30 +194,60 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
1424 struct tee_param *param);
1425 int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
1426
1427-void optee_enable_shm_cache(struct optee *optee);
1428-void optee_disable_shm_cache(struct optee *optee);
1429-void optee_disable_unmapped_shm_cache(struct optee *optee);
1430-
1431-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
1432- struct page **pages, size_t num_pages,
1433- unsigned long start);
1434-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
1435-
1436-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
1437- struct page **pages, size_t num_pages,
1438- unsigned long start);
1439-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
1440-
1441-u64 *optee_allocate_pages_list(size_t num_entries);
1442-void optee_free_pages_list(void *array, size_t num_entries);
1443-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
1444- size_t page_offset);
1445-
1446 #define PTA_CMD_GET_DEVICES 0x0
1447 #define PTA_CMD_GET_DEVICES_SUPP 0x1
1448 int optee_enumerate_devices(u32 func);
1449 void optee_unregister_devices(void);
1450
1451+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
1452+ struct tee_shm *shm, size_t size,
1453+ int (*shm_register)(struct tee_context *ctx,
1454+ struct tee_shm *shm,
1455+ struct page **pages,
1456+ size_t num_pages,
1457+ unsigned long start));
1458+
1459+
1460+void optee_remove_common(struct optee *optee);
1461+int optee_open(struct tee_context *ctx, bool cap_memref_null);
1462+void optee_release(struct tee_context *ctx);
1463+void optee_release_supp(struct tee_context *ctx);
1464+
1465+static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr,
1466+ const struct optee_msg_param *mp)
1467+{
1468+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
1469+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
1470+ p->u.value.a = mp->u.value.a;
1471+ p->u.value.b = mp->u.value.b;
1472+ p->u.value.c = mp->u.value.c;
1473+}
1474+
1475+static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
1476+ const struct tee_param *p)
1477+{
1478+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
1479+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
1480+ mp->u.value.a = p->u.value.a;
1481+ mp->u.value.b = p->u.value.b;
1482+ mp->u.value.c = p->u.value.c;
1483+}
1484+
1485+void optee_cq_wait_init(struct optee_call_queue *cq,
1486+ struct optee_call_waiter *w);
1487+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
1488+ struct optee_call_waiter *w);
1489+void optee_cq_wait_final(struct optee_call_queue *cq,
1490+ struct optee_call_waiter *w);
1491+int optee_check_mem_type(unsigned long start, size_t num_pages);
1492+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
1493+ struct optee_msg_arg **msg_arg);
1494+
1495+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
1496+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
1497+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
1498+ struct optee_msg_arg *arg);
1499+
1500 /*
1501 * Small helpers
1502 */
1503@@ -223,4 +263,8 @@ static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
1504 *reg1 = val;
1505 }
1506
1507+/* Registration of the ABIs */
1508+int optee_smc_abi_register(void);
1509+void optee_smc_abi_unregister(void);
1510+
1511 #endif /*OPTEE_PRIVATE_H*/
1512diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
1513index 309258d47790..cd642e340eaf 100644
1514--- a/drivers/tee/optee/rpc.c
1515+++ b/drivers/tee/optee/rpc.c
1516@@ -6,12 +6,10 @@
1517 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1518
1519 #include <linux/delay.h>
1520-#include <linux/device.h>
1521 #include <linux/i2c.h>
1522 #include <linux/slab.h>
1523 #include <linux/tee_drv.h>
1524 #include "optee_private.h"
1525-#include "optee_smc.h"
1526 #include "optee_rpc_cmd.h"
1527
1528 struct wq_entry {
1529@@ -266,7 +264,7 @@ static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee,
1530 kfree(params);
1531 }
1532
1533-static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
1534+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
1535 {
1536 u32 ret;
1537 struct tee_param param;
1538@@ -289,103 +287,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
1539 return shm;
1540 }
1541
1542-static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
1543- struct optee_msg_arg *arg,
1544- struct optee_call_ctx *call_ctx)
1545-{
1546- phys_addr_t pa;
1547- struct tee_shm *shm;
1548- size_t sz;
1549- size_t n;
1550-
1551- arg->ret_origin = TEEC_ORIGIN_COMMS;
1552-
1553- if (!arg->num_params ||
1554- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
1555- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1556- return;
1557- }
1558-
1559- for (n = 1; n < arg->num_params; n++) {
1560- if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
1561- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1562- return;
1563- }
1564- }
1565-
1566- sz = arg->params[0].u.value.b;
1567- switch (arg->params[0].u.value.a) {
1568- case OPTEE_RPC_SHM_TYPE_APPL:
1569- shm = cmd_alloc_suppl(ctx, sz);
1570- break;
1571- case OPTEE_RPC_SHM_TYPE_KERNEL:
1572- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
1573- break;
1574- default:
1575- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1576- return;
1577- }
1578-
1579- if (IS_ERR(shm)) {
1580- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
1581- return;
1582- }
1583-
1584- if (tee_shm_get_pa(shm, 0, &pa)) {
1585- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1586- goto bad;
1587- }
1588-
1589- sz = tee_shm_get_size(shm);
1590-
1591- if (tee_shm_is_registered(shm)) {
1592- struct page **pages;
1593- u64 *pages_list;
1594- size_t page_num;
1595-
1596- pages = tee_shm_get_pages(shm, &page_num);
1597- if (!pages || !page_num) {
1598- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
1599- goto bad;
1600- }
1601-
1602- pages_list = optee_allocate_pages_list(page_num);
1603- if (!pages_list) {
1604- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
1605- goto bad;
1606- }
1607-
1608- call_ctx->pages_list = pages_list;
1609- call_ctx->num_entries = page_num;
1610-
1611- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
1612- OPTEE_MSG_ATTR_NONCONTIG;
1613- /*
1614- * In the least bits of u.tmem.buf_ptr we store buffer offset
1615- * from 4k page, as described in OP-TEE ABI.
1616- */
1617- arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
1618- (tee_shm_get_page_offset(shm) &
1619- (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
1620- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
1621- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
1622-
1623- optee_fill_pages_list(pages_list, pages, page_num,
1624- tee_shm_get_page_offset(shm));
1625- } else {
1626- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
1627- arg->params[0].u.tmem.buf_ptr = pa;
1628- arg->params[0].u.tmem.size = sz;
1629- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
1630- }
1631-
1632- arg->ret = TEEC_SUCCESS;
1633- return;
1634-bad:
1635- tee_shm_free(shm);
1636-}
1637-
1638-static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
1639+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
1640 {
1641 struct tee_param param;
1642
1643@@ -410,60 +312,9 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
1644 optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
1645 }
1646
1647-static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
1648- struct optee_msg_arg *arg)
1649-{
1650- struct tee_shm *shm;
1651-
1652- arg->ret_origin = TEEC_ORIGIN_COMMS;
1653-
1654- if (arg->num_params != 1 ||
1655- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
1656- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1657- return;
1658- }
1659-
1660- shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
1661- switch (arg->params[0].u.value.a) {
1662- case OPTEE_RPC_SHM_TYPE_APPL:
1663- cmd_free_suppl(ctx, shm);
1664- break;
1665- case OPTEE_RPC_SHM_TYPE_KERNEL:
1666- tee_shm_free(shm);
1667- break;
1668- default:
1669- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
1670- }
1671- arg->ret = TEEC_SUCCESS;
1672-}
1673-
1674-static void free_pages_list(struct optee_call_ctx *call_ctx)
1675-{
1676- if (call_ctx->pages_list) {
1677- optee_free_pages_list(call_ctx->pages_list,
1678- call_ctx->num_entries);
1679- call_ctx->pages_list = NULL;
1680- call_ctx->num_entries = 0;
1681- }
1682-}
1683-
1684-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
1685-{
1686- free_pages_list(call_ctx);
1687-}
1688-
1689-static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
1690- struct tee_shm *shm,
1691- struct optee_call_ctx *call_ctx)
1692+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
1693+ struct optee_msg_arg *arg)
1694 {
1695- struct optee_msg_arg *arg;
1696-
1697- arg = tee_shm_get_va(shm, 0);
1698- if (IS_ERR(arg)) {
1699- pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
1700- return;
1701- }
1702-
1703 switch (arg->cmd) {
1704 case OPTEE_RPC_CMD_GET_TIME:
1705 handle_rpc_func_cmd_get_time(arg);
1706@@ -474,13 +325,6 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
1707 case OPTEE_RPC_CMD_SUSPEND:
1708 handle_rpc_func_cmd_wait(arg);
1709 break;
1710- case OPTEE_RPC_CMD_SHM_ALLOC:
1711- free_pages_list(call_ctx);
1712- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
1713- break;
1714- case OPTEE_RPC_CMD_SHM_FREE:
1715- handle_rpc_func_cmd_shm_free(ctx, arg);
1716- break;
1717 case OPTEE_RPC_CMD_I2C_TRANSFER:
1718 handle_rpc_func_cmd_i2c_transfer(ctx, arg);
1719 break;
1720@@ -489,58 +333,4 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
1721 }
1722 }
1723
1724-/**
1725- * optee_handle_rpc() - handle RPC from secure world
1726- * @ctx: context doing the RPC
1727- * @param: value of registers for the RPC
1728- * @call_ctx: call context. Preserved during one OP-TEE invocation
1729- *
1730- * Result of RPC is written back into @param.
1731- */
1732-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
1733- struct optee_call_ctx *call_ctx)
1734-{
1735- struct tee_device *teedev = ctx->teedev;
1736- struct optee *optee = tee_get_drvdata(teedev);
1737- struct tee_shm *shm;
1738- phys_addr_t pa;
1739-
1740- switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
1741- case OPTEE_SMC_RPC_FUNC_ALLOC:
1742- shm = tee_shm_alloc(ctx, param->a1,
1743- TEE_SHM_MAPPED | TEE_SHM_PRIV);
1744- if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
1745- reg_pair_from_64(&param->a1, &param->a2, pa);
1746- reg_pair_from_64(&param->a4, &param->a5,
1747- (unsigned long)shm);
1748- } else {
1749- param->a1 = 0;
1750- param->a2 = 0;
1751- param->a4 = 0;
1752- param->a5 = 0;
1753- }
1754- break;
1755- case OPTEE_SMC_RPC_FUNC_FREE:
1756- shm = reg_pair_to_ptr(param->a1, param->a2);
1757- tee_shm_free(shm);
1758- break;
1759- case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
1760- /*
1761- * A foreign interrupt was raised while secure world was
1762- * executing, since they are handled in Linux a dummy RPC is
1763- * performed to let Linux take the interrupt through the normal
1764- * vector.
1765- */
1766- break;
1767- case OPTEE_SMC_RPC_FUNC_CMD:
1768- shm = reg_pair_to_ptr(param->a1, param->a2);
1769- handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
1770- break;
1771- default:
1772- pr_warn("Unknown RPC func 0x%x\n",
1773- (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
1774- break;
1775- }
1776
1777- param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
1778-}
1779diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
1780deleted file mode 100644
1781index 28109d991c4b..000000000000
1782--- a/drivers/tee/optee/shm_pool.h
1783+++ /dev/null
1784@@ -1,14 +0,0 @@
1785-/* SPDX-License-Identifier: GPL-2.0-only */
1786-/*
1787- * Copyright (c) 2015, Linaro Limited
1788- * Copyright (c) 2016, EPAM Systems
1789- */
1790-
1791-#ifndef SHM_POOL_H
1792-#define SHM_POOL_H
1793-
1794-#include <linux/tee_drv.h>
1795-
1796-struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
1797-
1798-#endif
1799diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
1800new file mode 100644
1801index 000000000000..9a787fb4f5e5
1802--- /dev/null
1803+++ b/drivers/tee/optee/smc_abi.c
1804@@ -0,0 +1,1361 @@
1805+// SPDX-License-Identifier: GPL-2.0-only
1806+/*
1807+ * Copyright (c) 2015-2021, Linaro Limited
1808+ * Copyright (c) 2016, EPAM Systems
1809+ */
1810+
1811+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1812+
1813+#include <linux/arm-smccc.h>
1814+#include <linux/errno.h>
1815+#include <linux/io.h>
1816+#include <linux/sched.h>
1817+#include <linux/module.h>
1818+#include <linux/of.h>
1819+#include <linux/of_platform.h>
1820+#include <linux/platform_device.h>
1821+#include <linux/slab.h>
1822+#include <linux/string.h>
1823+#include <linux/tee_drv.h>
1824+#include <linux/types.h>
1825+#include <linux/workqueue.h>
1826+#include "optee_private.h"
1827+#include "optee_smc.h"
1828+#include "optee_rpc_cmd.h"
1829+#define CREATE_TRACE_POINTS
1830+#include "optee_trace.h"
1831+
1832+/*
1833+ * This file implements the SMC ABI used when communicating with secure world
1834+ * OP-TEE OS via raw SMCs.
1835+ * This file is divided into the following sections:
1836+ * 1. Convert between struct tee_param and struct optee_msg_param
1837+ * 2. Low level support functions to register shared memory in secure world
1838+ * 3. Dynamic shared memory pool based on alloc_pages()
1839+ * 4. Do a normal scheduled call into secure world
1840+ * 5. Driver initialization.
1841+ */
1842+
1843+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
1844+
1845+/*
1846+ * 1. Convert between struct tee_param and struct optee_msg_param
1847+ *
1848+ * optee_from_msg_param() and optee_to_msg_param() are the main
1849+ * functions.
1850+ */
1851+
1852+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
1853+ const struct optee_msg_param *mp)
1854+{
1855+ struct tee_shm *shm;
1856+ phys_addr_t pa;
1857+ int rc;
1858+
1859+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
1860+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
1861+ p->u.memref.size = mp->u.tmem.size;
1862+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
1863+ if (!shm) {
1864+ p->u.memref.shm_offs = 0;
1865+ p->u.memref.shm = NULL;
1866+ return 0;
1867+ }
1868+
1869+ rc = tee_shm_get_pa(shm, 0, &pa);
1870+ if (rc)
1871+ return rc;
1872+
1873+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
1874+ p->u.memref.shm = shm;
1875+
1876+ /* Check that the memref is covered by the shm object */
1877+ if (p->u.memref.size) {
1878+ size_t o = p->u.memref.shm_offs +
1879+ p->u.memref.size - 1;
1880+
1881+ rc = tee_shm_get_pa(shm, o, NULL);
1882+ if (rc)
1883+ return rc;
1884+ }
1885+
1886+ return 0;
1887+}
1888+
1889+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
1890+ const struct optee_msg_param *mp)
1891+{
1892+ struct tee_shm *shm;
1893+
1894+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
1895+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
1896+ p->u.memref.size = mp->u.rmem.size;
1897+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
1898+
1899+ if (shm) {
1900+ p->u.memref.shm_offs = mp->u.rmem.offs;
1901+ p->u.memref.shm = shm;
1902+ } else {
1903+ p->u.memref.shm_offs = 0;
1904+ p->u.memref.shm = NULL;
1905+ }
1906+}
1907+
1908+/**
1909+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
1910+ * struct tee_param
1911+ * @optee: main service struct
1912+ * @params: subsystem internal parameter representation
1913+ * @num_params: number of elements in the parameter arrays
1914+ * @msg_params: OPTEE_MSG parameters
1915+ * Returns 0 on success or <0 on failure
1916+ */
1917+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
1918+ size_t num_params,
1919+ const struct optee_msg_param *msg_params)
1920+{
1921+ int rc;
1922+ size_t n;
1923+
1924+ for (n = 0; n < num_params; n++) {
1925+ struct tee_param *p = params + n;
1926+ const struct optee_msg_param *mp = msg_params + n;
1927+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
1928+
1929+ switch (attr) {
1930+ case OPTEE_MSG_ATTR_TYPE_NONE:
1931+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
1932+ memset(&p->u, 0, sizeof(p->u));
1933+ break;
1934+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
1935+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
1936+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
1937+ optee_from_msg_param_value(p, attr, mp);
1938+ break;
1939+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
1940+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
1941+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
1942+ rc = from_msg_param_tmp_mem(p, attr, mp);
1943+ if (rc)
1944+ return rc;
1945+ break;
1946+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
1947+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
1948+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
1949+ from_msg_param_reg_mem(p, attr, mp);
1950+ break;
1951+
1952+ default:
1953+ return -EINVAL;
1954+ }
1955+ }
1956+ return 0;
1957+}
1958+
1959+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
1960+ const struct tee_param *p)
1961+{
1962+ int rc;
1963+ phys_addr_t pa;
1964+
1965+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
1966+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
1967+
1968+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
1969+ mp->u.tmem.size = p->u.memref.size;
1970+
1971+ if (!p->u.memref.shm) {
1972+ mp->u.tmem.buf_ptr = 0;
1973+ return 0;
1974+ }
1975+
1976+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
1977+ if (rc)
1978+ return rc;
1979+
1980+ mp->u.tmem.buf_ptr = pa;
1981+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
1982+ OPTEE_MSG_ATTR_CACHE_SHIFT;
1983+
1984+ return 0;
1985+}
1986+
1987+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
1988+ const struct tee_param *p)
1989+{
1990+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
1991+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
1992+
1993+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
1994+ mp->u.rmem.size = p->u.memref.size;
1995+ mp->u.rmem.offs = p->u.memref.shm_offs;
1996+ return 0;
1997+}
1998+
1999+/**
2000+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
2001+ * @optee: main service struct
2002+ * @msg_params: OPTEE_MSG parameters
2003+ * @num_params: number of elements in the parameter arrays
2004+ * @params: subsystem internal parameter representation
2005+ * Returns 0 on success or <0 on failure
2006+ */
2007+static int optee_to_msg_param(struct optee *optee,
2008+ struct optee_msg_param *msg_params,
2009+ size_t num_params, const struct tee_param *params)
2010+{
2011+ int rc;
2012+ size_t n;
2013+
2014+ for (n = 0; n < num_params; n++) {
2015+ const struct tee_param *p = params + n;
2016+ struct optee_msg_param *mp = msg_params + n;
2017+
2018+ switch (p->attr) {
2019+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
2020+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
2021+ memset(&mp->u, 0, sizeof(mp->u));
2022+ break;
2023+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
2024+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
2025+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
2026+ optee_to_msg_param_value(mp, p);
2027+ break;
2028+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
2029+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
2030+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
2031+ if (tee_shm_is_registered(p->u.memref.shm))
2032+ rc = to_msg_param_reg_mem(mp, p);
2033+ else
2034+ rc = to_msg_param_tmp_mem(mp, p);
2035+ if (rc)
2036+ return rc;
2037+ break;
2038+ default:
2039+ return -EINVAL;
2040+ }
2041+ }
2042+ return 0;
2043+}
2044+
2045+/*
2046+ * 2. Low level support functions to register shared memory in secure world
2047+ *
2048+ * Functions to enable/disable shared memory caching in secure world, that
2049+ * is, lazy freeing of previously allocated shared memory. Freeing is
2050+ * performed when a request has been completed.
2051+ *
2052+ * Functions to register and unregister shared memory both for normal
2053+ * clients and for tee-supplicant.
2054+ */
2055+
2056+/**
2057+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
2058+ * in OP-TEE
2059+ * @optee: main service struct
2060+ */
2061+static void optee_enable_shm_cache(struct optee *optee)
2062+{
2063+ struct optee_call_waiter w;
2064+
2065+ /* We need to retry until secure world isn't busy. */
2066+ optee_cq_wait_init(&optee->call_queue, &w);
2067+ while (true) {
2068+ struct arm_smccc_res res;
2069+
2070+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
2071+ 0, 0, 0, 0, 0, 0, 0, &res);
2072+ if (res.a0 == OPTEE_SMC_RETURN_OK)
2073+ break;
2074+ optee_cq_wait_for_completion(&optee->call_queue, &w);
2075+ }
2076+ optee_cq_wait_final(&optee->call_queue, &w);
2077+}
2078+
2079+/**
2080+ * __optee_disable_shm_cache() - Disables caching of some shared memory
2081+ * allocation in OP-TEE
2082+ * @optee: main service struct
2083+ * @is_mapped: true if the cached shared memory addresses were mapped by this
2084+ * kernel, are safe to dereference, and should be freed
2085+ */
2086+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
2087+{
2088+ struct optee_call_waiter w;
2089+
2090+ /* We need to retry until secure world isn't busy. */
2091+ optee_cq_wait_init(&optee->call_queue, &w);
2092+ while (true) {
2093+ union {
2094+ struct arm_smccc_res smccc;
2095+ struct optee_smc_disable_shm_cache_result result;
2096+ } res;
2097+
2098+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
2099+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
2100+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
2101+ break; /* All shm's freed */
2102+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
2103+ struct tee_shm *shm;
2104+
2105+ /*
2106+ * Shared memory references that were not mapped by
2107+ * this kernel must be ignored to prevent a crash.
2108+ */
2109+ if (!is_mapped)
2110+ continue;
2111+
2112+ shm = reg_pair_to_ptr(res.result.shm_upper32,
2113+ res.result.shm_lower32);
2114+ tee_shm_free(shm);
2115+ } else {
2116+ optee_cq_wait_for_completion(&optee->call_queue, &w);
2117+ }
2118+ }
2119+ optee_cq_wait_final(&optee->call_queue, &w);
2120+}
2121+
2122+/**
2123+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
2124+ * allocations in OP-TEE
2125+ * @optee: main service struct
2126+ */
2127+static void optee_disable_shm_cache(struct optee *optee)
2128+{
2129+ return __optee_disable_shm_cache(optee, true);
2130+}
2131+
2132+/**
2133+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
2134+ * allocations in OP-TEE which are not
2135+ * currently mapped
2136+ * @optee: main service struct
2137+ */
2138+static void optee_disable_unmapped_shm_cache(struct optee *optee)
2139+{
2140+ return __optee_disable_shm_cache(optee, false);
2141+}
2142+
2143+#define PAGELIST_ENTRIES_PER_PAGE \
2144+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
2145+
2146+/*
2147+ * The final entry in each pagelist page is a pointer to the next
2148+ * pagelist page.
2149+ */
2150+static size_t get_pages_list_size(size_t num_entries)
2151+{
2152+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
2153+
2154+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
2155+}
2156+
2157+static u64 *optee_allocate_pages_list(size_t num_entries)
2158+{
2159+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
2160+}
2161+
2162+static void optee_free_pages_list(void *list, size_t num_entries)
2163+{
2164+ free_pages_exact(list, get_pages_list_size(num_entries));
2165+}
2166+
2167+/**
2168+ * optee_fill_pages_list() - write list of user pages to given shared
2169+ * buffer.
2170+ *
2171+ * @dst: page-aligned buffer where list of pages will be stored
2172+ * @pages: array of pages that represents shared buffer
2173+ * @num_pages: number of entries in @pages
2174+ * @page_offset: offset of user buffer from page start
2175+ *
2176+ * @dst should be big enough to hold list of user page addresses and
2177+ * links to the next pages of buffer
2178+ */
2179+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
2180+ size_t page_offset)
2181+{
2182+ int n = 0;
2183+ phys_addr_t optee_page;
2184+ /*
2185+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
2186+ * for details.
2187+ */
2188+ struct {
2189+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
2190+ u64 next_page_data;
2191+ } *pages_data;
2192+
2193+ /*
2194+	 * Currently OP-TEE uses a 4k page size and it does not look
2195+	 * like this will change in the future. On the other hand, there are
2196+	 * no known ARM architectures with a page size < 4k.
2197+	 * Thus the build assert below looks redundant. But the following
2198+	 * code heavily relies on this assumption, so it is better to be
2199+	 * safe than sorry.
2200+ */
2201+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
2202+
2203+ pages_data = (void *)dst;
2204+ /*
2205+	 * If a Linux page is bigger than 4k, and the user buffer offset is
2206+	 * larger than 4k/8k/12k/etc, this will skip the first 4k pages,
2207+	 * because they contain no data of value to OP-TEE.
2208+ */
2209+ optee_page = page_to_phys(*pages) +
2210+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
2211+
2212+ while (true) {
2213+ pages_data->pages_list[n++] = optee_page;
2214+
2215+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
2216+ pages_data->next_page_data =
2217+ virt_to_phys(pages_data + 1);
2218+ pages_data++;
2219+ n = 0;
2220+ }
2221+
2222+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
2223+ if (!(optee_page & ~PAGE_MASK)) {
2224+ if (!--num_pages)
2225+ break;
2226+ pages++;
2227+ optee_page = page_to_phys(*pages);
2228+ }
2229+ }
2230+}
2231+
2232+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
2233+ struct page **pages, size_t num_pages,
2234+ unsigned long start)
2235+{
2236+ struct optee *optee = tee_get_drvdata(ctx->teedev);
2237+ struct optee_msg_arg *msg_arg;
2238+ struct tee_shm *shm_arg;
2239+ u64 *pages_list;
2240+ int rc;
2241+
2242+ if (!num_pages)
2243+ return -EINVAL;
2244+
2245+ rc = optee_check_mem_type(start, num_pages);
2246+ if (rc)
2247+ return rc;
2248+
2249+ pages_list = optee_allocate_pages_list(num_pages);
2250+ if (!pages_list)
2251+ return -ENOMEM;
2252+
2253+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
2254+ if (IS_ERR(shm_arg)) {
2255+ rc = PTR_ERR(shm_arg);
2256+ goto out;
2257+ }
2258+
2259+ optee_fill_pages_list(pages_list, pages, num_pages,
2260+ tee_shm_get_page_offset(shm));
2261+
2262+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
2263+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
2264+ OPTEE_MSG_ATTR_NONCONTIG;
2265+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
2266+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
2267+ /*
2268+	 * In the low bits of msg_arg->params->u.tmem.buf_ptr we store the
2269+	 * buffer offset from the 4k page, as described in the OP-TEE ABI.
2270+ */
2271+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
2272+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
2273+
2274+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
2275+ msg_arg->ret != TEEC_SUCCESS)
2276+ rc = -EINVAL;
2277+
2278+ tee_shm_free(shm_arg);
2279+out:
2280+ optee_free_pages_list(pages_list, num_pages);
2281+ return rc;
2282+}
2283+
2284+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
2285+{
2286+ struct optee *optee = tee_get_drvdata(ctx->teedev);
2287+ struct optee_msg_arg *msg_arg;
2288+ struct tee_shm *shm_arg;
2289+ int rc = 0;
2290+
2291+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
2292+ if (IS_ERR(shm_arg))
2293+ return PTR_ERR(shm_arg);
2294+
2295+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
2296+
2297+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
2298+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
2299+
2300+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
2301+ msg_arg->ret != TEEC_SUCCESS)
2302+ rc = -EINVAL;
2303+ tee_shm_free(shm_arg);
2304+ return rc;
2305+}
2306+
2307+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
2308+ struct page **pages, size_t num_pages,
2309+ unsigned long start)
2310+{
2311+ /*
2312+ * We don't want to register supplicant memory in OP-TEE.
2313+	 * Instead, information about it will be passed in the RPC code.
2314+ */
2315+ return optee_check_mem_type(start, num_pages);
2316+}
2317+
2318+static int optee_shm_unregister_supp(struct tee_context *ctx,
2319+ struct tee_shm *shm)
2320+{
2321+ return 0;
2322+}
2323+
2324+/*
2325+ * 3. Dynamic shared memory pool based on alloc_pages()
2326+ *
2327+ * Implements an OP-TEE specific shared memory pool which is used
2328+ * when dynamic shared memory is supported by secure world.
2329+ *
2330+ * The main function is optee_shm_pool_alloc_pages().
2331+ */
2332+
2333+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
2334+ struct tee_shm *shm, size_t size)
2335+{
2336+ /*
2337+ * Shared memory private to the OP-TEE driver doesn't need
2338+ * to be registered with OP-TEE.
2339+ */
2340+ if (shm->flags & TEE_SHM_PRIV)
2341+ return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
2342+
2343+ return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
2344+}
2345+
2346+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
2347+ struct tee_shm *shm)
2348+{
2349+ if (!(shm->flags & TEE_SHM_PRIV))
2350+ optee_shm_unregister(shm->ctx, shm);
2351+
2352+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
2353+ shm->kaddr = NULL;
2354+}
2355+
2356+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
2357+{
2358+ kfree(poolm);
2359+}
2360+
2361+static const struct tee_shm_pool_mgr_ops pool_ops = {
2362+ .alloc = pool_op_alloc,
2363+ .free = pool_op_free,
2364+ .destroy_poolmgr = pool_op_destroy_poolmgr,
2365+};
2366+
2367+/**
2368+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
2369+ *
2370+ * This pool is used when OP-TEE supports dynamic SHM. In this case
2371+ * command buffers and such are allocated from the kernel's own memory.
2372+ */
2373+static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
2374+{
2375+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
2376+
2377+ if (!mgr)
2378+ return ERR_PTR(-ENOMEM);
2379+
2380+ mgr->ops = &pool_ops;
2381+
2382+ return mgr;
2383+}
2384+
2385+/*
2386+ * 4. Do a normal scheduled call into secure world
2387+ *
2388+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
2389+ * call into secure world. During this call secure world may request help
2390+ * from normal world using RPCs, Remote Procedure Calls. This includes
2391+ * delivery of non-secure interrupts to, for instance, allow rescheduling of
2392+ * the current task.
2393+ */
2394+
2395+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
2396+ struct optee_msg_arg *arg)
2397+{
2398+ struct tee_shm *shm;
2399+
2400+ arg->ret_origin = TEEC_ORIGIN_COMMS;
2401+
2402+ if (arg->num_params != 1 ||
2403+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
2404+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2405+ return;
2406+ }
2407+
2408+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
2409+ switch (arg->params[0].u.value.a) {
2410+ case OPTEE_RPC_SHM_TYPE_APPL:
2411+ optee_rpc_cmd_free_suppl(ctx, shm);
2412+ break;
2413+ case OPTEE_RPC_SHM_TYPE_KERNEL:
2414+ tee_shm_free(shm);
2415+ break;
2416+ default:
2417+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2418+ }
2419+ arg->ret = TEEC_SUCCESS;
2420+}
2421+
2422+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
2423+ struct optee_msg_arg *arg,
2424+ struct optee_call_ctx *call_ctx)
2425+{
2426+ phys_addr_t pa;
2427+ struct tee_shm *shm;
2428+ size_t sz;
2429+ size_t n;
2430+
2431+ arg->ret_origin = TEEC_ORIGIN_COMMS;
2432+
2433+ if (!arg->num_params ||
2434+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
2435+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2436+ return;
2437+ }
2438+
2439+ for (n = 1; n < arg->num_params; n++) {
2440+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
2441+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2442+ return;
2443+ }
2444+ }
2445+
2446+ sz = arg->params[0].u.value.b;
2447+ switch (arg->params[0].u.value.a) {
2448+ case OPTEE_RPC_SHM_TYPE_APPL:
2449+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
2450+ break;
2451+ case OPTEE_RPC_SHM_TYPE_KERNEL:
2452+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
2453+ break;
2454+ default:
2455+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2456+ return;
2457+ }
2458+
2459+ if (IS_ERR(shm)) {
2460+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
2461+ return;
2462+ }
2463+
2464+ if (tee_shm_get_pa(shm, 0, &pa)) {
2465+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
2466+ goto bad;
2467+ }
2468+
2469+ sz = tee_shm_get_size(shm);
2470+
2471+ if (tee_shm_is_registered(shm)) {
2472+ struct page **pages;
2473+ u64 *pages_list;
2474+ size_t page_num;
2475+
2476+ pages = tee_shm_get_pages(shm, &page_num);
2477+ if (!pages || !page_num) {
2478+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
2479+ goto bad;
2480+ }
2481+
2482+ pages_list = optee_allocate_pages_list(page_num);
2483+ if (!pages_list) {
2484+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
2485+ goto bad;
2486+ }
2487+
2488+ call_ctx->pages_list = pages_list;
2489+ call_ctx->num_entries = page_num;
2490+
2491+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
2492+ OPTEE_MSG_ATTR_NONCONTIG;
2493+ /*
2494+		 * In the low bits of u.tmem.buf_ptr we store the buffer offset
2495+		 * from the 4k page, as described in the OP-TEE ABI.
2496+ */
2497+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
2498+ (tee_shm_get_page_offset(shm) &
2499+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
2500+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
2501+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
2502+
2503+ optee_fill_pages_list(pages_list, pages, page_num,
2504+ tee_shm_get_page_offset(shm));
2505+ } else {
2506+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
2507+ arg->params[0].u.tmem.buf_ptr = pa;
2508+ arg->params[0].u.tmem.size = sz;
2509+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
2510+ }
2511+
2512+ arg->ret = TEEC_SUCCESS;
2513+ return;
2514+bad:
2515+ tee_shm_free(shm);
2516+}
2517+
2518+static void free_pages_list(struct optee_call_ctx *call_ctx)
2519+{
2520+ if (call_ctx->pages_list) {
2521+ optee_free_pages_list(call_ctx->pages_list,
2522+ call_ctx->num_entries);
2523+ call_ctx->pages_list = NULL;
2524+ call_ctx->num_entries = 0;
2525+ }
2526+}
2527+
2528+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
2529+{
2530+ free_pages_list(call_ctx);
2531+}
2532+
2533+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
2534+ struct tee_shm *shm,
2535+ struct optee_call_ctx *call_ctx)
2536+{
2537+ struct optee_msg_arg *arg;
2538+
2539+ arg = tee_shm_get_va(shm, 0);
2540+ if (IS_ERR(arg)) {
2541+ pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
2542+ return;
2543+ }
2544+
2545+ switch (arg->cmd) {
2546+ case OPTEE_RPC_CMD_SHM_ALLOC:
2547+ free_pages_list(call_ctx);
2548+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
2549+ break;
2550+ case OPTEE_RPC_CMD_SHM_FREE:
2551+ handle_rpc_func_cmd_shm_free(ctx, arg);
2552+ break;
2553+ default:
2554+ optee_rpc_cmd(ctx, optee, arg);
2555+ }
2556+}
2557+
2558+/**
2559+ * optee_handle_rpc() - handle RPC from secure world
2560+ * @ctx: context doing the RPC
2561+ * @param: value of registers for the RPC
2562+ * @call_ctx: call context. Preserved during one OP-TEE invocation
2563+ *
2564+ * Result of RPC is written back into @param.
2565+ */
2566+static void optee_handle_rpc(struct tee_context *ctx,
2567+ struct optee_rpc_param *param,
2568+ struct optee_call_ctx *call_ctx)
2569+{
2570+ struct tee_device *teedev = ctx->teedev;
2571+ struct optee *optee = tee_get_drvdata(teedev);
2572+ struct tee_shm *shm;
2573+ phys_addr_t pa;
2574+
2575+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
2576+ case OPTEE_SMC_RPC_FUNC_ALLOC:
2577+ shm = tee_shm_alloc(ctx, param->a1,
2578+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
2579+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
2580+ reg_pair_from_64(&param->a1, &param->a2, pa);
2581+ reg_pair_from_64(&param->a4, &param->a5,
2582+ (unsigned long)shm);
2583+ } else {
2584+ param->a1 = 0;
2585+ param->a2 = 0;
2586+ param->a4 = 0;
2587+ param->a5 = 0;
2588+ }
2589+ break;
2590+ case OPTEE_SMC_RPC_FUNC_FREE:
2591+ shm = reg_pair_to_ptr(param->a1, param->a2);
2592+ tee_shm_free(shm);
2593+ break;
2594+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
2595+ /*
2596+		 * A foreign interrupt was raised while secure world was
2597+		 * executing. Since such interrupts are handled in Linux, a dummy
2598+		 * RPC is performed to let Linux take the interrupt through the
2599+		 * normal vector.
2600+ */
2601+ break;
2602+ case OPTEE_SMC_RPC_FUNC_CMD:
2603+ shm = reg_pair_to_ptr(param->a1, param->a2);
2604+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
2605+ break;
2606+ default:
2607+ pr_warn("Unknown RPC func 0x%x\n",
2608+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
2609+ break;
2610+ }
2611+
2612+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
2613+}
2614+
2615+/**
2616+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
2617+ * @ctx: calling context
2618+ * @arg: shared memory holding the message to pass to secure world
2619+ *
2620+ * Does an SMC to OP-TEE in secure world and handles any resulting
2621+ * Remote Procedure Calls (RPC) from OP-TEE.
2622+ *
2623+ * Returns return code from secure world, 0 is OK
2624+ */
2625+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
2626+ struct tee_shm *arg)
2627+{
2628+ struct optee *optee = tee_get_drvdata(ctx->teedev);
2629+ struct optee_call_waiter w;
2630+ struct optee_rpc_param param = { };
2631+ struct optee_call_ctx call_ctx = { };
2632+ phys_addr_t parg;
2633+ int rc;
2634+
2635+ rc = tee_shm_get_pa(arg, 0, &parg);
2636+ if (rc)
2637+ return rc;
2638+
2639+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
2640+ reg_pair_from_64(&param.a1, &param.a2, parg);
2641+ /* Initialize waiter */
2642+ optee_cq_wait_init(&optee->call_queue, &w);
2643+ while (true) {
2644+ struct arm_smccc_res res;
2645+
2646+ trace_optee_invoke_fn_begin(&param);
2647+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
2648+ param.a4, param.a5, param.a6, param.a7,
2649+ &res);
2650+ trace_optee_invoke_fn_end(&param, &res);
2651+
2652+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
2653+ /*
2654+ * Out of threads in secure world, wait for a thread
2655+			 * to become available.
2656+ */
2657+ optee_cq_wait_for_completion(&optee->call_queue, &w);
2658+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
2659+ cond_resched();
2660+ param.a0 = res.a0;
2661+ param.a1 = res.a1;
2662+ param.a2 = res.a2;
2663+ param.a3 = res.a3;
2664+ optee_handle_rpc(ctx, &param, &call_ctx);
2665+ } else {
2666+ rc = res.a0;
2667+ break;
2668+ }
2669+ }
2670+
2671+ optee_rpc_finalize_call(&call_ctx);
2672+ /*
2673+	 * We're done with our thread in secure world. If there are any
2674+	 * thread waiters, wake up one.
2675+ */
2676+ optee_cq_wait_final(&optee->call_queue, &w);
2677+
2678+ return rc;
2679+}
2680+
2681+/*
2682+ * 5. Driver initialization
2683+ *
2684+ * During driver initialization, secure world is probed to find out which
2685+ * features it supports so the driver can be initialized with a matching
2686+ * configuration. This involves for instance support for dynamic shared
2687+ * memory instead of a static memory carveout.
2688+ */
2689+
2690+static void optee_get_version(struct tee_device *teedev,
2691+ struct tee_ioctl_version_data *vers)
2692+{
2693+ struct tee_ioctl_version_data v = {
2694+ .impl_id = TEE_IMPL_ID_OPTEE,
2695+ .impl_caps = TEE_OPTEE_CAP_TZ,
2696+ .gen_caps = TEE_GEN_CAP_GP,
2697+ };
2698+ struct optee *optee = tee_get_drvdata(teedev);
2699+
2700+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
2701+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
2702+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
2703+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
2704+ *vers = v;
2705+}
2706+
2707+static int optee_smc_open(struct tee_context *ctx)
2708+{
2709+ struct optee *optee = tee_get_drvdata(ctx->teedev);
2710+ u32 sec_caps = optee->smc.sec_caps;
2711+
2712+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
2713+}
2714+
2715+static const struct tee_driver_ops optee_clnt_ops = {
2716+ .get_version = optee_get_version,
2717+ .open = optee_smc_open,
2718+ .release = optee_release,
2719+ .open_session = optee_open_session,
2720+ .close_session = optee_close_session,
2721+ .invoke_func = optee_invoke_func,
2722+ .cancel_req = optee_cancel_req,
2723+ .shm_register = optee_shm_register,
2724+ .shm_unregister = optee_shm_unregister,
2725+};
2726+
2727+static const struct tee_desc optee_clnt_desc = {
2728+ .name = DRIVER_NAME "-clnt",
2729+ .ops = &optee_clnt_ops,
2730+ .owner = THIS_MODULE,
2731+};
2732+
2733+static const struct tee_driver_ops optee_supp_ops = {
2734+ .get_version = optee_get_version,
2735+ .open = optee_smc_open,
2736+ .release = optee_release_supp,
2737+ .supp_recv = optee_supp_recv,
2738+ .supp_send = optee_supp_send,
2739+ .shm_register = optee_shm_register_supp,
2740+ .shm_unregister = optee_shm_unregister_supp,
2741+};
2742+
2743+static const struct tee_desc optee_supp_desc = {
2744+ .name = DRIVER_NAME "-supp",
2745+ .ops = &optee_supp_ops,
2746+ .owner = THIS_MODULE,
2747+ .flags = TEE_DESC_PRIVILEGED,
2748+};
2749+
2750+static const struct optee_ops optee_ops = {
2751+ .do_call_with_arg = optee_smc_do_call_with_arg,
2752+ .to_msg_param = optee_to_msg_param,
2753+ .from_msg_param = optee_from_msg_param,
2754+};
2755+
2756+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
2757+{
2758+ struct arm_smccc_res res;
2759+
2760+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
2761+
2762+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
2763+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
2764+ return true;
2765+ return false;
2766+}
2767+
2768+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
2769+{
2770+ union {
2771+ struct arm_smccc_res smccc;
2772+ struct optee_smc_call_get_os_revision_result result;
2773+ } res = {
2774+ .result = {
2775+ .build_id = 0
2776+ }
2777+ };
2778+
2779+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
2780+ &res.smccc);
2781+
2782+ if (res.result.build_id)
2783+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
2784+ res.result.minor, res.result.build_id);
2785+ else
2786+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
2787+}
2788+
2789+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
2790+{
2791+ union {
2792+ struct arm_smccc_res smccc;
2793+ struct optee_smc_calls_revision_result result;
2794+ } res;
2795+
2796+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
2797+
2798+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
2799+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
2800+ return true;
2801+ return false;
2802+}
2803+
2804+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
2805+ u32 *sec_caps)
2806+{
2807+ union {
2808+ struct arm_smccc_res smccc;
2809+ struct optee_smc_exchange_capabilities_result result;
2810+ } res;
2811+ u32 a1 = 0;
2812+
2813+ /*
2814+	 * TODO This isn't enough to tell if it's a UP system (from the kernel
2815+	 * point of view) or not; is_smp() returns the information
2816+ * needed, but can't be called directly from here.
2817+ */
2818+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
2819+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
2820+
2821+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
2822+ &res.smccc);
2823+
2824+ if (res.result.status != OPTEE_SMC_RETURN_OK)
2825+ return false;
2826+
2827+ *sec_caps = res.result.capabilities;
2828+ return true;
2829+}
2830+
2831+static struct tee_shm_pool *optee_config_dyn_shm(void)
2832+{
2833+ struct tee_shm_pool_mgr *priv_mgr;
2834+ struct tee_shm_pool_mgr *dmabuf_mgr;
2835+ void *rc;
2836+
2837+ rc = optee_shm_pool_alloc_pages();
2838+ if (IS_ERR(rc))
2839+ return rc;
2840+ priv_mgr = rc;
2841+
2842+ rc = optee_shm_pool_alloc_pages();
2843+ if (IS_ERR(rc)) {
2844+ tee_shm_pool_mgr_destroy(priv_mgr);
2845+ return rc;
2846+ }
2847+ dmabuf_mgr = rc;
2848+
2849+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
2850+ if (IS_ERR(rc)) {
2851+ tee_shm_pool_mgr_destroy(priv_mgr);
2852+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
2853+ }
2854+
2855+ return rc;
2856+}
2857+
2858+static struct tee_shm_pool *
2859+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
2860+{
2861+ union {
2862+ struct arm_smccc_res smccc;
2863+ struct optee_smc_get_shm_config_result result;
2864+ } res;
2865+ unsigned long vaddr;
2866+ phys_addr_t paddr;
2867+ size_t size;
2868+ phys_addr_t begin;
2869+ phys_addr_t end;
2870+ void *va;
2871+ struct tee_shm_pool_mgr *priv_mgr;
2872+ struct tee_shm_pool_mgr *dmabuf_mgr;
2873+ void *rc;
2874+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
2875+
2876+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
2877+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
2878+ pr_err("static shm service not available\n");
2879+ return ERR_PTR(-ENOENT);
2880+ }
2881+
2882+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
2883+ pr_err("only normal cached shared memory supported\n");
2884+ return ERR_PTR(-EINVAL);
2885+ }
2886+
2887+ begin = roundup(res.result.start, PAGE_SIZE);
2888+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
2889+ paddr = begin;
2890+ size = end - begin;
2891+
2892+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
2893+ pr_err("too small shared memory area\n");
2894+ return ERR_PTR(-EINVAL);
2895+ }
2896+
2897+ va = memremap(paddr, size, MEMREMAP_WB);
2898+ if (!va) {
2899+		pr_err("shared memory memremap failed\n");
2900+ return ERR_PTR(-EINVAL);
2901+ }
2902+ vaddr = (unsigned long)va;
2903+
2904+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
2905+ 3 /* 8 bytes aligned */);
2906+ if (IS_ERR(rc))
2907+ goto err_memunmap;
2908+ priv_mgr = rc;
2909+
2910+ vaddr += sz;
2911+ paddr += sz;
2912+ size -= sz;
2913+
2914+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
2915+ if (IS_ERR(rc))
2916+ goto err_free_priv_mgr;
2917+ dmabuf_mgr = rc;
2918+
2919+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
2920+ if (IS_ERR(rc))
2921+ goto err_free_dmabuf_mgr;
2922+
2923+ *memremaped_shm = va;
2924+
2925+ return rc;
2926+
2927+err_free_dmabuf_mgr:
2928+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
2929+err_free_priv_mgr:
2930+ tee_shm_pool_mgr_destroy(priv_mgr);
2931+err_memunmap:
2932+ memunmap(va);
2933+ return rc;
2934+}
2935+
2936+/* Simple wrapper functions to be able to use a function pointer */
2937+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
2938+ unsigned long a2, unsigned long a3,
2939+ unsigned long a4, unsigned long a5,
2940+ unsigned long a6, unsigned long a7,
2941+ struct arm_smccc_res *res)
2942+{
2943+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
2944+}
2945+
2946+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
2947+ unsigned long a2, unsigned long a3,
2948+ unsigned long a4, unsigned long a5,
2949+ unsigned long a6, unsigned long a7,
2950+ struct arm_smccc_res *res)
2951+{
2952+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
2953+}
2954+
2955+static optee_invoke_fn *get_invoke_func(struct device *dev)
2956+{
2957+ const char *method;
2958+
2959+ pr_info("probing for conduit method.\n");
2960+
2961+ if (device_property_read_string(dev, "method", &method)) {
2962+ pr_warn("missing \"method\" property\n");
2963+ return ERR_PTR(-ENXIO);
2964+ }
2965+
2966+ if (!strcmp("hvc", method))
2967+ return optee_smccc_hvc;
2968+ else if (!strcmp("smc", method))
2969+ return optee_smccc_smc;
2970+
2971+ pr_warn("invalid \"method\" property: %s\n", method);
2972+ return ERR_PTR(-EINVAL);
2973+}
2974+
2975+/* optee_smc_remove - Device Removal Routine
2976+ * @pdev: platform device information struct
2977+ *
2978+ * optee_smc_remove is called by the platform subsystem to alert the driver
2979+ * that it should release the device.
2980+ */
2981+static int optee_smc_remove(struct platform_device *pdev)
2982+{
2983+ struct optee *optee = platform_get_drvdata(pdev);
2984+
2985+ /*
2986+ * Ask OP-TEE to free all cached shared memory objects to decrease
2987+ * reference counters and also avoid wild pointers in secure world
2988+ * into the old shared memory range.
2989+ */
2990+ optee_disable_shm_cache(optee);
2991+
2992+ optee_remove_common(optee);
2993+
2994+ if (optee->smc.memremaped_shm)
2995+ memunmap(optee->smc.memremaped_shm);
2996+
2997+ kfree(optee);
2998+
2999+ return 0;
3000+}
3001+
3002+/* optee_shutdown - Device Shutdown Routine
3003+ * @pdev: platform device information struct
3004+ *
3005+ * optee_shutdown is called by the platform subsystem to alert
3006+ * the driver that a shutdown, reboot, or kexec is happening and
3007+ * the device must be disabled.
3008+ */
3009+static void optee_shutdown(struct platform_device *pdev)
3010+{
3011+ optee_disable_shm_cache(platform_get_drvdata(pdev));
3012+}
3013+
3014+static int optee_probe(struct platform_device *pdev)
3015+{
3016+ optee_invoke_fn *invoke_fn;
3017+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
3018+ struct optee *optee = NULL;
3019+ void *memremaped_shm = NULL;
3020+ struct tee_device *teedev;
3021+ u32 sec_caps;
3022+ int rc;
3023+
3024+ invoke_fn = get_invoke_func(&pdev->dev);
3025+ if (IS_ERR(invoke_fn))
3026+ return PTR_ERR(invoke_fn);
3027+
3028+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
3029+ pr_warn("api uid mismatch\n");
3030+ return -EINVAL;
3031+ }
3032+
3033+ optee_msg_get_os_revision(invoke_fn);
3034+
3035+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
3036+ pr_warn("api revision mismatch\n");
3037+ return -EINVAL;
3038+ }
3039+
3040+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
3041+ pr_warn("capabilities mismatch\n");
3042+ return -EINVAL;
3043+ }
3044+
3045+ /*
3046+ * Try to use dynamic shared memory if possible
3047+ */
3048+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
3049+ pool = optee_config_dyn_shm();
3050+
3051+ /*
3052+	 * If dynamic shared memory is not available or has failed, try the static one
3053+ */
3054+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
3055+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
3056+
3057+ if (IS_ERR(pool))
3058+ return PTR_ERR(pool);
3059+
3060+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
3061+ if (!optee) {
3062+ rc = -ENOMEM;
3063+ goto err;
3064+ }
3065+
3066+ optee->ops = &optee_ops;
3067+ optee->smc.invoke_fn = invoke_fn;
3068+ optee->smc.sec_caps = sec_caps;
3069+
3070+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
3071+ if (IS_ERR(teedev)) {
3072+ rc = PTR_ERR(teedev);
3073+ goto err;
3074+ }
3075+ optee->teedev = teedev;
3076+
3077+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
3078+ if (IS_ERR(teedev)) {
3079+ rc = PTR_ERR(teedev);
3080+ goto err;
3081+ }
3082+ optee->supp_teedev = teedev;
3083+
3084+ rc = tee_device_register(optee->teedev);
3085+ if (rc)
3086+ goto err;
3087+
3088+ rc = tee_device_register(optee->supp_teedev);
3089+ if (rc)
3090+ goto err;
3091+
3092+ mutex_init(&optee->call_queue.mutex);
3093+ INIT_LIST_HEAD(&optee->call_queue.waiters);
3094+ optee_wait_queue_init(&optee->wait_queue);
3095+ optee_supp_init(&optee->supp);
3096+ optee->smc.memremaped_shm = memremaped_shm;
3097+ optee->pool = pool;
3098+
3099+ /*
3100+ * Ensure that there are no pre-existing shm objects before enabling
3101+ * the shm cache so that there's no chance of receiving an invalid
3102+ * address during shutdown. This could occur, for example, if we're
3103+ * kexec booting from an older kernel that did not properly cleanup the
3104+	 * kexec booting from an older kernel that did not properly clean up the
3105+ */
3106+ optee_disable_unmapped_shm_cache(optee);
3107+
3108+ optee_enable_shm_cache(optee);
3109+
3110+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
3111+ pr_info("dynamic shared memory is enabled\n");
3112+
3113+ platform_set_drvdata(pdev, optee);
3114+
3115+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
3116+ if (rc) {
3117+ optee_smc_remove(pdev);
3118+ return rc;
3119+ }
3120+
3121+ pr_info("initialized driver\n");
3122+ return 0;
3123+err:
3124+ if (optee) {
3125+ /*
3126+ * tee_device_unregister() is safe to call even if the
3127+		 * devices haven't been registered with
3128+ * tee_device_register() yet.
3129+ */
3130+ tee_device_unregister(optee->supp_teedev);
3131+ tee_device_unregister(optee->teedev);
3132+ kfree(optee);
3133+ }
3134+ if (pool)
3135+ tee_shm_pool_free(pool);
3136+ if (memremaped_shm)
3137+ memunmap(memremaped_shm);
3138+ return rc;
3139+}
3140+
3141+static const struct of_device_id optee_dt_match[] = {
3142+ { .compatible = "linaro,optee-tz" },
3143+ {},
3144+};
3145+MODULE_DEVICE_TABLE(of, optee_dt_match);
3146+
3147+static struct platform_driver optee_driver = {
3148+ .probe = optee_probe,
3149+ .remove = optee_smc_remove,
3150+ .shutdown = optee_shutdown,
3151+ .driver = {
3152+ .name = "optee",
3153+ .of_match_table = optee_dt_match,
3154+ },
3155+};
3156+
3157+int optee_smc_abi_register(void)
3158+{
3159+ return platform_driver_register(&optee_driver);
3160+}
3161+
3162+void optee_smc_abi_unregister(void)
3163+{
3164+ platform_driver_unregister(&optee_driver);
3165+}
3166--
31672.34.1
3168