From 8db3072225e852c2ef8bcc6c95f5b22f05104f35 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com>
Date: Mon, 18 Nov 2013 20:46:48 -0800
Subject: [PATCH 27/40] ANDROID: trusty: Backport of trusty driver

This adds the Trusty driver from android-trusty-5.10.

Original commits:
b60d55f33484 ANDROID: trusty-ipc: Allow registering multiple handles
629a4d3318cc ANDROID: trusty: Support setting trusty_shared_mem_id_t
94a36a1374e7 ANDROID: trusty-log: Don't copy Trusty logs to linux kernel log
efc21cced8af ANDROID: trusty-log: rework buffer allocation
8cb1a07ca814 ANDROID: trusty-ipc: Fix lock protection of shared_handles
52cdd137fae0 ANDROID: trusty-log: support poll()
24c3649dceb9 ANDROID: trusty-irq: enqueue work in trusty_irq_cpu_up
05a05bdd921e ANDROID: trusty: Add config TRUSTY_CRASH_IS_PANIC
b5fbdba2ec72 ANDROID: trusty-ipc: Fix crash when running out of txbuffers
46da5b95605e ANDROID: trusty: Allow TRUSTY_LEND of buffers
2ebfb16645af ANDROID: trusty-virtio: remove unnecessary include of dma-mapping.h
bf9d994a65a2 ANDROID: trusty-log: Complement logging sink with unthrottled virtual file
d5cb51d0365d ANDROID: trusty-log: Refactor logging state to support concurrent sinks
b421a5ad3eb3 ANDROID: trusty-log: Sanitize u32 overflow of the log ring buffer write index
58e9681c57af ANDROID: trusty-log: On trusty panic, unthrottle sink to the kernel log
ba12be0f203a ANDROID: trusty-log: Update trusty log buffer size to hold a complete Trusty crash logs
a8a3f83e52b6 ANDROID: trusty_qemu_defconfig: Enable dma-buf and ion system heaps
988b52b392a1 ANDROID: trusty: Support setting FF-A Tag
f544e96489aa ANDROID: Add trusty_qemu_defconfig
8a9b09317f29 ANDROID: trusty-ipc: Switch from memfd to dma_buf
5460418ec9a4 ANDROID: trusty-irq: document new way of specifying IPIs
da3c30b943c2 ANDROID: trusty-irq: specify IPIs in new way
5b5bb7f74856 ANDROID: trusty: Add trusty-test driver
e80d87f422fd ANDROID: trusty: Add trusty-ipc driver
03c248cbf693 ANDROID: trusty: Add trusty-virtio driver
1047661edb97 ANDROID: trusty: Add trusty-log driver
18fd5c59b423 ANDROID: trusty: Add trusty-irq driver
479c39a683f8 ANDROID: trusty: Add trusty-core driver

Upstream-Status: Backport
Change-Id: I91f71b891a1091383a298e7fb2f9030382a19ca5
Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com>
Signed-off-by: Rupinderjit Singh <rupinderjit.singh@arm.com>
---
 .../devicetree/bindings/trusty/trusty-irq.txt |   67 +
 .../devicetree/bindings/trusty/trusty-smc.txt |    6 +
 arch/arm/configs/trusty_qemu_defconfig        |  291 +++
 .../configs/trusty_qemu_defconfig.fragment    |   26 +
 drivers/Kconfig                               |    2 +
 drivers/Makefile                              |    1 +
 drivers/trusty/Kconfig                        |  116 +
 drivers/trusty/Makefile                       |   14 +
 drivers/trusty/trusty-ipc.c                   | 2256 +++++++++++++++++
 drivers/trusty/trusty-irq.c                   |  645 +++++
 drivers/trusty/trusty-log.c                   |  830 ++++++
 drivers/trusty/trusty-log.h                   |   28 +
 drivers/trusty/trusty-mem.c                   |  139 +
 drivers/trusty/trusty-smc-arm.S               |   41 +
 drivers/trusty/trusty-smc-arm64.S             |   35 +
 drivers/trusty/trusty-smc.h                   |   26 +
 drivers/trusty/trusty-test.c                  |  440 ++++
 drivers/trusty/trusty-test.h                  |   13 +
 drivers/trusty/trusty-virtio.c                |  840 ++++++
 drivers/trusty/trusty.c                       |  981 +++++++
 include/linux/trusty/arm_ffa.h                |  590 +++++
 include/linux/trusty/sm_err.h                 |   28 +
 include/linux/trusty/smcall.h                 |  124 +
 include/linux/trusty/trusty.h                 |  131 +
 include/linux/trusty/trusty_ipc.h             |   89 +
 include/uapi/linux/trusty/ipc.h               |   65 +
 include/uapi/linux/virtio_ids.h               |    1 +
 27 files changed, 7825 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/trusty/trusty-irq.txt
 create mode 100644 Documentation/devicetree/bindings/trusty/trusty-smc.txt
 create mode 100644 arch/arm/configs/trusty_qemu_defconfig
 create mode 100644 arch/arm64/configs/trusty_qemu_defconfig.fragment
 create mode 100644 drivers/trusty/Kconfig
 create mode 100644 drivers/trusty/Makefile
 create mode 100644 drivers/trusty/trusty-ipc.c
 create mode 100644 drivers/trusty/trusty-irq.c
 create mode 100644 drivers/trusty/trusty-log.c
 create mode 100644 drivers/trusty/trusty-log.h
 create mode 100644 drivers/trusty/trusty-mem.c
 create mode 100644 drivers/trusty/trusty-smc-arm.S
 create mode 100644 drivers/trusty/trusty-smc-arm64.S
 create mode 100644 drivers/trusty/trusty-smc.h
 create mode 100644 drivers/trusty/trusty-test.c
 create mode 100644 drivers/trusty/trusty-test.h
 create mode 100644 drivers/trusty/trusty-virtio.c
 create mode 100644 drivers/trusty/trusty.c
 create mode 100644 include/linux/trusty/arm_ffa.h
 create mode 100644 include/linux/trusty/sm_err.h
 create mode 100644 include/linux/trusty/smcall.h
 create mode 100644 include/linux/trusty/trusty.h
 create mode 100644 include/linux/trusty/trusty_ipc.h
 create mode 100644 include/uapi/linux/trusty/ipc.h

diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
new file mode 100644
index 000000000000..cbb545ad452b
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
@@ -0,0 +1,67 @@
+Trusty irq interface
+
+Trusty requires non-secure irqs to be forwarded to the secure OS.
+
+Required properties:
+- compatible: "android,trusty-irq-v1"
+
+Optional properties:
+
+- interrupt-templates: is an optional property that works together
+  with "interrupt-ranges" to specify secure side to kernel IRQs mapping.
+
+  It is a list of entries, each one of which defines a group of interrupts
+  having common properties, and has the following format:
+    < phandle irq_id_pos [templ_data]>
+      phandle    - phandle of interrupt controller this template is for
+      irq_id_pos - the position of irq id in interrupt specifier array
+                   for interrupt controller referenced by phandle.
+      templ_data - is an array of u32 values (could be empty) in the same
+                   format as interrupt specifier for interrupt controller
+                   referenced by phandle but with omitted irq id field.
+
+- interrupt-ranges: list of entries that specifies secure side to kernel
+  IRQs mapping.
+
+  Each entry in the "interrupt-ranges" list has the following format:
+    <beg end templ_idx>
+      beg       - first entry in this range
+      end       - last entry in this range
+      templ_idx - index of entry in "interrupt-templates" property
+                  that must be used as a template for all interrupts
+                  in this range
+
+- ipi-range: optional mapping of a linear range of trusty IRQs to a linear range
+  of IPIs (inter-processor interrupts). This has the following format:
+    <beg end ipi_base>
+      beg      - first trusty IRQ number that is an IPI
+      end      - last trusty IRQ number that is an IPI
+      ipi_base - IPI number of 'beg'
+
+Example:
+{
+	gic: interrupt-controller@50041000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		...
+	};
+	...
+	trusty {
+		compatible = "android,trusty-smc-v1";
+		ranges;
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		irq {
+			compatible = "android,trusty-irq-v1";
+			interrupt-templates = <&gic 1 GIC_PPI 0>,
+					      <&gic 1 GIC_SPI 0>;
+			interrupt-ranges = <16 31 0>,
+					   <32 223 1>;
+			ipi-range = <8 15 8>;
+		};
+	}
+}
+
+Must be a child of the node that provides the trusty std/fast call interface.
diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
new file mode 100644
index 000000000000..1b39ad317c67
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
@@ -0,0 +1,6 @@
+Trusty smc interface
+
+Trusty is running in secure mode on the same (arm) cpu(s) as the current os.
+
+Required properties:
+- compatible: "android,trusty-smc-v1"
diff --git a/arch/arm/configs/trusty_qemu_defconfig b/arch/arm/configs/trusty_qemu_defconfig
new file mode 100644
index 000000000000..46ad9504c23d
--- /dev/null
+++ b/arch/arm/configs/trusty_qemu_defconfig
@@ -0,0 +1,291 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_VIRT=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_SMP=y
+CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_KSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_ESP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_TRUSTY=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_FTRACE is not set
+CONFIG_DMA_API_DEBUG=y
+CONFIG_ATOMIC64_SELFTEST=y
diff --git a/arch/arm64/configs/trusty_qemu_defconfig.fragment b/arch/arm64/configs/trusty_qemu_defconfig.fragment
new file mode 100644
index 000000000000..166eef1797fd
--- /dev/null
+++ b/arch/arm64/configs/trusty_qemu_defconfig.fragment
@@ -0,0 +1,26 @@
+# From goldfish
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PMEM=y
+# From Trusty
+CONFIG_TRUSTY=y
+CONFIG_DMA_API_DEBUG=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_SEMIHOSTING_EXIT=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_REBOOT_EMULATOR_EXIT=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+# securefb test uses ION
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+# LTO slows down build times considerably. Disable it.
+# CONFIG_LTO_CLANG is not set
+# CONFIG_LTO_CLANG_FULL is not set
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0d399ddaa185..e346c35f42b4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -85,6 +85,8 @@ source "drivers/hwmon/Kconfig"
 
 source "drivers/thermal/Kconfig"
 
+source "drivers/trusty/Kconfig"
+
 source "drivers/watchdog/Kconfig"
 
 source "drivers/ssb/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a110338c860c..d3165b877622 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -117,6 +117,7 @@ obj-$(CONFIG_W1)		+= w1/
 obj-y				+= power/
 obj-$(CONFIG_HWMON)		+= hwmon/
 obj-$(CONFIG_THERMAL)		+= thermal/
+obj-$(CONFIG_TRUSTY)		+= trusty/
 obj-$(CONFIG_WATCHDOG)		+= watchdog/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig
new file mode 100644
index 000000000000..fcde7f097acf
--- /dev/null
+++ b/drivers/trusty/Kconfig
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Trusty driver
+#
+
+menu "Trusty driver"
+
+config TRUSTY
+	tristate "Trusty core driver"
+	depends on ARM || ARM64
+	help
+	  Trusty is a secure OS that provides a Trusted Execution Environment
+	  (TEE) for Android. Trusty runs on the same processor as Linux but is
+	  isolated from the rest of the system by both hardware and software.
+
+	  This option enables the core part of the Linux kernel driver for
+	  Trusty. This doesn't do much by itself; you'll need to enable some of
+	  the sub-modules too.
+
+	  If you build this as a module, it will be called trusty-core.
+
+if TRUSTY
+
+config TRUSTY_IRQ
+	tristate "Trusty IRQ support"
+	default y
+	help
+	  Enable forwarding of IRQs from Linux to Trusty. This module retrieves
+	  from Trusty a list of IRQs that Trusty uses, and it registers handlers
+	  for them which notify Trusty that the IRQ has been received.
+
+	  If you build this as a module, it will be called trusty-irq.
+
+	  Usually this is needed for Trusty to work, so say 'y' or 'm'.
+
+config TRUSTY_LOG
+	tristate "Trusty log support"
+	default y
+	help
+	  Print log messages generated by the secure OS to the Linux kernel log.
+
+	  While this module is loaded, messages are retrieved and printed after
+	  each call into Trusty, and also during Linux kernel panics.
+
+	  If you build this as a module, it will be called trusty-log.
+
+config TRUSTY_TEST
+	tristate "Trusty stdcall test"
+	default y
+	help
+	  Allow running tests of the Trusty stdcall interface. Running these
+	  tests is initiated by userspace writing to a sysfs file.
+
+	  This depends on having a test service running on the Trusty side.
+
+	  If you build this as a module, it will be called trusty-test.
+
+config TRUSTY_VIRTIO
+	tristate "Trusty virtio support"
+	select VIRTIO
+	default y
+	help
+	  Enable the Trusty virtio driver, which is responsible for management
+	  and interaction with virtio devices exposed by Trusty. This driver
+	  requests the virtio device descriptors from Trusty, then parses them
+	  and adds the corresponding virtio devices.
+
+	  If you build this as a module, it will be called trusty-virtio.
+
+config TRUSTY_VIRTIO_IPC
+	tristate "Trusty Virtio IPC driver"
+	depends on TRUSTY_VIRTIO
+	default y
+	help
+	  Enable support for communicating with Trusty services.
+
+	  If you build this as a module, it will be called trusty-ipc.
+
+config TRUSTY_DMA_BUF_FFA_TAG
+	bool "Availability of trusty_dma_buf_get_ffa_tag"
+	default n
+	help
+	  Whether trusty_dma_buf_get_ffa_tag is provided on this platform.
+	  Providing this function will allow the platform to select what tag
+	  should be passed to the SPM when attempting to transfer the buffer
+	  to secure world. The value passed here is implementation defined and
+	  may depend on your SPM.
+
+	  If set to N, a default implementation which returns 0 will be used.
+
+config TRUSTY_DMA_BUF_SHARED_MEM_ID
+	bool "Availability of trusty_dma_buf_get_shared_mem_id"
+	default n
+	help
+	  Whether trusty_dma_buf_get_shared_mem_id is provided on this platform.
+	  Providing this function allows the platform to manage memory
+	  transaction life cycle of DMA bufs independently of Trusty IPC driver.
+	  The latter can query trusty_shared_mem_id_t value allocated for a
+	  given DMA buf using trusty_dma_buf_get_shared_mem_id interface.
+
+	  If set to N, a default implementation which does not allocate any IDs
+	  will be used.
+
+config TRUSTY_CRASH_IS_PANIC
+	bool "When trusty panics, then panic the kernel"
+	help
+	  This option will treat Trusty panics as fatal. This is useful if
+	  your system cannot recover from Trusty panic/halt and you require
+	  the system to reboot to recover.
+
+	  If N, it will continue to run the kernel, but trusty operations will
+	  return errors.
+
+endif # TRUSTY
+
+endmenu
diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile
new file mode 100644
index 000000000000..2cf1cfccf97b
--- /dev/null
+++ b/drivers/trusty/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for trusty components
+#
+
+obj-$(CONFIG_TRUSTY)		+= trusty-core.o
+trusty-core-objs		+= trusty.o trusty-mem.o
+trusty-core-$(CONFIG_ARM)	+= trusty-smc-arm.o
+trusty-core-$(CONFIG_ARM64)	+= trusty-smc-arm64.o
+obj-$(CONFIG_TRUSTY_IRQ)	+= trusty-irq.o
+obj-$(CONFIG_TRUSTY_LOG)	+= trusty-log.o
+obj-$(CONFIG_TRUSTY_TEST)	+= trusty-test.o
+obj-$(CONFIG_TRUSTY_VIRTIO)	+= trusty-virtio.o
+obj-$(CONFIG_TRUSTY_VIRTIO_IPC)	+= trusty-ipc.o
diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c
new file mode 100644
index 000000000000..82d6ddeb41f4
--- /dev/null
+++ b/drivers/trusty/trusty-ipc.c
@@ -0,0 +1,2256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/aio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/dma-buf.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+#include <linux/file.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/trusty_ipc.h>
+
+#include <uapi/linux/trusty/ipc.h>
+
+#define MAX_DEVICES		4
+
+#define REPLY_TIMEOUT		5000
+#define TXBUF_TIMEOUT		15000
+
+#define MAX_SRV_NAME_LEN	256
+#define MAX_DEV_NAME_LEN	32
+
+#define DEFAULT_MSG_BUF_SIZE	PAGE_SIZE
+#define DEFAULT_MSG_BUF_ALIGN	PAGE_SIZE
+
+#define TIPC_CTRL_ADDR		53
+#define TIPC_ANY_ADDR		0xFFFFFFFF
+
+#define TIPC_MIN_LOCAL_ADDR	1024
+
+#ifdef CONFIG_COMPAT
+#define TIPC_IOC32_CONNECT	_IOW(TIPC_IOC_MAGIC, 0x80, compat_uptr_t)
+#endif
+
+struct tipc_virtio_dev;
+
+struct tipc_dev_config {
+	u32 msg_buf_max_size;
+	u32 msg_buf_alignment;
+	char dev_name[MAX_DEV_NAME_LEN];
+} __packed;
+
+struct tipc_shm {
+	trusty_shared_mem_id_t obj_id;
+	u64 size;
+	u64 tag;
+};
+
+struct tipc_msg_hdr {
+	u32 src;
+	u32 dst;
+	u16 reserved;
+	u16 shm_cnt;
+	u16 len;
+	u16 flags;
+	u8 data[];
+} __packed;
+
+enum tipc_ctrl_msg_types {
+	TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
+	TIPC_CTRL_MSGTYPE_GO_OFFLINE,
+	TIPC_CTRL_MSGTYPE_CONN_REQ,
+	TIPC_CTRL_MSGTYPE_CONN_RSP,
+	TIPC_CTRL_MSGTYPE_DISC_REQ,
+	TIPC_CTRL_MSGTYPE_RELEASE,
+};
+
+struct tipc_ctrl_msg {
+	u32 type;
+	u32 body_len;
+	u8 body[];
+} __packed;
+
+struct tipc_conn_req_body {
+	char name[MAX_SRV_NAME_LEN];
+} __packed;
+
+struct tipc_conn_rsp_body {
+	u32 target;
+	u32 status;
+	u32 remote;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+} __packed;
+
+struct tipc_disc_req_body {
+	u32 target;
+} __packed;
+
+struct tipc_release_body {
+	trusty_shared_mem_id_t id;
+} __packed;
+
+struct tipc_cdev_node {
+	struct cdev cdev;
+	struct device *dev;
+	unsigned int minor;
+};
+
+enum tipc_device_state {
+	VDS_OFFLINE = 0,
+	VDS_ONLINE,
+	VDS_DEAD,
+};
+
+struct tipc_virtio_dev {
+	struct kref refcount;
+	struct mutex lock; /* protects access to this device */
+	struct virtio_device *vdev;
+	struct virtqueue *rxvq;
+	struct virtqueue *txvq;
+	unsigned int msg_buf_cnt;
+	unsigned int msg_buf_max_cnt;
+	size_t msg_buf_max_sz;
+	unsigned int free_msg_buf_cnt;
+	struct list_head free_buf_list;
+	wait_queue_head_t sendq;
+	struct idr addr_idr;
+	enum tipc_device_state state;
+	struct tipc_cdev_node cdev_node;
+	/* protects shared_handles, dev lock never acquired while held */
+	struct mutex shared_handles_lock;
+	struct rb_root shared_handles;
+	char cdev_name[MAX_DEV_NAME_LEN];
+};
+
+enum tipc_chan_state {
+	TIPC_DISCONNECTED = 0,
+	TIPC_CONNECTING,
+	TIPC_CONNECTED,
+	TIPC_STALE,
+};
+
+struct tipc_chan {
+	struct mutex lock; /* protects channel state */
+	struct kref refcount;
+	enum tipc_chan_state state;
+	struct tipc_virtio_dev *vds;
+	const struct tipc_chan_ops *ops;
+	void *ops_arg;
+	u32 remote;
+	u32 local;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+	char srv_name[MAX_SRV_NAME_LEN];
+};
+
+struct tipc_shared_handle {
+	struct rb_node node;
+	struct tipc_shm tipc;
+	struct tipc_virtio_dev *vds;
+	struct dma_buf *dma_buf;
+	bool shared;
+	/*
+	 * Following fields are only used if dma_buf does not own a
+	 * trusty_shared_mem_id_t.
+	 */
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+};
+
+static struct class *tipc_class;
+static unsigned int tipc_major;
+
+static struct virtio_device *default_vdev;
+
+static DEFINE_IDR(tipc_devices);
+static DEFINE_MUTEX(tipc_devices_lock);
+
+static int _match_any(int id, void *p, void *data)
+{
+	return id;
+}
+
+static int _match_data(int id, void *p, void *data)
+{
+	return (p == data);
+}
+
+static void *_alloc_shareable_mem(size_t sz, gfp_t gfp)
+{
+	return alloc_pages_exact(sz, gfp);
+}
+
+static void _free_shareable_mem(size_t sz, void *va)
+{
+	free_pages_exact(va, sz);
+}
+
+static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds,
+					      bool share_write)
+{
+	int ret;
+	struct tipc_msg_buf *mb;
+	size_t sz = vds->msg_buf_max_sz;
+	pgprot_t pgprot = share_write ? PAGE_KERNEL : PAGE_KERNEL_RO;
+
+	/* allocate tracking structure */
+	mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL);
+	if (!mb)
+		return NULL;
+
+	/* allocate buffer that can be shared with secure world */
+	mb->buf_va = _alloc_shareable_mem(sz, GFP_KERNEL);
+	if (!mb->buf_va)
+		goto err_alloc;
+
+	sg_init_one(&mb->sg, mb->buf_va, sz);
+	ret = trusty_share_memory_compat(vds->vdev->dev.parent->parent,
+					 &mb->buf_id, &mb->sg, 1, pgprot);
+	if (ret) {
+		dev_err(&vds->vdev->dev, "trusty_share_memory failed: %d\n",
+			ret);
+		goto err_share;
+	}
+
+	mb->buf_sz = sz;
+	mb->shm_cnt = 0;
+
+	return mb;
+
+err_share:
+	_free_shareable_mem(sz, mb->buf_va);
+err_alloc:
+	kfree(mb);
+	return NULL;
+}
+
+static void vds_free_msg_buf(struct tipc_virtio_dev *vds,
+			     struct tipc_msg_buf *mb)
+{
+	int ret;
+
+	ret = trusty_reclaim_memory(vds->vdev->dev.parent->parent, mb->buf_id,
+				    &mb->sg, 1);
+	if (WARN_ON(ret)) {
+		dev_err(&vds->vdev->dev,
+			"trusty_revoke_memory failed: %d txbuf %lld\n",
+			ret, mb->buf_id);
+
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+		_free_shareable_mem(mb->buf_sz, mb->buf_va);
+	}
+	kfree(mb);
+}
+
+static void vds_free_msg_buf_list(struct tipc_virtio_dev *vds,
+				  struct list_head *list)
+{
+	struct tipc_msg_buf *mb = NULL;
+
+	mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	while (mb) {
+		list_del(&mb->node);
+		vds_free_msg_buf(vds, mb);
+		mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	}
+}
+
+static inline void mb_reset(struct tipc_msg_buf *mb)
+{
+	mb->wpos = 0;
+	mb->rpos = 0;
+}
+
+static inline void mb_reset_read(struct tipc_msg_buf *mb)
+{
+	mb->rpos = 0;
+}
+
+static void _free_vds(struct kref *kref)
+{
+	struct tipc_virtio_dev *vds =
+		container_of(kref, struct tipc_virtio_dev, refcount);
+	/*
+	 * If this WARN triggers, we're leaking remote memory references.
+	 *
+	 * No need to lock shared_handles_lock. All references to this lock
+	 * should already be gone by this point, since we are freeing it in this
+	 * function.
+	 */
+	WARN_ON(!RB_EMPTY_ROOT(&vds->shared_handles));
+	kfree(vds);
+}
+
+static void _free_chan(struct kref *kref)
+{
+	struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount);
+
+	if (ch->ops && ch->ops->handle_release)
+		ch->ops->handle_release(ch->ops_arg);
+
+	kref_put(&ch->vds->refcount, _free_vds);
+	kfree(ch);
+}
+
+static bool _put_txbuf_locked(struct tipc_virtio_dev *vds,
+			      struct tipc_msg_buf *mb)
+{
+	list_add_tail(&mb->node, &vds->free_buf_list);
+	return vds->free_msg_buf_cnt++ == 0;
+}
+
+static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	if (vds->state != VDS_ONLINE)
+		return ERR_PTR(-ENODEV);
+
+	if (vds->free_msg_buf_cnt) {
+		/* take it out of free list */
+		mb = list_first_entry(&vds->free_buf_list,
+				      struct tipc_msg_buf, node);
+		list_del(&mb->node);
+		mb->shm_cnt = 0;
+		vds->free_msg_buf_cnt--;
+	} else {
+		if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt)
+			return ERR_PTR(-EAGAIN);
+
+		/* try to allocate it */
+		mb = vds_alloc_msg_buf(vds, false);
+		if (!mb)
+			return ERR_PTR(-ENOMEM);
+
+		vds->msg_buf_cnt++;
+	}
+	return mb;
+}
+
+static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	mutex_lock(&vds->lock);
+	mb = _get_txbuf_locked(vds);
+	mutex_unlock(&vds->lock);
+
+	return mb;
+}
+
+static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb)
+{
+	mutex_lock(&vds->lock);
+	_put_txbuf_locked(vds, mb);
+	wake_up_interruptible(&vds->sendq);
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds,
+					  long timeout)
+{
+	struct tipc_msg_buf *mb;
+
+	mb = _vds_get_txbuf(vds);
+
+	if ((PTR_ERR(mb) == -EAGAIN) && timeout) {
+		DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+		timeout = msecs_to_jiffies(timeout);
+		add_wait_queue(&vds->sendq, &wait);
+		for (;;) {
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+					     timeout);
+			if (!timeout) {
+				mb = ERR_PTR(-ETIMEDOUT);
+				break;
+			}
+
+			if (signal_pending(current)) {
+				mb = ERR_PTR(-ERESTARTSYS);
+				break;
+			}
+
+			mb = _vds_get_txbuf(vds);
+			if (PTR_ERR(mb) != -EAGAIN)
+				break;
+		}
+		remove_wait_queue(&vds->sendq, &wait);
+	}
+
+	if (IS_ERR(mb))
+		return mb;
+
+	if (WARN_ON(!mb))
+		return ERR_PTR(-EINVAL);
+
+	/* reset and reserve space for message header */
+	mb_reset(mb);
+	mb_put_data(mb, sizeof(struct tipc_msg_hdr));
+
+	return mb;
+}
+
+static int vds_queue_txbuf(struct tipc_virtio_dev *vds,
+			   struct tipc_msg_buf *mb)
+{
+	int err;
+	struct scatterlist sg;
+	bool need_notify = false;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		sg_init_one(&sg, mb, mb->wpos);
+		err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL);
+		need_notify = virtqueue_kick_prepare(vds->txvq);
+	} else {
+		err = -ENODEV;
+	}
+	mutex_unlock(&vds->lock);
+
+	if (need_notify)
+		virtqueue_notify(vds->txvq);
+
+	return err;
+}
+
+static int vds_add_channel(struct tipc_virtio_dev *vds,
+			   struct tipc_chan *chan)
+{
+	int ret;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		ret = idr_alloc(&vds->addr_idr, chan,
+				TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1,
+				GFP_KERNEL);
+		if (ret > 0) {
+			chan->local = ret;
+			kref_get(&chan->refcount);
+			ret = 0;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+	mutex_unlock(&vds->lock);
+
+	return ret;
+}
+
+static void vds_del_channel(struct tipc_virtio_dev *vds,
+			    struct tipc_chan *chan)
+{
+	mutex_lock(&vds->lock);
+	if (chan->local) {
+		idr_remove(&vds->addr_idr, chan->local);
+		chan->local = 0;
+		chan->remote = 0;
+		kref_put(&chan->refcount, _free_chan);
+	}
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds,
+					    u32 addr)
+{
+	int id;
+	struct tipc_chan *chan = NULL;
+
+	mutex_lock(&vds->lock);
+	if (addr == TIPC_ANY_ADDR) {
+		id = idr_for_each(&vds->addr_idr, _match_any, NULL);
+		if (id > 0)
+			chan = idr_find(&vds->addr_idr, id);
+	} else {
+		chan = idr_find(&vds->addr_idr, addr);
+	}
+	if (chan)
+		kref_get(&chan->refcount);
+	mutex_unlock(&vds->lock);
+
+	return chan;
+}
+
+static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds,
+					    const struct tipc_chan_ops *ops,
+					    void *ops_arg)
+{
+	int ret;
+	struct tipc_chan *chan = NULL;
+
+	if (!vds)
+		return ERR_PTR(-ENOENT);
+
+	if (!ops)
+		return ERR_PTR(-EINVAL);
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	kref_get(&vds->refcount);
+	chan->vds = vds;
+	chan->ops = ops;
+	chan->ops_arg = ops_arg;
+	mutex_init(&chan->lock);
+	kref_init(&chan->refcount);
+	chan->state = TIPC_DISCONNECTED;
+
+	ret = vds_add_channel(vds, chan);
+	if (ret) {
+		kfree(chan);
+		kref_put(&vds->refcount, _free_vds);
+		return ERR_PTR(ret);
+	}
+
+	return chan;
+}
+
+static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst)
+{
+	struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr));
+
+	hdr->src = src;
+	hdr->dst = dst;
+	hdr->len = mb_avail_data(mb);
+	hdr->flags = 0;
+	hdr->shm_cnt = mb->shm_cnt;
+	hdr->reserved = 0;
+}
+
+static int tipc_shared_handle_new(struct tipc_shared_handle **shared_handle,
+				  struct tipc_virtio_dev *vds)
+{
+	struct tipc_shared_handle *out = kzalloc(sizeof(*out), GFP_KERNEL);
+
+	if (!out)
+		return -ENOMEM;
+
+	out->vds = vds;
+	*shared_handle = out;
+
+	return 0;
+}
+
+static struct device *tipc_shared_handle_dev(struct tipc_shared_handle
+					     *shared_handle)
+{
+	return shared_handle->vds->vdev->dev.parent->parent;
+}
+
+static bool is_same_memory_region(struct tipc_shared_handle *h1,
+				  struct tipc_shared_handle *h2)
+{
+	return h1->tipc.obj_id == h2->tipc.obj_id &&
+		h1->tipc.size == h2->tipc.size &&
+		h1->tipc.tag == h2->tipc.tag &&
+		h1->dma_buf == h2->dma_buf &&
+		h1->shared == h2->shared;
+}
+
+static bool dma_buf_owns_shared_mem_id(struct tipc_shared_handle *h)
+{
+	/* h->shared is true only if dma_buf did not own a shared memory ID */
+	return !h->shared;
+}
+
+static void tipc_shared_handle_register(struct tipc_shared_handle
+					*new_handle)
+{
+	struct tipc_virtio_dev *vds = new_handle->vds;
+	struct rb_node **new;
+	struct rb_node *parent = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	new = &vds->shared_handles.rb_node;
+	while (*new) {
+		struct tipc_shared_handle *handle =
+			rb_entry(*new, struct tipc_shared_handle, node);
+		parent = *new;
+		/*
+		 * An obj_id can be registered multiple times if it's owned by a
+		 * dma_buf, because in this case we use the same obj_id across
+		 * multiple memory transfer operations.
+		 */
+		if (handle->tipc.obj_id == new_handle->tipc.obj_id) {
+			if (dma_buf_owns_shared_mem_id(new_handle)) {
+				WARN_ON(!is_same_memory_region(handle,
+							       new_handle));
+			} else {
+				WARN(1, "This handle is already registered");
+				goto already_registered;
+			}
+		}
+
+		if (handle->tipc.obj_id > new_handle->tipc.obj_id)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_handle->node, parent, new);
+	rb_insert_color(&new_handle->node, &vds->shared_handles);
+
+already_registered:
+	mutex_unlock(&vds->shared_handles_lock);
+}
+
+static struct tipc_shared_handle *tipc_shared_handle_take(struct tipc_virtio_dev
+							  *vds,
+							  trusty_shared_mem_id_t
+							  obj_id)
+{
+	struct rb_node *node;
+	struct tipc_shared_handle *out = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	node = vds->shared_handles.rb_node;
+	while (node) {
+		struct tipc_shared_handle *handle =
+			rb_entry(node, struct tipc_shared_handle, node);
+		if (obj_id == handle->tipc.obj_id) {
+			rb_erase(node, &vds->shared_handles);
+			out = handle;
+			break;
+		} else if (obj_id < handle->tipc.obj_id) {
+			node = node->rb_left;
+		} else {
+			node = node->rb_right;
+		}
+	}
+
+	mutex_unlock(&vds->shared_handles_lock);
+
+	return out;
+}
+
+static int tipc_shared_handle_drop(struct tipc_shared_handle *shared_handle)
+{
+	int ret;
+	struct tipc_virtio_dev *vds = shared_handle->vds;
+	struct device *dev = tipc_shared_handle_dev(shared_handle);
+
+	if (shared_handle->shared) {
+		/*
+		 * If this warning fires, it means this shared handle was still
+		 * in the set of active handles. This shouldn't happen (calling
+		 * code should ensure it is out of the tree) but this serves as
+		 * an extra check before it is released.
+		 *
+		 * However, the take itself should clean this incorrect state up
+		 * by removing the handle from the tree.
+		 *
+		 * This warning is only applicable when registering a handle
+		 * multiple times is not allowed, i.e. when dma_buf doesn't own
+		 * the handle.
+		 */
+		WARN_ON(tipc_shared_handle_take(vds,
+						shared_handle->tipc.obj_id));
+
+		ret = trusty_reclaim_memory(dev,
+					    shared_handle->tipc.obj_id,
+					    shared_handle->sgt->sgl,
+					    shared_handle->sgt->orig_nents);
+		if (ret) {
+			/*
+			 * We can't safely release this, it may still be in
+			 * use outside Linux.
+			 */
+			dev_warn(dev, "Failed to drop handle, leaking...\n");
+			return ret;
+		}
+	}
+
+	if (shared_handle->sgt)
+		dma_buf_unmap_attachment(shared_handle->attach,
+					 shared_handle->sgt, DMA_BIDIRECTIONAL);
+	if (shared_handle->attach)
+		dma_buf_detach(shared_handle->dma_buf, shared_handle->attach);
+	if (shared_handle->dma_buf)
+		dma_buf_put(shared_handle->dma_buf);
+
+	kfree(shared_handle);
+
+	return 0;
+}
+
+/*****************************************************************************/
+
+struct tipc_chan *tipc_create_channel(struct device *dev,
+				      const struct tipc_chan_ops *ops,
+				      void *ops_arg)
+{
+	struct virtio_device *vd;
+	struct tipc_chan *chan;
+	struct tipc_virtio_dev *vds;
+
+	mutex_lock(&tipc_devices_lock);
+	if (dev) {
+		vd = container_of(dev, struct virtio_device, dev);
+	} else {
+		vd = default_vdev;
+		if (!vd) {
+			mutex_unlock(&tipc_devices_lock);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+	vds = vd->priv;
+	kref_get(&vds->refcount);
+	mutex_unlock(&tipc_devices_lock);
+
+	chan = vds_create_channel(vds, ops, ops_arg);
+	kref_put(&vds->refcount, _free_vds);
+	return chan;
+}
+EXPORT_SYMBOL(tipc_create_channel);
+
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan)
+{
+	return vds_alloc_msg_buf(chan->vds, true);
+}
+EXPORT_SYMBOL(tipc_chan_get_rxbuf);
+
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_free_msg_buf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_rxbuf);
+
+struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan,
+						 long timeout)
+{
+	return vds_get_txbuf(chan->vds, timeout);
+}
+EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout);
+
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_put_txbuf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_txbuf);
+
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	int err;
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_CONNECTED:
+		fill_msg_hdr(mb, chan->local, chan->remote);
+		err = vds_queue_txbuf(chan->vds, mb);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		}
+		break;
+	case TIPC_DISCONNECTED:
+	case TIPC_CONNECTING:
+		err = -ENOTCONN;
+		break;
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+	}
+	mutex_unlock(&chan->lock);
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_queue_msg);
+
+
+int tipc_chan_connect(struct tipc_chan *chan, const char *name)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_conn_req_body *body;
+	struct tipc_msg_buf *txbuf;
+
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	/* reserve space for connection request control message */
+	msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+	body = (struct tipc_conn_req_body *)msg->body;
+
+	/* fill message */
+	msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ;
+	msg->body_len = sizeof(*body);
+
+	strncpy(body->name, name, sizeof(body->name));
+	body->name[sizeof(body->name)-1] = '\0';
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_DISCONNECTED:
+		/* save service name we are connecting to */
+		strcpy(chan->srv_name, body->name);
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		} else {
+			chan->state = TIPC_CONNECTING;
+			txbuf = NULL; /* prevents discarding buffer */
+		}
+		break;
+	case TIPC_CONNECTED:
+	case TIPC_CONNECTING:
+		/* check if we are trying to connect to the same service */
+		if (strcmp(chan->srv_name, body->name) == 0)
+			err = 0;
+		else
+			if (chan->state == TIPC_CONNECTING)
+				err = -EALREADY; /* in progress */
+			else
+				err = -EISCONN; /* already connected */
+		break;
+
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+		break;
+	}
+	mutex_unlock(&chan->lock);
+
+	if (txbuf)
+		tipc_chan_put_txbuf(chan, txbuf); /* discard it */
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_connect);
+
+int tipc_chan_shutdown(struct tipc_chan *chan)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_disc_req_body *body;
+	struct tipc_msg_buf *txbuf = NULL;
+
+	/* get tx buffer */
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	mutex_lock(&chan->lock);
+	if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) {
+		/* reserve space for disconnect request control message */
+		msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+		body = (struct tipc_disc_req_body *)msg->body;
+
+		msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ;
+		msg->body_len = sizeof(*body);
+		body->target = chan->remote;
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		}
+	} else {
+		err = -ENOTCONN;
+	}
+	chan->state = TIPC_STALE;
+	mutex_unlock(&chan->lock);
+
+	if (err) {
+		/* release buffer */
+		tipc_chan_put_txbuf(chan, txbuf);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_shutdown);
+
+void tipc_chan_destroy(struct tipc_chan *chan)
+{
+	vds_del_channel(chan->vds, chan);
+	kref_put(&chan->refcount, _free_chan);
+}
+EXPORT_SYMBOL(tipc_chan_destroy);
+
+/***************************************************************************/
+
1598+struct tipc_dn_chan {
1599+ int state;
1600+ struct mutex lock; /* protects rx_msg_queue list and channel state */
1601+ struct tipc_chan *chan;
1602+ wait_queue_head_t readq;
1603+ struct completion reply_comp;
1604+ struct list_head rx_msg_queue;
1605+};
1606+
1607+static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
1608+{
1609+ int ret;
1610+
1611+ ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
1612+ msecs_to_jiffies(timeout));
1613+ if (ret < 0)
1614+ return ret;
1615+
1616+ mutex_lock(&dn->lock);
1617+ if (!ret) {
1618+ /* no reply from remote */
1619+ dn->state = TIPC_STALE;
1620+ ret = -ETIMEDOUT;
1621+ } else {
1622+ /* got reply */
1623+ if (dn->state == TIPC_CONNECTED)
1624+ ret = 0;
1625+ else if (dn->state == TIPC_DISCONNECTED)
1626+ if (!list_empty(&dn->rx_msg_queue))
1627+ ret = 0;
1628+ else
1629+ ret = -ENOTCONN;
1630+ else
1631+ ret = -EIO;
1632+ }
1633+ mutex_unlock(&dn->lock);
1634+
1635+ return ret;
1636+}
1637+
1638+static struct tipc_msg_buf *dn_handle_msg(void *data,
1639+ struct tipc_msg_buf *rxbuf)
1640+{
1641+ struct tipc_dn_chan *dn = data;
1642+ struct tipc_msg_buf *newbuf = rxbuf;
1643+
1644+ mutex_lock(&dn->lock);
1645+ if (dn->state == TIPC_CONNECTED) {
1646+ /* get new buffer */
1647+ newbuf = tipc_chan_get_rxbuf(dn->chan);
1648+ if (newbuf) {
1649+ /* queue an old buffer and return a new one */
1650+ list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
1651+ wake_up_interruptible(&dn->readq);
1652+ } else {
1653+ /*
1654+ * return an old buffer effectively discarding
1655+ * incoming message
1656+ */
1657+ dev_err(&dn->chan->vds->vdev->dev,
1658+ "%s: discard incoming message\n", __func__);
1659+ newbuf = rxbuf;
1660+ }
1661+ }
1662+ mutex_unlock(&dn->lock);
1663+
1664+ return newbuf;
1665+}
1666+
1667+static void dn_connected(struct tipc_dn_chan *dn)
1668+{
1669+ mutex_lock(&dn->lock);
1670+ dn->state = TIPC_CONNECTED;
1671+
1672+ /* complete all pending */
1673+ complete(&dn->reply_comp);
1674+
1675+ mutex_unlock(&dn->lock);
1676+}
1677+
1678+static void dn_disconnected(struct tipc_dn_chan *dn)
1679+{
1680+ mutex_lock(&dn->lock);
1681+ dn->state = TIPC_DISCONNECTED;
1682+
1683+ /* complete all pending */
1684+ complete(&dn->reply_comp);
1685+
1686+ /* wakeup all readers */
1687+ wake_up_interruptible_all(&dn->readq);
1688+
1689+ mutex_unlock(&dn->lock);
1690+}
1691+
1692+static void dn_shutdown(struct tipc_dn_chan *dn)
1693+{
1694+ mutex_lock(&dn->lock);
1695+
1696+ /* set state to STALE */
1697+ dn->state = TIPC_STALE;
1698+
1699+ /* complete all pending */
1700+ complete(&dn->reply_comp);
1701+
1702+ /* wakeup all readers */
1703+ wake_up_interruptible_all(&dn->readq);
1704+
1705+ mutex_unlock(&dn->lock);
1706+}
1707+
1708+static void dn_handle_event(void *data, int event)
1709+{
1710+ struct tipc_dn_chan *dn = data;
1711+
1712+ switch (event) {
1713+ case TIPC_CHANNEL_SHUTDOWN:
1714+ dn_shutdown(dn);
1715+ break;
1716+
1717+ case TIPC_CHANNEL_DISCONNECTED:
1718+ dn_disconnected(dn);
1719+ break;
1720+
1721+ case TIPC_CHANNEL_CONNECTED:
1722+ dn_connected(dn);
1723+ break;
1724+
1725+ default:
1726+ dev_err(&dn->chan->vds->vdev->dev,
1727+ "%s: unhandled event %d\n", __func__, event);
1728+ break;
1729+ }
1730+}
1731+
1732+static void dn_handle_release(void *data)
1733+{
1734+ kfree(data);
1735+}
1736+
1737+static const struct tipc_chan_ops _dn_ops = {
1738+ .handle_msg = dn_handle_msg,
1739+ .handle_event = dn_handle_event,
1740+ .handle_release = dn_handle_release,
1741+};
1742+
1743+#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev)
1744+#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node)
1745+
1746+static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn)
1747+{
1748+ int ret;
1749+ struct tipc_virtio_dev *vds = NULL;
1750+
1751+ mutex_lock(&tipc_devices_lock);
1752+ ret = idr_for_each(&tipc_devices, _match_data, cdn);
1753+ if (ret) {
1754+ vds = cdn_to_vds(cdn);
1755+ kref_get(&vds->refcount);
1756+ }
1757+ mutex_unlock(&tipc_devices_lock);
1758+ return vds;
1759+}
1760+
1761+static int tipc_open(struct inode *inode, struct file *filp)
1762+{
1763+ int ret;
1764+ struct tipc_virtio_dev *vds;
1765+ struct tipc_dn_chan *dn;
1766+ struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev);
1767+
1768+ vds = _dn_lookup_vds(cdn);
1769+ if (!vds) {
1770+ ret = -ENOENT;
1771+ goto err_vds_lookup;
1772+ }
1773+
1774+ dn = kzalloc(sizeof(*dn), GFP_KERNEL);
1775+ if (!dn) {
1776+ ret = -ENOMEM;
1777+ goto err_alloc_chan;
1778+ }
1779+
1780+ mutex_init(&dn->lock);
1781+ init_waitqueue_head(&dn->readq);
1782+ init_completion(&dn->reply_comp);
1783+ INIT_LIST_HEAD(&dn->rx_msg_queue);
1784+
1785+ dn->state = TIPC_DISCONNECTED;
1786+
1787+ dn->chan = vds_create_channel(vds, &_dn_ops, dn);
1788+ if (IS_ERR(dn->chan)) {
1789+ ret = PTR_ERR(dn->chan);
1790+ goto err_create_chan;
1791+ }
1792+
1793+ filp->private_data = dn;
1794+ kref_put(&vds->refcount, _free_vds);
1795+ return 0;
1796+
1797+err_create_chan:
1798+ kfree(dn);
1799+err_alloc_chan:
1800+ kref_put(&vds->refcount, _free_vds);
1801+err_vds_lookup:
1802+ return ret;
1803+}
1804+
1805+
1806+static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name)
1807+{
1808+ int ret;
1809+ char name[MAX_SRV_NAME_LEN];
1810+
1811+ /* copy in service name from user space */
1812+ ret = strncpy_from_user(name, usr_name, sizeof(name));
1813+ if (ret < 0)
1814+ return ret;
1815+ if (ret == sizeof(name))
1816+ return -ENAMETOOLONG;
1817+
1818+ /* send connect request */
1819+ ret = tipc_chan_connect(dn->chan, name);
1820+ if (ret)
1821+ return ret;
1822+
1823+ /* and wait for reply */
1824+ return dn_wait_for_reply(dn, REPLY_TIMEOUT);
1825+}
1826+
1827+static int dn_share_fd(struct tipc_dn_chan *dn, int fd,
1828+ enum transfer_kind transfer_kind,
1829+ struct tipc_shared_handle **out)
1830+{
1831+ int ret = 0;
1832+ struct tipc_shared_handle *shared_handle = NULL;
1833+ struct file *file = NULL;
1834+ struct device *dev = &dn->chan->vds->vdev->dev;
1835+ bool writable = false;
1836+ pgprot_t prot;
1837+ u64 tag = 0;
1838+ trusty_shared_mem_id_t mem_id;
1839+ bool lend;
1840+
1841+ if (dn->state != TIPC_CONNECTED) {
1842+ dev_dbg(dev, "Tried to share fd while not connected\n");
1843+ return -ENOTCONN;
1844+ }
1845+
1846+ file = fget(fd);
1847+ if (!file) {
1848+ dev_dbg(dev, "Invalid fd (%d)\n", fd);
1849+ return -EBADF;
1850+ }
1851+
1852+ if (!(file->f_mode & FMODE_READ)) {
1853+ dev_dbg(dev, "Cannot create write-only mapping\n");
1854+ fput(file);
1855+ return -EACCES;
1856+ }
1857+
1858+ writable = file->f_mode & FMODE_WRITE;
1859+ prot = writable ? PAGE_KERNEL : PAGE_KERNEL_RO;
1860+ fput(file);
1861+ file = NULL;
1862+
1863+ ret = tipc_shared_handle_new(&shared_handle, dn->chan->vds);
1864+ if (ret)
1865+ return ret;
1866+
1867+ shared_handle->dma_buf = dma_buf_get(fd);
1868+ if (IS_ERR(shared_handle->dma_buf)) {
1869+ ret = PTR_ERR(shared_handle->dma_buf);
1870+ shared_handle->dma_buf = NULL;
1871+ dev_dbg(dev, "Unable to get dma buf from fd (%d)\n", ret);
1872+ goto cleanup_handle;
1873+ }
1874+
1875+ tag = trusty_dma_buf_get_ffa_tag(shared_handle->dma_buf);
1876+ ret = trusty_dma_buf_get_shared_mem_id(shared_handle->dma_buf, &mem_id);
1877+ /*
1878+ * Buffers with a preallocated mem_id should only be sent to Trusty
1879+ * using TRUSTY_SEND_SECURE. And conversely, TRUSTY_SEND_SECURE should
1880+ * only be used to send buffers with preallcoated mem_id.
1881+ */
1882+ if (!ret) {
1883+ /* Use shared memory ID owned by dma_buf */
1884+ /* TODO: Enforce transfer_kind == TRUSTY_SEND_SECURE */
1885+ WARN_ONCE(transfer_kind != TRUSTY_SEND_SECURE,
1886+ "Use TRUSTY_SEND_SECURE instead");
1887+ goto mem_id_allocated;
1888+ }
1889+
1890+ if (ret != -ENODATA) {
1891+ dev_err(dev, "dma_buf can't be transferred (%d)\n", ret);
1892+ goto cleanup_handle;
1893+ }
1894+
1895+ if (transfer_kind == TRUSTY_SEND_SECURE) {
1896+ dev_err(dev, "No mem ID for TRUSTY_SEND_SECURE\n");
1897+ goto cleanup_handle;
1898+ }
1899+ lend = (transfer_kind == TRUSTY_LEND);
1900+
1901+ shared_handle->attach = dma_buf_attach(shared_handle->dma_buf, dev);
1902+ if (IS_ERR(shared_handle->attach)) {
1903+ ret = PTR_ERR(shared_handle->attach);
1904+ shared_handle->attach = NULL;
1905+ dev_dbg(dev, "Unable to attach to dma_buf (%d)\n", ret);
1906+ goto cleanup_handle;
1907+ }
1908+
1909+ shared_handle->sgt = dma_buf_map_attachment(shared_handle->attach,
1910+ DMA_BIDIRECTIONAL);
1911+ if (IS_ERR(shared_handle->sgt)) {
1912+ ret = PTR_ERR(shared_handle->sgt);
1913+ shared_handle->sgt = NULL;
1914+		dev_dbg(dev, "Failed to map attachment (%d)\n", ret);
1915+ goto cleanup_handle;
1916+ }
1917+
1918+ ret = trusty_transfer_memory(tipc_shared_handle_dev(shared_handle),
1919+ &mem_id, shared_handle->sgt->sgl,
1920+ shared_handle->sgt->orig_nents, prot, tag,
1921+ lend);
1922+
1923+ if (ret < 0) {
1924+ dev_dbg(dev, "Transferring memory failed: %d\n", ret);
1925+ /*
1926+ * The handle now has a sgt containing the pages, so we no
1927+ * longer need to clean up the pages directly.
1928+ */
1929+ goto cleanup_handle;
1930+ }
1931+ shared_handle->shared = true;
1932+
1933+mem_id_allocated:
1934+ shared_handle->tipc.obj_id = mem_id;
1935+ shared_handle->tipc.size = shared_handle->dma_buf->size;
1936+ shared_handle->tipc.tag = tag;
1937+ *out = shared_handle;
1938+ return 0;
1939+
1940+cleanup_handle:
1941+ tipc_shared_handle_drop(shared_handle);
1942+ return ret;
1943+}
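+
+/*
+ * Rough summary of the transfer kinds handled above (a sketch; the precise
+ * semantics are defined by the secure-world memory-sharing protocol):
+ *	TRUSTY_SHARE       - map the dma_buf and share it; the non-secure
+ *	                     side keeps access to the buffer
+ *	TRUSTY_LEND        - map the dma_buf and lend it; access is handed
+ *	                     over to Trusty while it holds the buffer
+ *	TRUSTY_SEND_SECURE - no mapping here; the dma_buf already carries a
+ *	                     preallocated trusty_shared_mem_id_t
+ */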
1944+
1945+static ssize_t txbuf_write_iter(struct tipc_msg_buf *txbuf,
1946+ struct iov_iter *iter)
1947+{
1948+ size_t len;
1949+ /* message length */
1950+ len = iov_iter_count(iter);
1951+
1952+ /* check available space */
1953+ if (len > mb_avail_space(txbuf))
1954+ return -EMSGSIZE;
1955+
1956+ /* copy in message data */
1957+ if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len)
1958+ return -EFAULT;
1959+
1960+ return len;
1961+}
1962+
1963+static ssize_t txbuf_write_handles(struct tipc_msg_buf *txbuf,
1964+ struct tipc_shared_handle **shm_handles,
1965+ size_t shm_cnt)
1966+{
1967+ size_t idx;
1968+
1969+ /* message length */
1970+ size_t len = shm_cnt * sizeof(struct tipc_shm);
1971+
1972+ /* check available space */
1973+ if (len > mb_avail_space(txbuf))
1974+ return -EMSGSIZE;
1975+
1976+ /* copy over handles */
1977+ for (idx = 0; idx < shm_cnt; idx++) {
1978+ memcpy(mb_put_data(txbuf, sizeof(struct tipc_shm)),
1979+ &shm_handles[idx]->tipc,
1980+ sizeof(struct tipc_shm));
1981+ }
1982+
1983+ txbuf->shm_cnt += shm_cnt;
1984+
1985+ return len;
1986+}
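+
+/*
+ * Resulting txbuf layout (sketch): the payload copied by txbuf_write_iter()
+ * is followed by shm_cnt packed struct tipc_shm records, and the header's
+ * shm_cnt field tells the receiver how many trailing records to expect
+ * (handle_dropped_chan_msg() below relies on exactly this layout).
+ */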
1987+
1988+static long filp_send_ioctl(struct file *filp,
1989+ const struct tipc_send_msg_req __user *arg)
1990+{
1991+ struct tipc_send_msg_req req;
1992+ struct iovec fast_iovs[UIO_FASTIOV];
1993+ struct iovec *iov = fast_iovs;
1994+ struct iov_iter iter;
1995+ struct trusty_shm *shm = NULL;
1996+ struct tipc_shared_handle **shm_handles = NULL;
1997+ int shm_idx = 0;
1998+ int release_idx;
1999+ struct tipc_dn_chan *dn = filp->private_data;
2000+ struct tipc_virtio_dev *vds = dn->chan->vds;
2001+ struct device *dev = &vds->vdev->dev;
2002+ long timeout = TXBUF_TIMEOUT;
2003+ struct tipc_msg_buf *txbuf = NULL;
2004+ long ret = 0;
2005+ ssize_t data_len = 0;
2006+ ssize_t shm_len = 0;
2007+
2008+ if (copy_from_user(&req, arg, sizeof(req)))
2009+ return -EFAULT;
2010+
2011+ if (req.shm_cnt > U16_MAX)
2012+ return -E2BIG;
2013+
2014+ shm = kmalloc_array(req.shm_cnt, sizeof(*shm), GFP_KERNEL);
2015+ if (!shm)
2016+ return -ENOMEM;
2017+
2018+ shm_handles = kmalloc_array(req.shm_cnt, sizeof(*shm_handles),
2019+ GFP_KERNEL);
2020+ if (!shm_handles) {
2021+ ret = -ENOMEM;
2022+ goto shm_handles_alloc_failed;
2023+ }
2024+
2025+ if (copy_from_user(shm, u64_to_user_ptr(req.shm),
2026+ req.shm_cnt * sizeof(struct trusty_shm))) {
2027+ ret = -EFAULT;
2028+ goto load_shm_args_failed;
2029+ }
2030+
2031+ ret = import_iovec(READ, u64_to_user_ptr(req.iov), req.iov_cnt,
2032+ ARRAY_SIZE(fast_iovs), &iov, &iter);
2033+ if (ret < 0) {
2034+ dev_dbg(dev, "Failed to import iovec\n");
2035+ goto iov_import_failed;
2036+ }
2037+
2038+ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++) {
2039+ switch (shm[shm_idx].transfer) {
2040+ case TRUSTY_SHARE:
2041+ case TRUSTY_LEND:
2042+ case TRUSTY_SEND_SECURE:
2043+ break;
2044+		default:
2045+			dev_err(dev, "Unknown transfer type: 0x%x\n", shm[shm_idx].transfer);
2046+			ret = -EINVAL;
2047+			goto shm_share_failed;
2048+ }
2049+ ret = dn_share_fd(dn, shm[shm_idx].fd, shm[shm_idx].transfer,
2050+ &shm_handles[shm_idx]);
2051+ if (ret) {
2052+			dev_dbg(dev,
2053+				"Forwarding memory failed\n");
2054+ goto shm_share_failed;
2055+ }
2056+ }
2057+
2058+ if (filp->f_flags & O_NONBLOCK)
2059+ timeout = 0;
2060+
2061+ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
2062+ if (IS_ERR(txbuf)) {
2063+ dev_dbg(dev, "Failed to get txbuffer\n");
2064+ ret = PTR_ERR(txbuf);
2065+ goto get_txbuf_failed;
2066+ }
2067+
2068+ data_len = txbuf_write_iter(txbuf, &iter);
2069+ if (data_len < 0) {
2070+ ret = data_len;
2071+ goto txbuf_write_failed;
2072+ }
2073+
2074+ shm_len = txbuf_write_handles(txbuf, shm_handles, req.shm_cnt);
2075+ if (shm_len < 0) {
2076+ ret = shm_len;
2077+ goto txbuf_write_failed;
2078+ }
2079+
2080+ /*
2081+	 * These need to be added to the index before queueing the message.
2082+ * As soon as the message is sent, we may receive a message back from
2083+ * Trusty saying it's no longer in use, and the shared_handle needs
2084+ * to be there when that happens.
2085+ */
2086+ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++)
2087+ tipc_shared_handle_register(shm_handles[shm_idx]);
2088+
2089+ ret = tipc_chan_queue_msg(dn->chan, txbuf);
2090+
2091+ if (ret)
2092+ goto queue_failed;
2093+
2094+ ret = data_len;
2095+
2096+common_cleanup:
2097+ kfree(iov);
2098+iov_import_failed:
2099+load_shm_args_failed:
2100+ kfree(shm_handles);
2101+shm_handles_alloc_failed:
2102+ kfree(shm);
2103+ return ret;
2104+
2105+queue_failed:
2106+ for (release_idx = 0; release_idx < req.shm_cnt; release_idx++)
2107+ tipc_shared_handle_take(vds,
2108+ shm_handles[release_idx]->tipc.obj_id);
2109+txbuf_write_failed:
2110+ tipc_chan_put_txbuf(dn->chan, txbuf);
2111+get_txbuf_failed:
2112+shm_share_failed:
2113+ for (shm_idx--; shm_idx >= 0; shm_idx--)
2114+ tipc_shared_handle_drop(shm_handles[shm_idx]);
2115+ goto common_cleanup;
2116+}
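+
+/*
+ * A minimal sketch of how userspace might drive TIPC_IOC_SEND_MSG; buf,
+ * len and dmabuf_fd are placeholders, and the field usage mirrors
+ * filp_send_ioctl() above:
+ *
+ *	struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *	struct trusty_shm shm = { .fd = dmabuf_fd, .transfer = TRUSTY_SHARE };
+ *	struct tipc_send_msg_req req = {
+ *		.iov = (__u64)(uintptr_t)&iov,
+ *		.iov_cnt = 1,
+ *		.shm = (__u64)(uintptr_t)&shm,
+ *		.shm_cnt = 1,
+ *	};
+ *	int rc = ioctl(fd, TIPC_IOC_SEND_MSG, &req);
+ */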
2117+
2118+static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2119+{
2120+ struct tipc_dn_chan *dn = filp->private_data;
2121+
2122+ switch (cmd) {
2123+ case TIPC_IOC_CONNECT:
2124+ return dn_connect_ioctl(dn, (char __user *)arg);
2125+ case TIPC_IOC_SEND_MSG:
2126+ return filp_send_ioctl(filp,
2127+ (const struct tipc_send_msg_req __user *)
2128+ arg);
2129+ default:
2130+ dev_dbg(&dn->chan->vds->vdev->dev,
2131+ "Unhandled ioctl cmd: 0x%x\n", cmd);
2132+ return -ENOTTY;
2133+ }
2134+}
2135+
2136+#ifdef CONFIG_COMPAT
2137+static long tipc_compat_ioctl(struct file *filp,
2138+ unsigned int cmd, unsigned long arg)
2139+{
2140+ struct tipc_dn_chan *dn = filp->private_data;
2141+
2142+ switch (cmd) {
2143+ case TIPC_IOC32_CONNECT:
2144+ cmd = TIPC_IOC_CONNECT;
2145+ break;
2146+ default:
2147+ dev_dbg(&dn->chan->vds->vdev->dev,
2148+ "Unhandled compat ioctl command: 0x%x\n", cmd);
2149+ return -ENOTTY;
2150+ }
2151+ return tipc_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2152+}
2153+#endif
2154+
2155+static inline bool _got_rx(struct tipc_dn_chan *dn)
2156+{
2157+ if (dn->state != TIPC_CONNECTED)
2158+ return true;
2159+
2160+ if (!list_empty(&dn->rx_msg_queue))
2161+ return true;
2162+
2163+ return false;
2164+}
2165+
2166+static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2167+{
2168+ ssize_t ret;
2169+ size_t len;
2170+ struct tipc_msg_buf *mb;
2171+ struct file *filp = iocb->ki_filp;
2172+ struct tipc_dn_chan *dn = filp->private_data;
2173+
2174+ mutex_lock(&dn->lock);
2175+
2176+ while (list_empty(&dn->rx_msg_queue)) {
2177+ if (dn->state != TIPC_CONNECTED) {
2178+ if (dn->state == TIPC_CONNECTING)
2179+ ret = -ENOTCONN;
2180+ else if (dn->state == TIPC_DISCONNECTED)
2181+ ret = -ENOTCONN;
2182+ else if (dn->state == TIPC_STALE)
2183+ ret = -ESHUTDOWN;
2184+ else
2185+ ret = -EBADFD;
2186+ goto out;
2187+ }
2188+
2189+ mutex_unlock(&dn->lock);
2190+
2191+ if (filp->f_flags & O_NONBLOCK)
2192+ return -EAGAIN;
2193+
2194+ if (wait_event_interruptible(dn->readq, _got_rx(dn)))
2195+ return -ERESTARTSYS;
2196+
2197+ mutex_lock(&dn->lock);
2198+ }
2199+
2200+ mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);
2201+
2202+ len = mb_avail_data(mb);
2203+ if (len > iov_iter_count(iter)) {
2204+ ret = -EMSGSIZE;
2205+ goto out;
2206+ }
2207+
2208+ if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) {
2209+ ret = -EFAULT;
2210+ goto out;
2211+ }
2212+
2213+ ret = len;
2214+ list_del(&mb->node);
2215+ tipc_chan_put_rxbuf(dn->chan, mb);
2216+
2217+out:
2218+ mutex_unlock(&dn->lock);
2219+ return ret;
2220+}
2221+
2222+static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2223+{
2224+ struct file *filp = iocb->ki_filp;
2225+ struct tipc_dn_chan *dn = filp->private_data;
2226+ long timeout = TXBUF_TIMEOUT;
2227+ struct tipc_msg_buf *txbuf = NULL;
2228+ ssize_t ret = 0;
2229+ ssize_t len = 0;
2230+
2231+ if (filp->f_flags & O_NONBLOCK)
2232+ timeout = 0;
2233+
2234+ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
2235+
2236+ if (IS_ERR(txbuf))
2237+ return PTR_ERR(txbuf);
2238+
2239+ len = txbuf_write_iter(txbuf, iter);
2240+ if (len < 0)
2241+ goto err_out;
2242+
2243+ /* queue message */
2244+ ret = tipc_chan_queue_msg(dn->chan, txbuf);
2245+ if (ret)
2246+ goto err_out;
2247+
2248+ return len;
2249+
2250+err_out:
2251+ tipc_chan_put_txbuf(dn->chan, txbuf);
2252+ return ret;
2253+}
2254+
2255+static __poll_t tipc_poll(struct file *filp, poll_table *wait)
2256+{
2257+ __poll_t mask = 0;
2258+ struct tipc_dn_chan *dn = filp->private_data;
2259+
2260+ mutex_lock(&dn->lock);
2261+
2262+ poll_wait(filp, &dn->readq, wait);
2263+
2264+ /* Writes always succeed for now */
2265+ mask |= EPOLLOUT | EPOLLWRNORM;
2266+
2267+ if (!list_empty(&dn->rx_msg_queue))
2268+ mask |= EPOLLIN | EPOLLRDNORM;
2269+
2270+ if (dn->state != TIPC_CONNECTED)
2271+ mask |= EPOLLERR;
2272+
2273+ mutex_unlock(&dn->lock);
2274+ return mask;
2275+}
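+
+/*
+ * Poll semantics above, illustrated (sketch; fd is an open channel node):
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		read(fd, rsp, sizeof(rsp));	// a message is queued
+ *
+ * POLLOUT is currently always reported, and POLLERR indicates the channel
+ * has left the TIPC_CONNECTED state.
+ */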
2276+
2277+
2278+static int tipc_release(struct inode *inode, struct file *filp)
2279+{
2280+ struct tipc_dn_chan *dn = filp->private_data;
2281+
2282+ dn_shutdown(dn);
2283+
2284+ /* free all pending buffers */
2285+ vds_free_msg_buf_list(dn->chan->vds, &dn->rx_msg_queue);
2286+
2287+ /* shutdown channel */
2288+ tipc_chan_shutdown(dn->chan);
2289+
2290+ /* and destroy it */
2291+ tipc_chan_destroy(dn->chan);
2292+
2293+ return 0;
2294+}
2295+
2296+static const struct file_operations tipc_fops = {
2297+ .open = tipc_open,
2298+ .release = tipc_release,
2299+ .unlocked_ioctl = tipc_ioctl,
2300+#ifdef CONFIG_COMPAT
2301+ .compat_ioctl = tipc_compat_ioctl,
2302+#endif
2303+ .read_iter = tipc_read_iter,
2304+ .write_iter = tipc_write_iter,
2305+ .poll = tipc_poll,
2306+ .owner = THIS_MODULE,
2307+};
2308+
2309+/*****************************************************************************/
2310+
2311+static void chan_trigger_event(struct tipc_chan *chan, int event)
2312+{
2313+ if (!event)
2314+ return;
2315+
2316+ chan->ops->handle_event(chan->ops_arg, event);
2317+}
2318+
2319+static void _cleanup_vq(struct tipc_virtio_dev *vds, struct virtqueue *vq)
2320+{
2321+ struct tipc_msg_buf *mb;
2322+
2323+ while ((mb = virtqueue_detach_unused_buf(vq)) != NULL)
2324+ vds_free_msg_buf(vds, mb);
2325+}
2326+
2327+static int _create_cdev_node(struct device *parent,
2328+ struct tipc_cdev_node *cdn,
2329+ const char *name)
2330+{
2331+ int ret;
2332+ dev_t devt;
2333+
2334+ if (!name) {
2335+ dev_dbg(parent, "%s: cdev name has to be provided\n",
2336+ __func__);
2337+ return -EINVAL;
2338+ }
2339+
2340+ /* allocate minor */
2341+ ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES, GFP_KERNEL);
2342+ if (ret < 0) {
2343+ dev_dbg(parent, "%s: failed (%d) to get id\n",
2344+ __func__, ret);
2345+ return ret;
2346+ }
2347+
2348+ cdn->minor = ret;
2349+ cdev_init(&cdn->cdev, &tipc_fops);
2350+ cdn->cdev.owner = THIS_MODULE;
2351+
2352+ /* Add character device */
2353+ devt = MKDEV(tipc_major, cdn->minor);
2354+ ret = cdev_add(&cdn->cdev, devt, 1);
2355+ if (ret) {
2356+ dev_dbg(parent, "%s: cdev_add failed (%d)\n",
2357+ __func__, ret);
2358+ goto err_add_cdev;
2359+ }
2360+
2361+ /* Create a device node */
2362+ cdn->dev = device_create(tipc_class, parent,
2363+ devt, NULL, "trusty-ipc-%s", name);
2364+ if (IS_ERR(cdn->dev)) {
2365+ ret = PTR_ERR(cdn->dev);
2366+ dev_dbg(parent, "%s: device_create failed: %d\n",
2367+ __func__, ret);
2368+ goto err_device_create;
2369+ }
2370+
2371+ return 0;
2372+
2373+err_device_create:
2374+ cdn->dev = NULL;
2375+ cdev_del(&cdn->cdev);
2376+err_add_cdev:
2377+ idr_remove(&tipc_devices, cdn->minor);
2378+ return ret;
2379+}
2380+
2381+static void create_cdev_node(struct tipc_virtio_dev *vds,
2382+ struct tipc_cdev_node *cdn)
2383+{
2384+ int err;
2385+
2386+ mutex_lock(&tipc_devices_lock);
2387+
2388+ if (!default_vdev) {
2389+ kref_get(&vds->refcount);
2390+ default_vdev = vds->vdev;
2391+ }
2392+
2393+ if (vds->cdev_name[0] && !cdn->dev) {
2394+ kref_get(&vds->refcount);
2395+ err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name);
2396+ if (err) {
2397+ dev_err(&vds->vdev->dev,
2398+ "failed (%d) to create cdev node\n", err);
2399+ kref_put(&vds->refcount, _free_vds);
2400+ }
2401+ }
2402+ mutex_unlock(&tipc_devices_lock);
2403+}
2404+
2405+static void destroy_cdev_node(struct tipc_virtio_dev *vds,
2406+ struct tipc_cdev_node *cdn)
2407+{
2408+ mutex_lock(&tipc_devices_lock);
2409+ if (cdn->dev) {
2410+ device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor));
2411+ cdev_del(&cdn->cdev);
2412+ idr_remove(&tipc_devices, cdn->minor);
2413+ cdn->dev = NULL;
2414+ kref_put(&vds->refcount, _free_vds);
2415+ }
2416+
2417+ if (default_vdev == vds->vdev) {
2418+ default_vdev = NULL;
2419+ kref_put(&vds->refcount, _free_vds);
2420+ }
2421+
2422+ mutex_unlock(&tipc_devices_lock);
2423+}
2424+
2425+static void _go_online(struct tipc_virtio_dev *vds)
2426+{
2427+ mutex_lock(&vds->lock);
2428+ if (vds->state == VDS_OFFLINE)
2429+ vds->state = VDS_ONLINE;
2430+ mutex_unlock(&vds->lock);
2431+
2432+ create_cdev_node(vds, &vds->cdev_node);
2433+
2434+ dev_info(&vds->vdev->dev, "is online\n");
2435+}
2436+
2437+static void _go_offline(struct tipc_virtio_dev *vds)
2438+{
2439+ struct tipc_chan *chan;
2440+
2441+ /* change state to OFFLINE */
2442+ mutex_lock(&vds->lock);
2443+ if (vds->state != VDS_ONLINE) {
2444+ mutex_unlock(&vds->lock);
2445+ return;
2446+ }
2447+ vds->state = VDS_OFFLINE;
2448+ mutex_unlock(&vds->lock);
2449+
2450+ /* wakeup all waiters */
2451+ wake_up_interruptible_all(&vds->sendq);
2452+
2453+ /* shutdown all channels */
2454+ while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) {
2455+ mutex_lock(&chan->lock);
2456+ chan->state = TIPC_STALE;
2457+ chan->remote = 0;
2458+ chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN);
2459+ mutex_unlock(&chan->lock);
2460+ kref_put(&chan->refcount, _free_chan);
2461+ }
2462+
2463+ /* shutdown device node */
2464+ destroy_cdev_node(vds, &vds->cdev_node);
2465+
2466+ dev_info(&vds->vdev->dev, "is offline\n");
2467+}
2468+
2469+static void _handle_conn_rsp(struct tipc_virtio_dev *vds,
2470+ struct tipc_conn_rsp_body *rsp, size_t len)
2471+{
2472+ struct tipc_chan *chan;
2473+
2474+ if (sizeof(*rsp) != len) {
2475+ dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n",
2476+ __func__, len);
2477+ return;
2478+ }
2479+
2480+ dev_dbg(&vds->vdev->dev,
2481+ "%s: connection response: for addr 0x%x: status %d remote addr 0x%x\n",
2482+ __func__, rsp->target, rsp->status, rsp->remote);
2483+
2484+ /* Lookup channel */
2485+ chan = vds_lookup_channel(vds, rsp->target);
2486+ if (chan) {
2487+ mutex_lock(&chan->lock);
2488+ if (chan->state == TIPC_CONNECTING) {
2489+ if (!rsp->status) {
2490+ chan->state = TIPC_CONNECTED;
2491+ chan->remote = rsp->remote;
2492+ chan->max_msg_cnt = rsp->max_msg_cnt;
2493+ chan->max_msg_size = rsp->max_msg_size;
2494+ chan_trigger_event(chan,
2495+ TIPC_CHANNEL_CONNECTED);
2496+ } else {
2497+ chan->state = TIPC_DISCONNECTED;
2498+ chan->remote = 0;
2499+ chan_trigger_event(chan,
2500+ TIPC_CHANNEL_DISCONNECTED);
2501+ }
2502+ }
2503+ mutex_unlock(&chan->lock);
2504+ kref_put(&chan->refcount, _free_chan);
2505+ }
2506+}
2507+
2508+static void _handle_disc_req(struct tipc_virtio_dev *vds,
2509+ struct tipc_disc_req_body *req, size_t len)
2510+{
2511+ struct tipc_chan *chan;
2512+
2513+ if (sizeof(*req) != len) {
2514+ dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n",
2515+ __func__, len);
2516+ return;
2517+ }
2518+
2519+ dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n",
2520+ __func__, req->target);
2521+
2522+ chan = vds_lookup_channel(vds, req->target);
2523+ if (chan) {
2524+ mutex_lock(&chan->lock);
2525+ if (chan->state == TIPC_CONNECTED ||
2526+ chan->state == TIPC_CONNECTING) {
2527+ chan->state = TIPC_DISCONNECTED;
2528+ chan->remote = 0;
2529+ chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED);
2530+ }
2531+ mutex_unlock(&chan->lock);
2532+ kref_put(&chan->refcount, _free_chan);
2533+ }
2534+}
2535+
2536+static void _handle_release(struct tipc_virtio_dev *vds,
2537+ struct tipc_release_body *req, size_t len)
2538+{
2539+ struct tipc_shared_handle *handle = NULL;
2540+ struct device *dev = &vds->vdev->dev;
2541+ int ret = 0;
2542+
2543+ if (len < sizeof(*req)) {
2544+ dev_err(dev, "Received undersized release control message\n");
2545+ return;
2546+ }
2547+
2548+ handle = tipc_shared_handle_take(vds, req->id);
2549+ if (!handle) {
2550+ dev_err(dev,
2551+ "Received release control message for untracked handle: 0x%llx\n",
2552+ req->id);
2553+ return;
2554+ }
2555+
2556+ ret = tipc_shared_handle_drop(handle);
2557+
2558+ if (ret) {
2559+ dev_err(dev,
2560+ "Failed to release handle 0x%llx upon request: (%d)\n",
2561+ req->id, ret);
2562+ /*
2563+ * Put the handle back in case we got a spurious release now and
2564+ * get a real one later. This path should not happen, we're
2565+ * just trying to be robust.
2566+ */
2567+ tipc_shared_handle_register(handle);
2568+ }
2569+}
2570+
2571+static void _handle_ctrl_msg(struct tipc_virtio_dev *vds,
2572+ void *data, int len, u32 src)
2573+{
2574+ struct tipc_ctrl_msg *msg = data;
2575+
2576+ if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) {
2577+ dev_err(&vds->vdev->dev,
2578+			"%s: Invalid message length (%d vs. %d)\n",
2579+ __func__, (int)(sizeof(*msg) + msg->body_len), len);
2580+ return;
2581+ }
2582+
2583+ dev_dbg(&vds->vdev->dev,
2584+ "%s: Incoming ctrl message: src 0x%x type %d len %d\n",
2585+ __func__, src, msg->type, msg->body_len);
2586+
2587+ switch (msg->type) {
2588+ case TIPC_CTRL_MSGTYPE_GO_ONLINE:
2589+ _go_online(vds);
2590+ break;
2591+
2592+ case TIPC_CTRL_MSGTYPE_GO_OFFLINE:
2593+ _go_offline(vds);
2594+ break;
2595+
2596+ case TIPC_CTRL_MSGTYPE_CONN_RSP:
2597+ _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body,
2598+ msg->body_len);
2599+ break;
2600+
2601+ case TIPC_CTRL_MSGTYPE_DISC_REQ:
2602+ _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body,
2603+ msg->body_len);
2604+ break;
2605+
2606+ case TIPC_CTRL_MSGTYPE_RELEASE:
2607+ _handle_release(vds, (struct tipc_release_body *)msg->body,
2608+ msg->body_len);
2609+ break;
2610+
2611+ default:
2612+ dev_warn(&vds->vdev->dev,
2613+ "%s: Unexpected message type: %d\n",
2614+ __func__, msg->type);
2615+ }
2616+}
2617+
2618+static void handle_dropped_chan_msg(struct tipc_virtio_dev *vds,
2619+ struct tipc_msg_buf *mb,
2620+ struct tipc_msg_hdr *msg)
2621+{
2622+ int shm_idx;
2623+ struct tipc_shm *shm;
2624+ struct tipc_shared_handle *shared_handle;
2625+ struct device *dev = &vds->vdev->dev;
2626+ size_t len;
2627+
2628+ if (msg->len < msg->shm_cnt * sizeof(*shm)) {
2629+		dev_err(dev, "shm_cnt does not fit in dropped message\n");
2630+ /* The message is corrupt, so we can't recover resources */
2631+ return;
2632+ }
2633+
2634+ len = msg->len - msg->shm_cnt * sizeof(*shm);
2635+ /* skip normal data */
2636+ (void)mb_get_data(mb, len);
2637+
2638+ for (shm_idx = 0; shm_idx < msg->shm_cnt; shm_idx++) {
2639+ shm = mb_get_data(mb, sizeof(*shm));
2640+ shared_handle = tipc_shared_handle_take(vds, shm->obj_id);
2641+ if (shared_handle) {
2642+ if (tipc_shared_handle_drop(shared_handle))
2643+ dev_err(dev,
2644+					"Failed to drop handle found in dropped buffer\n");
2645+ } else {
2646+ dev_err(dev,
2647+				"Found handle in dropped buffer which was not registered to tipc device...\n");
2648+ }
2649+ }
2650+}
2651+
2652+static void handle_dropped_mb(struct tipc_virtio_dev *vds,
2653+ struct tipc_msg_buf *mb)
2654+{
2655+ struct tipc_msg_hdr *msg;
2656+
2657+ mb_reset_read(mb);
2658+ msg = mb_get_data(mb, sizeof(*msg));
2659+ if (msg->dst != TIPC_CTRL_ADDR) {
2660+ handle_dropped_chan_msg(vds, mb, msg);
2661+ }
2662+}
2663+
2664+static int _handle_rxbuf(struct tipc_virtio_dev *vds,
2665+ struct tipc_msg_buf *rxbuf, size_t rxlen)
2666+{
2667+ int err;
2668+ struct scatterlist sg;
2669+ struct tipc_msg_hdr *msg;
2670+ struct device *dev = &vds->vdev->dev;
2671+
2672+ /* message sanity check */
2673+ if (rxlen > rxbuf->buf_sz) {
2674+ dev_warn(dev, "inbound msg is too big: %zd\n", rxlen);
2675+ goto drop_it;
2676+ }
2677+
2678+ if (rxlen < sizeof(*msg)) {
2679+ dev_warn(dev, "inbound msg is too short: %zd\n", rxlen);
2680+ goto drop_it;
2681+ }
2682+
2683+ /* reset buffer and put data */
2684+ mb_reset(rxbuf);
2685+ mb_put_data(rxbuf, rxlen);
2686+
2687+ /* get message header */
2688+ msg = mb_get_data(rxbuf, sizeof(*msg));
2689+ if (mb_avail_data(rxbuf) != msg->len) {
2690+ dev_warn(dev, "inbound msg length mismatch: (%zu vs. %d)\n",
2691+ mb_avail_data(rxbuf), msg->len);
2692+ goto drop_it;
2693+ }
2694+
2695+ dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d, shm_cnt: %d\n",
2696+ msg->src, msg->dst, msg->len, msg->flags, msg->reserved,
2697+ msg->shm_cnt);
2698+
2699+ /* message directed to control endpoint is a special case */
2700+ if (msg->dst == TIPC_CTRL_ADDR) {
2701+ _handle_ctrl_msg(vds, msg->data, msg->len, msg->src);
2702+ } else {
2703+ struct tipc_chan *chan = NULL;
2704+ /* Lookup channel */
2705+ chan = vds_lookup_channel(vds, msg->dst);
2706+ if (chan) {
2707+ /* handle it */
2708+ rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf);
2709+ kref_put(&chan->refcount, _free_chan);
2710+ if (WARN_ON(!rxbuf))
2711+ return -EINVAL;
2712+ }
2713+ }
2714+
2715+drop_it:
2716+ /* add the buffer back to the virtqueue */
2717+ sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
2718+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
2719+ if (err < 0) {
2720+ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
2721+ return err;
2722+ }
2723+
2724+ return 0;
2725+}
2726+
2727+static void _rxvq_cb(struct virtqueue *rxvq)
2728+{
2729+ unsigned int len;
2730+ struct tipc_msg_buf *mb;
2731+ unsigned int msg_cnt = 0;
2732+ struct tipc_virtio_dev *vds = rxvq->vdev->priv;
2733+
2734+ while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) {
2735+ if (_handle_rxbuf(vds, mb, len))
2736+ break;
2737+ msg_cnt++;
2738+ }
2739+
2740+	/* tell the other side that we added rx buffers */
2741+ if (msg_cnt)
2742+ virtqueue_kick(rxvq);
2743+}
2744+
2745+static void _txvq_cb(struct virtqueue *txvq)
2746+{
2747+ unsigned int len;
2748+ struct tipc_msg_buf *mb;
2749+ bool need_wakeup = false;
2750+ struct tipc_virtio_dev *vds = txvq->vdev->priv;
2751+
2752+ /* detach all buffers */
2753+ mutex_lock(&vds->lock);
2754+ while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) {
2755+ if ((int)len < 0)
2756+ handle_dropped_mb(vds, mb);
2757+ need_wakeup |= _put_txbuf_locked(vds, mb);
2758+ }
2759+ mutex_unlock(&vds->lock);
2760+
2761+ if (need_wakeup) {
2762+ /* wake up potential senders waiting for a tx buffer */
2763+ wake_up_interruptible_all(&vds->sendq);
2764+ }
2765+}
2766+
2767+static int tipc_virtio_probe(struct virtio_device *vdev)
2768+{
2769+ int err, i;
2770+ struct tipc_virtio_dev *vds;
2771+ struct tipc_dev_config config;
2772+ struct virtqueue *vqs[2];
2773+ vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb};
2774+ static const char * const vq_names[] = { "rx", "tx" };
2775+
2776+ vds = kzalloc(sizeof(*vds), GFP_KERNEL);
2777+ if (!vds)
2778+ return -ENOMEM;
2779+
2780+ vds->vdev = vdev;
2781+
2782+ mutex_init(&vds->lock);
2783+ mutex_init(&vds->shared_handles_lock);
2784+ kref_init(&vds->refcount);
2785+ init_waitqueue_head(&vds->sendq);
2786+ INIT_LIST_HEAD(&vds->free_buf_list);
2787+ idr_init(&vds->addr_idr);
2788+ vds->shared_handles = RB_ROOT;
2789+ dma_coerce_mask_and_coherent(&vds->vdev->dev,
2790+ *vds->vdev->dev.parent->parent->dma_mask);
2791+
2792+ /* set default max message size and alignment */
2793+ memset(&config, 0, sizeof(config));
2794+ config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE;
2795+ config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN;
2796+
2797+ /* get configuration if present */
2798+ vdev->config->get(vdev, 0, &config, sizeof(config));
2799+
2800+ /* copy dev name */
2801+ strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name));
2802+ vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0';
2803+
2804+	/* find virtqueues (rx and tx, in this order) */
2805+ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL,
2806+ NULL);
2807+ if (err)
2808+ goto err_find_vqs;
2809+
2810+ vds->rxvq = vqs[0];
2811+ vds->txvq = vqs[1];
2812+
2813+ /* save max buffer size and count */
2814+ vds->msg_buf_max_sz = config.msg_buf_max_size;
2815+ vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq);
2816+
2817+ /* set up the receive buffers */
2818+ for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) {
2819+ struct scatterlist sg;
2820+ struct tipc_msg_buf *rxbuf;
2821+
2822+ rxbuf = vds_alloc_msg_buf(vds, true);
2823+ if (!rxbuf) {
2824+ dev_err(&vdev->dev, "failed to allocate rx buffer\n");
2825+ err = -ENOMEM;
2826+ goto err_free_rx_buffers;
2827+ }
2828+
2829+ sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
2830+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
2831+ WARN_ON(err); /* sanity check; this can't really happen */
2832+ }
2833+
2834+ vdev->priv = vds;
2835+ vds->state = VDS_OFFLINE;
2836+
2837+ dev_dbg(&vdev->dev, "%s: done\n", __func__);
2838+ return 0;
2839+
2840+err_free_rx_buffers:
2841+ _cleanup_vq(vds, vds->rxvq);
2842+err_find_vqs:
2843+ kref_put(&vds->refcount, _free_vds);
2844+ return err;
2845+}
2846+
2847+static void tipc_virtio_remove(struct virtio_device *vdev)
2848+{
2849+ struct tipc_virtio_dev *vds = vdev->priv;
2850+
2851+ _go_offline(vds);
2852+
2853+ mutex_lock(&vds->lock);
2854+ vds->state = VDS_DEAD;
2855+ vds->vdev = NULL;
2856+ mutex_unlock(&vds->lock);
2857+
2858+ vdev->config->reset(vdev);
2859+
2860+ idr_destroy(&vds->addr_idr);
2861+
2862+ _cleanup_vq(vds, vds->rxvq);
2863+ _cleanup_vq(vds, vds->txvq);
2864+ vds_free_msg_buf_list(vds, &vds->free_buf_list);
2865+
2866+	vdev->config->del_vqs(vdev);
2867+
2868+ kref_put(&vds->refcount, _free_vds);
2869+}
2870+
2871+static const struct virtio_device_id tipc_virtio_id_table[] = {
2872+ { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID },
2873+ { 0 },
2874+};
2875+
2876+static const unsigned int features[] = {
2877+ 0,
2878+};
2879+
2880+static struct virtio_driver virtio_tipc_driver = {
2881+ .feature_table = features,
2882+ .feature_table_size = ARRAY_SIZE(features),
2883+ .driver.name = KBUILD_MODNAME,
2884+ .driver.owner = THIS_MODULE,
2885+ .id_table = tipc_virtio_id_table,
2886+ .probe = tipc_virtio_probe,
2887+ .remove = tipc_virtio_remove,
2888+};
2889+
2890+static int __init tipc_init(void)
2891+{
2892+ int ret;
2893+ dev_t dev;
2894+
2895+ ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME);
2896+ if (ret) {
2897+ pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret);
2898+ return ret;
2899+ }
2900+
2901+ tipc_major = MAJOR(dev);
2902+ tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
2903+ if (IS_ERR(tipc_class)) {
2904+ ret = PTR_ERR(tipc_class);
2905+ pr_err("%s: class_create failed: %d\n", __func__, ret);
2906+ goto err_class_create;
2907+ }
2908+
2909+ ret = register_virtio_driver(&virtio_tipc_driver);
2910+ if (ret) {
2911+ pr_err("failed to register virtio driver: %d\n", ret);
2912+ goto err_register_virtio_drv;
2913+ }
2914+
2915+ return 0;
2916+
2917+err_register_virtio_drv:
2918+ class_destroy(tipc_class);
2919+
2920+err_class_create:
2921+ unregister_chrdev_region(dev, MAX_DEVICES);
2922+ return ret;
2923+}
2924+
2925+static void __exit tipc_exit(void)
2926+{
2927+ unregister_virtio_driver(&virtio_tipc_driver);
2928+ class_destroy(tipc_class);
2929+ unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES);
2930+}
2931+
2932+/* We need to init this early */
2933+subsys_initcall(tipc_init);
2934+module_exit(tipc_exit);
2935+
2936+MODULE_DEVICE_TABLE(virtio, tipc_virtio_id_table);
2937+MODULE_DESCRIPTION("Trusty IPC driver");
2938+MODULE_LICENSE("GPL v2");
2939diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c
2940new file mode 100644
2941index 000000000000..5c6076108d0e
2942--- /dev/null
2943+++ b/drivers/trusty/trusty-irq.c
2944@@ -0,0 +1,645 @@
2945+// SPDX-License-Identifier: GPL-2.0-only
2946+/*
2947+ * Copyright (C) 2013 Google, Inc.
2948+ */
2949+
2950+#include <linux/cpu.h>
2951+#include <linux/interrupt.h>
2952+#include <linux/irq.h>
2953+#include <linux/irqdomain.h>
2954+#include <linux/module.h>
2955+#include <linux/of.h>
2956+#include <linux/of_irq.h>
2957+#include <linux/platform_device.h>
2958+#include <linux/slab.h>
2959+#include <linux/string.h>
2960+#include <linux/trusty/smcall.h>
2961+#include <linux/trusty/sm_err.h>
2962+#include <linux/trusty/trusty.h>
2963+
2964+struct trusty_irq {
2965+ struct trusty_irq_state *is;
2966+ struct hlist_node node;
2967+ unsigned int irq;
2968+ bool percpu;
2969+ bool enable;
2970+ bool doorbell;
2971+ struct trusty_irq __percpu *percpu_ptr;
2972+};
2973+
2974+struct trusty_irq_irqset {
2975+ struct hlist_head pending;
2976+ struct hlist_head inactive;
2977+};
2978+
2979+struct trusty_irq_state {
2980+ struct device *dev;
2981+ struct device *trusty_dev;
2982+ struct trusty_irq_irqset normal_irqs;
2983+ spinlock_t normal_irqs_lock;
2984+ struct trusty_irq_irqset __percpu *percpu_irqs;
2985+ struct notifier_block trusty_call_notifier;
2986+ struct hlist_node cpuhp_node;
2987+};
2988+
2989+static int trusty_irq_cpuhp_slot = -1;
2990+
2991+static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is,
2992+ struct trusty_irq_irqset *irqset,
2993+ bool percpu)
2994+{
2995+ struct hlist_node *n;
2996+ struct trusty_irq *trusty_irq;
2997+
2998+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
2999+ dev_dbg(is->dev,
3000+ "%s: enable pending irq %d, percpu %d, cpu %d\n",
3001+ __func__, trusty_irq->irq, percpu, smp_processor_id());
3002+ if (percpu)
3003+ enable_percpu_irq(trusty_irq->irq, 0);
3004+ else
3005+ enable_irq(trusty_irq->irq);
3006+ hlist_del(&trusty_irq->node);
3007+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3008+ }
3009+}
3010+
3011+static void trusty_irq_enable_irqset(struct trusty_irq_state *is,
3012+ struct trusty_irq_irqset *irqset)
3013+{
3014+ struct trusty_irq *trusty_irq;
3015+
3016+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
3017+ if (trusty_irq->enable) {
3018+ dev_warn(is->dev,
3019+ "%s: percpu irq %d already enabled, cpu %d\n",
3020+ __func__, trusty_irq->irq, smp_processor_id());
3021+ continue;
3022+ }
3023+ dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n",
3024+ __func__, trusty_irq->irq, smp_processor_id());
3025+ enable_percpu_irq(trusty_irq->irq, 0);
3026+ trusty_irq->enable = true;
3027+ }
3028+}
3029+
3030+static void trusty_irq_disable_irqset(struct trusty_irq_state *is,
3031+ struct trusty_irq_irqset *irqset)
3032+{
3033+ struct hlist_node *n;
3034+ struct trusty_irq *trusty_irq;
3035+
3036+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
3037+ if (!trusty_irq->enable) {
3038+ dev_warn(is->dev,
3039+ "irq %d already disabled, percpu %d, cpu %d\n",
3040+ trusty_irq->irq, trusty_irq->percpu,
3041+ smp_processor_id());
3042+ continue;
3043+ }
3044+ dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n",
3045+ __func__, trusty_irq->irq, trusty_irq->percpu,
3046+ smp_processor_id());
3047+ trusty_irq->enable = false;
3048+ if (trusty_irq->percpu)
3049+ disable_percpu_irq(trusty_irq->irq);
3050+ else
3051+ disable_irq_nosync(trusty_irq->irq);
3052+ }
3053+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
3054+ if (!trusty_irq->enable) {
3055+ dev_warn(is->dev,
3056+ "pending irq %d already disabled, percpu %d, cpu %d\n",
3057+ trusty_irq->irq, trusty_irq->percpu,
3058+ smp_processor_id());
3059+ }
3060+ dev_dbg(is->dev,
3061+ "%s: disable pending irq %d, percpu %d, cpu %d\n",
3062+ __func__, trusty_irq->irq, trusty_irq->percpu,
3063+ smp_processor_id());
3064+ trusty_irq->enable = false;
3065+ hlist_del(&trusty_irq->node);
3066+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3067+ }
3068+}
3069+
3070+static int trusty_irq_call_notify(struct notifier_block *nb,
3071+ unsigned long action, void *data)
3072+{
3073+ struct trusty_irq_state *is;
3074+
3075+ if (WARN_ON(!irqs_disabled()))
3076+ return NOTIFY_DONE;
3077+
3078+ if (action != TRUSTY_CALL_PREPARE)
3079+ return NOTIFY_DONE;
3080+
3081+ is = container_of(nb, struct trusty_irq_state, trusty_call_notifier);
3082+
3083+ spin_lock(&is->normal_irqs_lock);
3084+ trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false);
3085+ spin_unlock(&is->normal_irqs_lock);
3086+ trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true);
3087+
3088+ return NOTIFY_OK;
3089+}
3090+
3091+static irqreturn_t trusty_irq_handler(int irq, void *data)
3092+{
3093+ struct trusty_irq *trusty_irq = data;
3094+ struct trusty_irq_state *is = trusty_irq->is;
3095+ struct trusty_irq_irqset *irqset;
3096+
3097+	dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n",
3098+		__func__, irq, trusty_irq->percpu, smp_processor_id(),
3099+		trusty_irq->enable);
3100+
3101+ if (!trusty_irq->doorbell) {
3102+ if (trusty_irq->percpu) {
3103+ disable_percpu_irq(irq);
3104+ irqset = this_cpu_ptr(is->percpu_irqs);
3105+ } else {
3106+ disable_irq_nosync(irq);
3107+ irqset = &is->normal_irqs;
3108+ }
3109+
3110+ spin_lock(&is->normal_irqs_lock);
3111+ if (trusty_irq->enable) {
3112+ hlist_del(&trusty_irq->node);
3113+ hlist_add_head(&trusty_irq->node, &irqset->pending);
3114+ }
3115+ spin_unlock(&is->normal_irqs_lock);
3116+ }
3117+
3118+ trusty_enqueue_nop(is->trusty_dev, NULL);
3119+
3120+ dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq);
3121+
3122+ return IRQ_HANDLED;
3123+}
3124+
3125+static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node)
3126+{
3127+ unsigned long irq_flags;
3128+ struct trusty_irq_state *is;
3129+
3130+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
3131+
3132+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
3133+
3134+ local_irq_save(irq_flags);
3135+ trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs));
3136+ local_irq_restore(irq_flags);
3137+
3138+ /*
3139+	 * Temporary workaround that blindly enqueues work to force the trusty
3140+	 * scheduler to run after a cpu suspend.
3141+	 * Root-causing why the workqueue ends up inappropriately empty
3142+	 * (e.g. loss of an IPI) may make this workaround unnecessary
3143+	 * in the future.
3144+ */
3145+ trusty_enqueue_nop(is->trusty_dev, NULL);
3146+
3147+ return 0;
3148+}
3149+
3150+static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node)
3151+{
3152+ unsigned long irq_flags;
3153+ struct trusty_irq_state *is;
3154+
3155+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
3156+
3157+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
3158+
3159+ local_irq_save(irq_flags);
3160+ trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs));
3161+ local_irq_restore(irq_flags);
3162+
3163+ return 0;
3164+}
3165+
3166+static int trusty_irq_map_ipi(struct trusty_irq_state *is, int irq)
3167+{
3168+ int ret;
3169+ u32 ipi_range[3];
3170+ struct device_node *gic;
3171+ struct of_phandle_args oirq = {};
3172+ u32 beg, end, ipi_base;
3173+
3174+ ret = of_property_read_u32_array(is->dev->of_node, "ipi-range",
3175+ ipi_range, ARRAY_SIZE(ipi_range));
3176+ if (ret != 0)
3177+ return -ENODATA;
3178+ beg = ipi_range[0];
3179+ end = ipi_range[1];
3180+ ipi_base = ipi_range[2];
3181+
3182+ if (irq < beg || irq > end)
3183+ return -ENODATA;
3184+
3185+ gic = of_irq_find_parent(is->dev->of_node);
3186+ if (!gic)
3187+ return -ENXIO;
3188+
3189+ oirq.np = gic;
3190+ oirq.args_count = 1;
3191+ oirq.args[0] = ipi_base + (irq - beg);
3192+
3193+ ret = irq_create_of_mapping(&oirq);
3194+
3195+ of_node_put(gic);
3196+ return (!ret) ? -EINVAL : ret;
3197+}
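+
+/*
+ * Illustrative device-tree fragment for the IPI mapping above; the numeric
+ * values are examples only. With ipi-range = <beg end ipi_base>, a trusty
+ * irq in [beg, end] is mapped onto the parent interrupt controller's
+ * interrupt number ipi_base + (irq - beg):
+ *
+ *	irq {
+ *		compatible = "android,trusty-irq-v1";
+ *		interrupt-parent = <&gic>;
+ *		ipi-range = <8 15 8>;	// <beg end ipi_base>
+ *	};
+ */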
3198+
3199+static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq)
3200+{
3201+ int ret;
3202+ int index;
3203+ u32 irq_pos;
3204+ u32 templ_idx;
3205+ u32 range_base;
3206+ u32 range_end;
3207+ struct of_phandle_args oirq;
3208+
3209+ /* check if this is an IPI (inter-processor interrupt) */
3210+ ret = trusty_irq_map_ipi(is, irq);
3211+ if (ret != -ENODATA)
3212+ return ret;
3213+
3214+ /* check if "interrupt-ranges" property is present */
3215+ if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) {
3216+ /* fallback to old behavior to be backward compatible with
3217+ * systems that do not need IRQ domains.
3218+ */
3219+ return irq;
3220+ }
3221+
3222+ /* find irq range */
3223+ for (index = 0;; index += 3) {
3224+ ret = of_property_read_u32_index(is->dev->of_node,
3225+ "interrupt-ranges",
3226+ index, &range_base);
3227+ if (ret)
3228+ return ret;
3229+
3230+ ret = of_property_read_u32_index(is->dev->of_node,
3231+ "interrupt-ranges",
3232+ index + 1, &range_end);
3233+ if (ret)
3234+ return ret;
3235+
3236+ if (irq >= range_base && irq <= range_end)
3237+ break;
3238+ }
3239+
3240+ /* read the rest of range entry: template index and irq_pos */
3241+ ret = of_property_read_u32_index(is->dev->of_node,
3242+ "interrupt-ranges",
3243+ index + 2, &templ_idx);
3244+ if (ret)
3245+ return ret;
3246+
3247+ /* read irq template */
3248+ ret = of_parse_phandle_with_args(is->dev->of_node,
3249+ "interrupt-templates",
3250+ "#interrupt-cells",
3251+ templ_idx, &oirq);
3252+ if (ret)
3253+ return ret;
3254+
3255+ WARN_ON(!oirq.np);
3256+ WARN_ON(!oirq.args_count);
3257+
3258+ /*
3259+	 * An IRQ template is a non-empty array of u32 values describing a
3260+	 * group of interrupts that share common properties. The u32 entry at
3261+	 * index zero holds the position of the irq id within the interrupt
3262+	 * specifier array; it is followed by the interrupt specifier array
3263+	 * itself with the irq id field omitted. To convert an irq template
3264+	 * into an interrupt specifier array, move the first irq_pos entries
3265+	 * down one slot and place the real irq id in the resulting gap.
3266+ */
3267+ irq_pos = oirq.args[0];
3268+
3269+ if (irq_pos >= oirq.args_count) {
3270+ dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos);
3271+ return -EINVAL;
3272+ }
3273+
3274+ for (index = 1; index <= irq_pos; index++)
3275+ oirq.args[index - 1] = oirq.args[index];
3276+
3277+ oirq.args[irq_pos] = irq - range_base;
3278+
3279+ ret = irq_create_of_mapping(&oirq);
3280+
3281+ return (!ret) ? -EINVAL : ret;
3282+}
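+
+/*
+ * Worked example of the template conversion above (values illustrative):
+ * a parsed template of args[] = {1, 0, 4} gives irq_pos = args[0] = 1.
+ * Shifting the first irq_pos entries down one slot yields {0, 0, 4}, and
+ * the gap at index irq_pos receives irq - range_base. For irq = 42 in a
+ * range starting at 32, the final specifier is <0 10 4>, i.e. a GIC-style
+ * <type irq flags> triple.
+ */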
3283+
3284+static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq)
3285+{
3286+ int ret;
3287+ int irq;
3288+ unsigned long irq_flags;
3289+ struct trusty_irq *trusty_irq;
3290+
3291+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
3292+
3293+ irq = trusty_irq_create_irq_mapping(is, tirq);
3294+ if (irq < 0) {
3295+ dev_err(is->dev,
3296+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
3297+ return irq;
3298+ }
3299+
3300+ trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
3301+ if (!trusty_irq)
3302+ return -ENOMEM;
3303+
3304+ trusty_irq->is = is;
3305+ trusty_irq->irq = irq;
3306+ trusty_irq->enable = true;
3307+
3308+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3309+ hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
3310+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3311+
3312+ ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
3313+ "trusty", trusty_irq);
3314+ if (ret) {
3315+ dev_err(is->dev, "request_irq failed %d\n", ret);
3316+ goto err_request_irq;
3317+ }
3318+ return 0;
3319+
3320+err_request_irq:
3321+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3322+ hlist_del(&trusty_irq->node);
3323+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3324+ kfree(trusty_irq);
3325+ return ret;
3326+}
3327+
3328+static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq,
3329+ unsigned int type)
3330+{
3331+ int ret;
3332+ int irq;
3333+ unsigned int cpu;
3334+ struct trusty_irq __percpu *trusty_irq_handler_data;
3335+
3336+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
3337+
3338+ irq = trusty_irq_create_irq_mapping(is, tirq);
3339+ if (irq <= 0) {
3340+ dev_err(is->dev,
3341+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
3342+ return irq;
3343+ }
3344+
3345+ trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
3346+ if (!trusty_irq_handler_data)
3347+ return -ENOMEM;
3348+
3349+ for_each_possible_cpu(cpu) {
3350+ struct trusty_irq *trusty_irq;
3351+ struct trusty_irq_irqset *irqset;
3352+
3353+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
3354+ irqset = per_cpu_ptr(is->percpu_irqs, cpu);
3355+
3356+ trusty_irq->is = is;
3357+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3358+ trusty_irq->irq = irq;
3359+ trusty_irq->percpu = true;
3360+ trusty_irq->doorbell = type == TRUSTY_IRQ_TYPE_DOORBELL;
3361+ trusty_irq->percpu_ptr = trusty_irq_handler_data;
3362+ }
3363+
3364+ ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
3365+ trusty_irq_handler_data);
3366+ if (ret) {
3367+ dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
3368+ goto err_request_percpu_irq;
3369+ }
3370+
3371+ return 0;
3372+
3373+err_request_percpu_irq:
3374+ for_each_possible_cpu(cpu) {
3375+ struct trusty_irq *trusty_irq;
3376+
3377+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
3378+ hlist_del(&trusty_irq->node);
3379+ }
3380+
3381+ free_percpu(trusty_irq_handler_data);
3382+ return ret;
3383+}
3384+
3385+static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
3386+ unsigned long min_irq, unsigned int type)
3387+{
3388+ return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
3389+ min_irq, type, 0);
3390+}
3391+
3392+static int trusty_irq_init_one(struct trusty_irq_state *is,
3393+ int irq, unsigned int type)
3394+{
3395+ int ret;
3396+
3397+ irq = trusty_smc_get_next_irq(is, irq, type);
3398+ if (irq < 0)
3399+ return irq;
3400+
3401+ if (type != TRUSTY_IRQ_TYPE_NORMAL)
3402+ ret = trusty_irq_init_per_cpu_irq(is, irq, type);
3403+ else
3404+ ret = trusty_irq_init_normal_irq(is, irq);
3405+
3406+ if (ret) {
3407+ dev_warn(is->dev,
3408+ "failed to initialize irq %d, irq will be ignored\n",
3409+ irq);
3410+ }
3411+
3412+ return irq + 1;
3413+}
3414+
3415+static void trusty_irq_free_irqs(struct trusty_irq_state *is)
3416+{
3417+ struct trusty_irq *irq;
3418+ struct hlist_node *n;
3419+ unsigned int cpu;
3420+
3421+ hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) {
3422+ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq);
3423+ free_irq(irq->irq, irq);
3424+ hlist_del(&irq->node);
3425+ kfree(irq);
3426+ }
3427+ hlist_for_each_entry_safe(irq, n,
3428+ &this_cpu_ptr(is->percpu_irqs)->inactive,
3429+ node) {
3430+ struct trusty_irq __percpu *trusty_irq_handler_data;
3431+
3432+ dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq);
3433+ trusty_irq_handler_data = irq->percpu_ptr;
3434+ free_percpu_irq(irq->irq, trusty_irq_handler_data);
3435+ for_each_possible_cpu(cpu) {
3436+ struct trusty_irq *irq_tmp;
3437+
3438+ irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu);
3439+ hlist_del(&irq_tmp->node);
3440+ }
3441+ free_percpu(trusty_irq_handler_data);
3442+ }
3443+}
3444+
3445+static int trusty_irq_probe(struct platform_device *pdev)
3446+{
3447+ int ret;
3448+ int irq;
3449+ unsigned long irq_flags;
3450+ struct trusty_irq_state *is;
3451+
3452+ is = kzalloc(sizeof(*is), GFP_KERNEL);
3453+ if (!is) {
3454+ ret = -ENOMEM;
3455+ goto err_alloc_is;
3456+ }
3457+
3458+ is->dev = &pdev->dev;
3459+ is->trusty_dev = is->dev->parent;
3460+ spin_lock_init(&is->normal_irqs_lock);
3461+ is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset);
3462+ if (!is->percpu_irqs) {
3463+ ret = -ENOMEM;
3464+ goto err_alloc_pending_percpu_irqs;
3465+ }
3466+
3467+ platform_set_drvdata(pdev, is);
3468+
3469+ is->trusty_call_notifier.notifier_call = trusty_irq_call_notify;
3470+ ret = trusty_call_notifier_register(is->trusty_dev,
3471+ &is->trusty_call_notifier);
3472+ if (ret) {
3473+ dev_err(&pdev->dev,
3474+ "failed to register trusty call notifier\n");
3475+ goto err_trusty_call_notifier_register;
3476+ }
3477+
3478+ for (irq = 0; irq >= 0;)
3479+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_PER_CPU);
3480+ for (irq = 0; irq >= 0;)
3481+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_NORMAL);
3482+ for (irq = 0; irq >= 0;)
3483+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_DOORBELL);
3484+
3485+ ret = cpuhp_state_add_instance(trusty_irq_cpuhp_slot, &is->cpuhp_node);
3486+ if (ret < 0) {
3487+ dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n",
3488+ ret);
3489+ goto err_add_cpuhp_instance;
3490+ }
3491+
3492+ return 0;
3493+
3494+err_add_cpuhp_instance:
3495+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3496+ trusty_irq_disable_irqset(is, &is->normal_irqs);
3497+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3498+ trusty_irq_free_irqs(is);
3499+ trusty_call_notifier_unregister(is->trusty_dev,
3500+ &is->trusty_call_notifier);
3501+err_trusty_call_notifier_register:
3502+ free_percpu(is->percpu_irqs);
3503+err_alloc_pending_percpu_irqs:
3504+ kfree(is);
3505+err_alloc_is:
3506+ return ret;
3507+}
3508+
3509+static int trusty_irq_remove(struct platform_device *pdev)
3510+{
3511+ int ret;
3512+ unsigned long irq_flags;
3513+ struct trusty_irq_state *is = platform_get_drvdata(pdev);
3514+
3515+ ret = cpuhp_state_remove_instance(trusty_irq_cpuhp_slot,
3516+ &is->cpuhp_node);
3517+ if (WARN_ON(ret))
3518+ return ret;
3519+
3520+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3521+ trusty_irq_disable_irqset(is, &is->normal_irqs);
3522+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3523+
3524+ trusty_irq_free_irqs(is);
3525+
3526+ trusty_call_notifier_unregister(is->trusty_dev,
3527+ &is->trusty_call_notifier);
3528+ free_percpu(is->percpu_irqs);
3529+ kfree(is);
3530+
3531+ return 0;
3532+}
3533+
3534+static const struct of_device_id trusty_test_of_match[] = {
3535+ { .compatible = "android,trusty-irq-v1", },
3536+ {},
3537+};
3538+
3539+MODULE_DEVICE_TABLE(of, trusty_test_of_match);
3540+
3541+static struct platform_driver trusty_irq_driver = {
3542+ .probe = trusty_irq_probe,
3543+ .remove = trusty_irq_remove,
3544+ .driver = {
3545+ .name = "trusty-irq",
3546+ .of_match_table = trusty_test_of_match,
3547+ },
3548+};
3549+
3550+static int __init trusty_irq_driver_init(void)
3551+{
3552+ int ret;
3553+
3554+ /* allocate dynamic cpuhp state slot */
3555+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
3556+ "trusty-irq:cpu:online",
3557+ trusty_irq_cpu_up,
3558+ trusty_irq_cpu_down);
3559+ if (ret < 0)
3560+ return ret;
3561+ trusty_irq_cpuhp_slot = ret;
3562+
3563+ /* Register platform driver */
3564+ ret = platform_driver_register(&trusty_irq_driver);
3565+ if (ret < 0)
3566+ goto err_driver_register;
3567+
3568+ return ret;
3569+
3570+err_driver_register:
3571+ /* undo cpuhp slot allocation */
3572+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
3573+ trusty_irq_cpuhp_slot = -1;
3574+
3575+ return ret;
3576+}
3577+
3578+static void __exit trusty_irq_driver_exit(void)
3579+{
3580+ platform_driver_unregister(&trusty_irq_driver);
3581+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
3582+ trusty_irq_cpuhp_slot = -1;
3583+}
3584+
3585+module_init(trusty_irq_driver_init);
3586+module_exit(trusty_irq_driver_exit);
3587+
3588+MODULE_LICENSE("GPL v2");
3589+MODULE_DESCRIPTION("Trusty IRQ driver");
3590diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c
3591new file mode 100644
3592index 000000000000..7b279fe63766
3593--- /dev/null
3594+++ b/drivers/trusty/trusty-log.c
3595@@ -0,0 +1,830 @@
3596+// SPDX-License-Identifier: GPL-2.0-only
3597+/*
3598+ * Copyright (C) 2015 Google, Inc.
3599+ */
3600+#include <linux/platform_device.h>
3601+#include <linux/trusty/smcall.h>
3602+#include <linux/trusty/trusty.h>
3603+#include <linux/notifier.h>
3604+#include <linux/scatterlist.h>
3605+#include <linux/slab.h>
3606+#include <linux/mm.h>
3607+#include <linux/mod_devicetable.h>
3608+#include <linux/module.h>
3609+#include <linux/moduleparam.h>
3610+#include <linux/log2.h>
3611+#include <linux/miscdevice.h>
3612+#include <linux/poll.h>
3613+#include <linux/seq_file.h>
3614+#include <asm/page.h>
3615+#include "trusty-log.h"
3616+
3617+/*
3618+ * Rationale for the chosen default log buffer size:
3619+ * - the log buffer shall contain an unthrottled Trusty crash dump.
3620+ * - the register list portion of a crash dump is about 1 KB
3621+ * - the memory-around-registers portion of a crash dump can be up to 12 KB
3622+ * - an average-size backtrace is about 1 KB
3623+ * - the average length of a non-crash trusty log line during boot is about 85 characters
3624+ * - a crash dump with 50 lines of context therefore requires up to 18 KB
3625+ * - the buffer size needs to be a power-of-two number of bytes
3626+ * - rounding 18 KB up to a power of two gives 32 KB
3627+ * The log size can be adjusted by setting the "trusty_log.log_size" parameter
3628+ * on the kernel command line. The specified value will be adjusted as needed.
3629+ */
3630+
3631+#define TRUSTY_LOG_DEFAULT_SIZE (32768)
3632+#define TRUSTY_LOG_MIN_SIZE (PAGE_SIZE / 2)
3633+#define TRUSTY_LOG_MAX_SIZE (1 * 1024 * 1024 * 1024)
3634+#define TRUSTY_LINE_BUFFER_SIZE (256)
3635+
3636+static size_t log_size_param = TRUSTY_LOG_DEFAULT_SIZE;
3637+
3638+static int trusty_log_size_set(const char *val, const struct kernel_param *kp)
3639+{
3640+ unsigned long long requested = memparse(val, NULL);
3641+
3642+ if (requested < TRUSTY_LOG_MIN_SIZE)
3643+ requested = TRUSTY_LOG_MIN_SIZE;
3644+ if (requested > TRUSTY_LOG_MAX_SIZE)
3645+ requested = TRUSTY_LOG_MAX_SIZE;
3646+ requested = rounddown_pow_of_two(requested);
3647+ log_size_param = requested;
3648+ return 0;
3649+}
3650+
3651+static int trusty_log_size_get(char *buffer, const struct kernel_param *kp)
3652+{
3653+ sprintf(buffer, "%zu", log_size_param);
3654+ return strlen(buffer);
3655+}
3656+
3657+module_param_call(log_size, trusty_log_size_set, trusty_log_size_get, NULL,
3658+ 0644);
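+
+/*
+ * Example (illustrative): the setter accepts memparse() suffixes, so
+ * booting with "trusty_log.log_size=100K" first clamps the value to
+ * [TRUSTY_LOG_MIN_SIZE, TRUSTY_LOG_MAX_SIZE] and then rounds it down to a
+ * power of two, yielding a 64 KB (65536 byte) buffer.
+ */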
3659+/*
3660+ * If we log too much and a UART or other slow source is connected, we can stall
3661+ * out another thread which is doing printk.
3662+ *
3663+ * Trusty crash logs are currently ~16 lines, so 100 should include context and
3664+ * the crash most of the time.
3665+ */
3666+static struct ratelimit_state trusty_log_rate_limit =
3667+ RATELIMIT_STATE_INIT("trusty_log", 1 * HZ, 100);
3668+
3669+/**
3670+ * struct trusty_log_sfile - trusty log misc device state
3671+ *
3672+ * @misc: misc device created for the trusty log virtual file
3673+ * @device_name: misc device name following the convention
3674+ * "trusty-<name><id>"
3675+ */
3676+struct trusty_log_sfile {
3677+ struct miscdevice misc;
3678+ char device_name[64];
3679+};
3680+
3681+/**
3682+ * struct trusty_log_sink_state - trusty log sink state
3683+ *
3684+ * @get: current read unwrapped index
3685+ * @trusty_panicked: trusty panic status at the start of the sink interation
3686+ * @trusty_panicked: trusty panic status at the start of the sink iteration
3687+ * @sfile: seq_file used for sinking to a virtual file (misc device);
3688+ * set to NULL for the kernel log sink.
3689+ * @ignore_overflow: used to coalesce overflow messages and to avoid
3690+ *                   reporting an overflow when sinking the oldest
3691+ *                   line to the virtual file (only used for virtual file sink)
3692+ *
3693+ * A sink state structure is used for both the kernel log sink
3694+ * and the virtual device sink.
3695+ * An instance of the sink state structure is dynamically created
3696+ * for each read iteration of the trusty log virtual file (misc device).
3697+ *
3698+ */
3699+struct trusty_log_sink_state {
3700+ u32 get;
3701+ bool trusty_panicked;
3702+
3703+ /* virtual file sink specific attributes */
3704+ struct seq_file *sfile;
3705+ bool ignore_overflow;
3706+};
3707+
3708+struct trusty_log_state {
3709+ struct device *dev;
3710+ struct device *trusty_dev;
3711+ struct trusty_log_sfile log_sfile;
3712+
3713+ struct log_rb *log;
3714+ struct trusty_log_sink_state klog_sink;
3715+
3716+ u32 log_num_pages;
3717+ struct scatterlist *sg;
3718+ trusty_shared_mem_id_t log_pages_shared_mem_id;
3719+
3720+ struct notifier_block call_notifier;
3721+ struct notifier_block panic_notifier;
3722+ char line_buffer[TRUSTY_LINE_BUFFER_SIZE];
3723+ wait_queue_head_t poll_waiters;
3724+	/* this lock protects access to last_wake_put */
3725+ spinlock_t wake_up_lock;
3726+ u32 last_wake_put;
3727+};
3728+
3729+static inline u32 u32_add_overflow(u32 a, u32 b)
3730+{
3731+ u32 d;
3732+
3733+ if (check_add_overflow(a, b, &d)) {
3734+ /*
3735+ * silence the overflow,
3736+		 * silence the overflow;
3737+		 * what matters in the log buffer context
3738+		 * is the wrapped (mod 2^32) addition
3739+ }
3740+ return d;
3741+}
3742+
3743+static inline u32 u32_sub_overflow(u32 a, u32 b)
3744+{
3745+ u32 d;
3746+
3747+ if (check_sub_overflow(a, b, &d)) {
3748+ /*
3749+		 * silence the overflow;
3750+		 * what matters in the log buffer context
3751+		 * is the wrapped (mod 2^32) subtraction
3752+ */
3753+ }
3754+ return d;
3755+}
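+
+/*
+ * Example of the wrapped arithmetic (illustrative): with get = 0xfffffffe
+ * and put = 3 after the producer's write index wraps, u32_sub_overflow(put,
+ * get) evaluates to 5, the correct unread byte count even though put < get.
+ */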
3756+
3757+static int log_read_line(struct trusty_log_state *s, u32 put, u32 get)
3758+{
3759+ struct log_rb *log = s->log;
3760+ int i;
3761+ char c = '\0';
3762+ size_t max_to_read =
3763+ min_t(size_t,
3764+ u32_sub_overflow(put, get),
3765+ sizeof(s->line_buffer) - 1);
3766+ size_t mask = log->sz - 1;
3767+
3768+ for (i = 0; i < max_to_read && c != '\n';) {
3769+ c = log->data[get & mask];
3770+ s->line_buffer[i++] = c;
3771+ get = u32_add_overflow(get, 1);
3772+ }
3773+ s->line_buffer[i] = '\0';
3774+
3775+ return i;
3776+}
3777+
3778+/**
3779+ * trusty_log_has_data() - returns true when more data is available to sink
3780+ * @s: Current log state.
3781+ * @sink: trusty_log_sink_state holding the get index on a given sink
3782+ *
3783+ * Return: true if data is available.
3784+ */
3785+static bool trusty_log_has_data(struct trusty_log_state *s,
3786+ struct trusty_log_sink_state *sink)
3787+{
3788+ struct log_rb *log = s->log;
3789+
3790+ return (log->put != sink->get);
3791+}
3792+
3793+/**
3794+ * trusty_log_start() - initialize the sink iteration either to kernel log
3795+ * or to secondary log_sfile
3796+ * @s: Current log state.
3797+ * @sink: trusty_log_sink_state holding the get index on a given sink
3798+ * @index: Unwrapped ring buffer index from where iteration shall start
3799+ *
3800+ * Return: 0 if successful, negative error code otherwise
3801+ */
3802+static int trusty_log_start(struct trusty_log_state *s,
3803+ struct trusty_log_sink_state *sink,
3804+ u32 index)
3805+{
3806+ struct log_rb *log;
3807+
3808+ if (WARN_ON(!s))
3809+ return -EINVAL;
3810+
3811+ log = s->log;
3812+ if (WARN_ON(!is_power_of_2(log->sz)))
3813+ return -EINVAL;
3814+
3815+ sink->get = index;
3816+ return 0;
3817+}
3818+
3819+/**
3820+ * trusty_log_show() - sink log entry at current iteration
3821+ * @s: Current log state.
3822+ * @sink: trusty_log_sink_state holding the get index on a given sink
3823+ */
3824+static void trusty_log_show(struct trusty_log_state *s,
3825+ struct trusty_log_sink_state *sink)
3826+{
3827+ struct log_rb *log = s->log;
3828+ u32 alloc, put, get;
3829+ int read_chars;
3830+
3831+ /*
3832+ * For this ring buffer, at any given point, alloc >= put >= get.
3833+ * The producer side of the buffer is not locked, so the put and alloc
3834+ * pointers must be read in a defined order (put before alloc) so
3835+ * that the above condition is maintained. A read barrier is needed
3836+ * to make sure the hardware and compiler keep the reads ordered.
3837+ */
3838+ get = sink->get;
3839+ put = log->put;
3840+
3841+ /* Make sure that the read of put occurs before the read of log data */
3842+ rmb();
3843+
3844+ /* Read a line from the log */
3845+ read_chars = log_read_line(s, put, get);
3846+
3847+ /* Force the loads from log_read_line to complete. */
3848+ rmb();
3849+ alloc = log->alloc;
3850+
3851+ /*
3852+ * Discard the line that was just read if the data could
3853+ * have been corrupted by the producer.
3854+ */
3855+ if (u32_sub_overflow(alloc, get) > log->sz) {
3856+ /*
3857+ * This condition is expected for the sfile sink when reading
3858+ * the oldest entry (at alloc - log->sz), which a new entry
3859+ * may overrun when the ring buffer write index wraps
3860+ * around.
3861+ * The overrun is therefore not reported when the oldest
3862+ * line was the one being read.
3863+ */
3864+ if (sink->sfile) {
3865+ if (!sink->ignore_overflow)
3866+ seq_puts(sink->sfile, "log overflow.\n");
3867+ /* coalesce subsequent contiguous overflows. */
3868+ sink->ignore_overflow = true;
3869+ } else {
3870+ dev_err(s->dev, "log overflow.\n");
3871+ }
3872+ sink->get = u32_sub_overflow(alloc, log->sz);
3873+ return;
3874+ }
3875+ /* compute next line index */
3876+ sink->get = u32_add_overflow(get, read_chars);
3877+ /* once a line is valid, ignore_overflow must be disabled */
3878+ sink->ignore_overflow = false;
3879+ if (sink->sfile) {
3880+ seq_printf(sink->sfile, "%s", s->line_buffer);
3881+ } else {
3882+ if (sink->trusty_panicked ||
3883+ __ratelimit(&trusty_log_rate_limit)) {
3884+ dev_info(s->dev, "%s", s->line_buffer);
3885+ }
3886+ }
3887+}
3888+
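+/*
+ * Hedged recap of the consumer-side protocol above (editor's sketch,
+ * assuming the producer bumps alloc before writing and put after):
+ *
+ *	get   = sink->get;
+ *	put   = log->put;		read put first ...
+ *	rmb();				... then the line data ...
+ *	log_read_line(s, put, get);
+ *	rmb();
+ *	alloc = log->alloc;		... then alloc
+ *	if (u32_sub_overflow(alloc, get) > log->sz)
+ *		discard: the line may have been overwritten mid-copy
+ *
+ * Since alloc >= put at all times, the alloc - get > log->sz test also
+ * catches the case where put itself has lapped the reader.
+ */
+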
3889+static void *trusty_log_seq_start(struct seq_file *sfile, loff_t *pos)
3890+{
3891+ struct trusty_log_sfile *lb;
3892+ struct trusty_log_state *s;
3893+ struct log_rb *log;
3894+ struct trusty_log_sink_state *log_sfile_sink;
3895+ u32 index;
3896+ int rc;
3897+
3898+ if (WARN_ON(!pos))
3899+ return ERR_PTR(-EINVAL);
3900+
3901+ lb = sfile->private;
3902+ if (WARN_ON(!lb))
3903+ return ERR_PTR(-EINVAL);
3904+
3905+ log_sfile_sink = kzalloc(sizeof(*log_sfile_sink), GFP_KERNEL);
3906+ if (!log_sfile_sink)
3907+ return ERR_PTR(-ENOMEM);
3908+
3909+ s = container_of(lb, struct trusty_log_state, log_sfile);
3910+ log_sfile_sink->sfile = sfile;
3911+ log = s->log;
3912+ if (*pos == 0) {
3913+ /* start at the oldest line */
3914+ index = 0;
3915+ if (log->alloc > log->sz)
3916+ index = u32_sub_overflow(log->alloc, log->sz);
3917+ } else {
3918+ /*
3919+ * '*pos > 0': pos holds the 32-bit unwrapped index from
3920+ * which to resume iterating
3921+ */
3922+ index = (u32)*pos;
3923+ }
3924+ pr_debug("%s start=%u\n", __func__, index);
3925+
3926+ log_sfile_sink->ignore_overflow = true;
3927+ rc = trusty_log_start(s, log_sfile_sink, index);
3928+ if (rc < 0)
3929+ goto free_sink;
3930+
3931+ if (!trusty_log_has_data(s, log_sfile_sink))
3932+ goto free_sink;
3933+
3934+ return log_sfile_sink;
3935+
3936+free_sink:
3937+ pr_debug("%s kfree\n", __func__);
3938+ kfree(log_sfile_sink);
3939+ return rc < 0 ? ERR_PTR(rc) : NULL;
3940+}
3941+
3942+static void *trusty_log_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
3943+{
3944+ struct trusty_log_sfile *lb;
3945+ struct trusty_log_state *s;
3946+ struct trusty_log_sink_state *log_sfile_sink = v;
3947+ int rc = 0;
3948+
3949+ if (WARN_ON(!log_sfile_sink))
3950+ return ERR_PTR(-EINVAL);
3951+
3952+ lb = sfile->private;
3953+ if (WARN_ON(!lb)) {
3954+ rc = -EINVAL;
3955+ goto end_of_iter;
3956+ }
3957+ s = container_of(lb, struct trusty_log_state, log_sfile);
3958+
3959+ if (WARN_ON(!pos)) {
3960+ rc = -EINVAL;
3961+ goto end_of_iter;
3962+ }
3963+ /*
3964+ * When starting a virtual file sink, the start function is invoked
3965+ * with a pos argument whose value is set to zero.
3966+ * Subsequent starts are invoked with pos set to
3967+ * the unwrapped read index (get).
3968+ * Upon u32 wraparound, the get index could be reset to zero.
3969+ * Thus bit 32 is set to distinguish the `get` zero value
3970+ * from the `start of file` zero value.
3971+ */
3972+ *pos = (1ULL << 32) + log_sfile_sink->get;
3973+ if (!trusty_log_has_data(s, log_sfile_sink))
3974+ goto end_of_iter;
3975+
3976+ return log_sfile_sink;
3977+
3978+end_of_iter:
3979+ pr_debug("%s kfree\n", __func__);
3980+ kfree(log_sfile_sink);
3981+ return rc < 0 ? ERR_PTR(rc) : NULL;
3982+}
3983+
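+/*
+ * Hedged example of the *pos encoding above (editor's illustration):
+ * bit 32 marks "resume" so a wrapped get index of 0 is not mistaken for
+ * a fresh start-of-file read:
+ *
+ *	loff_t pos = (1ULL << 32) + get;	resume marker + 32-bit get
+ *	u32 resumed_get = (u32)pos;		recovered on the next start
+ */
+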
3984+static void trusty_log_seq_stop(struct seq_file *sfile, void *v)
3985+{
3986+ /*
3987+ * When iteration completes or fails, the next callback frees
3988+ * the sink structure and returns NULL or an error code,
3989+ * so stop (invoked with v set to the last value next
3990+ * returned) receives NULL or an error pointer.
3991+ * When user space stops the iteration before the end
3992+ * (on a user-space memory allocation limit, for example)
3993+ * stop instead receives a non-NULL sink pointer
3994+ * and is in charge of freeing the sink structure.
3995+ */
3996+ struct trusty_log_sink_state *log_sfile_sink = v;
3997+
3998+ /* nothing to do - sink structure already freed */
3999+ if (IS_ERR_OR_NULL(log_sfile_sink))
4000+ return;
4001+
4002+ kfree(log_sfile_sink);
4003+
4004+ pr_debug("%s kfree\n", __func__);
4005+}
4006+
4007+static int trusty_log_seq_show(struct seq_file *sfile, void *v)
4008+{
4009+ struct trusty_log_sfile *lb;
4010+ struct trusty_log_state *s;
4011+ struct trusty_log_sink_state *log_sfile_sink = v;
4012+
4013+ if (WARN_ON(!log_sfile_sink))
4014+ return -EINVAL;
4015+
4016+ lb = sfile->private;
4017+ if (WARN_ON(!lb))
4018+ return -EINVAL;
4019+
4020+ s = container_of(lb, struct trusty_log_state, log_sfile);
4021+
4022+ trusty_log_show(s, log_sfile_sink);
4023+ return 0;
4024+}
4025+
4026+static void trusty_dump_logs(struct trusty_log_state *s)
4027+{
4028+ int rc;
4029+ /*
4030+ * note: klog_sink.get initialized to zero by kzalloc
4031+ */
4032+ s->klog_sink.trusty_panicked = trusty_get_panic_status(s->trusty_dev);
4033+
4034+ rc = trusty_log_start(s, &s->klog_sink, s->klog_sink.get);
4035+ if (rc < 0)
4036+ return;
4037+
4038+ while (trusty_log_has_data(s, &s->klog_sink))
4039+ trusty_log_show(s, &s->klog_sink);
4040+}
4041+
4042+static int trusty_log_call_notify(struct notifier_block *nb,
4043+ unsigned long action, void *data)
4044+{
4045+ struct trusty_log_state *s;
4046+ unsigned long flags;
4047+ u32 cur_put;
4048+
4049+ if (action != TRUSTY_CALL_RETURNED)
4050+ return NOTIFY_DONE;
4051+
4052+ s = container_of(nb, struct trusty_log_state, call_notifier);
4053+ spin_lock_irqsave(&s->wake_up_lock, flags);
4054+ cur_put = s->log->put;
4055+ if (cur_put != s->last_wake_put) {
4056+ s->last_wake_put = cur_put;
4057+ wake_up_all(&s->poll_waiters);
4058+ }
4059+ spin_unlock_irqrestore(&s->wake_up_lock, flags);
4060+ return NOTIFY_OK;
4061+}
4062+
4063+static int trusty_log_panic_notify(struct notifier_block *nb,
4064+ unsigned long action, void *data)
4065+{
4066+ struct trusty_log_state *s;
4067+
4068+ /*
4069+ * Don't grab the spin lock to hold up the panic notifier, even
4070+ * though this is racy.
4071+ */
4072+ s = container_of(nb, struct trusty_log_state, panic_notifier);
4073+ dev_info(s->dev, "panic notifier - trusty version %s",
4074+ trusty_version_str_get(s->trusty_dev));
4075+ trusty_dump_logs(s);
4076+ return NOTIFY_OK;
4077+}
4078+
4079+const struct seq_operations trusty_log_seq_ops = {
4080+ .start = trusty_log_seq_start,
4081+ .stop = trusty_log_seq_stop,
4082+ .next = trusty_log_seq_next,
4083+ .show = trusty_log_seq_show,
4084+};
4085+
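+/*
+ * Hedged summary (editor's sketch) of how the seq_file core drives the
+ * ops above for one read() call, loosely following fs/seq_file.c:
+ *
+ *	v = start(sfile, &pos);
+ *	while (!IS_ERR_OR_NULL(v)) {
+ *		show(sfile, v);			may emit one log line
+ *		v = next(sfile, v, &pos);
+ *	}
+ *	stop(sfile, v);
+ *
+ * start() allocates the sink, next() advances and eventually frees it,
+ * and stop() frees it only on early termination, as commented above.
+ */
+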
4086+static int trusty_log_sfile_dev_open(struct inode *inode, struct file *file)
4087+{
4088+ struct trusty_log_sfile *ls;
4089+ struct seq_file *sfile;
4090+ int rc;
4091+
4092+ /*
4093+ * file->private_data contains a pointer to the miscdevice struct
4094+ * passed to misc_register()
4095+ */
4096+ if (WARN_ON(!file->private_data))
4097+ return -EINVAL;
4098+
4099+ ls = container_of(file->private_data, struct trusty_log_sfile, misc);
4100+
4101+ /*
4102+ * seq_open uses file->private_data to store the seq_file associated
4103+ * with the struct file, but it must be NULL when seq_open is called
4104+ */
4105+ file->private_data = NULL;
4106+ rc = seq_open(file, &trusty_log_seq_ops);
4107+ if (rc < 0)
4108+ return rc;
4109+
4110+ sfile = file->private_data;
4111+ if (WARN_ON(!sfile))
4112+ return -EINVAL;
4113+
4114+ sfile->private = ls;
4115+ return 0;
4116+}
4117+
4118+static unsigned int trusty_log_sfile_dev_poll(struct file *filp,
4119+ struct poll_table_struct *wait)
4120+{
4121+ struct seq_file *sfile;
4122+ struct trusty_log_sfile *lb;
4123+ struct trusty_log_state *s;
4124+ struct log_rb *log;
4125+
4126+ /*
4127+ * trusty_log_sfile_dev_open() pointed filp->private_data to a
4128+ * seq_file, and that seq_file->private to the trusty_log_sfile
4129+ * field of a trusty_log_state
4130+ */
4131+ sfile = filp->private_data;
4132+ lb = sfile->private;
4133+ s = container_of(lb, struct trusty_log_state, log_sfile);
4134+ poll_wait(filp, &s->poll_waiters, wait);
4135+ log = s->log;
4136+
4137+ /*
4138+ * Userspace has read up to filp->f_pos so far. Update klog_sink
4139+ * to indicate that, so that we don't end up dumping the entire
4140+ * Trusty log in case of panic.
4141+ */
4142+ s->klog_sink.get = (u32)filp->f_pos;
4143+
4144+ if (log->put != (u32)filp->f_pos) {
4145+ /* data ready to read */
4146+ return EPOLLIN | EPOLLRDNORM;
4147+ }
4148+ /* no data available, go to sleep */
4149+ return 0;
4150+}
4151+
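+/*
+ * Hedged userspace usage sketch (editor's illustration; device name
+ * assumed to be trusty-log0):
+ *
+ *	int fd = open("/dev/trusty-log0", O_RDONLY);
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	char line[256];
+ *	while (poll(&pfd, 1, -1) > 0)
+ *		read(fd, line, sizeof(line));
+ *
+ * poll() reports EPOLLIN whenever log->put has advanced past f_pos.
+ */
+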
4152+static const struct file_operations log_sfile_dev_operations = {
4153+ .owner = THIS_MODULE,
4154+ .open = trusty_log_sfile_dev_open,
4155+ .poll = trusty_log_sfile_dev_poll,
4156+ .read = seq_read,
4157+ .release = seq_release,
4158+};
4159+
4160+static int trusty_log_sfile_register(struct trusty_log_state *s)
4161+{
4162+ int ret;
4163+ struct trusty_log_sfile *ls = &s->log_sfile;
4164+
4165+ if (WARN_ON(!ls))
4166+ return -EINVAL;
4167+
4168+ snprintf(ls->device_name, sizeof(ls->device_name),
4169+ "trusty-log%d", s->dev->id);
4170+ ls->misc.minor = MISC_DYNAMIC_MINOR;
4171+ ls->misc.name = ls->device_name;
4172+ ls->misc.fops = &log_sfile_dev_operations;
4173+
4174+ ret = misc_register(&ls->misc);
4175+ if (ret) {
4176+ dev_err(s->dev,
4177+ "log_sfile error while doing misc_register ret=%d\n",
4178+ ret);
4179+ return ret;
4180+ }
4181+ dev_info(s->dev, "/dev/%s registered\n",
4182+ ls->device_name);
4183+ return 0;
4184+}
4185+
4186+static void trusty_log_sfile_unregister(struct trusty_log_state *s)
4187+{
4188+ struct trusty_log_sfile *ls = &s->log_sfile;
4189+
4190+ misc_deregister(&ls->misc);
4191+ if (s->dev) {
4192+ dev_info(s->dev, "/dev/%s unregistered\n",
4193+ ls->misc.name);
4194+ }
4195+}
4196+
4197+static bool trusty_supports_logging(struct device *device)
4198+{
4199+ int result;
4200+
4201+ result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION,
4202+ TRUSTY_LOG_API_VERSION, 0, 0);
4203+ if (result == SM_ERR_UNDEFINED_SMC) {
4204+ dev_info(device, "trusty-log not supported on secure side.\n");
4205+ return false;
4206+ } else if (result < 0) {
4207+ dev_err(device,
4208+ "trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n",
4209+ result);
4210+ return false;
4211+ }
4212+
4213+ if (result != TRUSTY_LOG_API_VERSION) {
4214+ dev_info(device, "unsupported api version: %d, supported: %d\n",
4215+ result, TRUSTY_LOG_API_VERSION);
4216+ return false;
4217+ }
4218+ return true;
4219+}
4220+
4221+static int trusty_log_init(struct platform_device *pdev)
4222+{
4223+ struct trusty_log_state *s;
4224+ struct scatterlist *sg;
4225+ unsigned char *mem;
4226+ int i;
4227+ int result;
4228+ trusty_shared_mem_id_t mem_id;
4229+ int log_size;
4230+
4231+ s = kzalloc(sizeof(*s), GFP_KERNEL);
4232+ if (!s) {
4233+ result = -ENOMEM;
4234+ goto error_alloc_state;
4235+ }
4236+
4237+ s->dev = &pdev->dev;
4238+ s->trusty_dev = s->dev->parent;
4239+
4240+ s->log_num_pages = DIV_ROUND_UP(log_size_param + sizeof(struct log_rb),
4241+ PAGE_SIZE);
4242+ s->sg = kcalloc(s->log_num_pages, sizeof(*s->sg), GFP_KERNEL);
4243+ if (!s->sg) {
4244+ result = -ENOMEM;
4245+ goto error_alloc_sg;
4246+ }
4247+
4248+ log_size = s->log_num_pages * PAGE_SIZE;
4249+ mem = vzalloc(log_size);
4250+ if (!mem) {
4251+ result = -ENOMEM;
4252+ goto error_alloc_log;
4253+ }
4254+
4255+ s->log = (struct log_rb *)mem;
4256+
4257+ sg_init_table(s->sg, s->log_num_pages);
4258+ for_each_sg(s->sg, sg, s->log_num_pages, i) {
4259+ struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE));
4260+
4261+ if (!pg) {
4262+ result = -ENOMEM;
4263+ goto err_share_memory;
4264+ }
4265+ sg_set_page(sg, pg, PAGE_SIZE, 0);
4266+ }
4267+ /*
4268+ * This will fail for Trusty API versions < TRUSTY_API_VERSION_MEM_OBJ
4269+ * if s->log_num_pages > 1.
4270+ * Use trusty_share_memory_compat instead of trusty_share_memory in case
4271+ * s->log_num_pages == 1 and the API version < TRUSTY_API_VERSION_MEM_OBJ;
4272+ * in that case SMC_SC_SHARED_LOG_ADD expects a different value than
4273+ * the one trusty_share_memory returns.
4274+ */
4275+ result = trusty_share_memory_compat(s->trusty_dev, &mem_id, s->sg,
4276+ s->log_num_pages, PAGE_KERNEL);
4277+ if (result) {
4278+ dev_err(s->dev, "trusty_share_memory failed: %d\n", result);
4279+ goto err_share_memory;
4280+ }
4281+ s->log_pages_shared_mem_id = mem_id;
4282+
4283+ result = trusty_std_call32(s->trusty_dev,
4284+ SMC_SC_SHARED_LOG_ADD,
4285+ (u32)(mem_id), (u32)(mem_id >> 32),
4286+ log_size);
4287+ if (result < 0) {
4288+ dev_err(s->dev,
4289+ "trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d 0x%llx\n",
4290+ result, mem_id);
4291+ goto error_std_call;
4292+ }
4293+
4294+ init_waitqueue_head(&s->poll_waiters);
4295+ spin_lock_init(&s->wake_up_lock);
4296+
4297+ s->call_notifier.notifier_call = trusty_log_call_notify;
4298+ result = trusty_call_notifier_register(s->trusty_dev,
4299+ &s->call_notifier);
4300+ if (result < 0) {
4301+ dev_err(&pdev->dev,
4302+ "failed to register trusty call notifier\n");
4303+ goto error_call_notifier;
4304+ }
4305+
4306+ s->panic_notifier.notifier_call = trusty_log_panic_notify;
4307+ result = atomic_notifier_chain_register(&panic_notifier_list,
4308+ &s->panic_notifier);
4309+ if (result < 0) {
4310+ dev_err(&pdev->dev,
4311+ "failed to register panic notifier\n");
4312+ goto error_panic_notifier;
4313+ }
4314+
4315+ result = trusty_log_sfile_register(s);
4316+ if (result < 0) {
4317+ dev_err(&pdev->dev, "failed to register log_sfile\n");
4318+ goto error_log_sfile;
4319+ }
4320+
4321+ platform_set_drvdata(pdev, s);
4322+
4323+ return 0;
4324+
4325+error_log_sfile:
4326+ atomic_notifier_chain_unregister(&panic_notifier_list,
4327+ &s->panic_notifier);
4328+error_panic_notifier:
4329+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
4330+error_call_notifier:
4331+ trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
4332+ (u32)mem_id, (u32)(mem_id >> 32), 0);
4333+error_std_call:
4334+ if (WARN_ON(trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
4335+ s->log_num_pages))) {
4336+ dev_err(&pdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
4337+ result, mem_id);
4338+ /*
4339+ * It is not safe to free this memory if trusty_reclaim_memory
4340+ * fails. Leak it in that case.
4341+ */
4342+ } else {
4343+err_share_memory:
4344+ vfree(s->log);
4345+ }
4346+error_alloc_log:
4347+ kfree(s->sg);
4348+error_alloc_sg:
4349+ kfree(s);
4350+error_alloc_state:
4351+ return result;
4352+}
4353+
4354+static int trusty_log_probe(struct platform_device *pdev)
4355+{
4356+ int rc;
4357+
4358+ if (!trusty_supports_logging(pdev->dev.parent))
4359+ return -ENXIO;
4360+
4361+ rc = trusty_log_init(pdev);
4362+ if (rc && log_size_param > TRUSTY_LOG_MIN_SIZE) {
4363+ dev_warn(&pdev->dev, "init failed, retrying with 1-page log\n");
4364+ log_size_param = TRUSTY_LOG_MIN_SIZE;
4365+ rc = trusty_log_init(pdev);
4366+ }
4367+ return rc;
4368+}
4369+
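+/*
+ * Hedged sizing example for the retry above (editor's illustration,
+ * assuming a 256 KiB log_size_param and 4 KiB pages): log_num_pages =
+ * DIV_ROUND_UP(262144 + sizeof(struct log_rb), 4096) = 65 pages; if
+ * sharing that many pages fails (e.g. on a pre-MEM_OBJ API), the probe
+ * retries once with the one-page TRUSTY_LOG_MIN_SIZE buffer.
+ */
+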
4370+static int trusty_log_remove(struct platform_device *pdev)
4371+{
4372+ int result;
4373+ struct trusty_log_state *s = platform_get_drvdata(pdev);
4374+ trusty_shared_mem_id_t mem_id = s->log_pages_shared_mem_id;
4375+
4376+ trusty_log_sfile_unregister(s);
4377+ atomic_notifier_chain_unregister(&panic_notifier_list,
4378+ &s->panic_notifier);
4379+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
4380+
4381+ result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
4382+ (u32)mem_id, (u32)(mem_id >> 32), 0);
4383+ if (result) {
4384+ dev_err(&pdev->dev,
4385+ "trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n",
4386+ result);
4387+ }
4388+ result = trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
4389+ s->log_num_pages);
4390+ if (WARN_ON(result)) {
4391+ dev_err(&pdev->dev,
4392+ "trusty failed to remove shared memory: %d\n", result);
4393+ } else {
4394+ /*
4395+ * It is not safe to free this memory if trusty_reclaim_memory
4396+ * fails. Leak it in that case.
4397+ */
4398+ vfree(s->log);
4399+ }
4400+ kfree(s->sg);
4401+ kfree(s);
4402+
4403+ return 0;
4404+}
4405+
4406+static const struct of_device_id trusty_log_of_match[] = {
4407+ { .compatible = "android,trusty-log-v1", },
4408+ {},
4409+};
4410+
4411+MODULE_DEVICE_TABLE(of, trusty_log_of_match);
4412+
4413+static struct platform_driver trusty_log_driver = {
4414+ .probe = trusty_log_probe,
4415+ .remove = trusty_log_remove,
4416+ .driver = {
4417+ .name = "trusty-log",
4418+ .of_match_table = trusty_log_of_match,
4419+ },
4420+};
4421+
4422+module_platform_driver(trusty_log_driver);
4423+
4424+MODULE_LICENSE("GPL v2");
4425+MODULE_DESCRIPTION("Trusty logging driver");
4426diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h
4427new file mode 100644
4428index 000000000000..7b5e6096b51e
4429--- /dev/null
4430+++ b/drivers/trusty/trusty-log.h
4431@@ -0,0 +1,28 @@
4432+/* SPDX-License-Identifier: MIT */
4433+/*
4434+ * Copyright (c) 2015 Google, Inc.
4435+ *
4436+ * Trusty also has a copy of this header. Please keep the copies in sync.
4437+ */
4438+#ifndef _TRUSTY_LOG_H_
4439+#define _TRUSTY_LOG_H_
4440+
4441+/*
4442+ * Ring buffer that supports one secure producer thread and one
4443+ * linux side consumer thread.
4444+ */
4445+struct log_rb {
4446+ volatile uint32_t alloc;
4447+ volatile uint32_t put;
4448+ uint32_t sz;
4449+ volatile char data[];
4450+} __packed;
4451+
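+/*
+ * Hedged layout sketch (editor's illustration): alloc and put are
+ * free-running 32-bit indices into data[]; the secure producer advances
+ * alloc, writes, then advances put, so alloc >= put >= any consumer get:
+ *
+ *	data[get & (sz - 1)]	oldest byte not yet read by a consumer
+ *	data[put & (sz - 1)]	first byte not yet published
+ *	data[alloc & (sz - 1)]	first byte not yet reserved
+ */
+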
4452+#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0)
4453+#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1)
4454+#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2)
4455+
4456+#define TRUSTY_LOG_API_VERSION 1
4457+
4458+#endif
4459+
4460diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c
4461new file mode 100644
4462index 000000000000..8a360298e501
4463--- /dev/null
4464+++ b/drivers/trusty/trusty-mem.c
4465@@ -0,0 +1,139 @@
4466+// SPDX-License-Identifier: GPL-2.0-only
4467+/*
4468+ * Copyright (C) 2015 Google, Inc.
4469+ */
4470+
4471+#include <linux/types.h>
4472+#include <linux/printk.h>
4473+#include <linux/trusty/arm_ffa.h>
4474+#include <linux/trusty/trusty.h>
4475+#include <linux/trusty/smcall.h>
4476+
4477+#define MEM_ATTR_STRONGLY_ORDERED (0x00U)
4478+#define MEM_ATTR_DEVICE (0x04U)
4479+#define MEM_ATTR_NORMAL_NON_CACHEABLE (0x44U)
4480+#define MEM_ATTR_NORMAL_WRITE_THROUGH (0xAAU)
4481+#define MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE (0xEEU)
4482+#define MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE (0xFFU)
4483+
4484+#define ATTR_RDONLY (1U << 7)
4485+#define ATTR_INNER_SHAREABLE (3U << 8)
4486+
4487+static int get_mem_attr(struct page *page, pgprot_t pgprot)
4488+{
4489+#if defined(CONFIG_ARM64)
4490+ u64 mair;
4491+ unsigned int attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
4492+
4493+ asm ("mrs %0, mair_el1\n" : "=&r" (mair));
4494+ return (mair >> (attr_index * 8)) & 0xff;
4495+
4496+#elif defined(CONFIG_ARM_LPAE)
4497+ u32 mair;
4498+ unsigned int attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
4499+
4500+ if (attr_index >= 4) {
4501+ attr_index -= 4;
4502+ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
4503+ } else {
4504+ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
4505+ }
4506+ return (mair >> (attr_index * 8)) & 0xff;
4507+
4508+#elif defined(CONFIG_ARM)
4509+ /* check memory type */
4510+ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
4511+ case L_PTE_MT_WRITEALLOC:
4512+ return MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE;
4513+
4514+ case L_PTE_MT_BUFFERABLE:
4515+ return MEM_ATTR_NORMAL_NON_CACHEABLE;
4516+
4517+ case L_PTE_MT_WRITEBACK:
4518+ return MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE;
4519+
4520+ case L_PTE_MT_WRITETHROUGH:
4521+ return MEM_ATTR_NORMAL_WRITE_THROUGH;
4522+
4523+ case L_PTE_MT_UNCACHED:
4524+ return MEM_ATTR_STRONGLY_ORDERED;
4525+
4526+ case L_PTE_MT_DEV_SHARED:
4527+ case L_PTE_MT_DEV_NONSHARED:
4528+ return MEM_ATTR_DEVICE;
4529+
4530+ default:
4531+ return -EINVAL;
4532+ }
4533+#else
4534+ return 0;
4535+#endif
4536+}
4537+
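+/*
+ * Hedged arm64 example for the lookup above (editor's illustration):
+ * if the AttrIndx decoded from the pgprot is n and byte n of MAIR_EL1
+ * is 0xff, the function returns 0xff, i.e.
+ * MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE; a byte of 0x04 would map
+ * to MEM_ATTR_DEVICE instead.
+ */
+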
4538+int trusty_encode_page_info(struct ns_mem_page_info *inf,
4539+ struct page *page, pgprot_t pgprot)
4540+{
4541+ int mem_attr;
4542+ u64 pte;
4543+ u8 ffa_mem_attr;
4544+ u8 ffa_mem_perm = 0;
4545+
4546+ if (!inf || !page)
4547+ return -EINVAL;
4548+
4549+ /* get physical address */
4550+ pte = (u64)page_to_phys(page);
4551+
4552+ /* get memory attributes */
4553+ mem_attr = get_mem_attr(page, pgprot);
4554+ if (mem_attr < 0)
4555+ return mem_attr;
4556+
4557+ switch (mem_attr) {
4558+ case MEM_ATTR_STRONGLY_ORDERED:
4559+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE;
4560+ break;
4561+
4562+ case MEM_ATTR_DEVICE:
4563+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE;
4564+ break;
4565+
4566+ case MEM_ATTR_NORMAL_NON_CACHEABLE:
4567+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED;
4568+ break;
4569+
4570+ case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE:
4571+ case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE:
4572+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB;
4573+ break;
4574+
4575+ default:
4576+ return -EINVAL;
4577+ }
4578+
4579+ inf->paddr = pte;
4580+
4581+ /* add other attributes */
4582+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
4583+ pte |= pgprot_val(pgprot);
4584+#elif defined(CONFIG_ARM)
4585+ if (pgprot_val(pgprot) & L_PTE_RDONLY)
4586+ pte |= ATTR_RDONLY;
4587+ if (pgprot_val(pgprot) & L_PTE_SHARED)
4588+ pte |= ATTR_INNER_SHAREABLE; /* inner shareable */
4589+#endif
4590+
4591+ if (!(pte & ATTR_RDONLY))
4592+ ffa_mem_perm |= FFA_MEM_PERM_RW;
4593+ else
4594+ ffa_mem_perm |= FFA_MEM_PERM_RO;
4595+
4596+ if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE)
4597+ ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE;
4598+
4599+ inf->ffa_mem_attr = ffa_mem_attr;
4600+ inf->ffa_mem_perm = ffa_mem_perm;
4601+ inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) |
4602+ ((u64)mem_attr << 48);
4603+ return 0;
4604+}
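+
+/*
+ * Hedged packing example for compat_attr above (editor's illustration):
+ * a page at physical address 0x80000000 with mem_attr 0xff encodes as
+ *
+ *	(0x80000000ull & 0x0000FFFFFFFFFFFFull) | (0xffull << 48)
+ *		== 0x00ff000080000000
+ *
+ * low 48 bits carry the PTE/address bits, the top 16 the 8-bit memory
+ * attribute.
+ */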
4605diff --git a/drivers/trusty/trusty-smc-arm.S b/drivers/trusty/trusty-smc-arm.S
4606new file mode 100644
4607index 000000000000..8ff83547d33f
4608--- /dev/null
4609+++ b/drivers/trusty/trusty-smc-arm.S
4610@@ -0,0 +1,41 @@
4611+/* SPDX-License-Identifier: GPL-2.0-only */
4612+/*
4613+ * Copyright (C) 2020 Google, Inc.
4614+ */
4615+
4616+#include <linux/linkage.h>
4617+
4618+.arch_extension sec
4619+
4620+ENTRY(trusty_smc8)
4621+ /* Save stack location where r3-r7 smc arguments are stored */
4622+ mov r12, sp
4623+
4624+ /* Save original r4-r7 values as caller expects these to be preserved */
4625+ push {r4-r7}
4626+
4627+ /* Save return value pointer and return address */
4628+ push {r0, lr}
4629+
4630+ /* arm abi shifts arguments when returning a struct, shift them back */
4631+ mov r0, r1
4632+ mov r1, r2
4633+ mov r2, r3
4634+
4635+ /* Load stack based arguments */
4636+ ldmia r12, {r3-r7}
4637+
4638+ smc #0
4639+
4640+ /* Restore return address and get return value pointer */
4641+ pop {r12, lr}
4642+
4643+ /* Copy 8-register smc return value to struct smc_ret8 return value */
4644+ stmia r12, {r0-r7}
4645+
4646+ /* Restore original r4-r7 values */
4647+ pop {r4-r7}
4648+
4649+ /* Return */
4650+ bx lr
4651+ENDPROC(trusty_smc8)
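+
+/*
+ * Hedged note on the register shuffle above (editor's): under the
+ * 32-bit AAPCS a struct this large is returned via a hidden pointer in
+ * r0, so the eight SMC arguments arrive in r1-r3 plus the stack; the
+ * code moves r1-r3 down to r0-r2 and loads r3-r7 from the saved stack
+ * pointer before issuing smc #0.
+ */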
4652diff --git a/drivers/trusty/trusty-smc-arm64.S b/drivers/trusty/trusty-smc-arm64.S
4653new file mode 100644
4654index 000000000000..14c8fed28a5e
4655--- /dev/null
4656+++ b/drivers/trusty/trusty-smc-arm64.S
4657@@ -0,0 +1,35 @@
4658+/* SPDX-License-Identifier: GPL-2.0-only */
4659+/*
4660+ * Copyright (C) 2020 Google, Inc.
4661+ */
4662+
4663+#include <linux/linkage.h>
4664+
4665+.macro push ra, rb
4666+stp \ra, \rb, [sp,#-16]!
4667+.endm
4668+
4669+.macro pop ra, rb
4670+ldp \ra, \rb, [sp], #16
4671+.endm
4672+
4673+lr .req x30
4674+
4675+SYM_FUNC_START(trusty_smc8)
4676+ /*
4677+ * Save x8 (return value ptr) and lr. The SMC calling convention says el3
4678+ * does not need to preserve x8. The normal ABI does not require either x8
4679+ * or lr to be preserved.
4680+ */
4681+ push x8, lr
4682+ smc #0
4683+ pop x8, lr
4684+
4685+ /* Copy 8-register smc return value to struct smc_ret8 return value */
4686+ stp x0, x1, [x8], #16
4687+ stp x2, x3, [x8], #16
4688+ stp x4, x5, [x8], #16
4689+ stp x6, x7, [x8], #16
4690+
4691+ ret
4692+SYM_FUNC_END(trusty_smc8)
4693diff --git a/drivers/trusty/trusty-smc.h b/drivers/trusty/trusty-smc.h
4694new file mode 100644
4695index 000000000000..b53e5abb4d05
4696--- /dev/null
4697+++ b/drivers/trusty/trusty-smc.h
4698@@ -0,0 +1,26 @@
4699+/* SPDX-License-Identifier: GPL-2.0-only */
4700+/*
4701+ * Copyright (C) 2020 Google, Inc.
4702+ */
4703+#ifndef _TRUSTY_SMC_H
4704+#define _TRUSTY_SMC_H
4705+
4706+#include <linux/types.h>
4707+
4708+struct smc_ret8 {
4709+ unsigned long r0;
4710+ unsigned long r1;
4711+ unsigned long r2;
4712+ unsigned long r3;
4713+ unsigned long r4;
4714+ unsigned long r5;
4715+ unsigned long r6;
4716+ unsigned long r7;
4717+};
4718+
4719+struct smc_ret8 trusty_smc8(unsigned long r0, unsigned long r1,
4720+ unsigned long r2, unsigned long r3,
4721+ unsigned long r4, unsigned long r5,
4722+ unsigned long r6, unsigned long r7);
4723+
4724+#endif /* _TRUSTY_SMC_H */
4725diff --git a/drivers/trusty/trusty-test.c b/drivers/trusty/trusty-test.c
4726new file mode 100644
4727index 000000000000..844868981fa5
4728--- /dev/null
4729+++ b/drivers/trusty/trusty-test.c
4730@@ -0,0 +1,440 @@
4731+// SPDX-License-Identifier: GPL-2.0-only
4732+/*
4733+ * Copyright (C) 2020 Google, Inc.
4734+ */
4735+
4736+#include <linux/ctype.h>
4737+#include <linux/list.h>
4738+#include <linux/platform_device.h>
4739+#include <linux/trusty/smcall.h>
4740+#include <linux/trusty/trusty.h>
4741+#include <linux/scatterlist.h>
4742+#include <linux/slab.h>
4743+#include <linux/mm.h>
4744+#include <linux/mod_devicetable.h>
4745+#include <linux/module.h>
4746+
4747+#include "trusty-test.h"
4748+
4749+struct trusty_test_state {
4750+ struct device *dev;
4751+ struct device *trusty_dev;
4752+};
4753+
4754+struct trusty_test_shmem_obj {
4755+ struct list_head node;
4756+ size_t page_count;
4757+ struct page **pages;
4758+ void *buf;
4759+ struct sg_table sgt;
4760+ trusty_shared_mem_id_t mem_id;
4761+};
4762+
4763+/*
4764+ * Allocate a test object with @page_count pages, map it, and add it to
4765+ * @list.
4766+ * For multi-page allocations, order the pages so they are not contiguous.
4767+ */
4768+static int trusty_test_alloc_obj(struct trusty_test_state *s,
4769+ size_t page_count,
4770+ struct list_head *list)
4771+{
4772+ size_t i;
4773+ int ret = -ENOMEM;
4774+ struct trusty_test_shmem_obj *obj;
4775+
4776+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4777+ if (!obj)
4778+ goto err_alloc_obj;
4779+ obj->page_count = page_count;
4780+
4781+ obj->pages = kmalloc_array(page_count, sizeof(*obj->pages), GFP_KERNEL);
4782+ if (!obj->pages) {
4783+ ret = -ENOMEM;
4784+ dev_err(s->dev, "failed to allocate page array, count %zd\n",
4785+ page_count);
4786+ goto err_alloc_pages;
4787+ }
4788+
4789+ for (i = 0; i < page_count; i++) {
4790+ obj->pages[i] = alloc_page(GFP_KERNEL);
4791+ if (!obj->pages[i]) {
4792+ ret = -ENOMEM;
4793+ dev_err(s->dev, "failed to allocate page %zd/%zd\n",
4794+ i, page_count);
4795+ goto err_alloc_page;
4796+ }
4797+ if (i > 0 && obj->pages[i - 1] + 1 == obj->pages[i]) {
4798+ /* swap adacent pages to increase fragmentation */
4799+ swap(obj->pages[i - 1], obj->pages[i]);
4800+ }
4801+ }
4802+
4803+ obj->buf = vmap(obj->pages, page_count, VM_MAP, PAGE_KERNEL);
4804+ if (!obj->buf) {
4805+ ret = -ENOMEM;
4806+ dev_err(s->dev, "failed to map test buffer page count %zd\n",
4807+ page_count);
4808+ goto err_map_pages;
4809+ }
4810+
4811+ ret = sg_alloc_table_from_pages(&obj->sgt, obj->pages, page_count,
4812+ 0, page_count * PAGE_SIZE, GFP_KERNEL);
4813+ if (ret) {
4814+ dev_err(s->dev, "sg_alloc_table_from_pages failed: %d\n", ret);
4815+ goto err_alloc_sgt;
4816+ }
4817+ list_add_tail(&obj->node, list);
4818+ dev_dbg(s->dev, "buffer has %d page runs\n", obj->sgt.nents);
4819+ return 0;
4820+
4821+err_alloc_sgt:
4822+ vunmap(obj->buf);
4823+err_map_pages:
4824+ for (i = page_count; i > 0; i--) {
4825+ __free_page(obj->pages[i - 1]);
4826+err_alloc_page:
4827+ ;
4828+ }
4829+ kfree(obj->pages);
4830+err_alloc_pages:
4831+ kfree(obj);
4832+err_alloc_obj:
4833+ return ret;
4834+}
4835+
4836+/* Unlink, unmap and free a test object and its pages */
4837+static void trusty_test_free_obj(struct trusty_test_state *s,
4838+ struct trusty_test_shmem_obj *obj)
4839+{
4840+ size_t i;
4841+
4842+ list_del(&obj->node);
4843+ sg_free_table(&obj->sgt);
4844+ vunmap(obj->buf);
4845+ for (i = obj->page_count; i > 0; i--)
4846+ __free_page(obj->pages[i - 1]);
4847+ kfree(obj->pages);
4848+ kfree(obj);
4849+}
4850+
4851+/*
4852+ * Share all the pages of all the test objects in @obj_list.
4853+ * If sharing a test object fails, free it so that every test object that
4854+ * remains in @obj_list has been shared when this function returns.
4855+ * Return an error if any test object failed to be shared.
4856+ */
4857+static int trusty_test_share_objs(struct trusty_test_state *s,
4858+ struct list_head *obj_list, size_t size)
4859+{
4860+ int ret = 0;
4861+ int tmpret;
4862+ struct trusty_test_shmem_obj *obj;
4863+ struct trusty_test_shmem_obj *next_obj;
4864+ ktime_t t1;
4865+ ktime_t t2;
4866+
4867+ list_for_each_entry_safe(obj, next_obj, obj_list, node) {
4868+ t1 = ktime_get();
4869+ tmpret = trusty_share_memory(s->trusty_dev, &obj->mem_id,
4870+ obj->sgt.sgl, obj->sgt.nents,
4871+ PAGE_KERNEL);
4872+ t2 = ktime_get();
4873+ if (tmpret) {
4874+ ret = tmpret;
4875+ dev_err(s->dev,
4876+ "trusty_share_memory failed: %d, size=%zd\n",
4877+ ret, size);
4878+
4879+ /*
4880+ * Free obj and continue, so we can revoke the
4881+ * whole list in trusty_test_reclaim_objs.
4882+ */
4883+ trusty_test_free_obj(s, obj);
4884+ }
4885+ dev_dbg(s->dev, "share id=0x%llx, size=%zu took %lld ns\n",
4886+ obj->mem_id, size,
4887+ ktime_to_ns(ktime_sub(t2, t1)));
4888+ }
4889+
4890+ return ret;
4891+}
4892+
4893+/* Reclaim memory shared with trusty for all test objects in @obj_list. */
4894+static int trusty_test_reclaim_objs(struct trusty_test_state *s,
4895+ struct list_head *obj_list, size_t size)
4896+{
4897+ int ret = 0;
4898+ int tmpret;
4899+ struct trusty_test_shmem_obj *obj;
4900+ struct trusty_test_shmem_obj *next_obj;
4901+ ktime_t t1;
4902+ ktime_t t2;
4903+
4904+ list_for_each_entry_safe(obj, next_obj, obj_list, node) {
4905+ t1 = ktime_get();
4906+ tmpret = trusty_reclaim_memory(s->trusty_dev, obj->mem_id,
4907+ obj->sgt.sgl, obj->sgt.nents);
4908+ t2 = ktime_get();
4909+ if (tmpret) {
4910+ ret = tmpret;
4911+ dev_err(s->dev,
4912+ "trusty_reclaim_memory failed: %d, id=0x%llx\n",
4913+ ret, obj->mem_id);
4914+
4915+ /*
4916+ * It is not safe to free this memory if
4917+ * trusty_reclaim_memory fails. Leak it in that
4918+ * case.
4919+ */
4920+ list_del(&obj->node);
4921+ }
4922+ dev_dbg(s->dev, "revoke id=0x%llx, size=%zu took %lld ns\n",
4923+ obj->mem_id, size,
4924+ ktime_to_ns(ktime_sub(t2, t1)));
4925+ }
4926+
4927+ return ret;
4928+}
4929+
4930+/*
4931+ * Test a test object. First, initialize the memory, then make a std call into
4932+ * trusty which will read it and return an error if the initialized value does
4933+ * not match what it expects. If trusty reads the correct values, it will modify
4934+ * the memory and return 0. This function then checks that it can read the
4935+ * correct modified value.
4936+ */
4937+static int trusty_test_rw(struct trusty_test_state *s,
4938+ struct trusty_test_shmem_obj *obj)
4939+{
4940+ size_t size = obj->page_count * PAGE_SIZE;
4941+ int ret;
4942+ size_t i;
4943+ u64 *buf = obj->buf;
4944+ ktime_t t1;
4945+ ktime_t t2;
4946+
4947+ for (i = 0; i < size / sizeof(*buf); i++)
4948+ buf[i] = i;
4949+
4950+ t1 = ktime_get();
4951+ ret = trusty_std_call32(s->trusty_dev, SMC_SC_TEST_SHARED_MEM_RW,
4952+ (u32)(obj->mem_id), (u32)(obj->mem_id >> 32),
4953+ size);
4954+ t2 = ktime_get();
4955+ if (ret < 0) {
4956+ dev_err(s->dev,
4957+ "trusty std call (SMC_SC_TEST_SHARED_MEM_RW) failed: %d 0x%llx\n",
4958+ ret, obj->mem_id);
4959+ return ret;
4960+ }
4961+
4962+ for (i = 0; i < size / sizeof(*buf); i++) {
4963+ if (buf[i] != size - i) {
4964+ dev_err(s->dev,
4965+ "input mismatch at %zd, got 0x%llx instead of 0x%zx\n",
4966+ i, buf[i], size - i);
4967+ return -EIO;
4968+ }
4969+ }
4970+
4971+ dev_dbg(s->dev, "rw id=0x%llx, size=%zu took %lld ns\n", obj->mem_id,
4972+ size, ktime_to_ns(ktime_sub(t2, t1)));
4973+
4974+ return 0;
4975+}
4976+
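+/*
+ * Hedged data-pattern example for the check above (editor's
+ * illustration, size = 32 bytes, i.e. four u64 slots): the kernel
+ * writes {0, 1, 2, 3}; a correct secure side reads those values and
+ * writes back {32, 31, 30, 29}, i.e. buf[i] == size - i.
+ */
+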
4977+/*
4978+ * Run test on every test object in @obj_list. Repeat @repeat_access times.
4979+ */
4980+static int trusty_test_rw_objs(struct trusty_test_state *s,
4981+ struct list_head *obj_list,
4982+ size_t repeat_access)
4983+{
4984+ int ret;
4985+ size_t i;
4986+ struct trusty_test_shmem_obj *obj;
4987+
4988+ for (i = 0; i < repeat_access; i++) {
4989+ /*
4990+ * Repeat the test in case the memory attributes don't match
4991+ * and either side sees stale data.
4992+ */
4993+ list_for_each_entry(obj, obj_list, node) {
4994+ ret = trusty_test_rw(s, obj);
4995+ if (ret)
4996+ return ret;
4997+ }
4998+ }
4999+
5000+ return 0;
5001+}
5002+
5003+/*
5004+ * Allocate @obj_count test objects that each have @page_count pages. Share each
5005+ * object @repeat_share times, each time running tests on every object
5006+ * @repeat_access times.
5007+ */
5008+static int trusty_test_run(struct trusty_test_state *s, size_t page_count,
5009+ size_t obj_count, size_t repeat_share,
5010+ size_t repeat_access)
5011+{
5012+ int ret = 0;
5013+ int tmpret;
5014+ size_t i;
5015+ size_t size = page_count * PAGE_SIZE;
5016+ LIST_HEAD(obj_list);
5017+ struct trusty_test_shmem_obj *obj;
5018+ struct trusty_test_shmem_obj *next_obj;
5019+
5020+ for (i = 0; i < obj_count && !ret; i++)
5021+ ret = trusty_test_alloc_obj(s, page_count, &obj_list);
5022+
5023+ for (i = 0; i < repeat_share && !ret; i++) {
5024+ ret = trusty_test_share_objs(s, &obj_list, size);
5025+ if (ret) {
5026+ dev_err(s->dev,
5027+ "trusty_share_memory failed: %d, i=%zd/%zd, size=%zd\n",
5028+ ret, i, repeat_share, size);
5029+ } else {
5030+ ret = trusty_test_rw_objs(s, &obj_list, repeat_access);
5031+ if (ret)
5032+ dev_err(s->dev,
5033+ "test failed: %d, i=%zd/%zd, size=%zd\n",
5034+ ret, i, repeat_share, size);
5035+ }
5036+ tmpret = trusty_test_reclaim_objs(s, &obj_list, size);
5037+ if (tmpret) {
5038+ ret = tmpret;
5039+ dev_err(s->dev,
5040+ "trusty_reclaim_memory failed: %d, i=%zd/%zd\n",
5041+ ret, i, repeat_share);
5042+ }
5043+ }
5044+
5045+ list_for_each_entry_safe(obj, next_obj, &obj_list, node)
5046+ trusty_test_free_obj(s, obj);
5047+
5048+ dev_info(s->dev, "[ %s ] size %zd, obj_count %zd, repeat_share %zd, repeat_access %zd\n",
5049+ ret ? "FAILED" : "PASSED", size, obj_count, repeat_share,
5050+ repeat_access);
5051+
5052+ return ret;
5053+}
5054+
5055+/*
5056+ * Get an optional numeric argument from @buf, update @buf and return the value.
5057+ * If @buf does not start with ",", return @default_val instead.
5058+ */
5059+static size_t trusty_test_get_arg(const char **buf, size_t default_val)
5060+{
5061+ char *buf_next;
5062+ size_t ret;
5063+
5064+ if (**buf != ',')
5065+ return default_val;
5066+
5067+ (*buf)++;
5068+ ret = simple_strtoul(*buf, &buf_next, 0);
5069+ if (buf_next == *buf)
5070+ return default_val;
5071+
5072+ *buf = buf_next;
5073+
5074+ return ret;
5075+}
5076+
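+/*
+ * Hedged parsing example for the helper above (editor's illustration):
+ * with buf = ",4,2" and default_val = 3 the first call consumes ",4"
+ * and returns 4; a second call on the remaining ",2" returns 2; a call
+ * on "" (or anything not starting with ',') returns the default 3.
+ */
+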
5077+/*
5078+ * Run tests described by a string in this format:
5079+ * <obj_size>,<obj_count=1>,<repeat_share=1>,<repeat_access=3>
5080+ */
5081+static ssize_t trusty_test_run_store(struct device *dev,
5082+ struct device_attribute *attr,
5083+ const char *buf, size_t count)
5084+{
5085+ struct platform_device *pdev = to_platform_device(dev);
5086+ struct trusty_test_state *s = platform_get_drvdata(pdev);
5087+ size_t size;
5088+ size_t obj_count;
5089+ size_t repeat_share;
5090+ size_t repeat_access;
5091+ int ret;
5092+ char *buf_next;
5093+
5094+ while (true) {
5095+ while (isspace(*buf))
5096+ buf++;
5097+ size = simple_strtoul(buf, &buf_next, 0);
5098+ if (buf_next == buf)
5099+ return count;
5100+ buf = buf_next;
5101+ obj_count = trusty_test_get_arg(&buf, 1);
5102+ repeat_share = trusty_test_get_arg(&buf, 1);
5103+ repeat_access = trusty_test_get_arg(&buf, 3);
5104+
5105+ ret = trusty_test_run(s, DIV_ROUND_UP(size, PAGE_SIZE),
5106+ obj_count, repeat_share, repeat_access);
5107+ if (ret)
5108+ return ret;
5109+ }
5110+}
5111+
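+/*
+ * Hedged shell usage sketch (editor's illustration; sysfs path
+ * abbreviated):
+ *
+ *	# one 64 KiB object, shared once, accessed 3 times (defaults)
+ *	echo 65536 > /sys/devices/.../trusty_test_run
+ *	# 4 MiB objects x8, 2 share rounds, 5 access rounds each
+ *	echo "4194304,8,2,5" > /sys/devices/.../trusty_test_run
+ */
+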
5112+static DEVICE_ATTR_WO(trusty_test_run);
5113+
5114+static struct attribute *trusty_test_attrs[] = {
5115+ &dev_attr_trusty_test_run.attr,
5116+ NULL,
5117+};
5118+ATTRIBUTE_GROUPS(trusty_test);
5119+
5120+static int trusty_test_probe(struct platform_device *pdev)
5121+{
5122+ struct trusty_test_state *s;
5123+ int ret;
5124+
5125+ ret = trusty_std_call32(pdev->dev.parent, SMC_SC_TEST_VERSION,
5126+ TRUSTY_STDCALLTEST_API_VERSION, 0, 0);
5127+ if (ret != TRUSTY_STDCALLTEST_API_VERSION)
5128+ return -ENOENT;
5129+
5130+ s = kzalloc(sizeof(*s), GFP_KERNEL);
5131+ if (!s)
5132+ return -ENOMEM;
5133+
5134+ s->dev = &pdev->dev;
5135+ s->trusty_dev = s->dev->parent;
5136+
5137+ platform_set_drvdata(pdev, s);
5138+
5139+ return 0;
5140+}
5141+
5142+static int trusty_test_remove(struct platform_device *pdev)
5143+{
5144+ struct trusty_test_state *s = platform_get_drvdata(pdev);
5145+
5146+ kfree(s);
5147+ return 0;
5148+}
5149+
5150+static const struct of_device_id trusty_test_of_match[] = {
5151+ { .compatible = "android,trusty-test-v1", },
5152+ {},
5153+};
5154+
5155+MODULE_DEVICE_TABLE(of, trusty_test_of_match);
5156+
5157+static struct platform_driver trusty_test_driver = {
5158+ .probe = trusty_test_probe,
5159+ .remove = trusty_test_remove,
5160+ .driver = {
5161+ .name = "trusty-test",
5162+ .of_match_table = trusty_test_of_match,
5163+ .dev_groups = trusty_test_groups,
5164+ },
5165+};
5166+
5167+module_platform_driver(trusty_test_driver);
5168+
5169+MODULE_LICENSE("GPL v2");
5170+MODULE_DESCRIPTION("Trusty test driver");
5171diff --git a/drivers/trusty/trusty-test.h b/drivers/trusty/trusty-test.h
5172new file mode 100644
5173index 000000000000..eea7beb96876
5174--- /dev/null
5175+++ b/drivers/trusty/trusty-test.h
5176@@ -0,0 +1,13 @@
5177+/* SPDX-License-Identifier: GPL-2.0-only */
5178+/*
5179+ * Copyright (c) 2020 Google, Inc.
5180+ */
5181+#ifndef _TRUSTY_TEST_H
5182+#define _TRUSTY_TEST_H
5183+
5184+#define SMC_SC_TEST_VERSION SMC_STDCALL_NR(SMC_ENTITY_TEST, 0)
5185+#define SMC_SC_TEST_SHARED_MEM_RW SMC_STDCALL_NR(SMC_ENTITY_TEST, 1)
5186+
5187+#define TRUSTY_STDCALLTEST_API_VERSION 1
5188+
5189+#endif /* _TRUSTY_TEST_H */
5190diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c
5191new file mode 100644
5192index 000000000000..fea59cd2e218
5193--- /dev/null
5194+++ b/drivers/trusty/trusty-virtio.c
5195@@ -0,0 +1,840 @@
5196+// SPDX-License-Identifier: GPL-2.0-only
5197+/*
5198+ * Trusty Virtio driver
5199+ *
5200+ * Copyright (C) 2015 Google, Inc.
5201+ */
5202+#include <linux/device.h>
5203+#include <linux/err.h>
5204+#include <linux/kernel.h>
5205+
5206+#include <linux/dma-map-ops.h>
5207+#include <linux/module.h>
5208+#include <linux/mutex.h>
5209+#include <linux/notifier.h>
5210+#include <linux/workqueue.h>
5211+#include <linux/remoteproc.h>
5212+#include <linux/slab.h>
5213+
5214+#include <linux/platform_device.h>
5215+#include <linux/trusty/smcall.h>
5216+#include <linux/trusty/trusty.h>
5217+#include <linux/trusty/trusty_ipc.h>
5218+
5219+#include <linux/virtio.h>
5220+#include <linux/virtio_config.h>
5221+#include <linux/virtio_ids.h>
5222+#include <linux/virtio_ring.h>
5223+
5224+#include <linux/atomic.h>
5225+
5226+#define RSC_DESCR_VER 1
5227+
5228+struct trusty_vdev;
5229+
5230+struct trusty_ctx {
5231+ struct device *dev;
5232+ void *shared_va;
5233+ struct scatterlist shared_sg;
5234+ trusty_shared_mem_id_t shared_id;
5235+ size_t shared_sz;
5236+ struct work_struct check_vqs;
5237+ struct work_struct kick_vqs;
5238+ struct notifier_block call_notifier;
5239+ struct list_head vdev_list;
5240+ struct mutex mlock; /* protects vdev_list */
5241+ struct workqueue_struct *kick_wq;
5242+ struct workqueue_struct *check_wq;
5243+};
5244+
5245+struct trusty_vring {
5246+ void *vaddr;
5247+ struct scatterlist sg;
5248+ trusty_shared_mem_id_t shared_mem_id;
5249+ size_t size;
5250+ unsigned int align;
5251+ unsigned int elem_num;
5252+ u32 notifyid;
5253+ atomic_t needs_kick;
5254+ struct fw_rsc_vdev_vring *vr_descr;
5255+ struct virtqueue *vq;
5256+ struct trusty_vdev *tvdev;
5257+ struct trusty_nop kick_nop;
5258+};
5259+
5260+struct trusty_vdev {
5261+ struct list_head node;
5262+ struct virtio_device vdev;
5263+ struct trusty_ctx *tctx;
5264+ u32 notifyid;
5265+ unsigned int config_len;
5266+ void *config;
5267+ struct fw_rsc_vdev *vdev_descr;
5268+ unsigned int vring_num;
5269+ struct trusty_vring vrings[];
5270+};
5271+
5272+#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev)
5273+
5274+static void check_all_vqs(struct work_struct *work)
5275+{
5276+ unsigned int i;
5277+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
5278+ check_vqs);
5279+ struct trusty_vdev *tvdev;
5280+
5281+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
5282+ for (i = 0; i < tvdev->vring_num; i++)
5283+ if (tvdev->vrings[i].vq)
5284+ vring_interrupt(0, tvdev->vrings[i].vq);
5285+ }
5286+}
5287+
5288+static int trusty_call_notify(struct notifier_block *nb,
5289+ unsigned long action, void *data)
5290+{
5291+ struct trusty_ctx *tctx;
5292+
5293+ if (action != TRUSTY_CALL_RETURNED)
5294+ return NOTIFY_DONE;
5295+
5296+ tctx = container_of(nb, struct trusty_ctx, call_notifier);
5297+ queue_work(tctx->check_wq, &tctx->check_vqs);
5298+
5299+ return NOTIFY_OK;
5300+}
5301+
5302+static void kick_vq(struct trusty_ctx *tctx,
5303+ struct trusty_vdev *tvdev,
5304+ struct trusty_vring *tvr)
5305+{
5306+ int ret;
5307+
5308+ dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n",
5309+ __func__, tvdev->notifyid, tvr->notifyid);
5310+
5311+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ,
5312+ tvdev->notifyid, tvr->notifyid, 0);
5313+ if (ret) {
5314+ dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n",
5315+ tvdev->notifyid, tvr->notifyid, ret);
5316+ }
5317+}
5318+
5319+static void kick_vqs(struct work_struct *work)
5320+{
5321+ unsigned int i;
5322+ struct trusty_vdev *tvdev;
5323+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
5324+ kick_vqs);
5325+ mutex_lock(&tctx->mlock);
5326+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
5327+ for (i = 0; i < tvdev->vring_num; i++) {
5328+ struct trusty_vring *tvr = &tvdev->vrings[i];
5329+
5330+ if (atomic_xchg(&tvr->needs_kick, 0))
5331+ kick_vq(tctx, tvdev, tvr);
5332+ }
5333+ }
5334+ mutex_unlock(&tctx->mlock);
5335+}
5336+
5337+static bool trusty_virtio_notify(struct virtqueue *vq)
5338+{
5339+ struct trusty_vring *tvr = vq->priv;
5340+ struct trusty_vdev *tvdev = tvr->tvdev;
5341+ struct trusty_ctx *tctx = tvdev->tctx;
5342+ u32 api_ver = trusty_get_api_version(tctx->dev->parent);
5343+
5344+ if (api_ver < TRUSTY_API_VERSION_SMP_NOP) {
5345+ atomic_set(&tvr->needs_kick, 1);
5346+ queue_work(tctx->kick_wq, &tctx->kick_vqs);
5347+ } else {
5348+ trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop);
5349+ }
5350+
5351+ return true;
5352+}
5353+
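+/*
+ * Hedged flow sketch for the two kick paths above (editor's):
+ *
+ *	api <  TRUSTY_API_VERSION_SMP_NOP:
+ *		needs_kick = 1 -> kick_wq -> kick_vqs() ->
+ *		SMC_SC_VDEV_KICK_VQ std call per pending vring
+ *	api >= TRUSTY_API_VERSION_SMP_NOP:
+ *		trusty_enqueue_nop() queues the SMC_NC_VDEV_KICK_VQ nop
+ *		pre-packed by trusty_virtio_add_device()
+ *
+ * The nop path skips the workqueue hop by letting the Trusty core batch
+ * kicks into its existing nop dispatch.
+ */
+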
5354+static int trusty_load_device_descr(struct trusty_ctx *tctx,
5355+ trusty_shared_mem_id_t id, size_t sz)
5356+{
5357+ int ret;
5358+
5359+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5360+
5361+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_GET_DESCR,
5362+ (u32)id, id >> 32, sz);
5363+ if (ret < 0) {
5364+ dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n",
5365+ __func__, ret);
5366+ return -ENODEV;
5367+ }
5368+ return ret;
5369+}
5370+
5371+static void trusty_virtio_stop(struct trusty_ctx *tctx,
5372+ trusty_shared_mem_id_t id, size_t sz)
5373+{
5374+ int ret;
5375+
5376+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5377+
5378+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_STOP,
5379+ (u32)id, id >> 32, sz);
5380+ if (ret) {
5381+ dev_err(tctx->dev, "%s: virtio done returned (%d)\n",
5382+ __func__, ret);
5383+ return;
5384+ }
5385+}
5386+
5387+static int trusty_virtio_start(struct trusty_ctx *tctx,
5388+ trusty_shared_mem_id_t id, size_t sz)
5389+{
5390+ int ret;
5391+
5392+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5393+
5394+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_START,
5395+ (u32)id, id >> 32, sz);
5396+ if (ret) {
5397+ dev_err(tctx->dev, "%s: virtio start returned (%d)\n",
5398+ __func__, ret);
5399+ return -ENODEV;
5400+ }
5401+ return 0;
5402+}
5403+
5404+static void trusty_virtio_reset(struct virtio_device *vdev)
5405+{
5406+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5407+ struct trusty_ctx *tctx = tvdev->tctx;
5408+
5409+ dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid);
5410+ trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET,
5411+ tvdev->notifyid, 0, 0);
5412+}
5413+
5414+static u64 trusty_virtio_get_features(struct virtio_device *vdev)
5415+{
5416+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5417+
5418+ return tvdev->vdev_descr->dfeatures |
5419+ (1ULL << VIRTIO_F_ACCESS_PLATFORM);
5420+}
5421+
5422+static int trusty_virtio_finalize_features(struct virtio_device *vdev)
5423+{
5424+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5425+ u64 features = vdev->features;
5426+
5427+ /*
5428+ * We set VIRTIO_F_ACCESS_PLATFORM to enable the dma mapping hooks.
5429+ * The other side does not need to know.
5430+ */
5431+ features &= ~(1ULL << VIRTIO_F_ACCESS_PLATFORM);
5432+
5433+ /* Make sure we don't have any features > 32 bits! */
5434+ if (WARN_ON((u32)vdev->features != features))
5435+ return -EINVAL;
5436+
5437+ tvdev->vdev_descr->gfeatures = vdev->features;
5438+ return 0;
5439+}
5440+
5441+static void trusty_virtio_get_config(struct virtio_device *vdev,
5442+ unsigned int offset, void *buf,
5443+ unsigned int len)
5444+{
5445+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5446+
5447+ dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n",
5448+ __func__, len, offset);
5449+
5450+ if (tvdev->config) {
5451+ if (offset + len <= tvdev->config_len)
5452+ memcpy(buf, tvdev->config + offset, len);
5453+ }
5454+}
5455+
5456+static void trusty_virtio_set_config(struct virtio_device *vdev,
5457+ unsigned int offset, const void *buf,
5458+ unsigned int len)
5459+{
5460+}
5461+
5462+static u8 trusty_virtio_get_status(struct virtio_device *vdev)
5463+{
5464+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5465+
5466+ return tvdev->vdev_descr->status;
5467+}
5468+
5469+static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status)
5470+{
5471+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5472+
5473+ tvdev->vdev_descr->status = status;
5474+}
5475+
5476+static void _del_vqs(struct virtio_device *vdev)
5477+{
5478+ unsigned int i;
5479+ int ret;
5480+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5481+ struct trusty_vring *tvr = &tvdev->vrings[0];
5482+
5483+ for (i = 0; i < tvdev->vring_num; i++, tvr++) {
5484+ /* dequeue kick_nop */
5485+ trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop);
5486+
5487+ /* delete vq */
5488+ if (tvr->vq) {
5489+ vring_del_virtqueue(tvr->vq);
5490+ tvr->vq = NULL;
5491+ }
5492+ /* delete vring */
5493+ if (tvr->vaddr) {
5494+ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
5495+ tvr->shared_mem_id,
5496+ &tvr->sg, 1);
5497+ if (WARN_ON(ret)) {
5498+ dev_err(&vdev->dev,
5499+ "trusty_revoke_memory failed: %d 0x%llx\n",
5500+ ret, tvr->shared_mem_id);
5501+ /*
5502+ * It is not safe to free this memory if
5503+ * trusty_reclaim_memory fails. Leak it in that
5504+ * case.
5505+ */
5506+ } else {
5507+ free_pages_exact(tvr->vaddr, tvr->size);
5508+ }
5509+ tvr->vaddr = NULL;
5510+ }
5511+ }
5512+}
5513+
5514+static void trusty_virtio_del_vqs(struct virtio_device *vdev)
5515+{
5516+ _del_vqs(vdev);
5517+}
5518+
5519+
5520+static struct virtqueue *_find_vq(struct virtio_device *vdev,
5521+ unsigned int id,
5522+ void (*callback)(struct virtqueue *vq),
5523+ const char *name,
5524+ bool ctx)
5525+{
5526+ struct trusty_vring *tvr;
5527+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5528+ phys_addr_t pa;
5529+ int ret;
5530+
5531+ if (!name)
5532+ return ERR_PTR(-EINVAL);
5533+
5534+ if (id >= tvdev->vring_num)
5535+ return ERR_PTR(-EINVAL);
5536+
5537+ tvr = &tvdev->vrings[id];
5538+
5539+ /* actual size of vring (in bytes) */
5540+ tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align));
5541+
5542+ /* allocate memory for the vring. */
5543+ tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO);
5544+ if (!tvr->vaddr) {
5545+ dev_err(&vdev->dev, "vring alloc failed\n");
5546+ return ERR_PTR(-ENOMEM);
5547+ }
5548+
5549+ sg_init_one(&tvr->sg, tvr->vaddr, tvr->size);
5550+ ret = trusty_share_memory_compat(tvdev->tctx->dev->parent,
5551+ &tvr->shared_mem_id, &tvr->sg, 1,
5552+ PAGE_KERNEL);
5553+ if (ret) {
5554+ pa = virt_to_phys(tvr->vaddr);
5555+ dev_err(&vdev->dev, "trusty_share_memory failed: %d %pa\n",
5556+ ret, &pa);
5557+ goto err_share_memory;
5558+ }
5559+
5560+ /* save vring address to shared structure */
5561+ tvr->vr_descr->da = (u32)tvr->shared_mem_id;
5562+
5563+ /* The da field is only 32 bits wide. Use the previously unused 'pa'
5564+ * (reserved) field to store the top 32 bits of the 64-bit shared_mem_id
5565+ */
5566+ tvr->vr_descr->pa = (u32)(tvr->shared_mem_id >> 32);
5567+
5568+ dev_info(&vdev->dev, "vring%d: va(id) %p(%llx) qsz %d notifyid %d\n",
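+	/*
+	 * Hedged packing example (editor's illustration): shared_mem_id
+	 * 0x0000000123456789 splits into da = 0x23456789 (low 32 bits)
+	 * and pa = 0x00000001 (high 32 bits); the secure side can
+	 * reassemble it as ((u64)pa << 32) | da.
+	 */
+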
5569+ id, tvr->vaddr, (u64)tvr->shared_mem_id, tvr->elem_num,
5570+ tvr->notifyid);
5571+
5572+ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align,
5573+ vdev, true, ctx, tvr->vaddr,
5574+ trusty_virtio_notify, callback, name);
5575+ if (!tvr->vq) {
5576+ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n",
5577+ name);
5578+ goto err_new_virtqueue;
5579+ }
5580+
5581+ tvr->vq->priv = tvr;
5582+
5583+ return tvr->vq;
5584+
5585+err_new_virtqueue:
5586+ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
5587+ tvr->shared_mem_id, &tvr->sg, 1);
5588+ if (WARN_ON(ret)) {
5589+ dev_err(&vdev->dev, "trusty_revoke_memory failed: %d 0x%llx\n",
5590+ ret, tvr->shared_mem_id);
5591+ /*
5592+ * It is not safe to free this memory if trusty_reclaim_memory
5593+ * fails. Leak it in that case.
5594+ */
5595+ } else {
5596+err_share_memory:
5597+ free_pages_exact(tvr->vaddr, tvr->size);
5598+ }
5599+ tvr->vaddr = NULL;
5600+ return ERR_PTR(-ENOMEM);
5601+}
5602+
5603+static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
5604+ struct virtqueue *vqs[],
5605+ vq_callback_t *callbacks[],
5606+ const char * const names[],
5607+ const bool *ctxs,
5608+ struct irq_affinity *desc)
5609+{
5610+ unsigned int i;
5611+ int ret;
5612+ bool ctx = false;
5613+
5614+ for (i = 0; i < nvqs; i++) {
5615+ ctx = false;
5616+ if (ctxs)
5617+ ctx = ctxs[i];
5618+ vqs[i] = _find_vq(vdev, i, callbacks[i], names[i], ctx);
5619+ if (IS_ERR(vqs[i])) {
5620+ ret = PTR_ERR(vqs[i]);
5621+ _del_vqs(vdev);
5622+ return ret;
5623+ }
5624+ }
5625+ return 0;
5626+}
5627+
5628+static const char *trusty_virtio_bus_name(struct virtio_device *vdev)
5629+{
5630+ return "trusty-virtio";
5631+}
5632+
5633+/* The ops structure which hooks everything together. */
5634+static const struct virtio_config_ops trusty_virtio_config_ops = {
5635+ .get_features = trusty_virtio_get_features,
5636+ .finalize_features = trusty_virtio_finalize_features,
5637+ .get = trusty_virtio_get_config,
5638+ .set = trusty_virtio_set_config,
5639+ .get_status = trusty_virtio_get_status,
5640+ .set_status = trusty_virtio_set_status,
5641+ .reset = trusty_virtio_reset,
5642+ .find_vqs = trusty_virtio_find_vqs,
5643+ .del_vqs = trusty_virtio_del_vqs,
5644+ .bus_name = trusty_virtio_bus_name,
5645+};
5646+
5647+static int trusty_virtio_add_device(struct trusty_ctx *tctx,
5648+ struct fw_rsc_vdev *vdev_descr,
5649+ struct fw_rsc_vdev_vring *vr_descr,
5650+ void *config)
5651+{
5652+ int i, ret;
5653+ struct trusty_vdev *tvdev;
5654+
5655+ tvdev = kzalloc(struct_size(tvdev, vrings, vdev_descr->num_of_vrings),
5656+ GFP_KERNEL);
5657+ if (!tvdev)
5658+ return -ENOMEM;
5659+
5660+ /* setup vdev */
5661+ tvdev->tctx = tctx;
5662+ tvdev->vdev.dev.parent = tctx->dev;
5663+ tvdev->vdev.id.device = vdev_descr->id;
5664+ tvdev->vdev.config = &trusty_virtio_config_ops;
5665+ tvdev->vdev_descr = vdev_descr;
5666+ tvdev->notifyid = vdev_descr->notifyid;
5667+
5668+ /* setup config */
5669+ tvdev->config = config;
5670+ tvdev->config_len = vdev_descr->config_len;
5671+
5672+ /* setup vrings and vdev resource */
5673+ tvdev->vring_num = vdev_descr->num_of_vrings;
5674+
5675+ for (i = 0; i < tvdev->vring_num; i++, vr_descr++) {
5676+ struct trusty_vring *tvr = &tvdev->vrings[i];
5677+
5678+ tvr->tvdev = tvdev;
5679+ tvr->vr_descr = vr_descr;
5680+ tvr->align = vr_descr->align;
5681+ tvr->elem_num = vr_descr->num;
5682+ tvr->notifyid = vr_descr->notifyid;
5683+ trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ,
5684+ tvdev->notifyid, tvr->notifyid);
5685+ }
5686+
5687+ /* register device */
5688+ ret = register_virtio_device(&tvdev->vdev);
5689+ if (ret) {
5690+ dev_err(tctx->dev,
5691+ "Failed (%d) to register device dev type %u\n",
5692+ ret, vdev_descr->id);
5693+ goto err_register;
5694+ }
5695+
5696+ /* add it to tracking list */
5697+ list_add_tail(&tvdev->node, &tctx->vdev_list);
5698+
5699+ return 0;
5700+
5701+err_register:
5702+ kfree(tvdev);
5703+ return ret;
5704+}
5705+
5706+static int trusty_parse_device_descr(struct trusty_ctx *tctx,
5707+ void *descr_va, size_t descr_sz)
5708+{
5709+ u32 i;
5710+ struct resource_table *descr = descr_va;
5711+
5712+ if (descr_sz < sizeof(*descr)) {
5713+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
5714+ (int)descr_sz);
5715+ return -ENODEV;
5716+ }
5717+
5718+ if (descr->ver != RSC_DESCR_VER) {
5719+ dev_err(tctx->dev, "unexpected descr ver (0x%x)\n",
5720+ (int)descr->ver);
5721+ return -ENODEV;
5722+ }
5723+
5724+ if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) {
5725+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
5726+ (int)descr_sz);
5727+ return -ENODEV;
5728+ }
5729+
5730+ for (i = 0; i < descr->num; i++) {
5731+ struct fw_rsc_hdr *hdr;
5732+ struct fw_rsc_vdev *vd;
5733+ struct fw_rsc_vdev_vring *vr;
5734+ void *cfg;
5735+ size_t vd_sz;
5736+
5737+ u32 offset = descr->offset[i];
5738+
5739+ if (offset >= descr_sz) {
5740+ dev_err(tctx->dev, "offset is out of bounds (%u)\n",
5741+ offset);
5742+ return -ENODEV;
5743+ }
5744+
5745+ /* check space for rsc header */
5746+ if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) {
5747+ dev_err(tctx->dev, "no space for rsc header (%u)\n",
5748+ offset);
5749+ return -ENODEV;
5750+ }
5751+ hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset);
5752+ offset += sizeof(struct fw_rsc_hdr);
5753+
5754+ /* check type */
5755+ if (hdr->type != RSC_VDEV) {
5756+ dev_err(tctx->dev, "unsupported rsc type (%u)\n",
5757+ hdr->type);
5758+ continue;
5759+ }
5760+
5761+ /* got vdev: check space for vdev */
5762+ if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) {
5763+ dev_err(tctx->dev, "no space for vdev descr (%u)\n",
5764+ offset);
5765+ return -ENODEV;
5766+ }
5767+ vd = (struct fw_rsc_vdev *)((u8 *)descr + offset);
5768+
5769+ /* check space for vrings and config area */
5770+ vd_sz = sizeof(struct fw_rsc_vdev) +
5771+ vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) +
5772+ vd->config_len;
5773+
5774+ if ((descr_sz - offset) < vd_sz) {
5775+ dev_err(tctx->dev, "no space for vdev (%u)\n", offset);
5776+ return -ENODEV;
5777+ }
5778+ vr = (struct fw_rsc_vdev_vring *)vd->vring;
5779+ cfg = (void *)(vr + vd->num_of_vrings);
5780+
5781+ trusty_virtio_add_device(tctx, vd, vr, cfg);
5782+ }
5783+
5784+ return 0;
5785+}
5786+
5787+static void _remove_devices_locked(struct trusty_ctx *tctx)
5788+{
5789+ struct trusty_vdev *tvdev, *next;
5790+
5791+ list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) {
5792+ list_del(&tvdev->node);
5793+ unregister_virtio_device(&tvdev->vdev);
5794+ kfree(tvdev);
5795+ }
5796+}
5797+
5798+static void trusty_virtio_remove_devices(struct trusty_ctx *tctx)
5799+{
5800+ mutex_lock(&tctx->mlock);
5801+ _remove_devices_locked(tctx);
5802+ mutex_unlock(&tctx->mlock);
5803+}
5804+
5805+static int trusty_virtio_add_devices(struct trusty_ctx *tctx)
5806+{
5807+ int ret;
5808+ int ret_tmp;
5809+ void *descr_va;
5810+ trusty_shared_mem_id_t descr_id;
5811+ size_t descr_sz;
5812+ size_t descr_buf_sz;
5813+
5814+ /* allocate buffer to load device descriptor into */
5815+ descr_buf_sz = PAGE_SIZE;
5816+ descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO);
5817+ if (!descr_va) {
5818+ dev_err(tctx->dev, "Failed to allocate shared area\n");
5819+ return -ENOMEM;
5820+ }
5821+
5822+ sg_init_one(&tctx->shared_sg, descr_va, descr_buf_sz);
5823+ ret = trusty_share_memory(tctx->dev->parent, &descr_id,
5824+ &tctx->shared_sg, 1, PAGE_KERNEL);
5825+ if (ret) {
5826+ dev_err(tctx->dev, "trusty_share_memory failed: %d\n", ret);
5827+ goto err_share_memory;
5828+ }
5829+
5830+ /* load device descriptors */
5831+ ret = trusty_load_device_descr(tctx, descr_id, descr_buf_sz);
5832+ if (ret < 0) {
5833+ dev_err(tctx->dev, "failed (%d) to load device descr\n", ret);
5834+ goto err_load_descr;
5835+ }
5836+
5837+ descr_sz = (size_t)ret;
5838+
5839+ mutex_lock(&tctx->mlock);
5840+
5841+ /* parse device descriptor and add virtio devices */
5842+ ret = trusty_parse_device_descr(tctx, descr_va, descr_sz);
5843+ if (ret) {
5844+ dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret);
5845+ goto err_parse_descr;
5846+ }
5847+
5848+ /* register call notifier */
5849+ ret = trusty_call_notifier_register(tctx->dev->parent,
5850+ &tctx->call_notifier);
5851+ if (ret) {
5852+ dev_err(tctx->dev, "%s: failed (%d) to register notifier\n",
5853+ __func__, ret);
5854+ goto err_register_notifier;
5855+ }
5856+
5857+ /* start virtio */
5858+ ret = trusty_virtio_start(tctx, descr_id, descr_sz);
5859+ if (ret) {
5860+ dev_err(tctx->dev, "failed (%d) to start virtio\n", ret);
5861+ goto err_start_virtio;
5862+ }
5863+
5864+ /* attach shared area */
5865+ tctx->shared_va = descr_va;
5866+ tctx->shared_id = descr_id;
5867+ tctx->shared_sz = descr_buf_sz;
5868+
5869+ mutex_unlock(&tctx->mlock);
5870+
5871+ return 0;
5872+
5873+err_start_virtio:
5874+ trusty_call_notifier_unregister(tctx->dev->parent,
5875+ &tctx->call_notifier);
5876+ cancel_work_sync(&tctx->check_vqs);
5877+err_register_notifier:
5878+err_parse_descr:
5879+ _remove_devices_locked(tctx);
5880+ mutex_unlock(&tctx->mlock);
5881+ cancel_work_sync(&tctx->kick_vqs);
5882+ trusty_virtio_stop(tctx, descr_id, descr_sz);
5883+err_load_descr:
5884+ ret_tmp = trusty_reclaim_memory(tctx->dev->parent, descr_id,
5885+ &tctx->shared_sg, 1);
5886+ if (WARN_ON(ret_tmp)) {
5887+		dev_err(tctx->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
5888+ ret_tmp, tctx->shared_id);
5889+ /*
5890+		 * It is not safe to free this memory if trusty_reclaim_memory
5891+ * fails. Leak it in that case.
5892+ */
5893+ } else {
5894+err_share_memory:
5895+ free_pages_exact(descr_va, descr_buf_sz);
5896+ }
5897+ return ret;
5898+}
5899+
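+/*
+ * The virtio core maps each buffer through the device's dma_map_ops
+ * before handing it to the transport. Trusty does not address buffers
+ * by bus address: the tipc_msg_buf living at the mapped page offset
+ * already records the trusty_shared_mem_id_t it was shared under, so
+ * the "mapping" below simply hands that id back as the dma address.
+ */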
5900+static dma_addr_t trusty_virtio_dma_map_page(struct device *dev,
5901+ struct page *page,
5902+ unsigned long offset, size_t size,
5903+ enum dma_data_direction dir,
5904+ unsigned long attrs)
5905+{
5906+ struct tipc_msg_buf *buf = page_to_virt(page) + offset;
5907+
5908+ return buf->buf_id;
5909+}
5910+
5911+static const struct dma_map_ops trusty_virtio_dma_map_ops = {
5912+ .map_page = trusty_virtio_dma_map_page,
5913+};
5914+
5915+static int trusty_virtio_probe(struct platform_device *pdev)
5916+{
5917+ int ret;
5918+ struct trusty_ctx *tctx;
5919+
5920+ tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
5921+ if (!tctx)
5922+ return -ENOMEM;
5923+
5924+ tctx->dev = &pdev->dev;
5925+ tctx->call_notifier.notifier_call = trusty_call_notify;
5926+ mutex_init(&tctx->mlock);
5927+ INIT_LIST_HEAD(&tctx->vdev_list);
5928+ INIT_WORK(&tctx->check_vqs, check_all_vqs);
5929+ INIT_WORK(&tctx->kick_vqs, kick_vqs);
5930+ platform_set_drvdata(pdev, tctx);
5931+
5932+ set_dma_ops(&pdev->dev, &trusty_virtio_dma_map_ops);
5933+
5934+ tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0);
5935+ if (!tctx->check_wq) {
5936+ ret = -ENODEV;
5937+		dev_err(&pdev->dev, "Failed to create trusty-check-wq\n");
5938+ goto err_create_check_wq;
5939+ }
5940+
5941+ tctx->kick_wq = alloc_workqueue("trusty-kick-wq",
5942+ WQ_UNBOUND | WQ_CPU_INTENSIVE, 0);
5943+ if (!tctx->kick_wq) {
5944+ ret = -ENODEV;
5945+		dev_err(&pdev->dev, "Failed to create trusty-kick-wq\n");
5946+ goto err_create_kick_wq;
5947+ }
5948+
5949+ ret = trusty_virtio_add_devices(tctx);
5950+ if (ret) {
5951+ dev_err(&pdev->dev, "Failed to add virtio devices\n");
5952+ goto err_add_devices;
5953+ }
5954+
5955+ dev_info(&pdev->dev, "initializing done\n");
5956+ return 0;
5957+
5958+err_add_devices:
5959+ destroy_workqueue(tctx->kick_wq);
5960+err_create_kick_wq:
5961+ destroy_workqueue(tctx->check_wq);
5962+err_create_check_wq:
5963+ kfree(tctx);
5964+ return ret;
5965+}
5966+
5967+static int trusty_virtio_remove(struct platform_device *pdev)
5968+{
5969+ struct trusty_ctx *tctx = platform_get_drvdata(pdev);
5970+ int ret;
5971+
5972+ /* unregister call notifier and wait until workqueue is done */
5973+ trusty_call_notifier_unregister(tctx->dev->parent,
5974+ &tctx->call_notifier);
5975+ cancel_work_sync(&tctx->check_vqs);
5976+
5977+ /* remove virtio devices */
5978+ trusty_virtio_remove_devices(tctx);
5979+ cancel_work_sync(&tctx->kick_vqs);
5980+
5981+ /* destroy workqueues */
5982+ destroy_workqueue(tctx->kick_wq);
5983+ destroy_workqueue(tctx->check_wq);
5984+
5985+ /* notify remote that shared area goes away */
5986+ trusty_virtio_stop(tctx, tctx->shared_id, tctx->shared_sz);
5987+
5988+ /* free shared area */
5989+ ret = trusty_reclaim_memory(tctx->dev->parent, tctx->shared_id,
5990+ &tctx->shared_sg, 1);
5991+ if (WARN_ON(ret)) {
5992+		dev_err(tctx->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
5993+ ret, tctx->shared_id);
5994+ /*
5995+		 * It is not safe to free this memory if trusty_reclaim_memory
5996+ * fails. Leak it in that case.
5997+ */
5998+ } else {
5999+ free_pages_exact(tctx->shared_va, tctx->shared_sz);
6000+ }
6001+
6002+ /* free context */
6003+ kfree(tctx);
6004+ return 0;
6005+}
6006+
6007+static const struct of_device_id trusty_of_match[] = {
6008+ {
6009+ .compatible = "android,trusty-virtio-v1",
6010+ },
6011+ {},
6012+};
6013+
6014+MODULE_DEVICE_TABLE(of, trusty_of_match);
6015+
6016+static struct platform_driver trusty_virtio_driver = {
6017+ .probe = trusty_virtio_probe,
6018+ .remove = trusty_virtio_remove,
6019+ .driver = {
6020+ .name = "trusty-virtio",
6021+ .of_match_table = trusty_of_match,
6022+ },
6023+};
6024+
6025+module_platform_driver(trusty_virtio_driver);
6026+
6027+MODULE_LICENSE("GPL v2");
6028+MODULE_DESCRIPTION("Trusty virtio driver");
6029+/*
6030+ * TODO(b/168322325): trusty-virtio and trusty-ipc should be independent.
6031+ * However, trusty-virtio is not completely generic and is aware of trusty-ipc.
6032+ * See header includes. Particularly, trusty-virtio.ko can't be loaded before
6033+ * trusty-ipc.ko.
6034+ */
6035+MODULE_SOFTDEP("pre: trusty-ipc");
6036diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
6037new file mode 100644
6038index 000000000000..265eab52aea0
6039--- /dev/null
6040+++ b/drivers/trusty/trusty.c
6041@@ -0,0 +1,981 @@
6042+// SPDX-License-Identifier: GPL-2.0-only
6043+/*
6044+ * Copyright (C) 2013 Google, Inc.
6045+ */
6046+
6047+#include <linux/delay.h>
6048+#include <linux/module.h>
6049+#include <linux/of.h>
6050+#include <linux/of_platform.h>
6051+#include <linux/platform_device.h>
6052+#include <linux/slab.h>
6053+#include <linux/stat.h>
6054+#include <linux/string.h>
6055+#include <linux/trusty/arm_ffa.h>
6056+#include <linux/trusty/smcall.h>
6057+#include <linux/trusty/sm_err.h>
6058+#include <linux/trusty/trusty.h>
6059+
6060+#include <linux/scatterlist.h>
6061+#include <linux/dma-mapping.h>
6062+
6063+#include "trusty-smc.h"
6064+
6065+struct trusty_state;
6066+static struct platform_driver trusty_driver;
6067+
6068+struct trusty_work {
6069+ struct trusty_state *ts;
6070+ struct work_struct work;
6071+};
6072+
6073+struct trusty_state {
6074+ struct mutex smc_lock;
6075+ struct atomic_notifier_head notifier;
6076+ struct completion cpu_idle_completion;
6077+ char *version_str;
6078+ u32 api_version;
6079+ bool trusty_panicked;
6080+ struct device *dev;
6081+ struct workqueue_struct *nop_wq;
6082+ struct trusty_work __percpu *nop_works;
6083+ struct list_head nop_queue;
6084+ spinlock_t nop_lock; /* protects nop_queue */
6085+ struct device_dma_parameters dma_parms;
6086+ void *ffa_tx;
6087+ void *ffa_rx;
6088+ u16 ffa_local_id;
6089+ u16 ffa_remote_id;
6090+ struct mutex share_memory_msg_lock; /* protects share_memory_msg */
6091+};
6092+
6093+static inline unsigned long smc(unsigned long r0, unsigned long r1,
6094+ unsigned long r2, unsigned long r3)
6095+{
6096+ return trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0;
6097+}
6098+
6099+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
6100+{
6101+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6102+
6103+ if (WARN_ON(!s))
6104+ return SM_ERR_INVALID_PARAMETERS;
6105+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
6106+ return SM_ERR_INVALID_PARAMETERS;
6107+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
6108+ return SM_ERR_INVALID_PARAMETERS;
6109+
6110+ return smc(smcnr, a0, a1, a2);
6111+}
6112+EXPORT_SYMBOL(trusty_fast_call32);
6113+
6114+#ifdef CONFIG_64BIT
6115+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
6116+{
6117+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6118+
6119+ if (WARN_ON(!s))
6120+ return SM_ERR_INVALID_PARAMETERS;
6121+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
6122+ return SM_ERR_INVALID_PARAMETERS;
6123+ if (WARN_ON(!SMC_IS_SMC64(smcnr)))
6124+ return SM_ERR_INVALID_PARAMETERS;
6125+
6126+ return smc(smcnr, a0, a1, a2);
6127+}
6128+EXPORT_SYMBOL(trusty_fast_call64);
6129+#endif
6130+
6131+static unsigned long trusty_std_call_inner(struct device *dev,
6132+ unsigned long smcnr,
6133+ unsigned long a0, unsigned long a1,
6134+ unsigned long a2)
6135+{
6136+ unsigned long ret;
6137+ int retry = 5;
6138+
6139+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
6140+ __func__, smcnr, a0, a1, a2);
6141+ while (true) {
6142+ ret = smc(smcnr, a0, a1, a2);
6143+ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
6144+ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
6145+ if ((int)ret != SM_ERR_BUSY || !retry)
6146+ break;
6147+
6148+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
6149+ __func__, smcnr, a0, a1, a2);
6150+ retry--;
6151+ }
6152+
6153+ return ret;
6154+}
6155+
6156+static unsigned long trusty_std_call_helper(struct device *dev,
6157+ unsigned long smcnr,
6158+ unsigned long a0, unsigned long a1,
6159+ unsigned long a2)
6160+{
6161+ unsigned long ret;
6162+ int sleep_time = 1;
6163+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6164+
6165+ while (true) {
6166+ local_irq_disable();
6167+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
6168+ NULL);
6169+ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
6170+ if (ret == SM_ERR_PANIC) {
6171+ s->trusty_panicked = true;
6172+ if (IS_ENABLED(CONFIG_TRUSTY_CRASH_IS_PANIC))
6173+ panic("trusty crashed");
6174+ else
6175+ WARN_ONCE(1, "trusty crashed");
6176+ }
6177+
6178+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
6179+ NULL);
6180+ if (ret == SM_ERR_INTERRUPTED) {
6181+ /*
6182+ * Make sure this cpu will eventually re-enter trusty
6183+ * even if the std_call resumes on another cpu.
6184+ */
6185+ trusty_enqueue_nop(dev, NULL);
6186+ }
6187+ local_irq_enable();
6188+
6189+ if ((int)ret != SM_ERR_BUSY)
6190+ break;
6191+
6192+ if (sleep_time == 256)
6193+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
6194+ __func__, smcnr, a0, a1, a2);
6195+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
6196+ __func__, smcnr, a0, a1, a2, sleep_time);
6197+
6198+ msleep(sleep_time);
6199+ if (sleep_time < 1000)
6200+ sleep_time <<= 1;
6201+
6202+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
6203+ __func__, smcnr, a0, a1, a2);
6204+ }
6205+
6206+ if (sleep_time > 256)
6207+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
6208+ __func__, smcnr, a0, a1, a2);
6209+
6210+ return ret;
6211+}
6212+
6213+static void trusty_std_call_cpu_idle(struct trusty_state *s)
6214+{
6215+ int ret;
6216+
6217+ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
6218+ if (!ret) {
6219+ dev_warn(s->dev,
6220+ "%s: timed out waiting for cpu idle to clear, retry anyway\n",
6221+ __func__);
6222+ }
6223+}
6224+
6225+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
6226+{
6227+ int ret;
6228+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6229+
6230+ if (WARN_ON(SMC_IS_FASTCALL(smcnr)))
6231+ return SM_ERR_INVALID_PARAMETERS;
6232+
6233+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
6234+ return SM_ERR_INVALID_PARAMETERS;
6235+
6236+ if (s->trusty_panicked) {
6237+ /*
6238+ * Avoid calling the notifiers if trusty has panicked as they
6239+ * can trigger more calls.
6240+ */
6241+ return SM_ERR_PANIC;
6242+ }
6243+
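+	/*
+	 * SMC_SC_NOP calls bypass smc_lock so every cpu can enter trusty
+	 * concurrently to drain its nop queue; all other std calls are
+	 * serialized. cpu_idle_completion is completed by the nop path so
+	 * that a std call which got SM_ERR_CPU_IDLE can wait for the
+	 * other cpu before restarting (see trusty_std_call_cpu_idle).
+	 */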
6244+ if (smcnr != SMC_SC_NOP) {
6245+ mutex_lock(&s->smc_lock);
6246+ reinit_completion(&s->cpu_idle_completion);
6247+ }
6248+
6249+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
6250+ __func__, smcnr, a0, a1, a2);
6251+
6252+ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
6253+ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
6254+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
6255+ __func__, smcnr, a0, a1, a2);
6256+ if (ret == SM_ERR_CPU_IDLE)
6257+ trusty_std_call_cpu_idle(s);
6258+ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
6259+ }
6260+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
6261+ __func__, smcnr, a0, a1, a2, ret);
6262+
6263+ if (smcnr == SMC_SC_NOP)
6264+ complete(&s->cpu_idle_completion);
6265+ else
6266+ mutex_unlock(&s->smc_lock);
6267+
6268+ return ret;
6269+}
6270+EXPORT_SYMBOL(trusty_std_call32);
6271+
6272+int trusty_share_memory(struct device *dev, u64 *id,
6273+ struct scatterlist *sglist, unsigned int nents,
6274+ pgprot_t pgprot)
6275+{
6276+ return trusty_transfer_memory(dev, id, sglist, nents, pgprot, 0,
6277+ false);
6278+}
6279+EXPORT_SYMBOL(trusty_share_memory);
6280+
6281+int trusty_transfer_memory(struct device *dev, u64 *id,
6282+ struct scatterlist *sglist, unsigned int nents,
6283+ pgprot_t pgprot, u64 tag, bool lend)
6284+{
6285+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6286+ int ret;
6287+ struct ns_mem_page_info pg_inf;
6288+ struct scatterlist *sg;
6289+ size_t count;
6290+ size_t i;
6291+ size_t len;
6292+ u64 ffa_handle = 0;
6293+ size_t total_len;
6294+ size_t endpoint_count = 1;
6295+ struct ffa_mtd *mtd = s->ffa_tx;
6296+ size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]);
6297+ struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset;
6298+ struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array;
6299+ size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx;
6300+ struct smc_ret8 smc_ret;
6301+ u32 cookie_low;
6302+ u32 cookie_high;
6303+
6304+ if (WARN_ON(dev->driver != &trusty_driver.driver))
6305+ return -EINVAL;
6306+
6307+ if (WARN_ON(nents < 1))
6308+ return -EINVAL;
6309+
6310+ if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6311+ dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n",
6312+ __func__);
6313+ return -EOPNOTSUPP;
6314+ }
6315+
6316+ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6317+ if (count != nents) {
6318+ dev_err(s->dev, "failed to dma map sg_table\n");
6319+ return -EINVAL;
6320+ }
6321+
6322+ sg = sglist;
6323+ ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)),
6324+ pgprot);
6325+ if (ret) {
6326+ dev_err(s->dev, "%s: trusty_encode_page_info failed\n",
6327+ __func__);
6328+ goto err_encode_page_info;
6329+ }
6330+
6331+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6332+ *id = pg_inf.compat_attr;
6333+ return 0;
6334+ }
6335+
6336+ len = 0;
6337+ for_each_sg(sglist, sg, nents, i)
6338+ len += sg_dma_len(sg);
6339+
6340+ mutex_lock(&s->share_memory_msg_lock);
6341+
6342+ mtd->sender_id = s->ffa_local_id;
6343+ mtd->memory_region_attributes = pg_inf.ffa_mem_attr;
6344+ mtd->reserved_3 = 0;
6345+ mtd->flags = 0;
6346+ mtd->handle = 0;
6347+ mtd->tag = tag;
6348+ mtd->reserved_24_27 = 0;
6349+ mtd->emad_count = endpoint_count;
6350+ for (i = 0; i < endpoint_count; i++) {
6351+ struct ffa_emad *emad = &mtd->emad[i];
6352+ /* TODO: support stream ids */
6353+ emad->mapd.endpoint_id = s->ffa_remote_id;
6354+ emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm;
6355+ emad->mapd.flags = 0;
6356+ emad->comp_mrd_offset = comp_mrd_offset;
6357+ emad->reserved_8_15 = 0;
6358+ }
6359+ comp_mrd->total_page_count = len / PAGE_SIZE;
6360+ comp_mrd->address_range_count = nents;
6361+ comp_mrd->reserved_8_15 = 0;
6362+
6363+ total_len = cons_mrd_offset + nents * sizeof(*cons_mrd);
6364+ sg = sglist;
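+	/*
+	 * Transmit the descriptor in at most PAGE_SIZE fragments. The
+	 * first fragment goes out with SMC_FC_FFA_MEM_SHARE (or _LEND)
+	 * carrying the total and fragment lengths; while the receiver
+	 * answers SMC_FC_FFA_MEM_FRAG_RX, the returned cookie is echoed
+	 * back in SMC_FC_FFA_MEM_FRAG_TX calls until all ranges are sent
+	 * and SMC_FC_FFA_SUCCESS returns the handle.
+	 */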
6365+ while (count) {
6366+ size_t lcount =
6367+ min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) /
6368+ sizeof(*cons_mrd));
6369+ size_t fragment_len = lcount * sizeof(*cons_mrd) +
6370+ cons_mrd_offset;
6371+
6372+ for (i = 0; i < lcount; i++) {
6373+ cons_mrd[i].address = sg_dma_address(sg);
6374+ cons_mrd[i].page_count = sg_dma_len(sg) / PAGE_SIZE;
6375+ cons_mrd[i].reserved_12_15 = 0;
6376+ sg = sg_next(sg);
6377+ }
6378+ count -= lcount;
6379+ if (cons_mrd_offset) {
6380+ u32 smc = lend ? SMC_FC_FFA_MEM_LEND :
6381+ SMC_FC_FFA_MEM_SHARE;
6382+ /* First fragment */
6383+ smc_ret = trusty_smc8(smc, total_len,
6384+ fragment_len, 0, 0, 0, 0, 0);
6385+ } else {
6386+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX,
6387+ cookie_low, cookie_high,
6388+ fragment_len, 0, 0, 0, 0);
6389+ }
6390+ if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) {
6391+ cookie_low = smc_ret.r1;
6392+ cookie_high = smc_ret.r2;
6393+			dev_dbg(s->dev, "cookie %x %x\n", cookie_low,
6394+ cookie_high);
6395+ if (!count) {
6396+ /*
6397+ * We have sent all our descriptors. Expected
6398+ * SMC_FC_FFA_SUCCESS, not a request to send
6399+ * another fragment.
6400+ */
6401+ dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n",
6402+ __func__, fragment_len, total_len);
6403+ ret = -EIO;
6404+ break;
6405+ }
6406+ } else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) {
6407+ ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32;
6408+ dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n",
6409+ __func__, fragment_len, total_len,
6410+ ffa_handle);
6411+ if (count) {
6412+ /*
6413+ * We have not sent all our descriptors.
6414+ * Expected SMC_FC_FFA_MEM_FRAG_RX not
6415+ * SMC_FC_FFA_SUCCESS.
6416+ */
6417+ dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n",
6418+ __func__, fragment_len, total_len,
6419+ count);
6420+ ret = -EIO;
6421+ break;
6422+ }
6423+ } else {
6424+			dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx\n",
6425+ __func__, fragment_len, total_len,
6426+ smc_ret.r0, smc_ret.r1, smc_ret.r2);
6427+ ret = -EIO;
6428+ break;
6429+ }
6430+
6431+ cons_mrd = s->ffa_tx;
6432+ cons_mrd_offset = 0;
6433+ }
6434+
6435+ mutex_unlock(&s->share_memory_msg_lock);
6436+
6437+ if (!ret) {
6438+ *id = ffa_handle;
6439+ dev_dbg(s->dev, "%s: done\n", __func__);
6440+ return 0;
6441+ }
6442+
6443+	dev_err(s->dev, "%s: failed %d\n", __func__, ret);
6444+
6445+err_encode_page_info:
6446+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6447+ return ret;
6448+}
6449+EXPORT_SYMBOL(trusty_transfer_memory);
6450+
6451+/*
6452+ * trusty_share_memory_compat - trusty_share_memory wrapper for old apis
6453+ *
6454+ * Call trusty_share_memory and filter out memory attributes if trusty version
6455+ * is old. Used by clients that used to pass just a physical address to trusty
6456+ * instead of a physical address plus memory attributes value.
6457+ */
6458+int trusty_share_memory_compat(struct device *dev, u64 *id,
6459+ struct scatterlist *sglist, unsigned int nents,
6460+ pgprot_t pgprot)
6461+{
6462+ int ret;
6463+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6464+
6465+ ret = trusty_share_memory(dev, id, sglist, nents, pgprot);
6466+ if (!ret && s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ)
6467+ *id &= 0x0000FFFFFFFFF000ull;
6468+
6469+ return ret;
6470+}
6471+EXPORT_SYMBOL(trusty_share_memory_compat);
6472+
6473+int trusty_reclaim_memory(struct device *dev, u64 id,
6474+ struct scatterlist *sglist, unsigned int nents)
6475+{
6476+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6477+ int ret = 0;
6478+ struct smc_ret8 smc_ret;
6479+
6480+ if (WARN_ON(dev->driver != &trusty_driver.driver))
6481+ return -EINVAL;
6482+
6483+ if (WARN_ON(nents < 1))
6484+ return -EINVAL;
6485+
6486+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6487+ if (nents != 1) {
6488+ dev_err(s->dev, "%s: not supported\n", __func__);
6489+ return -EOPNOTSUPP;
6490+ }
6491+
6492+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6493+
6494+ dev_dbg(s->dev, "%s: done\n", __func__);
6495+ return 0;
6496+ }
6497+
6498+ mutex_lock(&s->share_memory_msg_lock);
6499+
6500+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0,
6501+ 0, 0, 0);
6502+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6503+		dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx\n",
6504+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6505+ if (smc_ret.r0 == SMC_FC_FFA_ERROR &&
6506+ smc_ret.r2 == FFA_ERROR_DENIED)
6507+ ret = -EBUSY;
6508+ else
6509+ ret = -EIO;
6510+ }
6511+
6512+ mutex_unlock(&s->share_memory_msg_lock);
6513+
6514+ if (ret != 0)
6515+ return ret;
6516+
6517+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6518+
6519+ dev_dbg(s->dev, "%s: done\n", __func__);
6520+ return 0;
6521+}
6522+EXPORT_SYMBOL(trusty_reclaim_memory);
6523+
6524+int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
6525+{
6526+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6527+
6528+ return atomic_notifier_chain_register(&s->notifier, n);
6529+}
6530+EXPORT_SYMBOL(trusty_call_notifier_register);
6531+
6532+int trusty_call_notifier_unregister(struct device *dev,
6533+ struct notifier_block *n)
6534+{
6535+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6536+
6537+ return atomic_notifier_chain_unregister(&s->notifier, n);
6538+}
6539+EXPORT_SYMBOL(trusty_call_notifier_unregister);
6540+
6541+static int trusty_remove_child(struct device *dev, void *data)
6542+{
6543+ platform_device_unregister(to_platform_device(dev));
6544+ return 0;
6545+}
6546+
6547+static ssize_t trusty_version_show(struct device *dev,
6548+ struct device_attribute *attr, char *buf)
6549+{
6550+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6551+
6552+ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str ?: "unknown");
6553+}
6554+
6555+static DEVICE_ATTR(trusty_version, 0400, trusty_version_show, NULL);
6556+
6557+static struct attribute *trusty_attrs[] = {
6558+ &dev_attr_trusty_version.attr,
6559+ NULL,
6560+};
6561+ATTRIBUTE_GROUPS(trusty);
6562+
6563+const char *trusty_version_str_get(struct device *dev)
6564+{
6565+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6566+
6567+ return s->version_str;
6568+}
6569+EXPORT_SYMBOL(trusty_version_str_get);
6570+
6571+static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev)
6572+{
6573+ phys_addr_t tx_paddr;
6574+ phys_addr_t rx_paddr;
6575+ int ret;
6576+ struct smc_ret8 smc_ret;
6577+
6578+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ)
6579+ return 0;
6580+
6581+ /* Get supported FF-A version and check if it is compatible */
6582+ smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0,
6583+ 0, 0, 0, 0);
6584+ if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) {
6585+ dev_err(s->dev,
6586+ "%s: Unsupported FF-A version 0x%lx, expected 0x%x\n",
6587+ __func__, smc_ret.r0, FFA_CURRENT_VERSION);
6588+ ret = -EIO;
6589+ goto err_version;
6590+ }
6591+
6592+ /* Check that SMC_FC_FFA_MEM_SHARE is implemented */
6593+ smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0,
6594+ 0, 0, 0, 0);
6595+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6596+ dev_err(s->dev,
6597+ "%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n",
6598+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6599+ ret = -EIO;
6600+ goto err_features;
6601+ }
6602+
6603+ /*
6604+ * Set FF-A endpoint IDs.
6605+ *
6606+ * Hardcode 0x8000 for the secure os.
6607+ * TODO: Use FF-A call or device tree to configure this dynamically
6608+ */
6609+ smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
6610+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6611+ dev_err(s->dev,
6612+ "%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n",
6613+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6614+ ret = -EIO;
6615+ goto err_id_get;
6616+ }
6617+
6618+ s->ffa_local_id = smc_ret.r2;
6619+ s->ffa_remote_id = 0x8000;
6620+
6621+ s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL);
6622+ if (!s->ffa_tx) {
6623+ ret = -ENOMEM;
6624+ goto err_alloc_tx;
6625+ }
6626+ tx_paddr = virt_to_phys(s->ffa_tx);
6627+ if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) {
6628+ ret = -EINVAL;
6629+ goto err_unaligned_tx_buf;
6630+ }
6631+
6632+ s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL);
6633+ if (!s->ffa_rx) {
6634+ ret = -ENOMEM;
6635+ goto err_alloc_rx;
6636+ }
6637+ rx_paddr = virt_to_phys(s->ffa_rx);
6638+ if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) {
6639+ ret = -EINVAL;
6640+ goto err_unaligned_rx_buf;
6641+ }
6642+
6643+ smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr, 1, 0,
6644+ 0, 0, 0);
6645+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6646+ dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n",
6647+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6648+ ret = -EIO;
6649+ goto err_rxtx_map;
6650+ }
6651+
6652+ return 0;
6653+
6654+err_rxtx_map:
6655+err_unaligned_rx_buf:
6656+ kfree(s->ffa_rx);
6657+ s->ffa_rx = NULL;
6658+err_alloc_rx:
6659+err_unaligned_tx_buf:
6660+ kfree(s->ffa_tx);
6661+ s->ffa_tx = NULL;
6662+err_alloc_tx:
6663+err_id_get:
6664+err_features:
6665+err_version:
6666+ return ret;
6667+}
6668+
6669+static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev)
6670+{
6671+ struct smc_ret8 smc_ret;
6672+
6673+ smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0);
6674+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6675+ dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n",
6676+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6677+ } else {
6678+ kfree(s->ffa_rx);
6679+ kfree(s->ffa_tx);
6680+ }
6681+}
6682+
6683+static void trusty_init_version(struct trusty_state *s, struct device *dev)
6684+{
6685+ int ret;
6686+ int i;
6687+ int version_str_len;
6688+
6689+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
6690+ if (ret <= 0)
6691+ goto err_get_size;
6692+
6693+ version_str_len = ret;
6694+
6695+	s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
+	if (!s->version_str) {
+		ret = -ENOMEM;
+		goto err_get_size;
+	}
6696+ for (i = 0; i < version_str_len; i++) {
6697+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
6698+ if (ret < 0)
6699+ goto err_get_char;
6700+ s->version_str[i] = ret;
6701+ }
6702+ s->version_str[i] = '\0';
6703+
6704+ dev_info(dev, "trusty version: %s\n", s->version_str);
6705+ return;
6706+
6707+err_get_char:
6708+ kfree(s->version_str);
6709+ s->version_str = NULL;
6710+err_get_size:
6711+ dev_err(dev, "failed to get version: %d\n", ret);
6712+}
6713+
6714+u32 trusty_get_api_version(struct device *dev)
6715+{
6716+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6717+
6718+ return s->api_version;
6719+}
6720+EXPORT_SYMBOL(trusty_get_api_version);
6721+
6722+bool trusty_get_panic_status(struct device *dev)
6723+{
6724+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6725+	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
+
6726+	if (WARN_ON(dev->driver != &trusty_driver.driver))
6727+ return s->trusty_panicked;
6728+}
6729+EXPORT_SYMBOL(trusty_get_panic_status);
6730+
6731+static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
6732+{
6733+ u32 api_version;
6734+
6735+ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
6736+ TRUSTY_API_VERSION_CURRENT, 0, 0);
6737+ if (api_version == SM_ERR_UNDEFINED_SMC)
6738+ api_version = 0;
6739+
6740+ if (api_version > TRUSTY_API_VERSION_CURRENT) {
6741+ dev_err(dev, "unsupported api version %u > %u\n",
6742+ api_version, TRUSTY_API_VERSION_CURRENT);
6743+ return -EINVAL;
6744+ }
6745+
6746+ dev_info(dev, "selected api version: %u (requested %u)\n",
6747+ api_version, TRUSTY_API_VERSION_CURRENT);
6748+ s->api_version = api_version;
6749+
6750+ return 0;
6751+}
6752+
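+/*
+ * Pop the next queued nop, copying its arguments into @args. Returns
+ * false, with @args zeroed, when the queue is empty and only a default
+ * nop needs to run.
+ */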
6753+static bool dequeue_nop(struct trusty_state *s, u32 *args)
6754+{
6755+ unsigned long flags;
6756+ struct trusty_nop *nop = NULL;
6757+
6758+ spin_lock_irqsave(&s->nop_lock, flags);
6759+ if (!list_empty(&s->nop_queue)) {
6760+ nop = list_first_entry(&s->nop_queue,
6761+ struct trusty_nop, node);
6762+ list_del_init(&nop->node);
6763+ args[0] = nop->args[0];
6764+ args[1] = nop->args[1];
6765+ args[2] = nop->args[2];
6766+ } else {
6767+ args[0] = 0;
6768+ args[1] = 0;
6769+ args[2] = 0;
6770+ }
6771+ spin_unlock_irqrestore(&s->nop_lock, flags);
6772+ return nop;
6773+}
6774+
6775+static void locked_nop_work_func(struct work_struct *work)
6776+{
6777+ int ret;
6778+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
6779+ struct trusty_state *s = tw->ts;
6780+
6781+ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
6782+ if (ret != 0)
6783+		dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d\n",
6784+ __func__, ret);
6785+
6786+ dev_dbg(s->dev, "%s: done\n", __func__);
6787+}
6788+
6789+static void nop_work_func(struct work_struct *work)
6790+{
6791+ int ret;
6792+ bool next;
6793+ u32 args[3];
6794+ u32 last_arg0;
6795+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
6796+ struct trusty_state *s = tw->ts;
6797+
6798+ dequeue_nop(s, args);
6799+ do {
6800+ dev_dbg(s->dev, "%s: %x %x %x\n",
6801+ __func__, args[0], args[1], args[2]);
6802+
6803+ last_arg0 = args[0];
6804+ ret = trusty_std_call32(s->dev, SMC_SC_NOP,
6805+ args[0], args[1], args[2]);
6806+
6807+ next = dequeue_nop(s, args);
6808+
6809+ if (ret == SM_ERR_NOP_INTERRUPTED) {
6810+ next = true;
6811+ } else if (ret != SM_ERR_NOP_DONE) {
6812+			dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d\n",
6813+ __func__, last_arg0, ret);
6814+ if (last_arg0) {
6815+ /*
6816+ * Don't break out of the loop if a non-default
6817+ * nop-handler returns an error.
6818+ */
6819+ next = true;
6820+ }
6821+ }
6822+ } while (next);
6823+
6824+ dev_dbg(s->dev, "%s: done\n", __func__);
6825+}
6826+
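+/*
+ * Queue @nop (or, when @nop is NULL, just a default nop) on the
+ * current cpu's nop work so that this cpu re-enters trusty.
+ */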
6827+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
6828+{
6829+ unsigned long flags;
6830+ struct trusty_work *tw;
6831+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6832+
6833+ preempt_disable();
6834+ tw = this_cpu_ptr(s->nop_works);
6835+ if (nop) {
6836+ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);
6837+
6838+ spin_lock_irqsave(&s->nop_lock, flags);
6839+ if (list_empty(&nop->node))
6840+ list_add_tail(&nop->node, &s->nop_queue);
6841+ spin_unlock_irqrestore(&s->nop_lock, flags);
6842+ }
6843+ queue_work(s->nop_wq, &tw->work);
6844+ preempt_enable();
6845+}
6846+EXPORT_SYMBOL(trusty_enqueue_nop);
6847+
6848+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
6849+{
6850+ unsigned long flags;
6851+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6852+
6853+ if (WARN_ON(!nop))
6854+ return;
6855+
6856+ spin_lock_irqsave(&s->nop_lock, flags);
6857+ if (!list_empty(&nop->node))
6858+ list_del_init(&nop->node);
6859+ spin_unlock_irqrestore(&s->nop_lock, flags);
6860+}
6861+EXPORT_SYMBOL(trusty_dequeue_nop);
6862+
6863+static int trusty_probe(struct platform_device *pdev)
6864+{
6865+ int ret;
6866+ unsigned int cpu;
6867+ work_func_t work_func;
6868+ struct trusty_state *s;
6869+ struct device_node *node = pdev->dev.of_node;
6870+
6871+ if (!node) {
6872+ dev_err(&pdev->dev, "of_node required\n");
6873+ return -EINVAL;
6874+ }
6875+
6876+ s = kzalloc(sizeof(*s), GFP_KERNEL);
6877+ if (!s) {
6878+ ret = -ENOMEM;
6879+ goto err_allocate_state;
6880+ }
6881+
6882+ s->dev = &pdev->dev;
6883+ spin_lock_init(&s->nop_lock);
6884+ INIT_LIST_HEAD(&s->nop_queue);
6885+ mutex_init(&s->smc_lock);
6886+ mutex_init(&s->share_memory_msg_lock);
6887+ ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
6888+ init_completion(&s->cpu_idle_completion);
6889+
6890+ s->dev->dma_parms = &s->dma_parms;
6891+ dma_set_max_seg_size(s->dev, 0xfffff000); /* dma_parms limit */
6892+ /*
6893+ * Set dma mask to 48 bits. This is the current limit of
6894+ * trusty_encode_page_info.
6895+ */
6896+ dma_coerce_mask_and_coherent(s->dev, DMA_BIT_MASK(48));
6897+
6898+ platform_set_drvdata(pdev, s);
6899+
6900+ trusty_init_version(s, &pdev->dev);
6901+
6902+ ret = trusty_init_api_version(s, &pdev->dev);
6903+ if (ret < 0)
6904+ goto err_api_version;
6905+
6906+ ret = trusty_init_msg_buf(s, &pdev->dev);
6907+ if (ret < 0)
6908+ goto err_init_msg_buf;
6909+
6910+ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0);
6911+ if (!s->nop_wq) {
6912+ ret = -ENODEV;
6913+		dev_err(&pdev->dev, "Failed to create trusty-nop-wq\n");
6914+ goto err_create_nop_wq;
6915+ }
6916+
6917+ s->nop_works = alloc_percpu(struct trusty_work);
6918+ if (!s->nop_works) {
6919+ ret = -ENOMEM;
6920+ dev_err(&pdev->dev, "Failed to allocate works\n");
6921+ goto err_alloc_works;
6922+ }
6923+
6924+ if (s->api_version < TRUSTY_API_VERSION_SMP)
6925+ work_func = locked_nop_work_func;
6926+ else
6927+ work_func = nop_work_func;
6928+
6929+ for_each_possible_cpu(cpu) {
6930+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6931+
6932+ tw->ts = s;
6933+ INIT_WORK(&tw->work, work_func);
6934+ }
6935+
6936+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
6937+ if (ret < 0) {
6938+ dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
6939+ goto err_add_children;
6940+ }
6941+
6942+ return 0;
6943+
6944+err_add_children:
6945+ for_each_possible_cpu(cpu) {
6946+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6947+
6948+ flush_work(&tw->work);
6949+ }
6950+ free_percpu(s->nop_works);
6951+err_alloc_works:
6952+ destroy_workqueue(s->nop_wq);
6953+err_create_nop_wq:
6954+ trusty_free_msg_buf(s, &pdev->dev);
6955+err_init_msg_buf:
6956+err_api_version:
6957+ s->dev->dma_parms = NULL;
6958+ kfree(s->version_str);
6959+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
6960+ mutex_destroy(&s->share_memory_msg_lock);
6961+ mutex_destroy(&s->smc_lock);
6962+ kfree(s);
6963+err_allocate_state:
6964+ return ret;
6965+}
6966+
6967+static int trusty_remove(struct platform_device *pdev)
6968+{
6969+ unsigned int cpu;
6970+ struct trusty_state *s = platform_get_drvdata(pdev);
6971+
6972+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
6973+
6974+ for_each_possible_cpu(cpu) {
6975+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6976+
6977+ flush_work(&tw->work);
6978+ }
6979+ free_percpu(s->nop_works);
6980+ destroy_workqueue(s->nop_wq);
6981+
6982+ mutex_destroy(&s->share_memory_msg_lock);
6983+ mutex_destroy(&s->smc_lock);
6984+ trusty_free_msg_buf(s, &pdev->dev);
6985+ s->dev->dma_parms = NULL;
6986+ kfree(s->version_str);
6987+ kfree(s);
6988+ return 0;
6989+}
6990+
6991+static const struct of_device_id trusty_of_match[] = {
6992+ { .compatible = "android,trusty-smc-v1", },
6993+ {},
6994+};
6995+
6996+MODULE_DEVICE_TABLE(of, trusty_of_match);
6997+
6998+static struct platform_driver trusty_driver = {
6999+ .probe = trusty_probe,
7000+ .remove = trusty_remove,
7001+ .driver = {
7002+ .name = "trusty",
7003+ .of_match_table = trusty_of_match,
7004+ .dev_groups = trusty_groups,
7005+ },
7006+};
7007+
7008+static int __init trusty_driver_init(void)
7009+{
7010+ return platform_driver_register(&trusty_driver);
7011+}
7012+
7013+static void __exit trusty_driver_exit(void)
7014+{
7015+ platform_driver_unregister(&trusty_driver);
7016+}
7017+
7018+subsys_initcall(trusty_driver_init);
7019+module_exit(trusty_driver_exit);
7020+
7021+MODULE_LICENSE("GPL v2");
7022+MODULE_DESCRIPTION("Trusty core driver");
7023diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h
7024new file mode 100644
7025index 000000000000..ab7b2afb794c
7026--- /dev/null
7027+++ b/include/linux/trusty/arm_ffa.h
7028@@ -0,0 +1,590 @@
7029+/* SPDX-License-Identifier: MIT */
7030+/*
7031+ * Copyright (C) 2020 Google, Inc.
7032+ *
7033+ * Trusty and TF-A also have a copy of this header.
7034+ * Please keep the copies in sync.
7035+ */
7036+#ifndef __LINUX_TRUSTY_ARM_FFA_H
7037+#define __LINUX_TRUSTY_ARM_FFA_H
7038+
7039+/*
7040+ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0
7041+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
7042+ */
7043+
7044+#include "smcall.h"
7045+
7046+#ifndef STATIC_ASSERT
7047+#define STATIC_ASSERT(e) _Static_assert(e, #e)
7048+#endif
7049+
7050+#define FFA_CURRENT_VERSION_MAJOR (1U)
7051+#define FFA_CURRENT_VERSION_MINOR (0U)
7052+
7053+#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16)
7054+#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff))
7055+#define FFA_VERSION(major, minor) (((major) << 16) | (minor))
7056+#define FFA_CURRENT_VERSION \
7057+ FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR)
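+
+/*
+ * For example, FFA_CURRENT_VERSION == FFA_VERSION(1, 0) == 0x00010000,
+ * so FFA_VERSION_TO_MAJOR(FFA_CURRENT_VERSION) == 1 and
+ * FFA_VERSION_TO_MINOR(FFA_CURRENT_VERSION) == 0.
+ */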
7058+
7059+#define SMC_ENTITY_SHARED_MEMORY 4
7060+
7061+#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \
7062+ SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr)
7063+#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \
7064+ SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr)
7065+
7066+/**
7067+ * typedef ffa_endpoint_id16_t - Endpoint ID
7068+ *
7069+ * Current implementation only supports VMIDs. FFA spec also supports stream
7070+ * endpoint ids.
7071+ */
7072+typedef uint16_t ffa_endpoint_id16_t;
7073+
7074+/**
7075+ * struct ffa_cons_mrd - Constituent memory region descriptor
7076+ * @address:
7077+ * Start address of contiguous memory region. Must be 4K page aligned.
7078+ * @page_count:
7079+ * Number of 4K pages in region.
7080+ * @reserved_12_15:
7081+ * Reserve bytes 12-15 to pad struct size to 16 bytes.
7082+ */
7083+struct ffa_cons_mrd {
7084+ uint64_t address;
7085+ uint32_t page_count;
7086+ uint32_t reserved_12_15;
7087+};
7088+STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16);
7089+
7090+/**
7091+ * struct ffa_comp_mrd - Composite memory region descriptor
7092+ * @total_page_count:
7093+ * Number of 4k pages in memory region. Must match sum of
7094+ * @address_range_array[].page_count.
7095+ * @address_range_count:
7096+ * Number of entries in @address_range_array.
7097+ * @reserved_8_15:
7098+ * Reserve bytes 8-15 to pad struct size to 16 byte alignment and
7099+ * make @address_range_array 16 byte aligned.
7100+ * @address_range_array:
7101+ * Array of &struct ffa_cons_mrd entries.
7102+ */
7103+struct ffa_comp_mrd {
7104+ uint32_t total_page_count;
7105+ uint32_t address_range_count;
7106+ uint64_t reserved_8_15;
7107+ struct ffa_cons_mrd address_range_array[];
7108+};
7109+STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16);
7110+
7111+/**
7112+ * typedef ffa_mem_attr8_t - Memory region attributes
7113+ *
7114+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
7115+ * Device-nGnRnE.
7116+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
7117+ * Device-nGnRE.
7118+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
7119+ * Device-nGRE.
7120+ * * @FFA_MEM_ATTR_DEVICE_GRE:
7121+ * Device-GRE.
7122+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
7123+ *     Normal memory. Non-cacheable.
7124+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB:
7125+ *     Normal memory. Write-back cached.
7126+ * * @FFA_MEM_ATTR_NON_SHAREABLE:
7127+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7128+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE:
7129+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7130+ * * @FFA_MEM_ATTR_INNER_SHAREABLE:
7131+ * Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7132+ */
7133+typedef uint8_t ffa_mem_attr8_t;
7134+#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2))
7135+#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2))
7136+#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2))
7137+#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2))
7138+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2))
7139+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2))
7140+#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0)
7141+#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0)
7142+#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0)
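+
+/*
+ * For example, normal write-back cached, inner-shareable memory is
+ * described as FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
+ * FFA_MEM_ATTR_INNER_SHAREABLE == 0x2f.
+ */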
7143+
7144+/**
7145+ * typedef ffa_mem_perm8_t - Memory access permissions
7146+ *
7147+ * * @FFA_MEM_PERM_RO:
7148+ *     Request or specify read-only mapping.
7149+ * * @FFA_MEM_PERM_RW:
7150+ *     Request or allow read-write mapping.
7151+ * * @FFA_MEM_PERM_NX:
7152+ *     Deny executable mapping.
7153+ * * @FFA_MEM_PERM_X:
7154+ *     Request executable mapping.
7155+ */
7156+typedef uint8_t ffa_mem_perm8_t;
7157+#define FFA_MEM_PERM_RO (1U << 0)
7158+#define FFA_MEM_PERM_RW (1U << 1)
7159+#define FFA_MEM_PERM_NX (1U << 2)
7160+#define FFA_MEM_PERM_X (1U << 3)
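+
+/*
+ * For example, a read-write but non-executable mapping is requested
+ * with FFA_MEM_PERM_RW | FFA_MEM_PERM_NX == 0x6.
+ */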
7161+
7162+/**
7163+ * typedef ffa_mem_flag8_t - Endpoint memory flags
7164+ *
7165+ * * @FFA_MEM_FLAG_OTHER:
7166+ * Other borrower. Memory region must not be or was not retrieved on behalf
7167+ * of this endpoint.
7168+ */
7169+typedef uint8_t ffa_mem_flag8_t;
7170+#define FFA_MEM_FLAG_OTHER (1U << 0)
7171+
7172+/**
7173+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
7174+ *
7175+ * * @FFA_MTD_FLAG_ZERO_MEMORY:
7176+ *     Zero memory after unmapping from sender (must be 0 for share).
7177+ * * @FFA_MTD_FLAG_TIME_SLICING:
7178+ *     Not supported by this implementation.
7179+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH:
7180+ *     Zero memory after unmapping from borrowers (must be 0 for share).
7181+ * * @FFA_MTD_FLAG_TYPE_MASK:
7182+ *     Bit-mask to extract memory management transaction type from flags.
7183+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
7184+ *     Share memory transaction flag.
7185+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
7186+ *     @SMC_FC_FFA_MEM_SHARE and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to specify that
7187+ *     it must have come from there.
7188+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK:
7189+ *     Not supported by this implementation.
7190+ */
7191+typedef uint32_t ffa_mtd_flag32_t;
7192+#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0)
7193+#define FFA_MTD_FLAG_TIME_SLICING (1U << 1)
7194+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2)
7195+#define FFA_MTD_FLAG_TYPE_MASK (3U << 3)
7196+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3)
7197+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5)
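+
+/*
+ * For example, a retrieve response for shared memory satisfies
+ * (flags & FFA_MTD_FLAG_TYPE_MASK) == FFA_MTD_FLAG_TYPE_SHARE_MEMORY.
+ */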
7198+
7199+/**
7200+ * struct ffa_mapd - Memory access permissions descriptor
7201+ * @endpoint_id:
7202+ * Endpoint id that @memory_access_permissions and @flags apply to.
7203+ * (&typedef ffa_endpoint_id16_t).
7204+ * @memory_access_permissions:
7205+ * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
7206+ * @flags:
7207+ * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
7208+ */
7209+struct ffa_mapd {
7210+ ffa_endpoint_id16_t endpoint_id;
7211+ ffa_mem_perm8_t memory_access_permissions;
7212+ ffa_mem_flag8_t flags;
7213+};
7214+STATIC_ASSERT(sizeof(struct ffa_mapd) == 4);
7215+
7216+/**
7217+ * struct ffa_emad - Endpoint memory access descriptor.
7218+ * @mapd: &struct ffa_mapd.
7219+ * @comp_mrd_offset:
7220+ *	Offset of &struct ffa_comp_mrd from the start of &struct ffa_mtd.
7221+ * @reserved_8_15:
7222+ * Reserved bytes 8-15. Must be 0.
7223+ */
7224+struct ffa_emad {
7225+ struct ffa_mapd mapd;
7226+ uint32_t comp_mrd_offset;
7227+ uint64_t reserved_8_15;
7228+};
7229+STATIC_ASSERT(sizeof(struct ffa_emad) == 16);
7230+
7231+/**
7232+ * struct ffa_mtd - Memory transaction descriptor.
7233+ * @sender_id:
7234+ * Sender endpoint id.
7235+ * @memory_region_attributes:
7236+ * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
7237+ * @reserved_3:
7238+ *	Reserved byte 3. Must be 0.
7239+ * @flags:
7240+ * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
7241+ * @handle:
7242+ *	Id of shared memory object. Must be 0 for MEM_SHARE.
7243+ * @tag: Client allocated tag. Must match original value.
7244+ * @reserved_24_27:
7245+ * Reserved bytes 24-27. Must be 0.
7246+ * @emad_count:
7247+ * Number of entries in @emad. Must be 1 in current implementation.
7248+ * FFA spec allows more entries.
7249+ * @emad:
7250+ * Endpoint memory access descriptor array (see @struct ffa_emad).
7251+ */
7252+struct ffa_mtd {
7253+ ffa_endpoint_id16_t sender_id;
7254+ ffa_mem_attr8_t memory_region_attributes;
7255+ uint8_t reserved_3;
7256+ ffa_mtd_flag32_t flags;
7257+ uint64_t handle;
7258+ uint64_t tag;
7259+ uint32_t reserved_24_27;
7260+ uint32_t emad_count;
7261+ struct ffa_emad emad[];
7262+};
7263+STATIC_ASSERT(sizeof(struct ffa_mtd) == 32);
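+
+/*
+ * With a single endpoint, as used by this driver, the transmit buffer
+ * holding a transaction is laid out as:
+ *
+ *   offset  0: struct ffa_mtd            (32 bytes)
+ *   offset 32: struct ffa_emad emad[1]   (16 bytes, comp_mrd_offset 48)
+ *   offset 48: struct ffa_comp_mrd       (16 bytes)
+ *   offset 64: struct ffa_cons_mrd array (16 bytes per address range)
+ */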
7264+
7265+/**
7266+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
7267+ * @handle:
7268+ * Id of shared memory object to relinquish.
7269+ * @flags:
7270+ * If bit 0 is set clear memory after unmapping from borrower. Must be 0
7271+ *	If bit 0 is set, clear memory after unmapping from borrower. Must be 0
7272+ * bits are reserved 0.
7273+ * @endpoint_count:
7274+ * Number of entries in @endpoint_array.
7275+ * @endpoint_array:
7276+ * Array of endpoint ids.
7277+ */
7278+struct ffa_mem_relinquish_descriptor {
7279+ uint64_t handle;
7280+ uint32_t flags;
7281+ uint32_t endpoint_count;
7282+ ffa_endpoint_id16_t endpoint_array[];
7283+};
7284+STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16);
7285+
7286+/**
7287+ * enum ffa_error - FF-A error code
7288+ * @FFA_ERROR_NOT_SUPPORTED:
7289+ * Operation contained possibly valid parameters not supported by the
7290+ * current implementation. Does not match FF-A 1.0 EAC 1_0 definition.
7291+ * @FFA_ERROR_INVALID_PARAMETERS:
7292+ * Invalid parameters. Conditions function specific.
7293+ * @FFA_ERROR_NO_MEMORY:
7294+ * Not enough memory.
7295+ * @FFA_ERROR_DENIED:
7296+ * Operation not allowed. Conditions function specific.
7297+ *
7298+ * FF-A 1.0 EAC 1_0 defines other error codes as well but the current
7299+ * implementation does not use them.
7300+ */
7301+enum ffa_error {
7302+ FFA_ERROR_NOT_SUPPORTED = -1,
7303+ FFA_ERROR_INVALID_PARAMETERS = -2,
7304+ FFA_ERROR_NO_MEMORY = -3,
7305+ FFA_ERROR_DENIED = -6,
7306+};
7307+
7308+/**
7309+ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA
7310+ */
7311+#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
7312+
7313+/**
7314+ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA
7315+ */
7316+#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F)
7317+
7318+/**
7319+ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA
7320+ */
7321+#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60)
7322+
7323+/**
7324+ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA
7325+ */
7326+#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F)
7327+
7328+/**
7329+ * SMC_FC_FFA_ERROR - SMC error return opcode
7330+ *
7331+ * Register arguments:
7332+ *
7333+ * * w1: VMID in [31:16], vCPU in [15:0]
7334+ * * w2: Error code (&enum ffa_error)
7335+ */
7336+#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
7337+
7338+/**
7339+ * SMC_FC_FFA_SUCCESS - 32 bit SMC success return opcode
7340+ *
7341+ * Register arguments:
7342+ *
7343+ * * w1: VMID in [31:16], vCPU in [15:0]
7344+ * * w2-w7: Function specific
7345+ */
7346+#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61)
7347+
7348+/**
7349+ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode
7350+ *
7351+ * Register arguments:
7352+ *
7353+ * * w1: VMID in [31:16], vCPU in [15:0]
7354+ * * w2/x2-w7/x7: Function specific
7355+ */
7356+#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61)
7357+
7358+/**
7359+ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version
7360+ *
7361+ * Register arguments:
7362+ *
7363+ * * w1: Major version bit[30:16] and minor version in bit[15:0] supported
7364+ * by caller. Bit[31] must be 0.
7365+ *
7366+ * Return:
7367+ * * w0: &SMC_FC_FFA_SUCCESS
7368+ * * w2: Major version bit[30:16], minor version in bit[15:0], bit[31] must
7369+ * be 0.
7370+ *
7371+ * or
7372+ *
7373+ * * w0: SMC_FC_FFA_ERROR
7374+ * * w2: FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the
7375+ * minimum major version supported.
7376+ */
7377+#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63)
7378+
7379+/**
7380+ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support
7381+ *
7382+ * Register arguments:
7383+ *
7384+ * * w1: FF-A function ID
7385+ *
7386+ * Return:
7387+ * * w0: &SMC_FC_FFA_SUCCESS
7388+ * * w2: Bit[0]: Supports custom buffers for memory transactions.
7389+ * Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary.
7390+ * Other bits must be 0.
7391+ * * w3: For FFA_MEM_RETRIEVE_REQ, bit[7-0]: Number of times receiver can
7392+ * retrieve each memory region before relinquishing it specified as
7393+ *     (1U << (value + 1)) - 1 (i.e. value = bits in reference count - 1).
7394+ * For all other bits and commands: must be 0.
7395+ * or
7396+ *
7397+ * * w0: SMC_FC_FFA_ERROR
7398+ * * w2: FFA_ERROR_NOT_SUPPORTED if function is not implemented, or
7399+ * FFA_ERROR_INVALID_PARAMETERS if function id is not valid.
7400+ */
7401+#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64)
7402+
7403+/**
7404+ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers
7405+ *
7406+ * Register arguments:
7407+ *
7408+ * * w1: TX address
7409+ * * w2: RX address
7410+ * * w3: RX/TX page count in bit[5:0]
7411+ *
7412+ * Return:
7413+ * * w0: &SMC_FC_FFA_SUCCESS
7414+ */
7415+#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66)
7416+
7417+/**
7418+ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers
7419+ *
7420+ * Register arguments:
7421+ *
7422+ * * x1: TX address
7423+ * * x2: RX address
7424+ * * x3: RX/TX page count in bit[5:0]
7425+ *
7426+ * Return:
7427+ * * w0: &SMC_FC_FFA_SUCCESS
7428+ */
7429+#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66)
7430+#ifdef CONFIG_64BIT
7431+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP
7432+#else
7433+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP
7434+#endif
7435+
7436+/**
7437+ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers
7438+ *
7439+ * Register arguments:
7440+ *
7441+ * * w1: ID in [31:16]
7442+ *
7443+ * Return:
7444+ * * w0: &SMC_FC_FFA_SUCCESS
7445+ */
7446+#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67)
7447+
7448+/**
7449+ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller
7450+ *
7451+ * Return:
7452+ * * w0: &SMC_FC_FFA_SUCCESS
7453+ * * w2: ID in bit[15:0], bit[31:16] must be 0.
7454+ */
7455+#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69)
7456+
7457+/**
7458+ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory
7459+ *
7460+ * Not supported.
7461+ */
7462+#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71)
7463+
7464+/**
7465+ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory
7466+ *
7467+ * Not currently supported.
7468+ */
7469+#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72)
7470+
7471+/**
7472+ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory
7473+ *
7474+ * Register arguments:
7475+ *
7476+ * * w1: Total length
7477+ * * w2: Fragment length
7478+ * * w3: Address
7479+ * * w4: Page count
7480+ *
7481+ * Return:
7482+ * * w0: &SMC_FC_FFA_SUCCESS
7483+ * * w2/w3: Handle
7484+ *
7485+ * or
7486+ *
7487+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
7488+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
7489+ *
7490+ * or
7491+ *
7492+ * * w0: SMC_FC_FFA_ERROR
7493+ * * w2: Error code (&enum ffa_error)
7494+ */
7495+#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73)
7496+
7497+/**
7498+ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory
7499+ *
7500+ * Register arguments:
7501+ *
7502+ * * w1: Total length
7503+ * * w2: Fragment length
7504+ * * x3: Address
7505+ * * w4: Page count
7506+ *
7507+ * Return:
7508+ * * w0: &SMC_FC_FFA_SUCCESS
7509+ * * w2/w3: Handle
7510+ *
7511+ * or
7512+ *
7513+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
7514+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
7515+ *
7516+ * or
7517+ *
7518+ * * w0: SMC_FC_FFA_ERROR
7519+ * * w2: Error code (&enum ffa_error)
7520+ */
7521+#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73)
7522+
7523+/**
7524+ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory
7525+ *
7526+ * Register arguments:
7527+ *
7528+ * * w1: Total length
7529+ * * w2: Fragment length
7530+ * * w3: Address
7531+ * * w4: Page count
7532+ *
7533+ * Return:
7534+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
7535+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
7536+ */
7537+#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74)
7538+
7539+/**
7540+ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory
7541+ *
7542+ * Register arguments:
7543+ *
7544+ * * w1: Total length
7545+ * * w2: Fragment length
7546+ * * x3: Address
7547+ * * w4: Page count
7548+ *
7549+ * Return:
7550+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
7551+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
7552+ */
7553+#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74)
7554+
7555+/**
7556+ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 bit SMC return opcode
7557+ *
7558+ * Register arguments:
7559+ *
7560+ * * w1: Total length
7561+ * * w2: Fragment length
7562+ */
7563+#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75)
7564+
7565+/**
7566+ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory
7567+ *
7568+ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer.
7569+ *
7570+ * Return:
7571+ * * w0: &SMC_FC_FFA_SUCCESS
7572+ */
7573+#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76)
7574+
7575+/**
7576+ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory
7577+ *
7578+ * Register arguments:
7579+ *
7580+ * * w1/w2: Handle
7581+ * * w3: Flags
7582+ *
7583+ * Return:
7584+ * * w0: &SMC_FC_FFA_SUCCESS
7585+ */
7586+#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77)
7587+
7588+/**
7589+ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment.
7590+ *
7591+ * Register arguments:
7592+ *
7593+ * * w1/w2: Cookie
7594+ * * w3: Fragment offset.
7595+ * * w4: Endpoint ID in bit[31:16], if the caller is a hypervisor.
7596+ *
7597+ * Return:
7598+ * * w0: &SMC_FC_FFA_MEM_FRAG_TX
7599+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_FRAG_TX
7600+ */
7601+#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A)
7602+
7603+/**
7604+ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment
7605+ *
7606+ * Register arguments:
7607+ *
7608+ * * w1/w2: Cookie
7609+ * * w3: Fragment length.
7610+ * * w4: Sender endpoint ID in bit[31:16], if the caller is a hypervisor.
7611+ *
7612+ * Return:
7613+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS.
7614+ * * w1/x1-w5/x5: See opcode in w0.
7615+ */
7616+#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B)
7617+
7618+#endif /* __LINUX_TRUSTY_ARM_FFA_H */
7619diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h
7620new file mode 100644
7621index 000000000000..f6504448c6c3
7622--- /dev/null
7623+++ b/include/linux/trusty/sm_err.h
7624@@ -0,0 +1,28 @@
7625+/* SPDX-License-Identifier: MIT */
7626+/*
7627+ * Copyright (c) 2013 Google Inc. All rights reserved
7628+ *
7629+ * Trusty and TF-A also have a copy of this header.
7630+ * Please keep the copies in sync.
7631+ */
7632+#ifndef __LINUX_TRUSTY_SM_ERR_H
7633+#define __LINUX_TRUSTY_SM_ERR_H
7634+
7635+/* Errors from the secure monitor */
7636+#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A (0.9.0)) */
7637+#define SM_ERR_INVALID_PARAMETERS -2
7638+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
7639+#define SM_ERR_UNEXPECTED_RESTART -4 /* Got a restart SMC when we didn't expect it */
7640+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
7641+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
7642+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
7643+#define SM_ERR_NOT_SUPPORTED -8
7644+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
7645+#define SM_ERR_END_OF_INPUT -10
7646+#define SM_ERR_PANIC -11 /* Secure OS crashed */
7647+#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */
7648+#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */
7649+#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */
7650+#define SM_ERR_NOP_DONE -15 /* CPU idle after SMC_SC_NOP (not an error) */
7651+
7652+#endif
7653diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h
7654new file mode 100644
7655index 000000000000..aea3f6068593
7656--- /dev/null
7657+++ b/include/linux/trusty/smcall.h
7658@@ -0,0 +1,124 @@
7659+/* SPDX-License-Identifier: MIT */
7660+/*
7661+ * Copyright (c) 2013-2014 Google Inc. All rights reserved
7662+ *
7663+ * Trusty and TF-A also have a copy of this header.
7664+ * Please keep the copies in sync.
7665+ */
7666+#ifndef __LINUX_TRUSTY_SMCALL_H
7667+#define __LINUX_TRUSTY_SMCALL_H
7668+
7669+#define SMC_NUM_ENTITIES 64
7670+#define SMC_NUM_ARGS 4
7671+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1)
7672+
7673+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000)
7674+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000)
7675+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24)
7676+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF)
7677+
7678+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1U) << 31) | \
7679+ (((smc64) & 0x1U) << 30) | \
7680+ (((entity) & 0x3FU) << 24) | \
7681+ ((fn) & 0xFFFFU) \
7682+ )
7683+
7684+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0)
7685+#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0)
7686+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1)
7687+#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1)
7688+
7689+#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */
7690+#define SMC_ENTITY_CPU 1 /* CPU Service calls */
7691+#define SMC_ENTITY_SIP 2 /* SIP Service calls */
7692+#define SMC_ENTITY_OEM 3 /* OEM Service calls */
7693+#define SMC_ENTITY_STD 4 /* Standard Service calls */
7694+#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */
7695+#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */
7696+#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */
7697+#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */
7698+#define SMC_ENTITY_TEST 52 /* Used for secure -> nonsecure tests */
7699+#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */
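/*
 * Worked example of the encoding above: a 32-bit fastcall to the secure
 * monitor entity sets bit 31 (fastcall), clears bit 30 (SMC32), and packs
 * the entity into bits [29:24] and the function number into bits [15:0]:
 *
 *   SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
 *     = (1 << 31) | (0 << 30) | (60 << 24) | 10 = 0xBC00000A
 *
 * which SMC_IS_FASTCALL(), SMC_ENTITY() and SMC_FUNCTION() decode back:
 */
#include <linux/build_bug.h>
static_assert(SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10) == 0xBC00000AU);
static_assert(SMC_ENTITY(0xBC00000AU) == SMC_ENTITY_SECURE_MONITOR);
static_assert(SMC_FUNCTION(0xBC00000AU) == 10);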
7700+
7701+/* FC = Fast call, SC = Standard call */
7702+#define SMC_SC_RESTART_LAST SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7703+#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
7704+
7705+/**
7706+ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an FIQ
7707+ *
7708+ * No arguments, no return value.
7709+ *
7710+ * Re-enter trusty after returning to the non-secure world to process an FIQ.
7711+ * Must be called iff trusty returns SM_ERR_FIQ_INTERRUPTED.
7712+ *
7713+ * Enable by selecting api version TRUSTY_API_VERSION_RESTART_FIQ (1) or later.
7714+ */
7715+#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
7716+
7717+/**
7718+ * SMC_SC_NOP - Enter trusty to run pending work.
7719+ *
7720+ * No arguments.
7721+ *
7722+ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE.
7723+ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated.
7724+ *
7725+ * Enable by selecting api version TRUSTY_API_VERSION_SMP (2) or later.
7726+ */
7727+#define SMC_SC_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
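/*
 * A minimal retry loop for the contract above (a sketch, assuming the
 * trusty_std_call32() helper declared in <linux/trusty/trusty.h> later in
 * this patch; a0-a2 model the parameters a queued struct trusty_nop would
 * carry):
 */
#include <linux/trusty/trusty.h>

static int trusty_do_nop(struct device *dev, u32 a0, u32 a1, u32 a2)
{
	s32 ret;

	do {
		/* repeat as long as the standard call was interrupted */
		ret = trusty_std_call32(dev, SMC_SC_NOP, a0, a1, a2);
	} while (ret == SM_ERR_NOP_INTERRUPTED);

	return ret == SM_ERR_NOP_DONE ? 0 : ret;
}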
7728+
7729+/*
7730+ * Return from secure os to non-secure os with return value in r1
7731+ */
7732+#define SMC_SC_NS_RETURN SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7733+
7734+#define SMC_FC_RESERVED SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7735+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
7736+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
7737+
7738+#define TRUSTY_IRQ_TYPE_NORMAL (0)
7739+#define TRUSTY_IRQ_TYPE_PER_CPU (1)
7740+#define TRUSTY_IRQ_TYPE_DOORBELL (2)
7741+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
7742+
7743+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
7744+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8)
7745+
7746+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9)
7747+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
7748+
7749+/**
7750+ * SMC_FC_API_VERSION - Find and select supported API version.
7751+ *
7752+ * @r1: Version supported by client.
7753+ *
7754+ * Returns version supported by trusty.
7755+ *
7756+ * If multiple versions are supported, the client should start by calling
7757+ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then
7758+ * return a version it supports. If the client does not support the version
7759+ * returned by trusty and the version returned is less than the version
7760+ * requested, repeat the call with the largest supported version less than the
7761+ * last returned version.
7762+ *
7763+ * This call must be made before any calls that are affected by the API version.
7764+ */
7765+#define TRUSTY_API_VERSION_RESTART_FIQ (1)
7766+#define TRUSTY_API_VERSION_SMP (2)
7767+#define TRUSTY_API_VERSION_SMP_NOP (3)
7768+#define TRUSTY_API_VERSION_PHYS_MEM_OBJ (4)
7769+#define TRUSTY_API_VERSION_MEM_OBJ (5)
7770+#define TRUSTY_API_VERSION_CURRENT (5)
7771+#define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11)
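/*
 * Sketch of the negotiation described above for a client that supports
 * every version up to TRUSTY_API_VERSION_CURRENT, so the "retry with a
 * lower version" step collapses into a range check; assumes the
 * trusty_fast_call32() helper declared in <linux/trusty/trusty.h>:
 */
#include <linux/trusty/trusty.h>

static u32 trusty_negotiate_api_version(struct device *dev)
{
	s32 ret = trusty_fast_call32(dev, SMC_FC_API_VERSION,
				     TRUSTY_API_VERSION_CURRENT, 0, 0);

	/* trusty must not answer with more than it was offered */
	if (ret < 0 || ret > TRUSTY_API_VERSION_CURRENT)
		return 0;	/* no usable common version */
	return ret;
}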
7772+
7773+/* TRUSTED_OS entity calls */
7774+#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
7775+#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
7776+#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
7777+
7778+#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
7779+#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
7780+#define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25)
7781+
7782+#endif /* __LINUX_TRUSTY_SMCALL_H */
7783diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h
7784new file mode 100644
7785index 000000000000..efbb36999a8b
7786--- /dev/null
7787+++ b/include/linux/trusty/trusty.h
7788@@ -0,0 +1,131 @@
7789+/* SPDX-License-Identifier: GPL-2.0-only */
7790+/*
7791+ * Copyright (C) 2013 Google, Inc.
7792+ */
7793+#ifndef __LINUX_TRUSTY_TRUSTY_H
7794+#define __LINUX_TRUSTY_TRUSTY_H
7795+
7796+#include <linux/kernel.h>
7797+#include <linux/trusty/sm_err.h>
7798+#include <linux/types.h>
7799+#include <linux/device.h>
7800+#include <linux/pagemap.h>
7801+
7802+
7803+#if IS_ENABLED(CONFIG_TRUSTY)
7804+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
7805+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
7806+#ifdef CONFIG_64BIT
7807+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2);
7808+#endif
7809+#else
7810+static inline s32 trusty_std_call32(struct device *dev, u32 smcnr,
7811+ u32 a0, u32 a1, u32 a2)
7812+{
7813+ return SM_ERR_UNDEFINED_SMC;
7814+}
7815+static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr,
7816+ u32 a0, u32 a1, u32 a2)
7817+{
7818+ return SM_ERR_UNDEFINED_SMC;
7819+}
7820+#ifdef CONFIG_64BIT
7821+static inline s64 trusty_fast_call64(struct device *dev,
7822+ u64 smcnr, u64 a0, u64 a1, u64 a2)
7823+{
7824+ return SM_ERR_UNDEFINED_SMC;
7825+}
7826+#endif
7827+#endif
7828+
7829+struct notifier_block;
7830+enum {
7831+ TRUSTY_CALL_PREPARE,
7832+ TRUSTY_CALL_RETURNED,
7833+};
7834+int trusty_call_notifier_register(struct device *dev,
7835+ struct notifier_block *n);
7836+int trusty_call_notifier_unregister(struct device *dev,
7837+ struct notifier_block *n);
7838+const char *trusty_version_str_get(struct device *dev);
7839+u32 trusty_get_api_version(struct device *dev);
7840+bool trusty_get_panic_status(struct device *dev);
7841+
7842+struct ns_mem_page_info {
7843+ u64 paddr;
7844+ u8 ffa_mem_attr;
7845+ u8 ffa_mem_perm;
7846+ u64 compat_attr;
7847+};
7848+
7849+int trusty_encode_page_info(struct ns_mem_page_info *inf,
7850+ struct page *page, pgprot_t pgprot);
7851+
7852+struct scatterlist;
7853+typedef u64 trusty_shared_mem_id_t;
7854+int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id,
7855+ struct scatterlist *sglist, unsigned int nents,
7856+ pgprot_t pgprot);
7857+int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id,
7858+ struct scatterlist *sglist, unsigned int nents,
7859+ pgprot_t pgprot);
7860+int trusty_transfer_memory(struct device *dev, u64 *id,
7861+ struct scatterlist *sglist, unsigned int nents,
7862+ pgprot_t pgprot, u64 tag, bool lend);
7863+int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id,
7864+ struct scatterlist *sglist, unsigned int nents);
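/*
 * Usage sketch for the API above (hypothetical caller, error paths
 * trimmed): share a single freshly allocated page with trusty, then
 * reclaim it with the same scatterlist before freeing the page.
 */
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int trusty_share_one_page(struct device *trusty_dev)
{
	trusty_shared_mem_id_t id;
	struct scatterlist sg;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE, 0);

	ret = trusty_share_memory(trusty_dev, &id, &sg, 1, PAGE_KERNEL);
	if (!ret) {
		/* ... pass @id to trusty, use the buffer ... */
		ret = trusty_reclaim_memory(trusty_dev, id, &sg, 1);
	}
	__free_page(page);
	return ret;
}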
7865+
7866+struct dma_buf;
7867+#ifdef CONFIG_TRUSTY_DMA_BUF_FFA_TAG
7868+u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf);
7869+#else
7870+static inline u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf)
7871+{
7872+ return 0;
7873+}
7874+#endif
7875+
7876+/* Invalid handle value is defined by FF-A spec */
7877+#ifdef CONFIG_TRUSTY_DMA_BUF_SHARED_MEM_ID
7878+/**
7879+ * trusty_dma_buf_get_shared_mem_id() - Get memory ID corresponding to a dma_buf
7880+ * @dma_buf: DMA buffer
7881+ * @id: Pointer to output trusty_shared_mem_id_t
7882+ *
7883+ * Sets @id to trusty_shared_mem_id_t corresponding to the given @dma_buf.
7884+ * @dma_buf "owns" the ID, i.e. is responsible for allocating/releasing it.
7885+ * @dma_buf with an allocated @id must be in secure memory and should only be
7886+ * sent to Trusty using TRUSTY_SEND_SECURE.
7887+ *
7888+ * Return:
7889+ * * 0 - success
7890+ * * -ENODATA - @dma_buf does not own a trusty_shared_mem_id_t
7891+ * * ... - @dma_buf should not be lent or shared
7892+ */
7893+int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
7894+ trusty_shared_mem_id_t *id);
7895+#else
7896+static inline int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
7897+ trusty_shared_mem_id_t *id)
7898+{
7899+ return -ENODATA;
7900+}
7901+#endif
7902+
7903+struct trusty_nop {
7904+ struct list_head node;
7905+ u32 args[3];
7906+};
7907+
7908+static inline void trusty_nop_init(struct trusty_nop *nop,
7909+ u32 arg0, u32 arg1, u32 arg2) {
7910+ INIT_LIST_HEAD(&nop->node);
7911+ nop->args[0] = arg0;
7912+ nop->args[1] = arg1;
7913+ nop->args[2] = arg2;
7914+}
7915+
7916+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop);
7917+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop);
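/*
 * Sketch (hypothetical caller): queue a parameterless nop so the core
 * driver issues SMC_SC_NOP and lets trusty run pending work. The nop must
 * remain valid until it is consumed or trusty_dequeue_nop() is called.
 */
static struct trusty_nop example_nop;

static void example_kick(struct device *trusty_dev)
{
	trusty_nop_init(&example_nop, 0, 0, 0);
	trusty_enqueue_nop(trusty_dev, &example_nop);
}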
7918+
7919+#endif
7920diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h
7921new file mode 100644
7922index 000000000000..9386392f3a64
7923--- /dev/null
7924+++ b/include/linux/trusty/trusty_ipc.h
7925@@ -0,0 +1,89 @@
7926+/* SPDX-License-Identifier: GPL-2.0-only */
7927+/*
7928+ * Copyright (C) 2015 Google, Inc.
7929+ */
7930+#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H
7931+#define __LINUX_TRUSTY_TRUSTY_IPC_H
7932+
7933+#include <linux/list.h>
7934+#include <linux/scatterlist.h>
7935+#include <linux/trusty/trusty.h>
7936+#include <linux/types.h>
7937+
7938+struct tipc_chan;
7939+
7940+struct tipc_msg_buf {
7941+ void *buf_va;
7942+ struct scatterlist sg;
7943+ trusty_shared_mem_id_t buf_id;
7944+ size_t buf_sz;
7945+ size_t wpos;
7946+ size_t rpos;
7947+ size_t shm_cnt;
7948+ struct list_head node;
7949+};
7950+
7951+enum tipc_chan_event {
7952+ TIPC_CHANNEL_CONNECTED = 1,
7953+ TIPC_CHANNEL_DISCONNECTED,
7954+ TIPC_CHANNEL_SHUTDOWN,
7955+};
7956+
7957+struct tipc_chan_ops {
7958+ void (*handle_event)(void *cb_arg, int event);
7959+ struct tipc_msg_buf *(*handle_msg)(void *cb_arg,
7960+ struct tipc_msg_buf *mb);
7961+ void (*handle_release)(void *cb_arg);
7962+};
7963+
7964+struct tipc_chan *tipc_create_channel(struct device *dev,
7965+ const struct tipc_chan_ops *ops,
7966+ void *cb_arg);
7967+
7968+int tipc_chan_connect(struct tipc_chan *chan, const char *port);
7969+
7970+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb);
7971+
7972+int tipc_chan_shutdown(struct tipc_chan *chan);
7973+
7974+void tipc_chan_destroy(struct tipc_chan *chan);
7975+
7976+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan);
7977+
7978+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
7979+
7980+struct tipc_msg_buf *
7981+tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout);
7982+
7983+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
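/*
 * Minimal in-kernel client sketch for the API above; the port name and
 * callbacks are illustrative, not from the patch. handle_msg() must return
 * a buffer for the next incoming message; returning @mb hands the same
 * buffer back, so the payload has to be consumed synchronously.
 */
#include <linux/err.h>
#include <linux/printk.h>

static void example_handle_event(void *cb_arg, int event)
{
	if (event == TIPC_CHANNEL_CONNECTED)
		pr_info("example channel connected\n");
}

static struct tipc_msg_buf *example_handle_msg(void *cb_arg,
					       struct tipc_msg_buf *mb)
{
	pr_info("example channel: %zu bytes received\n", mb_avail_data(mb));
	return mb;	/* reuse as the next rx buffer */
}

static void example_handle_release(void *cb_arg)
{
}

static const struct tipc_chan_ops example_ops = {
	.handle_event = example_handle_event,
	.handle_msg = example_handle_msg,
	.handle_release = example_handle_release,
};

static int example_connect(struct device *dev)
{
	struct tipc_chan *chan = tipc_create_channel(dev, &example_ops, NULL);

	if (IS_ERR(chan))
		return PTR_ERR(chan);
	return tipc_chan_connect(chan, "com.android.trusty.example");
}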
7984+
7985+static inline size_t mb_avail_space(struct tipc_msg_buf *mb)
7986+{
7987+ return mb->buf_sz - mb->wpos;
7988+}
7989+
7990+static inline size_t mb_avail_data(struct tipc_msg_buf *mb)
7991+{
7992+ return mb->wpos - mb->rpos;
7993+}
7994+
7995+static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len)
7996+{
7997+ void *pos = (u8 *)mb->buf_va + mb->wpos;
7998+
7999+ BUG_ON(mb->wpos + len > mb->buf_sz);
8000+ mb->wpos += len;
8001+ return pos;
8002+}
8003+
8004+static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len)
8005+{
8006+ void *pos = (u8 *)mb->buf_va + mb->rpos;
8007+
8008+ BUG_ON(mb->rpos + len > mb->wpos);
8009+ mb->rpos += len;
8010+ return pos;
8011+}
8012+
8013+#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */
8014+
8015diff --git a/include/uapi/linux/trusty/ipc.h b/include/uapi/linux/trusty/ipc.h
8016new file mode 100644
8017index 000000000000..af91035484f1
8018--- /dev/null
8019+++ b/include/uapi/linux/trusty/ipc.h
8020@@ -0,0 +1,65 @@
8021+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
8022+
8023+#ifndef _UAPI_LINUX_TRUSTY_IPC_H_
8024+#define _UAPI_LINUX_TRUSTY_IPC_H_
8025+
8026+#include <linux/ioctl.h>
8027+#include <linux/types.h>
8028+#include <linux/uio.h>
8029+
8030+/**
8031+ * enum transfer_kind - How to send an fd to Trusty
8032+ * @TRUSTY_SHARE: Memory will be accessible by Linux and Trusty. On ARM it
8033+ * will be mapped as nonsecure. Suitable for shared memory.
8034+ * The paired fd must be a "dma_buf".
8035+ * @TRUSTY_LEND: Memory will be accessible only to Trusty. On ARM it will
8036+ * be transitioned to "Secure" memory if Trusty is in
8037+ * TrustZone. This transfer kind is suitable for donating
8038+ * video buffers or other similar resources. The paired fd
8039+ * may need to come from a platform-specific allocator for
8040+ * memory that may be transitioned to "Secure".
8041+ * @TRUSTY_SEND_SECURE: Send memory that is already "Secure". Memory will be
8042+ * accessible only to Trusty. The paired fd may need to
8043+ * come from a platform-specific allocator that returns
8044+ * "Secure" buffers.
8045+ *
8046+ * Describes how the user would like the resource in question to be sent to
8047+ * Trusty. Options may be valid only for certain kinds of fds.
8048+ */
8049+enum transfer_kind {
8050+ TRUSTY_SHARE = 0,
8051+ TRUSTY_LEND = 1,
8052+ TRUSTY_SEND_SECURE = 2,
8053+};
8054+
8055+/**
8056+ * struct trusty_shm - Describes a transfer of memory to Trusty
8057+ * @fd: The fd to transfer
8058+ * @transfer: How to transfer it - see &enum transfer_kind
8059+ */
8060+struct trusty_shm {
8061+ __s32 fd;
8062+ __u32 transfer;
8063+};
8064+
8065+/**
8066+ * struct tipc_send_msg_req - Request struct for @TIPC_IOC_SEND_MSG
8067+ * @iov: Pointer to an array of &struct iovec describing data to be sent
8068+ * @shm: Pointer to an array of &struct trusty_shm describing any file
8069+ * descriptors to be transferred.
8070+ * @iov_cnt: Number of elements in the @iov array
8071+ * @shm_cnt: Number of elements in the @shm array
8072+ */
8073+struct tipc_send_msg_req {
8074+ __u64 iov;
8075+ __u64 shm;
8076+ __u64 iov_cnt;
8077+ __u64 shm_cnt;
8078+};
8079+
8080+#define TIPC_IOC_MAGIC 'r'
8081+#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *)
8082+#define TIPC_IOC_SEND_MSG _IOW(TIPC_IOC_MAGIC, 0x81, \
8083+ struct tipc_send_msg_req)
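/*
 * Userspace sketch: connect to a port, then send one iovec with
 * TIPC_IOC_SEND_MSG (plain write()/read() also work once connected).
 * The device node and port name are illustrative; the trusty-ipc driver
 * typically exposes /dev/trusty-ipc-dev0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/trusty/ipc.h>

static int example_send_msg(const char *port, const void *data, size_t len)
{
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct tipc_send_msg_req req = {
		.iov = (uintptr_t)&iov,
		.iov_cnt = 1,
		/* .shm and .shm_cnt stay zero: no fds are transferred */
	};
	int ret = -1;
	int fd = open("/dev/trusty-ipc-dev0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIPC_IOC_CONNECT, port) == 0 &&
	    ioctl(fd, TIPC_IOC_SEND_MSG, &req) >= 0)
		ret = 0;
	close(fd);
	return ret;
}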
8084+
8085+#endif
8086diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
8087index 80d76b75bccd..909905cd7618 100644
8088--- a/include/uapi/linux/virtio_ids.h
8089+++ b/include/uapi/linux/virtio_ids.h
8090@@ -42,6 +42,7 @@
8091 #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
8092 #define VIRTIO_ID_CAIF 12 /* Virtio caif */
8093 #define VIRTIO_ID_MEMORY_BALLOON 13 /* virtio memory balloon */
8094+#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */
8095 #define VIRTIO_ID_GPU 16 /* virtio GPU */
8096 #define VIRTIO_ID_CLOCK 17 /* virtio clock/timer */
8097 #define VIRTIO_ID_INPUT 18 /* virtio input */
8098--
80992.34.1
8100