From 3e1e61f54538e8ce4bcbb5a9a213624eafcae514 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com>
Date: Mon, 18 Nov 2013 20:46:48 -0800
Subject: [PATCH 18/32] ANDROID: trusty: Backport of trusty driver

This adds the Trusty driver from android-trusty-5.10.

Original commits:
b60d55f33484 ANDROID: trusty-ipc: Allow registering multiple handles
629a4d3318cc ANDROID: trusty: Support setting trusty_shared_mem_id_t
94a36a1374e7 ANDROID: trusty-log: Don't copy Trusty logs to linux kernel log
efc21cced8af ANDROID: trusty-log: rework buffer allocation
8cb1a07ca814 ANDROID: trusty-ipc: Fix lock protection of shared_handles
52cdd137fae0 ANDROID: trusty-log: support poll()
24c3649dceb9 ANDROID: trusty-irq: enqueue work in trusty_irq_cpu_up
05a05bdd921e ANDROID: trusty: Add config TRUSTY_CRASH_IS_PANIC
b5fbdba2ec72 ANDROID: trusty-ipc: Fix crash when running out of txbuffers
46da5b95605e ANDROID: trusty: Allow TRUSTY_LEND of buffers
2ebfb16645af ANDROID: trusty-virtio: remove unnecessary include of dma-mapping.h
bf9d994a65a2 ANDROID: trusty-log: Complement logging sink with unthrottled virtual file
d5cb51d0365d ANDROID: trusty-log: Refactor logging state to support concurrent sinks
b421a5ad3eb3 ANDROID: trusty-log: Sanitize u32 overflow of the log ring buffer write index
58e9681c57af ANDROID: trusty-log: On trusty panic, unthrottle sink to the kernel log
ba12be0f203a ANDROID: trusty-log: Update trusty log buffer size to hold a complete Trusty crash logs
a8a3f83e52b6 ANDROID: trusty_qemu_defconfig: Enable dma-buf and ion system heaps
988b52b392a1 ANDROID: trusty: Support setting FF-A Tag
f544e96489aa ANDROID: Add trusty_qemu_defconfig
8a9b09317f29 ANDROID: trusty-ipc: Switch from memfd to dma_buf
5460418ec9a4 ANDROID: trusty-irq: document new way of specifying IPIs
da3c30b943c2 ANDROID: trusty-irq: specify IPIs in new way
5b5bb7f74856 ANDROID: trusty: Add trusty-test driver
e80d87f422fd ANDROID: trusty: Add trusty-ipc driver
03c248cbf693 ANDROID: trusty: Add trusty-virtio driver
1047661edb97 ANDROID: trusty: Add trusty-log driver
18fd5c59b423 ANDROID: trusty: Add trusty-irq driver
479c39a683f8 ANDROID: trusty: Add trusty-core driver

Upstream-Status: Backport
Change-Id: I91f71b891a1091383a298e7fb2f9030382a19ca5
Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com>
---
 .../devicetree/bindings/trusty/trusty-irq.txt |   67 +
 .../devicetree/bindings/trusty/trusty-smc.txt |    6 +
 arch/arm/configs/trusty_qemu_defconfig        |  291 +++
 .../configs/trusty_qemu_defconfig.fragment    |   26 +
 drivers/Kconfig                               |    2 +
 drivers/Makefile                              |    1 +
 drivers/trusty/Kconfig                        |  116 +
 drivers/trusty/Makefile                       |   14 +
 drivers/trusty/trusty-ipc.c                   | 2256 +++++++++++++++++
 drivers/trusty/trusty-irq.c                   |  645 +++++
 drivers/trusty/trusty-log.c                   |  830 ++++++
 drivers/trusty/trusty-log.h                   |   28 +
 drivers/trusty/trusty-mem.c                   |  139 +
 drivers/trusty/trusty-smc-arm.S               |   41 +
 drivers/trusty/trusty-smc-arm64.S             |   35 +
 drivers/trusty/trusty-smc.h                   |   26 +
 drivers/trusty/trusty-test.c                  |  440 ++++
 drivers/trusty/trusty-test.h                  |   13 +
 drivers/trusty/trusty-virtio.c                |  840 ++++++
 drivers/trusty/trusty.c                       |  981 +++++++
 include/linux/trusty/arm_ffa.h                |  590 +++++
 include/linux/trusty/sm_err.h                 |   28 +
 include/linux/trusty/smcall.h                 |  124 +
 include/linux/trusty/trusty.h                 |  131 +
 include/linux/trusty/trusty_ipc.h             |   89 +
 include/uapi/linux/trusty/ipc.h               |   65 +
 include/uapi/linux/virtio_ids.h               |    1 +
 27 files changed, 7825 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/trusty/trusty-irq.txt
 create mode 100644 Documentation/devicetree/bindings/trusty/trusty-smc.txt
 create mode 100644 arch/arm/configs/trusty_qemu_defconfig
 create mode 100644 arch/arm64/configs/trusty_qemu_defconfig.fragment
 create mode 100644 drivers/trusty/Kconfig
 create mode 100644 drivers/trusty/Makefile
 create mode 100644 drivers/trusty/trusty-ipc.c
 create mode 100644 drivers/trusty/trusty-irq.c
 create mode 100644 drivers/trusty/trusty-log.c
 create mode 100644 drivers/trusty/trusty-log.h
 create mode 100644 drivers/trusty/trusty-mem.c
 create mode 100644 drivers/trusty/trusty-smc-arm.S
 create mode 100644 drivers/trusty/trusty-smc-arm64.S
 create mode 100644 drivers/trusty/trusty-smc.h
 create mode 100644 drivers/trusty/trusty-test.c
 create mode 100644 drivers/trusty/trusty-test.h
 create mode 100644 drivers/trusty/trusty-virtio.c
 create mode 100644 drivers/trusty/trusty.c
 create mode 100644 include/linux/trusty/arm_ffa.h
 create mode 100644 include/linux/trusty/sm_err.h
 create mode 100644 include/linux/trusty/smcall.h
 create mode 100644 include/linux/trusty/trusty.h
 create mode 100644 include/linux/trusty/trusty_ipc.h
 create mode 100644 include/uapi/linux/trusty/ipc.h

diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
new file mode 100644
index 000000000000..cbb545ad452b
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt
@@ -0,0 +1,67 @@
+Trusty irq interface
+
+Trusty requires non-secure irqs to be forwarded to the secure OS.
+
+Required properties:
+- compatible: "android,trusty-irq-v1"
+
+Optional properties:
+
+- interrupt-templates: is an optional property that works together
+  with "interrupt-ranges" to specify secure side to kernel IRQs mapping.
+
+  It is a list of entries, each one of which defines a group of interrupts
+  having common properties, and has the following format:
+    < phandle irq_id_pos [templ_data]>
+      phandle - phandle of interrupt controller this template is for
+      irq_id_pos - the position of irq id in interrupt specifier array
+                   for interrupt controller referenced by phandle.
+      templ_data - is an array of u32 values (could be empty) in the same
+                   format as interrupt specifier for interrupt controller
+                   referenced by phandle but with omitted irq id field.
+
+- interrupt-ranges: list of entries that specifies secure side to kernel
+  IRQs mapping.
+
+  Each entry in the "interrupt-ranges" list has the following format:
+    <beg end templ_idx>
+      beg - first entry in this range
+      end - last entry in this range
+      templ_idx - index of entry in "interrupt-templates" property
+                  that must be used as a template for all interrupts
+                  in this range
+
+- ipi-range: optional mapping of a linear range of trusty IRQs to a linear range
+  of IPIs (inter-processor interrupts). This has the following format:
+    <beg end ipi_base>
+      beg - first trusty IRQ number that is an IPI
+      end - last trusty IRQ number that is an IPI
+      ipi_base - IPI number of 'beg'
+
+Example:
+{
+	gic: interrupt-controller@50041000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		...
+	};
+	...
+	trusty {
+		compatible = "android,trusty-smc-v1";
+		ranges;
+		#address-cells = <2>;
+		#size-cells = <2>;
+
+		irq {
+			compatible = "android,trusty-irq-v1";
+			interrupt-templates = <&gic 1 GIC_PPI 0>,
+					      <&gic 1 GIC_SPI 0>;
+			interrupt-ranges = <16 31 0>,
+					   <32 223 1>;
+			ipi-range = <8 15 8>;
+		};
+	}
+}
+
+Must be a child of the node that provides the trusty std/fast call interface.
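
To make the template/range encoding concrete, the example above resolves as
follows (a sketch derived only from the property descriptions; n stands for
the irq id the driver fills in at irq_id_pos):

    trusty IRQ in <16 31 0>  -> template 0: <&gic 1 GIC_PPI 0> -> specifier <GIC_PPI n 0>
    trusty IRQ in <32 223 1> -> template 1: <&gic 1 GIC_SPI 0> -> specifier <GIC_SPI n 0>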
diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
new file mode 100644
index 000000000000..1b39ad317c67
--- /dev/null
+++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt
@@ -0,0 +1,6 @@
+Trusty smc interface
+
+Trusty is running in secure mode on the same (arm) cpu(s) as the current os.
+
+Required properties:
+- compatible: "android,trusty-smc-v1"
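
A minimal node for this binding (a sketch assembled from the required
property above and the example in trusty-irq.txt, where the irq interface
is nested as a child of this node):

	trusty {
		compatible = "android,trusty-smc-v1";
		ranges;
		#address-cells = <2>;
		#size-cells = <2>;
	};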
diff --git a/arch/arm/configs/trusty_qemu_defconfig b/arch/arm/configs/trusty_qemu_defconfig
new file mode 100644
index 000000000000..46ad9504c23d
--- /dev/null
+++ b/arch/arm/configs/trusty_qemu_defconfig
@@ -0,0 +1,291 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_VIRT=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_SMP=y
+CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_KSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_ESP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_TRUSTY=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+# CONFIG_FTRACE is not set
+CONFIG_DMA_API_DEBUG=y
+CONFIG_ATOMIC64_SELFTEST=y
diff --git a/arch/arm64/configs/trusty_qemu_defconfig.fragment b/arch/arm64/configs/trusty_qemu_defconfig.fragment
new file mode 100644
index 000000000000..166eef1797fd
--- /dev/null
+++ b/arch/arm64/configs/trusty_qemu_defconfig.fragment
@@ -0,0 +1,26 @@
+# From goldfish
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PMEM=y
+# From Trusty
+CONFIG_TRUSTY=y
+CONFIG_DMA_API_DEBUG=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_SEMIHOSTING_EXIT=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_REBOOT_EMULATOR_EXIT=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+# securefb test uses ION
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+# LTO slows down build times considerably. Disable it.
+# CONFIG_LTO_CLANG is not set
+# CONFIG_LTO_CLANG_FULL is not set
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dcecc9f6e33f..2e9abcc98126 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -86,6 +86,8 @@ source "drivers/hwmon/Kconfig"
 
 source "drivers/thermal/Kconfig"
 
+source "drivers/trusty/Kconfig"
+
 source "drivers/watchdog/Kconfig"
 
 source "drivers/ssb/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 576228037718..7d15799dbe77 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_W1) += w1/
 obj-y += power/
 obj-$(CONFIG_HWMON) += hwmon/
 obj-$(CONFIG_THERMAL) += thermal/
+obj-$(CONFIG_TRUSTY) += trusty/
 obj-$(CONFIG_WATCHDOG) += watchdog/
 obj-$(CONFIG_MD) += md/
 obj-$(CONFIG_BT) += bluetooth/
diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig
new file mode 100644
index 000000000000..fcde7f097acf
--- /dev/null
+++ b/drivers/trusty/Kconfig
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Trusty driver
+#
+
+menu "Trusty driver"
+
+config TRUSTY
+	tristate "Trusty core driver"
+	depends on ARM || ARM64
+	help
+	  Trusty is a secure OS that provides a Trusted Execution Environment
+	  (TEE) for Android. Trusty runs on the same processor as Linux but is
+	  isolated from the rest of the system by both hardware and software.
+
+	  This option enables the core part of the Linux kernel driver for
+	  Trusty. This doesn't do much by itself; you'll need to enable some of
+	  the sub-modules too.
+
+	  If you build this as a module, it will be called trusty-core.
+
+if TRUSTY
+
+config TRUSTY_IRQ
+	tristate "Trusty IRQ support"
+	default y
+	help
+	  Enable forwarding of IRQs from Linux to Trusty. This module retrieves
+	  from Trusty a list of IRQs that Trusty uses, and it registers handlers
+	  for them which notify Trusty that the IRQ has been received.
+
+	  If you build this as a module, it will be called trusty-irq.
+
+	  Usually this is needed for Trusty to work, so say 'y' or 'm'.
+
+config TRUSTY_LOG
+	tristate "Trusty log support"
+	default y
+	help
+	  Print log messages generated by the secure OS to the Linux kernel log.
+
+	  While this module is loaded, messages are retrieved and printed after
+	  each call into Trusty, and also during Linux kernel panics.
+
+	  If you build this as a module, it will be called trusty-log.
+
+config TRUSTY_TEST
+	tristate "Trusty stdcall test"
+	default y
+	help
+	  Allow running tests of the Trusty stdcall interface. Running these
+	  tests is initiated by userspace writing to a sysfs file.
+
+	  This depends on having a test service running on the Trusty side.
+
+	  If you build this as a module, it will be called trusty-test.
+
+config TRUSTY_VIRTIO
+	tristate "Trusty virtio support"
+	select VIRTIO
+	default y
+	help
+	  Enable the Trusty virtio driver, which is responsible for management
+	  and interaction with virtio devices exposed by Trusty. This driver
+	  requests the virtio device descriptors from Trusty, then parses them
+	  and adds the corresponding virtio devices.
+
+	  If you build this as a module, it will be called trusty-virtio.
+
+config TRUSTY_VIRTIO_IPC
+	tristate "Trusty Virtio IPC driver"
+	depends on TRUSTY_VIRTIO
+	default y
+	help
+	  Enable support for communicating with Trusty services.
+
+	  If you build this as a module, it will be called trusty-ipc.
+
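To show how the resulting character device is used, here is a hedged
userspace sketch. The device node name, the TIPC_IOC_MAGIC value and the
TIPC_IOC_CONNECT definition are assumptions modeled on the uapi header this
patch adds (only the 32-bit compat ioctl is visible in this excerpt), and
the echo service name is illustrative:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define TIPC_IOC_MAGIC   'r'  /* assumed; see include/uapi/linux/trusty/ipc.h */
	#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *)

	int echo_once(void)
	{
		char buf[64] = "hello";
		int fd = open("/dev/trusty-ipc-dev0", O_RDWR); /* assumed node name */

		if (fd < 0)
			return -1;
		/* issues a connection request and waits for the reply */
		if (ioctl(fd, TIPC_IOC_CONNECT, "com.android.ipc-unittest.srv.echo") < 0) {
			close(fd);
			return -1;
		}
		write(fd, buf, 5);          /* payload is sent as one tipc message */
		read(fd, buf, sizeof(buf)); /* blocks until the service replies */
		close(fd);
		return 0;
	}
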
+config TRUSTY_DMA_BUF_FFA_TAG
+	bool "Availability of trusty_dma_buf_get_ffa_tag"
+	default n
+	help
+	  Whether trusty_dma_buf_get_ffa_tag is provided on this platform.
+	  Providing this function will allow the platform to select what tag
+	  should be passed to the SPM when attempting to transfer the buffer
+	  to secure world. The value passed here is implementation defined and
+	  may depend on your SPM.
+
+	  If set to N, a default implementation which returns 0 will be used.
+
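When this is set to Y, the platform itself must provide the function; a
minimal sketch (signature assumed from the call site in trusty-ipc.c below;
the returned tag is a placeholder, since its value is implementation defined
and depends on your SPM):

	#include <linux/trusty/trusty.h>

	u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf)
	{
		/* Placeholder: a real platform would derive an SPM-specific
		 * tag from the buffer, e.g. from the heap it came from.
		 */
		return 0;
	}
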
+config TRUSTY_DMA_BUF_SHARED_MEM_ID
+	bool "Availability of trusty_dma_buf_get_shared_mem_id"
+	default n
+	help
+	  Whether trusty_dma_buf_get_shared_mem_id is provided on this platform.
+	  Providing this function allows the platform to manage memory
+	  transaction life cycle of DMA bufs independently of Trusty IPC driver.
+	  The latter can query trusty_shared_mem_id_t value allocated for a
+	  given DMA buf using trusty_dma_buf_get_shared_mem_id interface.
+
+	  If set to N, a default implementation which does not allocate any IDs
+	  will be used.
+
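Likewise, with this set to Y the platform supplies the function; a minimal
sketch that mirrors the documented default (signature assumed from the call
site in trusty-ipc.c below, which treats -ENODATA as "no preallocated ID"):

	#include <linux/trusty/trusty.h>

	int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
					     trusty_shared_mem_id_t *id)
	{
		/* No platform-managed ID for this buffer: the IPC driver will
		 * attach, map and share/lend the pages itself.
		 */
		return -ENODATA;
	}
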
+config TRUSTY_CRASH_IS_PANIC
+	bool "When trusty panics, then panic the kernel"
+	help
+	  This option will treat Trusty panics as fatal. This is useful if
+	  your system cannot recover from Trusty panic/halt and you require
+	  the system to reboot to recover.
+
+	  If N, the kernel will continue to run, but trusty operations will
+	  return errors.
+
+endif # TRUSTY
+
+endmenu
diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile
new file mode 100644
index 000000000000..2cf1cfccf97b
--- /dev/null
+++ b/drivers/trusty/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for trusty components
+#
+
+obj-$(CONFIG_TRUSTY)		+= trusty-core.o
+trusty-core-objs		+= trusty.o trusty-mem.o
+trusty-core-$(CONFIG_ARM)	+= trusty-smc-arm.o
+trusty-core-$(CONFIG_ARM64)	+= trusty-smc-arm64.o
+obj-$(CONFIG_TRUSTY_IRQ)	+= trusty-irq.o
+obj-$(CONFIG_TRUSTY_LOG)	+= trusty-log.o
+obj-$(CONFIG_TRUSTY_TEST)	+= trusty-test.o
+obj-$(CONFIG_TRUSTY_VIRTIO)	+= trusty-virtio.o
+obj-$(CONFIG_TRUSTY_VIRTIO_IPC)	+= trusty-ipc.o
diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c
new file mode 100644
index 000000000000..82d6ddeb41f4
--- /dev/null
+++ b/drivers/trusty/trusty-ipc.c
@@ -0,0 +1,2256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/aio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/dma-buf.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+#include <linux/file.h>
+
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+#include <linux/trusty/trusty.h>
+#include <linux/trusty/trusty_ipc.h>
+
+#include <uapi/linux/trusty/ipc.h>
+
+#define MAX_DEVICES 4
+
+#define REPLY_TIMEOUT 5000
+#define TXBUF_TIMEOUT 15000
+
+#define MAX_SRV_NAME_LEN 256
+#define MAX_DEV_NAME_LEN 32
+
+#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE
+#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE
+
+#define TIPC_CTRL_ADDR 53
+#define TIPC_ANY_ADDR 0xFFFFFFFF
+
+#define TIPC_MIN_LOCAL_ADDR 1024
+
+#ifdef CONFIG_COMPAT
+#define TIPC_IOC32_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, compat_uptr_t)
+#endif
+
+struct tipc_virtio_dev;
+
+struct tipc_dev_config {
+	u32 msg_buf_max_size;
+	u32 msg_buf_alignment;
+	char dev_name[MAX_DEV_NAME_LEN];
+} __packed;
+
+struct tipc_shm {
+	trusty_shared_mem_id_t obj_id;
+	u64 size;
+	u64 tag;
+};
+
+struct tipc_msg_hdr {
+	u32 src;
+	u32 dst;
+	u16 reserved;
+	u16 shm_cnt;
+	u16 len;
+	u16 flags;
+	u8 data[];
+} __packed;
+
+enum tipc_ctrl_msg_types {
+	TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
+	TIPC_CTRL_MSGTYPE_GO_OFFLINE,
+	TIPC_CTRL_MSGTYPE_CONN_REQ,
+	TIPC_CTRL_MSGTYPE_CONN_RSP,
+	TIPC_CTRL_MSGTYPE_DISC_REQ,
+	TIPC_CTRL_MSGTYPE_RELEASE,
+};
+
+struct tipc_ctrl_msg {
+	u32 type;
+	u32 body_len;
+	u8 body[];
+} __packed;
+
+struct tipc_conn_req_body {
+	char name[MAX_SRV_NAME_LEN];
+} __packed;
+
+struct tipc_conn_rsp_body {
+	u32 target;
+	u32 status;
+	u32 remote;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+} __packed;
+
+struct tipc_disc_req_body {
+	u32 target;
+} __packed;
+
+struct tipc_release_body {
+	trusty_shared_mem_id_t id;
+} __packed;
+
+struct tipc_cdev_node {
+	struct cdev cdev;
+	struct device *dev;
+	unsigned int minor;
+};
+
+enum tipc_device_state {
+	VDS_OFFLINE = 0,
+	VDS_ONLINE,
+	VDS_DEAD,
+};
+
+struct tipc_virtio_dev {
+	struct kref refcount;
+	struct mutex lock; /* protects access to this device */
+	struct virtio_device *vdev;
+	struct virtqueue *rxvq;
+	struct virtqueue *txvq;
+	unsigned int msg_buf_cnt;
+	unsigned int msg_buf_max_cnt;
+	size_t msg_buf_max_sz;
+	unsigned int free_msg_buf_cnt;
+	struct list_head free_buf_list;
+	wait_queue_head_t sendq;
+	struct idr addr_idr;
+	enum tipc_device_state state;
+	struct tipc_cdev_node cdev_node;
+	/* protects shared_handles, dev lock never acquired while held */
+	struct mutex shared_handles_lock;
+	struct rb_root shared_handles;
+	char cdev_name[MAX_DEV_NAME_LEN];
+};
+
+enum tipc_chan_state {
+	TIPC_DISCONNECTED = 0,
+	TIPC_CONNECTING,
+	TIPC_CONNECTED,
+	TIPC_STALE,
+};
+
+struct tipc_chan {
+	struct mutex lock; /* protects channel state */
+	struct kref refcount;
+	enum tipc_chan_state state;
+	struct tipc_virtio_dev *vds;
+	const struct tipc_chan_ops *ops;
+	void *ops_arg;
+	u32 remote;
+	u32 local;
+	u32 max_msg_size;
+	u32 max_msg_cnt;
+	char srv_name[MAX_SRV_NAME_LEN];
+};
+
+struct tipc_shared_handle {
+	struct rb_node node;
+	struct tipc_shm tipc;
+	struct tipc_virtio_dev *vds;
+	struct dma_buf *dma_buf;
+	bool shared;
+	/*
+	 * Following fields are only used if dma_buf does not own a
+	 * trusty_shared_mem_id_t.
+	 */
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+};
+
+static struct class *tipc_class;
+static unsigned int tipc_major;
+
+static struct virtio_device *default_vdev;
+
+static DEFINE_IDR(tipc_devices);
+static DEFINE_MUTEX(tipc_devices_lock);
+
+static int _match_any(int id, void *p, void *data)
+{
+	return id;
+}
+
+static int _match_data(int id, void *p, void *data)
+{
+	return (p == data);
+}
+
+static void *_alloc_shareable_mem(size_t sz, gfp_t gfp)
+{
+	return alloc_pages_exact(sz, gfp);
+}
+
+static void _free_shareable_mem(size_t sz, void *va)
+{
+	free_pages_exact(va, sz);
+}
+
+static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds,
+					      bool share_write)
+{
+	int ret;
+	struct tipc_msg_buf *mb;
+	size_t sz = vds->msg_buf_max_sz;
+	pgprot_t pgprot = share_write ? PAGE_KERNEL : PAGE_KERNEL_RO;
+
+	/* allocate tracking structure */
+	mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL);
+	if (!mb)
+		return NULL;
+
+	/* allocate buffer that can be shared with secure world */
+	mb->buf_va = _alloc_shareable_mem(sz, GFP_KERNEL);
+	if (!mb->buf_va)
+		goto err_alloc;
+
+	sg_init_one(&mb->sg, mb->buf_va, sz);
+	ret = trusty_share_memory_compat(vds->vdev->dev.parent->parent,
+					 &mb->buf_id, &mb->sg, 1, pgprot);
+	if (ret) {
+		dev_err(&vds->vdev->dev, "trusty_share_memory failed: %d\n",
+			ret);
+		goto err_share;
+	}
+
+	mb->buf_sz = sz;
+	mb->shm_cnt = 0;
+
+	return mb;
+
+err_share:
+	_free_shareable_mem(sz, mb->buf_va);
+err_alloc:
+	kfree(mb);
+	return NULL;
+}
+
+static void vds_free_msg_buf(struct tipc_virtio_dev *vds,
+			     struct tipc_msg_buf *mb)
+{
+	int ret;
+
+	ret = trusty_reclaim_memory(vds->vdev->dev.parent->parent, mb->buf_id,
+				    &mb->sg, 1);
+	if (WARN_ON(ret)) {
+		dev_err(&vds->vdev->dev,
+			"trusty_revoke_memory failed: %d txbuf %lld\n",
+			ret, mb->buf_id);
+
+		/*
+		 * It is not safe to free this memory if trusty_revoke_memory
+		 * fails. Leak it in that case.
+		 */
+	} else {
+		_free_shareable_mem(mb->buf_sz, mb->buf_va);
+	}
+	kfree(mb);
+}
+
+static void vds_free_msg_buf_list(struct tipc_virtio_dev *vds,
+				  struct list_head *list)
+{
+	struct tipc_msg_buf *mb = NULL;
+
+	mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	while (mb) {
+		list_del(&mb->node);
+		vds_free_msg_buf(vds, mb);
+		mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
+	}
+}
+
+static inline void mb_reset(struct tipc_msg_buf *mb)
+{
+	mb->wpos = 0;
+	mb->rpos = 0;
+}
+
+static inline void mb_reset_read(struct tipc_msg_buf *mb)
+{
+	mb->rpos = 0;
+}
+
+static void _free_vds(struct kref *kref)
+{
+	struct tipc_virtio_dev *vds =
+		container_of(kref, struct tipc_virtio_dev, refcount);
+	/*
+	 * If this WARN triggers, we're leaking remote memory references.
+	 *
+	 * No need to lock shared_handles_lock. All references to this lock
+	 * should already be gone by this point, since we are freeing it in this
+	 * function.
+	 */
+	WARN_ON(!RB_EMPTY_ROOT(&vds->shared_handles));
+	kfree(vds);
+}
+
+static void _free_chan(struct kref *kref)
+{
+	struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount);
+
+	if (ch->ops && ch->ops->handle_release)
+		ch->ops->handle_release(ch->ops_arg);
+
+	kref_put(&ch->vds->refcount, _free_vds);
+	kfree(ch);
+}
+
+static bool _put_txbuf_locked(struct tipc_virtio_dev *vds,
+			      struct tipc_msg_buf *mb)
+{
+	list_add_tail(&mb->node, &vds->free_buf_list);
+	return vds->free_msg_buf_cnt++ == 0;
+}
+
+static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	if (vds->state != VDS_ONLINE)
+		return ERR_PTR(-ENODEV);
+
+	if (vds->free_msg_buf_cnt) {
+		/* take it out of free list */
+		mb = list_first_entry(&vds->free_buf_list,
+				      struct tipc_msg_buf, node);
+		list_del(&mb->node);
+		mb->shm_cnt = 0;
+		vds->free_msg_buf_cnt--;
+	} else {
+		if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt)
+			return ERR_PTR(-EAGAIN);
+
+		/* try to allocate it */
+		mb = vds_alloc_msg_buf(vds, false);
+		if (!mb)
+			return ERR_PTR(-ENOMEM);
+
+		vds->msg_buf_cnt++;
+	}
+	return mb;
+}
+
+static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds)
+{
+	struct tipc_msg_buf *mb;
+
+	mutex_lock(&vds->lock);
+	mb = _get_txbuf_locked(vds);
+	mutex_unlock(&vds->lock);
+
+	return mb;
+}
+
+static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb)
+{
+	mutex_lock(&vds->lock);
+	_put_txbuf_locked(vds, mb);
+	wake_up_interruptible(&vds->sendq);
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds,
+					  long timeout)
+{
+	struct tipc_msg_buf *mb;
+
+	mb = _vds_get_txbuf(vds);
+
+	if ((PTR_ERR(mb) == -EAGAIN) && timeout) {
+		DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+		timeout = msecs_to_jiffies(timeout);
+		add_wait_queue(&vds->sendq, &wait);
+		for (;;) {
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+					     timeout);
+			if (!timeout) {
+				mb = ERR_PTR(-ETIMEDOUT);
+				break;
+			}
+
+			if (signal_pending(current)) {
+				mb = ERR_PTR(-ERESTARTSYS);
+				break;
+			}
+
+			mb = _vds_get_txbuf(vds);
+			if (PTR_ERR(mb) != -EAGAIN)
+				break;
+		}
+		remove_wait_queue(&vds->sendq, &wait);
+	}
+
+	if (IS_ERR(mb))
+		return mb;
+
+	if (WARN_ON(!mb))
+		return ERR_PTR(-EINVAL);
+
+	/* reset and reserve space for message header */
+	mb_reset(mb);
+	mb_put_data(mb, sizeof(struct tipc_msg_hdr));
+
+	return mb;
+}
+
+static int vds_queue_txbuf(struct tipc_virtio_dev *vds,
+			   struct tipc_msg_buf *mb)
+{
+	int err;
+	struct scatterlist sg;
+	bool need_notify = false;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		sg_init_one(&sg, mb, mb->wpos);
+		err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL);
+		need_notify = virtqueue_kick_prepare(vds->txvq);
+	} else {
+		err = -ENODEV;
+	}
+	mutex_unlock(&vds->lock);
+
+	if (need_notify)
+		virtqueue_notify(vds->txvq);
+
+	return err;
+}
+
+static int vds_add_channel(struct tipc_virtio_dev *vds,
+			   struct tipc_chan *chan)
+{
+	int ret;
+
+	mutex_lock(&vds->lock);
+	if (vds->state == VDS_ONLINE) {
+		ret = idr_alloc(&vds->addr_idr, chan,
+				TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1,
+				GFP_KERNEL);
+		if (ret > 0) {
+			chan->local = ret;
+			kref_get(&chan->refcount);
+			ret = 0;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+	mutex_unlock(&vds->lock);
+
+	return ret;
+}
+
+static void vds_del_channel(struct tipc_virtio_dev *vds,
+			    struct tipc_chan *chan)
+{
+	mutex_lock(&vds->lock);
+	if (chan->local) {
+		idr_remove(&vds->addr_idr, chan->local);
+		chan->local = 0;
+		chan->remote = 0;
+		kref_put(&chan->refcount, _free_chan);
+	}
+	mutex_unlock(&vds->lock);
+}
+
+static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds,
+					    u32 addr)
+{
+	int id;
+	struct tipc_chan *chan = NULL;
+
+	mutex_lock(&vds->lock);
+	if (addr == TIPC_ANY_ADDR) {
+		id = idr_for_each(&vds->addr_idr, _match_any, NULL);
+		if (id > 0)
+			chan = idr_find(&vds->addr_idr, id);
+	} else {
+		chan = idr_find(&vds->addr_idr, addr);
+	}
+	if (chan)
+		kref_get(&chan->refcount);
+	mutex_unlock(&vds->lock);
+
+	return chan;
+}
+
+static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds,
+					    const struct tipc_chan_ops *ops,
+					    void *ops_arg)
+{
+	int ret;
+	struct tipc_chan *chan = NULL;
+
+	if (!vds)
+		return ERR_PTR(-ENOENT);
+
+	if (!ops)
+		return ERR_PTR(-EINVAL);
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	kref_get(&vds->refcount);
+	chan->vds = vds;
+	chan->ops = ops;
+	chan->ops_arg = ops_arg;
+	mutex_init(&chan->lock);
+	kref_init(&chan->refcount);
+	chan->state = TIPC_DISCONNECTED;
+
+	ret = vds_add_channel(vds, chan);
+	if (ret) {
+		kfree(chan);
+		kref_put(&vds->refcount, _free_vds);
+		return ERR_PTR(ret);
+	}
+
+	return chan;
+}
+
+static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst)
+{
+	struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr));
+
+	hdr->src = src;
+	hdr->dst = dst;
+	hdr->len = mb_avail_data(mb);
+	hdr->flags = 0;
+	hdr->shm_cnt = mb->shm_cnt;
+	hdr->reserved = 0;
+}
+
+static int tipc_shared_handle_new(struct tipc_shared_handle **shared_handle,
+				  struct tipc_virtio_dev *vds)
+{
+	struct tipc_shared_handle *out = kzalloc(sizeof(*out), GFP_KERNEL);
+
+	if (!out)
+		return -ENOMEM;
+
+	out->vds = vds;
+	*shared_handle = out;
+
+	return 0;
+}
+
+static struct device *tipc_shared_handle_dev(struct tipc_shared_handle
+					     *shared_handle)
+{
+	return shared_handle->vds->vdev->dev.parent->parent;
+}
+
+static bool is_same_memory_region(struct tipc_shared_handle *h1,
+				  struct tipc_shared_handle *h2)
+{
+	return h1->tipc.obj_id == h2->tipc.obj_id &&
+	       h1->tipc.size == h2->tipc.size &&
+	       h1->tipc.tag == h2->tipc.tag &&
+	       h1->dma_buf == h2->dma_buf &&
+	       h1->shared == h2->shared;
+}
+
+static bool dma_buf_owns_shared_mem_id(struct tipc_shared_handle *h)
+{
+	/* h->shared is true only if dma_buf did not own a shared memory ID */
+	return !h->shared;
+}
+
+static void tipc_shared_handle_register(struct tipc_shared_handle
+					*new_handle)
+{
+	struct tipc_virtio_dev *vds = new_handle->vds;
+	struct rb_node **new;
+	struct rb_node *parent = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	new = &vds->shared_handles.rb_node;
+	while (*new) {
+		struct tipc_shared_handle *handle =
+			rb_entry(*new, struct tipc_shared_handle, node);
+		parent = *new;
+		/*
+		 * An obj_id can be registered multiple times if it's owned by a
+		 * dma_buf, because in this case we use the same obj_id across
+		 * multiple memory transfer operations.
+		 */
+		if (handle->tipc.obj_id == new_handle->tipc.obj_id) {
+			if (dma_buf_owns_shared_mem_id(new_handle)) {
+				WARN_ON(!is_same_memory_region(handle,
+							       new_handle));
+			} else {
+				WARN(1, "This handle is already registered");
+				goto already_registered;
+			}
+		}
+
+		if (handle->tipc.obj_id > new_handle->tipc.obj_id)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_handle->node, parent, new);
+	rb_insert_color(&new_handle->node, &vds->shared_handles);
+
+already_registered:
+	mutex_unlock(&vds->shared_handles_lock);
+}
+
+static struct tipc_shared_handle *tipc_shared_handle_take(struct tipc_virtio_dev
+							  *vds,
+							  trusty_shared_mem_id_t
+							  obj_id)
+{
+	struct rb_node *node;
+	struct tipc_shared_handle *out = NULL;
+
+	mutex_lock(&vds->shared_handles_lock);
+
+	node = vds->shared_handles.rb_node;
+	while (node) {
+		struct tipc_shared_handle *handle =
+			rb_entry(node, struct tipc_shared_handle, node);
+		if (obj_id == handle->tipc.obj_id) {
+			rb_erase(node, &vds->shared_handles);
+			out = handle;
+			break;
+		} else if (obj_id < handle->tipc.obj_id) {
+			node = node->rb_left;
+		} else {
+			node = node->rb_right;
+		}
+	}
+
+	mutex_unlock(&vds->shared_handles_lock);
+
+	return out;
+}
+
+static int tipc_shared_handle_drop(struct tipc_shared_handle *shared_handle)
+{
+	int ret;
+	struct tipc_virtio_dev *vds = shared_handle->vds;
+	struct device *dev = tipc_shared_handle_dev(shared_handle);
+
+	if (shared_handle->shared) {
+		/*
+		 * If this warning fires, it means this shared handle was still
+		 * in the set of active handles. This shouldn't happen (calling
+		 * code should ensure it is out of the tree) but this serves as
+		 * an extra check before it is released.
+		 *
+		 * However, the take itself should clean this incorrect state up
+		 * by removing the handle from the tree.
+		 *
+		 * This warning is only applicable when registering a handle
+		 * multiple times is not allowed, i.e. when dma_buf doesn't own
+		 * the handle.
+		 */
+		WARN_ON(tipc_shared_handle_take(vds,
+						shared_handle->tipc.obj_id));
+
+		ret = trusty_reclaim_memory(dev,
+					    shared_handle->tipc.obj_id,
+					    shared_handle->sgt->sgl,
+					    shared_handle->sgt->orig_nents);
+		if (ret) {
+			/*
+			 * We can't safely release this, it may still be in
+			 * use outside Linux.
+			 */
+			dev_warn(dev, "Failed to drop handle, leaking...\n");
+			return ret;
+		}
+	}
+
+	if (shared_handle->sgt)
+		dma_buf_unmap_attachment(shared_handle->attach,
+					 shared_handle->sgt, DMA_BIDIRECTIONAL);
+	if (shared_handle->attach)
+		dma_buf_detach(shared_handle->dma_buf, shared_handle->attach);
+	if (shared_handle->dma_buf)
+		dma_buf_put(shared_handle->dma_buf);
+
+	kfree(shared_handle);
+
+	return 0;
+}
+
+/*****************************************************************************/
+
+struct tipc_chan *tipc_create_channel(struct device *dev,
+				      const struct tipc_chan_ops *ops,
+				      void *ops_arg)
+{
+	struct virtio_device *vd;
+	struct tipc_chan *chan;
+	struct tipc_virtio_dev *vds;
+
+	mutex_lock(&tipc_devices_lock);
+	if (dev) {
+		vd = container_of(dev, struct virtio_device, dev);
+	} else {
+		vd = default_vdev;
+		if (!vd) {
+			mutex_unlock(&tipc_devices_lock);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+	vds = vd->priv;
+	kref_get(&vds->refcount);
+	mutex_unlock(&tipc_devices_lock);
+
+	chan = vds_create_channel(vds, ops, ops_arg);
+	kref_put(&vds->refcount, _free_vds);
+	return chan;
+}
+EXPORT_SYMBOL(tipc_create_channel);
+
+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan)
+{
+	return vds_alloc_msg_buf(chan->vds, true);
+}
+EXPORT_SYMBOL(tipc_chan_get_rxbuf);
+
+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_free_msg_buf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_rxbuf);
+
+struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan,
+						 long timeout)
+{
+	return vds_get_txbuf(chan->vds, timeout);
+}
+EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout);
+
+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	vds_put_txbuf(chan->vds, mb);
+}
+EXPORT_SYMBOL(tipc_chan_put_txbuf);
+
+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb)
+{
+	int err;
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_CONNECTED:
+		fill_msg_hdr(mb, chan->local, chan->remote);
+		err = vds_queue_txbuf(chan->vds, mb);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		}
+		break;
+	case TIPC_DISCONNECTED:
+	case TIPC_CONNECTING:
+		err = -ENOTCONN;
+		break;
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+	}
+	mutex_unlock(&chan->lock);
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_queue_msg);
+
+
+int tipc_chan_connect(struct tipc_chan *chan, const char *name)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_conn_req_body *body;
+	struct tipc_msg_buf *txbuf;
+
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	/* reserve space for connection request control message */
+	msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+	body = (struct tipc_conn_req_body *)msg->body;
+
+	/* fill message */
+	msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ;
+	msg->body_len = sizeof(*body);
+
+	strncpy(body->name, name, sizeof(body->name));
+	body->name[sizeof(body->name)-1] = '\0';
+
+	mutex_lock(&chan->lock);
+	switch (chan->state) {
+	case TIPC_DISCONNECTED:
+		/* save service name we are connecting to */
+		strcpy(chan->srv_name, body->name);
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		} else {
+			chan->state = TIPC_CONNECTING;
+			txbuf = NULL; /* prevents discarding buffer */
+		}
+		break;
+	case TIPC_CONNECTED:
+	case TIPC_CONNECTING:
+		/* check if we are trying to connect to the same service */
+		if (strcmp(chan->srv_name, body->name) == 0)
+			err = 0;
+		else
+			if (chan->state == TIPC_CONNECTING)
+				err = -EALREADY; /* in progress */
+			else
+				err = -EISCONN; /* already connected */
+		break;
+
+	case TIPC_STALE:
+		err = -ESHUTDOWN;
+		break;
+	default:
+		err = -EBADFD;
+		dev_err(&chan->vds->vdev->dev,
+			"%s: unexpected channel state %d\n",
+			__func__, chan->state);
+		break;
+	}
+	mutex_unlock(&chan->lock);
+
+	if (txbuf)
+		tipc_chan_put_txbuf(chan, txbuf); /* discard it */
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_connect);
+
+int tipc_chan_shutdown(struct tipc_chan *chan)
+{
+	int err;
+	struct tipc_ctrl_msg *msg;
+	struct tipc_disc_req_body *body;
+	struct tipc_msg_buf *txbuf = NULL;
+
+	/* get tx buffer */
+	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
+	if (IS_ERR(txbuf))
+		return PTR_ERR(txbuf);
+
+	mutex_lock(&chan->lock);
+	if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) {
+		/* reserve space for disconnect request control message */
+		msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
+		body = (struct tipc_disc_req_body *)msg->body;
+
+		msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ;
+		msg->body_len = sizeof(*body);
+		body->target = chan->remote;
+
+		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
+		err = vds_queue_txbuf(chan->vds, txbuf);
+		if (err) {
+			/* this should never happen */
+			dev_err(&chan->vds->vdev->dev,
+				"%s: failed to queue tx buffer (%d)\n",
+				__func__, err);
+		}
+	} else {
+		err = -ENOTCONN;
+	}
+	chan->state = TIPC_STALE;
+	mutex_unlock(&chan->lock);
+
+	if (err) {
+		/* release buffer */
+		tipc_chan_put_txbuf(chan, txbuf);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(tipc_chan_shutdown);
+
+void tipc_chan_destroy(struct tipc_chan *chan)
+{
+	vds_del_channel(chan->vds, chan);
+	kref_put(&chan->refcount, _free_chan);
+}
+EXPORT_SYMBOL(tipc_chan_destroy);
+
+/***************************************************************************/
+
+struct tipc_dn_chan {
+	int state;
+	struct mutex lock; /* protects rx_msg_queue list and channel state */
+	struct tipc_chan *chan;
+	wait_queue_head_t readq;
+	struct completion reply_comp;
+	struct list_head rx_msg_queue;
+};
+
+static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
+{
+	int ret;
+
+	ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
+							msecs_to_jiffies(timeout));
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dn->lock);
+	if (!ret) {
+		/* no reply from remote */
+		dn->state = TIPC_STALE;
+		ret = -ETIMEDOUT;
+	} else {
+		/* got reply */
+		if (dn->state == TIPC_CONNECTED)
+			ret = 0;
+		else if (dn->state == TIPC_DISCONNECTED)
+			if (!list_empty(&dn->rx_msg_queue))
+				ret = 0;
+			else
+				ret = -ENOTCONN;
+		else
+			ret = -EIO;
+	}
+	mutex_unlock(&dn->lock);
+
+	return ret;
+}
+
+static struct tipc_msg_buf *dn_handle_msg(void *data,
+					  struct tipc_msg_buf *rxbuf)
+{
+	struct tipc_dn_chan *dn = data;
+	struct tipc_msg_buf *newbuf = rxbuf;
+
+	mutex_lock(&dn->lock);
+	if (dn->state == TIPC_CONNECTED) {
+		/* get new buffer */
+		newbuf = tipc_chan_get_rxbuf(dn->chan);
+		if (newbuf) {
+			/* queue an old buffer and return a new one */
+			list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
+			wake_up_interruptible(&dn->readq);
+		} else {
+			/*
+			 * return an old buffer effectively discarding
+			 * incoming message
+			 */
+			dev_err(&dn->chan->vds->vdev->dev,
+				"%s: discard incoming message\n", __func__);
+			newbuf = rxbuf;
+		}
+	}
+	mutex_unlock(&dn->lock);
+
+	return newbuf;
+}
+
+static void dn_connected(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+	dn->state = TIPC_CONNECTED;
+
+	/* complete all pending */
+	complete(&dn->reply_comp);
+
+	mutex_unlock(&dn->lock);
+}
+
+static void dn_disconnected(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+	dn->state = TIPC_DISCONNECTED;
+
+	/* complete all pending */
+	complete(&dn->reply_comp);
+
+	/* wakeup all readers */
+	wake_up_interruptible_all(&dn->readq);
+
+	mutex_unlock(&dn->lock);
+}
+
+static void dn_shutdown(struct tipc_dn_chan *dn)
+{
+	mutex_lock(&dn->lock);
+
+	/* set state to STALE */
+	dn->state = TIPC_STALE;
+
+	/* complete all pending */
+	complete(&dn->reply_comp);
+
+	/* wakeup all readers */
+	wake_up_interruptible_all(&dn->readq);
+
+	mutex_unlock(&dn->lock);
+}
+
+static void dn_handle_event(void *data, int event)
+{
+	struct tipc_dn_chan *dn = data;
+
+	switch (event) {
+	case TIPC_CHANNEL_SHUTDOWN:
+		dn_shutdown(dn);
+		break;
+
+	case TIPC_CHANNEL_DISCONNECTED:
+		dn_disconnected(dn);
+		break;
+
+	case TIPC_CHANNEL_CONNECTED:
+		dn_connected(dn);
+		break;
+
+	default:
+		dev_err(&dn->chan->vds->vdev->dev,
+			"%s: unhandled event %d\n", __func__, event);
+		break;
+	}
+}
+
+static void dn_handle_release(void *data)
+{
+	kfree(data);
+}
+
+static const struct tipc_chan_ops _dn_ops = {
+	.handle_msg = dn_handle_msg,
+	.handle_event = dn_handle_event,
+	.handle_release = dn_handle_release,
+};
+
+#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev)
+#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node)
+
+static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn)
+{
+	int ret;
+	struct tipc_virtio_dev *vds = NULL;
+
+	mutex_lock(&tipc_devices_lock);
+	ret = idr_for_each(&tipc_devices, _match_data, cdn);
+	if (ret) {
+		vds = cdn_to_vds(cdn);
+		kref_get(&vds->refcount);
+	}
+	mutex_unlock(&tipc_devices_lock);
+	return vds;
+}
+
+static int tipc_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	struct tipc_virtio_dev *vds;
+	struct tipc_dn_chan *dn;
+	struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev);
+
+	vds = _dn_lookup_vds(cdn);
+	if (!vds) {
+		ret = -ENOENT;
+		goto err_vds_lookup;
+	}
+
+	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+	if (!dn) {
+		ret = -ENOMEM;
+		goto err_alloc_chan;
+	}
+
+	mutex_init(&dn->lock);
+	init_waitqueue_head(&dn->readq);
+	init_completion(&dn->reply_comp);
+	INIT_LIST_HEAD(&dn->rx_msg_queue);
+
+	dn->state = TIPC_DISCONNECTED;
+
+	dn->chan = vds_create_channel(vds, &_dn_ops, dn);
+	if (IS_ERR(dn->chan)) {
+		ret = PTR_ERR(dn->chan);
+		goto err_create_chan;
+	}
+
+	filp->private_data = dn;
+	kref_put(&vds->refcount, _free_vds);
+	return 0;
+
+err_create_chan:
+	kfree(dn);
+err_alloc_chan:
+	kref_put(&vds->refcount, _free_vds);
+err_vds_lookup:
+	return ret;
+}
+
+
+static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name)
+{
+	int ret;
+	char name[MAX_SRV_NAME_LEN];
+
+	/* copy in service name from user space */
+	ret = strncpy_from_user(name, usr_name, sizeof(name));
+	if (ret < 0)
+		return ret;
+	if (ret == sizeof(name))
+		return -ENAMETOOLONG;
+
+	/* send connect request */
+	ret = tipc_chan_connect(dn->chan, name);
+	if (ret)
+		return ret;
+
+	/* and wait for reply */
+	return dn_wait_for_reply(dn, REPLY_TIMEOUT);
+}
+
+static int dn_share_fd(struct tipc_dn_chan *dn, int fd,
+		       enum transfer_kind transfer_kind,
+		       struct tipc_shared_handle **out)
+{
+	int ret = 0;
+	struct tipc_shared_handle *shared_handle = NULL;
+	struct file *file = NULL;
+	struct device *dev = &dn->chan->vds->vdev->dev;
+	bool writable = false;
+	pgprot_t prot;
+	u64 tag = 0;
+	trusty_shared_mem_id_t mem_id;
+	bool lend;
+
+	if (dn->state != TIPC_CONNECTED) {
+		dev_dbg(dev, "Tried to share fd while not connected\n");
+		return -ENOTCONN;
+	}
+
+	file = fget(fd);
+	if (!file) {
+		dev_dbg(dev, "Invalid fd (%d)\n", fd);
+		return -EBADF;
+	}
+
+	if (!(file->f_mode & FMODE_READ)) {
+		dev_dbg(dev, "Cannot create write-only mapping\n");
+		fput(file);
+		return -EACCES;
+	}
+
+	writable = file->f_mode & FMODE_WRITE;
+	prot = writable ? PAGE_KERNEL : PAGE_KERNEL_RO;
+	fput(file);
+	file = NULL;
+
+	ret = tipc_shared_handle_new(&shared_handle, dn->chan->vds);
+	if (ret)
+		return ret;
+
+	shared_handle->dma_buf = dma_buf_get(fd);
+	if (IS_ERR(shared_handle->dma_buf)) {
+		ret = PTR_ERR(shared_handle->dma_buf);
+		shared_handle->dma_buf = NULL;
+		dev_dbg(dev, "Unable to get dma buf from fd (%d)\n", ret);
+		goto cleanup_handle;
+	}
+
+	tag = trusty_dma_buf_get_ffa_tag(shared_handle->dma_buf);
+	ret = trusty_dma_buf_get_shared_mem_id(shared_handle->dma_buf, &mem_id);
+	/*
+	 * Buffers with a preallocated mem_id should only be sent to Trusty
+	 * using TRUSTY_SEND_SECURE. And conversely, TRUSTY_SEND_SECURE should
+	 * only be used to send buffers with preallocated mem_id.
1880+ */
1881+ if (!ret) {
1882+ /* Use shared memory ID owned by dma_buf */
1883+ /* TODO: Enforce transfer_kind == TRUSTY_SEND_SECURE */
1884+ WARN_ONCE(transfer_kind != TRUSTY_SEND_SECURE,
1885+ "Use TRUSTY_SEND_SECURE instead");
1886+ goto mem_id_allocated;
1887+ }
1888+
1889+ if (ret != -ENODATA) {
1890+ dev_err(dev, "dma_buf can't be transferred (%d)\n", ret);
1891+ goto cleanup_handle;
1892+ }
1893+
1894+ if (transfer_kind == TRUSTY_SEND_SECURE) {
1895+ dev_err(dev, "No mem ID for TRUSTY_SEND_SECURE\n");
1896+ goto cleanup_handle;
1897+ }
1898+ lend = (transfer_kind == TRUSTY_LEND);
1899+
1900+ shared_handle->attach = dma_buf_attach(shared_handle->dma_buf, dev);
1901+ if (IS_ERR(shared_handle->attach)) {
1902+ ret = PTR_ERR(shared_handle->attach);
1903+ shared_handle->attach = NULL;
1904+ dev_dbg(dev, "Unable to attach to dma_buf (%d)\n", ret);
1905+ goto cleanup_handle;
1906+ }
1907+
1908+ shared_handle->sgt = dma_buf_map_attachment(shared_handle->attach,
1909+ DMA_BIDIRECTIONAL);
1910+ if (IS_ERR(shared_handle->sgt)) {
1911+ ret = PTR_ERR(shared_handle->sgt);
1912+ shared_handle->sgt = NULL;
1913+ dev_dbg(dev, "Failed to match attachment (%d)\n", ret);
1914+ goto cleanup_handle;
1915+ }
1916+
1917+ ret = trusty_transfer_memory(tipc_shared_handle_dev(shared_handle),
1918+ &mem_id, shared_handle->sgt->sgl,
1919+ shared_handle->sgt->orig_nents, prot, tag,
1920+ lend);
1921+
1922+ if (ret < 0) {
1923+ dev_dbg(dev, "Transferring memory failed: %d\n", ret);
1924+ /*
1925+ * The handle now has a sgt containing the pages, so we no
1926+ * longer need to clean up the pages directly.
1927+ */
1928+ goto cleanup_handle;
1929+ }
1930+ shared_handle->shared = true;
1931+
1932+mem_id_allocated:
1933+ shared_handle->tipc.obj_id = mem_id;
1934+ shared_handle->tipc.size = shared_handle->dma_buf->size;
1935+ shared_handle->tipc.tag = tag;
1936+ *out = shared_handle;
1937+ return 0;
1938+
1939+cleanup_handle:
1940+ tipc_shared_handle_drop(shared_handle);
1941+ return ret;
1942+}
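+
+/*
+ * A summary of the three transfer kinds handled above (an illustrative
+ * reading of the dn_share_fd() logic, not normative documentation):
+ *  - TRUSTY_SHARE:       the buffer is shared via FF-A; both the kernel
+ *                        and Trusty may access it.
+ *  - TRUSTY_LEND:        the buffer is lent; the kernel relinquishes
+ *                        access while Trusty holds it.
+ *  - TRUSTY_SEND_SECURE: no new transfer is performed; the dma_buf
+ *                        already carries a preallocated
+ *                        trusty_shared_mem_id_t, which is sent as-is.
+ */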
1943+
1944+static ssize_t txbuf_write_iter(struct tipc_msg_buf *txbuf,
1945+ struct iov_iter *iter)
1946+{
1947+ size_t len;
1948+ /* message length */
1949+ len = iov_iter_count(iter);
1950+
1951+ /* check available space */
1952+ if (len > mb_avail_space(txbuf))
1953+ return -EMSGSIZE;
1954+
1955+ /* copy in message data */
1956+ if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len)
1957+ return -EFAULT;
1958+
1959+ return len;
1960+}
1961+
1962+static ssize_t txbuf_write_handles(struct tipc_msg_buf *txbuf,
1963+ struct tipc_shared_handle **shm_handles,
1964+ size_t shm_cnt)
1965+{
1966+ size_t idx;
1967+
1968+ /* message length */
1969+ size_t len = shm_cnt * sizeof(struct tipc_shm);
1970+
1971+ /* check available space */
1972+ if (len > mb_avail_space(txbuf))
1973+ return -EMSGSIZE;
1974+
1975+ /* copy over handles */
1976+ for (idx = 0; idx < shm_cnt; idx++) {
1977+ memcpy(mb_put_data(txbuf, sizeof(struct tipc_shm)),
1978+ &shm_handles[idx]->tipc,
1979+ sizeof(struct tipc_shm));
1980+ }
1981+
1982+ txbuf->shm_cnt += shm_cnt;
1983+
1984+ return len;
1985+}
1986+
1987+static long filp_send_ioctl(struct file *filp,
1988+ const struct tipc_send_msg_req __user *arg)
1989+{
1990+ struct tipc_send_msg_req req;
1991+ struct iovec fast_iovs[UIO_FASTIOV];
1992+ struct iovec *iov = fast_iovs;
1993+ struct iov_iter iter;
1994+ struct trusty_shm *shm = NULL;
1995+ struct tipc_shared_handle **shm_handles = NULL;
1996+ int shm_idx = 0;
1997+ int release_idx;
1998+ struct tipc_dn_chan *dn = filp->private_data;
1999+ struct tipc_virtio_dev *vds = dn->chan->vds;
2000+ struct device *dev = &vds->vdev->dev;
2001+ long timeout = TXBUF_TIMEOUT;
2002+ struct tipc_msg_buf *txbuf = NULL;
2003+ long ret = 0;
2004+ ssize_t data_len = 0;
2005+ ssize_t shm_len = 0;
2006+
2007+ if (copy_from_user(&req, arg, sizeof(req)))
2008+ return -EFAULT;
2009+
2010+ if (req.shm_cnt > U16_MAX)
2011+ return -E2BIG;
2012+
2013+ shm = kmalloc_array(req.shm_cnt, sizeof(*shm), GFP_KERNEL);
2014+ if (!shm)
2015+ return -ENOMEM;
2016+
2017+ shm_handles = kmalloc_array(req.shm_cnt, sizeof(*shm_handles),
2018+ GFP_KERNEL);
2019+ if (!shm_handles) {
2020+ ret = -ENOMEM;
2021+ goto shm_handles_alloc_failed;
2022+ }
2023+
2024+ if (copy_from_user(shm, u64_to_user_ptr(req.shm),
2025+ req.shm_cnt * sizeof(struct trusty_shm))) {
2026+ ret = -EFAULT;
2027+ goto load_shm_args_failed;
2028+ }
2029+
2030+ ret = import_iovec(READ, u64_to_user_ptr(req.iov), req.iov_cnt,
2031+ ARRAY_SIZE(fast_iovs), &iov, &iter);
2032+ if (ret < 0) {
2033+ dev_dbg(dev, "Failed to import iovec\n");
2034+ goto iov_import_failed;
2035+ }
2036+
2037+ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++) {
2038+ switch (shm[shm_idx].transfer) {
2039+ case TRUSTY_SHARE:
2040+ case TRUSTY_LEND:
2041+ case TRUSTY_SEND_SECURE:
2042+ break;
2043+ default:
2044+ dev_err(dev, "Unknown transfer type: 0x%x\n",
2045+ shm[shm_idx].transfer);
+ /* fail with an error code, not the import_iovec() byte count */
+ ret = -EINVAL;
2046+ goto shm_share_failed;
2047+ }
2048+ ret = dn_share_fd(dn, shm[shm_idx].fd, shm[shm_idx].transfer,
2049+ &shm_handles[shm_idx]);
2050+ if (ret) {
2051+ dev_dbg(dev, "Forwarding memory failed\n");
2053+ goto shm_share_failed;
2054+ }
2055+ }
2056+
2057+ if (filp->f_flags & O_NONBLOCK)
2058+ timeout = 0;
2059+
2060+ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
2061+ if (IS_ERR(txbuf)) {
2062+ dev_dbg(dev, "Failed to get txbuffer\n");
2063+ ret = PTR_ERR(txbuf);
2064+ goto get_txbuf_failed;
2065+ }
2066+
2067+ data_len = txbuf_write_iter(txbuf, &iter);
2068+ if (data_len < 0) {
2069+ ret = data_len;
2070+ goto txbuf_write_failed;
2071+ }
2072+
2073+ shm_len = txbuf_write_handles(txbuf, shm_handles, req.shm_cnt);
2074+ if (shm_len < 0) {
2075+ ret = shm_len;
2076+ goto txbuf_write_failed;
2077+ }
2078+
2079+ /*
2080+ * These need to be added to the index before queueing the message.
2081+ * As soon as the message is sent, we may receive a message back from
2082+ * Trusty saying it's no longer in use, and the shared_handle needs
2083+ * to be there when that happens.
2084+ */
2085+ for (shm_idx = 0; shm_idx < req.shm_cnt; shm_idx++)
2086+ tipc_shared_handle_register(shm_handles[shm_idx]);
2087+
2088+ ret = tipc_chan_queue_msg(dn->chan, txbuf);
2089+
2090+ if (ret)
2091+ goto queue_failed;
2092+
2093+ ret = data_len;
2094+
2095+common_cleanup:
2096+ kfree(iov);
2097+iov_import_failed:
2098+load_shm_args_failed:
2099+ kfree(shm_handles);
2100+shm_handles_alloc_failed:
2101+ kfree(shm);
2102+ return ret;
2103+
2104+queue_failed:
2105+ for (release_idx = 0; release_idx < req.shm_cnt; release_idx++)
2106+ tipc_shared_handle_take(vds,
2107+ shm_handles[release_idx]->tipc.obj_id);
2108+txbuf_write_failed:
2109+ tipc_chan_put_txbuf(dn->chan, txbuf);
2110+get_txbuf_failed:
2111+shm_share_failed:
2112+ for (shm_idx--; shm_idx >= 0; shm_idx--)
2113+ tipc_shared_handle_drop(shm_handles[shm_idx]);
2114+ goto common_cleanup;
2115+}
2116+
2117+static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2118+{
2119+ struct tipc_dn_chan *dn = filp->private_data;
2120+
2121+ switch (cmd) {
2122+ case TIPC_IOC_CONNECT:
2123+ return dn_connect_ioctl(dn, (char __user *)arg);
2124+ case TIPC_IOC_SEND_MSG:
2125+ return filp_send_ioctl(filp,
2126+ (const struct tipc_send_msg_req __user *)
2127+ arg);
2128+ default:
2129+ dev_dbg(&dn->chan->vds->vdev->dev,
2130+ "Unhandled ioctl cmd: 0x%x\n", cmd);
2131+ return -ENOTTY;
2132+ }
2133+}
2134+
2135+#ifdef CONFIG_COMPAT
2136+static long tipc_compat_ioctl(struct file *filp,
2137+ unsigned int cmd, unsigned long arg)
2138+{
2139+ struct tipc_dn_chan *dn = filp->private_data;
2140+
2141+ switch (cmd) {
2142+ case TIPC_IOC32_CONNECT:
2143+ cmd = TIPC_IOC_CONNECT;
2144+ break;
2145+ default:
2146+ dev_dbg(&dn->chan->vds->vdev->dev,
2147+ "Unhandled compat ioctl command: 0x%x\n", cmd);
2148+ return -ENOTTY;
2149+ }
2150+ return tipc_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2151+}
2152+#endif
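+
+/*
+ * A minimal sketch of how a userspace client might drive this interface
+ * (hypothetical example; the device node suffix, service name and dma_buf
+ * fd are assumptions, not part of this patch):
+ *
+ *	int fd = open("/dev/trusty-ipc-dev0", O_RDWR);
+ *	ioctl(fd, TIPC_IOC_CONNECT, "com.android.trusty.example");
+ *
+ *	struct trusty_shm shm = {
+ *		.fd = dmabuf_fd,		// hypothetical dma_buf fd
+ *		.transfer = TRUSTY_SHARE,
+ *	};
+ *	struct iovec iov = { .iov_base = msg, .iov_len = msg_len };
+ *	struct tipc_send_msg_req req = {
+ *		.iov = (u64)(uintptr_t)&iov,
+ *		.iov_cnt = 1,
+ *		.shm = (u64)(uintptr_t)&shm,
+ *		.shm_cnt = 1,
+ *	};
+ *	ioctl(fd, TIPC_IOC_SEND_MSG, &req);
+ *
+ * Messages without shared memory can use plain write()/read(), which are
+ * served by tipc_write_iter()/tipc_read_iter() below.
+ */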
2153+
2154+static inline bool _got_rx(struct tipc_dn_chan *dn)
2155+{
2156+ if (dn->state != TIPC_CONNECTED)
2157+ return true;
2158+
2159+ if (!list_empty(&dn->rx_msg_queue))
2160+ return true;
2161+
2162+ return false;
2163+}
2164+
2165+static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2166+{
2167+ ssize_t ret;
2168+ size_t len;
2169+ struct tipc_msg_buf *mb;
2170+ struct file *filp = iocb->ki_filp;
2171+ struct tipc_dn_chan *dn = filp->private_data;
2172+
2173+ mutex_lock(&dn->lock);
2174+
2175+ while (list_empty(&dn->rx_msg_queue)) {
2176+ if (dn->state != TIPC_CONNECTED) {
2177+ if (dn->state == TIPC_CONNECTING ||
2178+     dn->state == TIPC_DISCONNECTED)
2179+ ret = -ENOTCONN;
2181+ else if (dn->state == TIPC_STALE)
2182+ ret = -ESHUTDOWN;
2183+ else
2184+ ret = -EBADFD;
2185+ goto out;
2186+ }
2187+
2188+ mutex_unlock(&dn->lock);
2189+
2190+ if (filp->f_flags & O_NONBLOCK)
2191+ return -EAGAIN;
2192+
2193+ if (wait_event_interruptible(dn->readq, _got_rx(dn)))
2194+ return -ERESTARTSYS;
2195+
2196+ mutex_lock(&dn->lock);
2197+ }
2198+
2199+ mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);
2200+
2201+ len = mb_avail_data(mb);
2202+ if (len > iov_iter_count(iter)) {
2203+ ret = -EMSGSIZE;
2204+ goto out;
2205+ }
2206+
2207+ if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) {
2208+ ret = -EFAULT;
2209+ goto out;
2210+ }
2211+
2212+ ret = len;
2213+ list_del(&mb->node);
2214+ tipc_chan_put_rxbuf(dn->chan, mb);
2215+
2216+out:
2217+ mutex_unlock(&dn->lock);
2218+ return ret;
2219+}
2220+
2221+static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter)
2222+{
2223+ struct file *filp = iocb->ki_filp;
2224+ struct tipc_dn_chan *dn = filp->private_data;
2225+ long timeout = TXBUF_TIMEOUT;
2226+ struct tipc_msg_buf *txbuf = NULL;
2227+ ssize_t ret = 0;
2228+ ssize_t len = 0;
2229+
2230+ if (filp->f_flags & O_NONBLOCK)
2231+ timeout = 0;
2232+
2233+ txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
2234+
2235+ if (IS_ERR(txbuf))
2236+ return PTR_ERR(txbuf);
2237+
2238+ len = txbuf_write_iter(txbuf, iter);
2239+ if (len < 0)
2240+ goto err_out;
2241+
2242+ /* queue message */
2243+ ret = tipc_chan_queue_msg(dn->chan, txbuf);
2244+ if (ret)
2245+ goto err_out;
2246+
2247+ return len;
2248+
2249+err_out:
2250+ tipc_chan_put_txbuf(dn->chan, txbuf);
2251+ return ret;
2252+}
2253+
2254+static __poll_t tipc_poll(struct file *filp, poll_table *wait)
2255+{
2256+ __poll_t mask = 0;
2257+ struct tipc_dn_chan *dn = filp->private_data;
2258+
2259+ mutex_lock(&dn->lock);
2260+
2261+ poll_wait(filp, &dn->readq, wait);
2262+
2263+ /* Writes always succeed for now */
2264+ mask |= EPOLLOUT | EPOLLWRNORM;
2265+
2266+ if (!list_empty(&dn->rx_msg_queue))
2267+ mask |= EPOLLIN | EPOLLRDNORM;
2268+
2269+ if (dn->state != TIPC_CONNECTED)
2270+ mask |= EPOLLERR;
2271+
2272+ mutex_unlock(&dn->lock);
2273+ return mask;
2274+}
2275+
2277+static int tipc_release(struct inode *inode, struct file *filp)
2278+{
2279+ struct tipc_dn_chan *dn = filp->private_data;
2280+
2281+ dn_shutdown(dn);
2282+
2283+ /* free all pending buffers */
2284+ vds_free_msg_buf_list(dn->chan->vds, &dn->rx_msg_queue);
2285+
2286+ /* shutdown channel */
2287+ tipc_chan_shutdown(dn->chan);
2288+
2289+ /* and destroy it */
2290+ tipc_chan_destroy(dn->chan);
2291+
2292+ return 0;
2293+}
2294+
2295+static const struct file_operations tipc_fops = {
2296+ .open = tipc_open,
2297+ .release = tipc_release,
2298+ .unlocked_ioctl = tipc_ioctl,
2299+#ifdef CONFIG_COMPAT
2300+ .compat_ioctl = tipc_compat_ioctl,
2301+#endif
2302+ .read_iter = tipc_read_iter,
2303+ .write_iter = tipc_write_iter,
2304+ .poll = tipc_poll,
2305+ .owner = THIS_MODULE,
2306+};
2307+
2308+/*****************************************************************************/
2309+
2310+static void chan_trigger_event(struct tipc_chan *chan, int event)
2311+{
2312+ if (!event)
2313+ return;
2314+
2315+ chan->ops->handle_event(chan->ops_arg, event);
2316+}
2317+
2318+static void _cleanup_vq(struct tipc_virtio_dev *vds, struct virtqueue *vq)
2319+{
2320+ struct tipc_msg_buf *mb;
2321+
2322+ while ((mb = virtqueue_detach_unused_buf(vq)) != NULL)
2323+ vds_free_msg_buf(vds, mb);
2324+}
2325+
2326+static int _create_cdev_node(struct device *parent,
2327+ struct tipc_cdev_node *cdn,
2328+ const char *name)
2329+{
2330+ int ret;
2331+ dev_t devt;
2332+
2333+ if (!name) {
2334+ dev_dbg(parent, "%s: cdev name has to be provided\n",
2335+ __func__);
2336+ return -EINVAL;
2337+ }
2338+
2339+ /* allocate minor */
2340+ ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES, GFP_KERNEL);
2341+ if (ret < 0) {
2342+ dev_dbg(parent, "%s: failed (%d) to get id\n",
2343+ __func__, ret);
2344+ return ret;
2345+ }
2346+
2347+ cdn->minor = ret;
2348+ cdev_init(&cdn->cdev, &tipc_fops);
2349+ cdn->cdev.owner = THIS_MODULE;
2350+
2351+ /* Add character device */
2352+ devt = MKDEV(tipc_major, cdn->minor);
2353+ ret = cdev_add(&cdn->cdev, devt, 1);
2354+ if (ret) {
2355+ dev_dbg(parent, "%s: cdev_add failed (%d)\n",
2356+ __func__, ret);
2357+ goto err_add_cdev;
2358+ }
2359+
2360+ /* Create a device node */
2361+ cdn->dev = device_create(tipc_class, parent,
2362+ devt, NULL, "trusty-ipc-%s", name);
2363+ if (IS_ERR(cdn->dev)) {
2364+ ret = PTR_ERR(cdn->dev);
2365+ dev_dbg(parent, "%s: device_create failed: %d\n",
2366+ __func__, ret);
2367+ goto err_device_create;
2368+ }
2369+
2370+ return 0;
2371+
2372+err_device_create:
2373+ cdn->dev = NULL;
2374+ cdev_del(&cdn->cdev);
2375+err_add_cdev:
2376+ idr_remove(&tipc_devices, cdn->minor);
2377+ return ret;
2378+}
2379+
2380+static void create_cdev_node(struct tipc_virtio_dev *vds,
2381+ struct tipc_cdev_node *cdn)
2382+{
2383+ int err;
2384+
2385+ mutex_lock(&tipc_devices_lock);
2386+
2387+ if (!default_vdev) {
2388+ kref_get(&vds->refcount);
2389+ default_vdev = vds->vdev;
2390+ }
2391+
2392+ if (vds->cdev_name[0] && !cdn->dev) {
2393+ kref_get(&vds->refcount);
2394+ err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name);
2395+ if (err) {
2396+ dev_err(&vds->vdev->dev,
2397+ "failed (%d) to create cdev node\n", err);
2398+ kref_put(&vds->refcount, _free_vds);
2399+ }
2400+ }
2401+ mutex_unlock(&tipc_devices_lock);
2402+}
2403+
2404+static void destroy_cdev_node(struct tipc_virtio_dev *vds,
2405+ struct tipc_cdev_node *cdn)
2406+{
2407+ mutex_lock(&tipc_devices_lock);
2408+ if (cdn->dev) {
2409+ device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor));
2410+ cdev_del(&cdn->cdev);
2411+ idr_remove(&tipc_devices, cdn->minor);
2412+ cdn->dev = NULL;
2413+ kref_put(&vds->refcount, _free_vds);
2414+ }
2415+
2416+ if (default_vdev == vds->vdev) {
2417+ default_vdev = NULL;
2418+ kref_put(&vds->refcount, _free_vds);
2419+ }
2420+
2421+ mutex_unlock(&tipc_devices_lock);
2422+}
2423+
2424+static void _go_online(struct tipc_virtio_dev *vds)
2425+{
2426+ mutex_lock(&vds->lock);
2427+ if (vds->state == VDS_OFFLINE)
2428+ vds->state = VDS_ONLINE;
2429+ mutex_unlock(&vds->lock);
2430+
2431+ create_cdev_node(vds, &vds->cdev_node);
2432+
2433+ dev_info(&vds->vdev->dev, "is online\n");
2434+}
2435+
2436+static void _go_offline(struct tipc_virtio_dev *vds)
2437+{
2438+ struct tipc_chan *chan;
2439+
2440+ /* change state to OFFLINE */
2441+ mutex_lock(&vds->lock);
2442+ if (vds->state != VDS_ONLINE) {
2443+ mutex_unlock(&vds->lock);
2444+ return;
2445+ }
2446+ vds->state = VDS_OFFLINE;
2447+ mutex_unlock(&vds->lock);
2448+
2449+ /* wakeup all waiters */
2450+ wake_up_interruptible_all(&vds->sendq);
2451+
2452+ /* shutdown all channels */
2453+ while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) {
2454+ mutex_lock(&chan->lock);
2455+ chan->state = TIPC_STALE;
2456+ chan->remote = 0;
2457+ chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN);
2458+ mutex_unlock(&chan->lock);
2459+ kref_put(&chan->refcount, _free_chan);
2460+ }
2461+
2462+ /* shutdown device node */
2463+ destroy_cdev_node(vds, &vds->cdev_node);
2464+
2465+ dev_info(&vds->vdev->dev, "is offline\n");
2466+}
2467+
2468+static void _handle_conn_rsp(struct tipc_virtio_dev *vds,
2469+ struct tipc_conn_rsp_body *rsp, size_t len)
2470+{
2471+ struct tipc_chan *chan;
2472+
2473+ if (sizeof(*rsp) != len) {
2474+ dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n",
2475+ __func__, len);
2476+ return;
2477+ }
2478+
2479+ dev_dbg(&vds->vdev->dev,
2480+ "%s: connection response: for addr 0x%x: status %d remote addr 0x%x\n",
2481+ __func__, rsp->target, rsp->status, rsp->remote);
2482+
2483+ /* Lookup channel */
2484+ chan = vds_lookup_channel(vds, rsp->target);
2485+ if (chan) {
2486+ mutex_lock(&chan->lock);
2487+ if (chan->state == TIPC_CONNECTING) {
2488+ if (!rsp->status) {
2489+ chan->state = TIPC_CONNECTED;
2490+ chan->remote = rsp->remote;
2491+ chan->max_msg_cnt = rsp->max_msg_cnt;
2492+ chan->max_msg_size = rsp->max_msg_size;
2493+ chan_trigger_event(chan,
2494+ TIPC_CHANNEL_CONNECTED);
2495+ } else {
2496+ chan->state = TIPC_DISCONNECTED;
2497+ chan->remote = 0;
2498+ chan_trigger_event(chan,
2499+ TIPC_CHANNEL_DISCONNECTED);
2500+ }
2501+ }
2502+ mutex_unlock(&chan->lock);
2503+ kref_put(&chan->refcount, _free_chan);
2504+ }
2505+}
2506+
2507+static void _handle_disc_req(struct tipc_virtio_dev *vds,
2508+ struct tipc_disc_req_body *req, size_t len)
2509+{
2510+ struct tipc_chan *chan;
2511+
2512+ if (sizeof(*req) != len) {
2513+ dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n",
2514+ __func__, len);
2515+ return;
2516+ }
2517+
2518+ dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n",
2519+ __func__, req->target);
2520+
2521+ chan = vds_lookup_channel(vds, req->target);
2522+ if (chan) {
2523+ mutex_lock(&chan->lock);
2524+ if (chan->state == TIPC_CONNECTED ||
2525+ chan->state == TIPC_CONNECTING) {
2526+ chan->state = TIPC_DISCONNECTED;
2527+ chan->remote = 0;
2528+ chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED);
2529+ }
2530+ mutex_unlock(&chan->lock);
2531+ kref_put(&chan->refcount, _free_chan);
2532+ }
2533+}
2534+
2535+static void _handle_release(struct tipc_virtio_dev *vds,
2536+ struct tipc_release_body *req, size_t len)
2537+{
2538+ struct tipc_shared_handle *handle = NULL;
2539+ struct device *dev = &vds->vdev->dev;
2540+ int ret = 0;
2541+
2542+ if (len < sizeof(*req)) {
2543+ dev_err(dev, "Received undersized release control message\n");
2544+ return;
2545+ }
2546+
2547+ handle = tipc_shared_handle_take(vds, req->id);
2548+ if (!handle) {
2549+ dev_err(dev,
2550+ "Received release control message for untracked handle: 0x%llx\n",
2551+ req->id);
2552+ return;
2553+ }
2554+
2555+ ret = tipc_shared_handle_drop(handle);
2556+
2557+ if (ret) {
2558+ dev_err(dev,
2559+ "Failed to release handle 0x%llx upon request: (%d)\n",
2560+ req->id, ret);
2561+ /*
2562+ * Put the handle back in case we got a spurious release now and
2563+ * get a real one later. This path should not happen, we're
2564+ * just trying to be robust.
2565+ */
2566+ tipc_shared_handle_register(handle);
2567+ }
2568+}
2569+
2570+static void _handle_ctrl_msg(struct tipc_virtio_dev *vds,
2571+ void *data, int len, u32 src)
2572+{
2573+ struct tipc_ctrl_msg *msg = data;
2574+
2575+ if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) {
2576+ dev_err(&vds->vdev->dev,
2577+ "%s: Invalid message length ( %d vs. %d)\n",
2578+ __func__, (int)(sizeof(*msg) + msg->body_len), len);
2579+ return;
2580+ }
2581+
2582+ dev_dbg(&vds->vdev->dev,
2583+ "%s: Incoming ctrl message: src 0x%x type %d len %d\n",
2584+ __func__, src, msg->type, msg->body_len);
2585+
2586+ switch (msg->type) {
2587+ case TIPC_CTRL_MSGTYPE_GO_ONLINE:
2588+ _go_online(vds);
2589+ break;
2590+
2591+ case TIPC_CTRL_MSGTYPE_GO_OFFLINE:
2592+ _go_offline(vds);
2593+ break;
2594+
2595+ case TIPC_CTRL_MSGTYPE_CONN_RSP:
2596+ _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body,
2597+ msg->body_len);
2598+ break;
2599+
2600+ case TIPC_CTRL_MSGTYPE_DISC_REQ:
2601+ _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body,
2602+ msg->body_len);
2603+ break;
2604+
2605+ case TIPC_CTRL_MSGTYPE_RELEASE:
2606+ _handle_release(vds, (struct tipc_release_body *)msg->body,
2607+ msg->body_len);
2608+ break;
2609+
2610+ default:
2611+ dev_warn(&vds->vdev->dev,
2612+ "%s: Unexpected message type: %d\n",
2613+ __func__, msg->type);
2614+ }
2615+}
2616+
2617+static void handle_dropped_chan_msg(struct tipc_virtio_dev *vds,
2618+ struct tipc_msg_buf *mb,
2619+ struct tipc_msg_hdr *msg)
2620+{
2621+ int shm_idx;
2622+ struct tipc_shm *shm;
2623+ struct tipc_shared_handle *shared_handle;
2624+ struct device *dev = &vds->vdev->dev;
2625+ size_t len;
2626+
2627+ if (msg->len < msg->shm_cnt * sizeof(*shm)) {
2628+ dev_err(dev, "shm_cnt does not fit in dropped message\n");
2629+ /* The message is corrupt, so we can't recover resources */
2630+ return;
2631+ }
2632+
2633+ len = msg->len - msg->shm_cnt * sizeof(*shm);
2634+ /* skip normal data */
2635+ (void)mb_get_data(mb, len);
2636+
2637+ for (shm_idx = 0; shm_idx < msg->shm_cnt; shm_idx++) {
2638+ shm = mb_get_data(mb, sizeof(*shm));
2639+ shared_handle = tipc_shared_handle_take(vds, shm->obj_id);
2640+ if (shared_handle) {
2641+ if (tipc_shared_handle_drop(shared_handle))
2642+ dev_err(dev,
2643+ "Failed to drop handle found in dropped buffer");
2644+ } else {
2645+ dev_err(dev,
2646+ "Found handle in dropped buffer which was not registered to tipc device...");
2647+ }
2648+ }
2649+}
2650+
2651+static void handle_dropped_mb(struct tipc_virtio_dev *vds,
2652+ struct tipc_msg_buf *mb)
2653+{
2654+ struct tipc_msg_hdr *msg;
2655+
2656+ mb_reset_read(mb);
2657+ msg = mb_get_data(mb, sizeof(*msg));
2658+ if (msg->dst != TIPC_CTRL_ADDR) {
2659+ handle_dropped_chan_msg(vds, mb, msg);
2660+ }
2661+}
2662+
2663+static int _handle_rxbuf(struct tipc_virtio_dev *vds,
2664+ struct tipc_msg_buf *rxbuf, size_t rxlen)
2665+{
2666+ int err;
2667+ struct scatterlist sg;
2668+ struct tipc_msg_hdr *msg;
2669+ struct device *dev = &vds->vdev->dev;
2670+
2671+ /* message sanity check */
2672+ if (rxlen > rxbuf->buf_sz) {
2673+ dev_warn(dev, "inbound msg is too big: %zd\n", rxlen);
2674+ goto drop_it;
2675+ }
2676+
2677+ if (rxlen < sizeof(*msg)) {
2678+ dev_warn(dev, "inbound msg is too short: %zd\n", rxlen);
2679+ goto drop_it;
2680+ }
2681+
2682+ /* reset buffer and put data */
2683+ mb_reset(rxbuf);
2684+ mb_put_data(rxbuf, rxlen);
2685+
2686+ /* get message header */
2687+ msg = mb_get_data(rxbuf, sizeof(*msg));
2688+ if (mb_avail_data(rxbuf) != msg->len) {
2689+ dev_warn(dev, "inbound msg length mismatch: (%zu vs. %d)\n",
2690+ mb_avail_data(rxbuf), msg->len);
2691+ goto drop_it;
2692+ }
2693+
2694+ dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d, shm_cnt: %d\n",
2695+ msg->src, msg->dst, msg->len, msg->flags, msg->reserved,
2696+ msg->shm_cnt);
2697+
2698+ /* message directed to control endpoint is a special case */
2699+ if (msg->dst == TIPC_CTRL_ADDR) {
2700+ _handle_ctrl_msg(vds, msg->data, msg->len, msg->src);
2701+ } else {
2702+ struct tipc_chan *chan = NULL;
2703+ /* Lookup channel */
2704+ chan = vds_lookup_channel(vds, msg->dst);
2705+ if (chan) {
2706+ /* handle it */
2707+ rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf);
2708+ kref_put(&chan->refcount, _free_chan);
2709+ if (WARN_ON(!rxbuf))
2710+ return -EINVAL;
2711+ }
2712+ }
2713+
2714+drop_it:
2715+ /* add the buffer back to the virtqueue */
2716+ sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
2717+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
2718+ if (err < 0) {
2719+ dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
2720+ return err;
2721+ }
2722+
2723+ return 0;
2724+}
2725+
2726+static void _rxvq_cb(struct virtqueue *rxvq)
2727+{
2728+ unsigned int len;
2729+ struct tipc_msg_buf *mb;
2730+ unsigned int msg_cnt = 0;
2731+ struct tipc_virtio_dev *vds = rxvq->vdev->priv;
2732+
2733+ while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) {
2734+ if (_handle_rxbuf(vds, mb, len))
2735+ break;
2736+ msg_cnt++;
2737+ }
2738+
2739+ /* tell the other side that we added rx buffers */
2740+ if (msg_cnt)
2741+ virtqueue_kick(rxvq);
2742+}
2743+
2744+static void _txvq_cb(struct virtqueue *txvq)
2745+{
2746+ unsigned int len;
2747+ struct tipc_msg_buf *mb;
2748+ bool need_wakeup = false;
2749+ struct tipc_virtio_dev *vds = txvq->vdev->priv;
2750+
2751+ /* detach all buffers */
2752+ mutex_lock(&vds->lock);
2753+ while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) {
2754+ if ((int)len < 0)
2755+ handle_dropped_mb(vds, mb);
2756+ need_wakeup |= _put_txbuf_locked(vds, mb);
2757+ }
2758+ mutex_unlock(&vds->lock);
2759+
2760+ if (need_wakeup) {
2761+ /* wake up potential senders waiting for a tx buffer */
2762+ wake_up_interruptible_all(&vds->sendq);
2763+ }
2764+}
2765+
2766+static int tipc_virtio_probe(struct virtio_device *vdev)
2767+{
2768+ int err, i;
2769+ struct tipc_virtio_dev *vds;
2770+ struct tipc_dev_config config;
2771+ struct virtqueue *vqs[2];
2772+ vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb};
2773+ static const char * const vq_names[] = { "rx", "tx" };
2774+
2775+ vds = kzalloc(sizeof(*vds), GFP_KERNEL);
2776+ if (!vds)
2777+ return -ENOMEM;
2778+
2779+ vds->vdev = vdev;
2780+
2781+ mutex_init(&vds->lock);
2782+ mutex_init(&vds->shared_handles_lock);
2783+ kref_init(&vds->refcount);
2784+ init_waitqueue_head(&vds->sendq);
2785+ INIT_LIST_HEAD(&vds->free_buf_list);
2786+ idr_init(&vds->addr_idr);
2787+ vds->shared_handles = RB_ROOT;
2788+ dma_coerce_mask_and_coherent(&vds->vdev->dev,
2789+ *vds->vdev->dev.parent->parent->dma_mask);
2790+
2791+ /* set default max message size and alignment */
2792+ memset(&config, 0, sizeof(config));
2793+ config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE;
2794+ config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN;
2795+
2796+ /* get configuration if present */
2797+ vdev->config->get(vdev, 0, &config, sizeof(config));
2798+
2799+ /* copy dev name */
2800+ strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name));
2801+ vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0';
2802+
2803+ /* find virtqueues (rx and tx, in this order) */
2804+ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL,
2805+ NULL);
2806+ if (err)
2807+ goto err_find_vqs;
2808+
2809+ vds->rxvq = vqs[0];
2810+ vds->txvq = vqs[1];
2811+
2812+ /* save max buffer size and count */
2813+ vds->msg_buf_max_sz = config.msg_buf_max_size;
2814+ vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq);
2815+
2816+ /* set up the receive buffers */
2817+ for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) {
2818+ struct scatterlist sg;
2819+ struct tipc_msg_buf *rxbuf;
2820+
2821+ rxbuf = vds_alloc_msg_buf(vds, true);
2822+ if (!rxbuf) {
2823+ dev_err(&vdev->dev, "failed to allocate rx buffer\n");
2824+ err = -ENOMEM;
2825+ goto err_free_rx_buffers;
2826+ }
2827+
2828+ sg_init_one(&sg, rxbuf, rxbuf->buf_sz);
2829+ err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
2830+ WARN_ON(err); /* sanity check; this can't really happen */
2831+ }
2832+
2833+ vdev->priv = vds;
2834+ vds->state = VDS_OFFLINE;
2835+
2836+ dev_dbg(&vdev->dev, "%s: done\n", __func__);
2837+ return 0;
2838+
2839+err_free_rx_buffers:
2840+ _cleanup_vq(vds, vds->rxvq);
2841+err_find_vqs:
2842+ kref_put(&vds->refcount, _free_vds);
2843+ return err;
2844+}
2845+
2846+static void tipc_virtio_remove(struct virtio_device *vdev)
2847+{
2848+ struct tipc_virtio_dev *vds = vdev->priv;
2849+
2850+ _go_offline(vds);
2851+
2852+ mutex_lock(&vds->lock);
2853+ vds->state = VDS_DEAD;
2854+ vds->vdev = NULL;
2855+ mutex_unlock(&vds->lock);
2856+
2857+ vdev->config->reset(vdev);
2858+
2859+ idr_destroy(&vds->addr_idr);
2860+
2861+ _cleanup_vq(vds, vds->rxvq);
2862+ _cleanup_vq(vds, vds->txvq);
2863+ vds_free_msg_buf_list(vds, &vds->free_buf_list);
2864+
2865+ vdev->config->del_vqs(vds->vdev);
2866+
2867+ kref_put(&vds->refcount, _free_vds);
2868+}
2869+
2870+static const struct virtio_device_id tipc_virtio_id_table[] = {
2871+ { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID },
2872+ { 0 },
2873+};
2874+
2875+static const unsigned int features[] = {
2876+ 0,
2877+};
2878+
2879+static struct virtio_driver virtio_tipc_driver = {
2880+ .feature_table = features,
2881+ .feature_table_size = ARRAY_SIZE(features),
2882+ .driver.name = KBUILD_MODNAME,
2883+ .driver.owner = THIS_MODULE,
2884+ .id_table = tipc_virtio_id_table,
2885+ .probe = tipc_virtio_probe,
2886+ .remove = tipc_virtio_remove,
2887+};
2888+
2889+static int __init tipc_init(void)
2890+{
2891+ int ret;
2892+ dev_t dev;
2893+
2894+ ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME);
2895+ if (ret) {
2896+ pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret);
2897+ return ret;
2898+ }
2899+
2900+ tipc_major = MAJOR(dev);
2901+ tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
2902+ if (IS_ERR(tipc_class)) {
2903+ ret = PTR_ERR(tipc_class);
2904+ pr_err("%s: class_create failed: %d\n", __func__, ret);
2905+ goto err_class_create;
2906+ }
2907+
2908+ ret = register_virtio_driver(&virtio_tipc_driver);
2909+ if (ret) {
2910+ pr_err("failed to register virtio driver: %d\n", ret);
2911+ goto err_register_virtio_drv;
2912+ }
2913+
2914+ return 0;
2915+
2916+err_register_virtio_drv:
2917+ class_destroy(tipc_class);
2918+
2919+err_class_create:
2920+ unregister_chrdev_region(dev, MAX_DEVICES);
2921+ return ret;
2922+}
2923+
2924+static void __exit tipc_exit(void)
2925+{
2926+ unregister_virtio_driver(&virtio_tipc_driver);
2927+ class_destroy(tipc_class);
2928+ unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES);
2929+}
2930+
2931+/* We need to init this early */
2932+subsys_initcall(tipc_init);
2933+module_exit(tipc_exit);
2934+
2935+MODULE_DEVICE_TABLE(virtio, tipc_virtio_id_table);
2936+MODULE_DESCRIPTION("Trusty IPC driver");
2937+MODULE_LICENSE("GPL v2");
2938diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c
2939new file mode 100644
2940index 000000000000..5c6076108d0e
2941--- /dev/null
2942+++ b/drivers/trusty/trusty-irq.c
2943@@ -0,0 +1,645 @@
2944+// SPDX-License-Identifier: GPL-2.0-only
2945+/*
2946+ * Copyright (C) 2013 Google, Inc.
2947+ */
2948+
2949+#include <linux/cpu.h>
2950+#include <linux/interrupt.h>
2951+#include <linux/irq.h>
2952+#include <linux/irqdomain.h>
2953+#include <linux/module.h>
2954+#include <linux/of.h>
2955+#include <linux/of_irq.h>
2956+#include <linux/platform_device.h>
2957+#include <linux/slab.h>
2958+#include <linux/string.h>
2959+#include <linux/trusty/smcall.h>
2960+#include <linux/trusty/sm_err.h>
2961+#include <linux/trusty/trusty.h>
2962+
2963+struct trusty_irq {
2964+ struct trusty_irq_state *is;
2965+ struct hlist_node node;
2966+ unsigned int irq;
2967+ bool percpu;
2968+ bool enable;
2969+ bool doorbell;
2970+ struct trusty_irq __percpu *percpu_ptr;
2971+};
2972+
2973+struct trusty_irq_irqset {
2974+ struct hlist_head pending;
2975+ struct hlist_head inactive;
2976+};
2977+
2978+struct trusty_irq_state {
2979+ struct device *dev;
2980+ struct device *trusty_dev;
2981+ struct trusty_irq_irqset normal_irqs;
2982+ spinlock_t normal_irqs_lock;
2983+ struct trusty_irq_irqset __percpu *percpu_irqs;
2984+ struct notifier_block trusty_call_notifier;
2985+ struct hlist_node cpuhp_node;
2986+};
2987+
2988+static int trusty_irq_cpuhp_slot = -1;
2989+
2990+static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is,
2991+ struct trusty_irq_irqset *irqset,
2992+ bool percpu)
2993+{
2994+ struct hlist_node *n;
2995+ struct trusty_irq *trusty_irq;
2996+
2997+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
2998+ dev_dbg(is->dev,
2999+ "%s: enable pending irq %d, percpu %d, cpu %d\n",
3000+ __func__, trusty_irq->irq, percpu, smp_processor_id());
3001+ if (percpu)
3002+ enable_percpu_irq(trusty_irq->irq, 0);
3003+ else
3004+ enable_irq(trusty_irq->irq);
3005+ hlist_del(&trusty_irq->node);
3006+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3007+ }
3008+}
3009+
3010+static void trusty_irq_enable_irqset(struct trusty_irq_state *is,
3011+ struct trusty_irq_irqset *irqset)
3012+{
3013+ struct trusty_irq *trusty_irq;
3014+
3015+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
3016+ if (trusty_irq->enable) {
3017+ dev_warn(is->dev,
3018+ "%s: percpu irq %d already enabled, cpu %d\n",
3019+ __func__, trusty_irq->irq, smp_processor_id());
3020+ continue;
3021+ }
3022+ dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n",
3023+ __func__, trusty_irq->irq, smp_processor_id());
3024+ enable_percpu_irq(trusty_irq->irq, 0);
3025+ trusty_irq->enable = true;
3026+ }
3027+}
3028+
3029+static void trusty_irq_disable_irqset(struct trusty_irq_state *is,
3030+ struct trusty_irq_irqset *irqset)
3031+{
3032+ struct hlist_node *n;
3033+ struct trusty_irq *trusty_irq;
3034+
3035+ hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
3036+ if (!trusty_irq->enable) {
3037+ dev_warn(is->dev,
3038+ "irq %d already disabled, percpu %d, cpu %d\n",
3039+ trusty_irq->irq, trusty_irq->percpu,
3040+ smp_processor_id());
3041+ continue;
3042+ }
3043+ dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n",
3044+ __func__, trusty_irq->irq, trusty_irq->percpu,
3045+ smp_processor_id());
3046+ trusty_irq->enable = false;
3047+ if (trusty_irq->percpu)
3048+ disable_percpu_irq(trusty_irq->irq);
3049+ else
3050+ disable_irq_nosync(trusty_irq->irq);
3051+ }
3052+ hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
3053+ if (!trusty_irq->enable) {
3054+ dev_warn(is->dev,
3055+ "pending irq %d already disabled, percpu %d, cpu %d\n",
3056+ trusty_irq->irq, trusty_irq->percpu,
3057+ smp_processor_id());
3058+ }
3059+ dev_dbg(is->dev,
3060+ "%s: disable pending irq %d, percpu %d, cpu %d\n",
3061+ __func__, trusty_irq->irq, trusty_irq->percpu,
3062+ smp_processor_id());
3063+ trusty_irq->enable = false;
3064+ hlist_del(&trusty_irq->node);
3065+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3066+ }
3067+}
3068+
3069+static int trusty_irq_call_notify(struct notifier_block *nb,
3070+ unsigned long action, void *data)
3071+{
3072+ struct trusty_irq_state *is;
3073+
3074+ if (WARN_ON(!irqs_disabled()))
3075+ return NOTIFY_DONE;
3076+
3077+ if (action != TRUSTY_CALL_PREPARE)
3078+ return NOTIFY_DONE;
3079+
3080+ is = container_of(nb, struct trusty_irq_state, trusty_call_notifier);
3081+
3082+ spin_lock(&is->normal_irqs_lock);
3083+ trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false);
3084+ spin_unlock(&is->normal_irqs_lock);
3085+ trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true);
3086+
3087+ return NOTIFY_OK;
3088+}
3089+
3090+static irqreturn_t trusty_irq_handler(int irq, void *data)
3091+{
3092+ struct trusty_irq *trusty_irq = data;
3093+ struct trusty_irq_state *is = trusty_irq->is;
3094+ struct trusty_irq_irqset *irqset;
3095+
3096+ dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n",
3097+ __func__, irq, trusty_irq->irq, smp_processor_id(),
3098+ trusty_irq->enable);
3099+
3100+ if (!trusty_irq->doorbell) {
3101+ if (trusty_irq->percpu) {
3102+ disable_percpu_irq(irq);
3103+ irqset = this_cpu_ptr(is->percpu_irqs);
3104+ } else {
3105+ disable_irq_nosync(irq);
3106+ irqset = &is->normal_irqs;
3107+ }
3108+
3109+ spin_lock(&is->normal_irqs_lock);
3110+ if (trusty_irq->enable) {
3111+ hlist_del(&trusty_irq->node);
3112+ hlist_add_head(&trusty_irq->node, &irqset->pending);
3113+ }
3114+ spin_unlock(&is->normal_irqs_lock);
3115+ }
3116+
3117+ trusty_enqueue_nop(is->trusty_dev, NULL);
3118+
3119+ dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq);
3120+
3121+ return IRQ_HANDLED;
3122+}
3123+
3124+static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node)
3125+{
3126+ unsigned long irq_flags;
3127+ struct trusty_irq_state *is;
3128+
3129+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
3130+
3131+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
3132+
3133+ local_irq_save(irq_flags);
3134+ trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs));
3135+ local_irq_restore(irq_flags);
3136+
3137+ /*
3138+ * Temporary workaround: blindly enqueue work to force the trusty
3139+ * scheduler to run after a cpu suspend.
3140+ * Root-causing why the workqueue ends up inappropriately empty
3141+ * (e.g. loss of an IPI) may make this workaround unnecessary
3142+ * in the future.
3143+ */
3144+ trusty_enqueue_nop(is->trusty_dev, NULL);
3145+
3146+ return 0;
3147+}
3148+
3149+static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node)
3150+{
3151+ unsigned long irq_flags;
3152+ struct trusty_irq_state *is;
3153+
3154+ is = container_of(node, struct trusty_irq_state, cpuhp_node);
3155+
3156+ dev_dbg(is->dev, "%s: cpu %d\n", __func__, cpu);
3157+
3158+ local_irq_save(irq_flags);
3159+ trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs));
3160+ local_irq_restore(irq_flags);
3161+
3162+ return 0;
3163+}
3164+
3165+static int trusty_irq_map_ipi(struct trusty_irq_state *is, int irq)
3166+{
3167+ int ret;
3168+ u32 ipi_range[3];
3169+ struct device_node *gic;
3170+ struct of_phandle_args oirq = {};
3171+ u32 beg, end, ipi_base;
3172+
3173+ ret = of_property_read_u32_array(is->dev->of_node, "ipi-range",
3174+ ipi_range, ARRAY_SIZE(ipi_range));
3175+ if (ret != 0)
3176+ return -ENODATA;
3177+ beg = ipi_range[0];
3178+ end = ipi_range[1];
3179+ ipi_base = ipi_range[2];
3180+
3181+ if (irq < beg || irq > end)
3182+ return -ENODATA;
3183+
3184+ gic = of_irq_find_parent(is->dev->of_node);
3185+ if (!gic)
3186+ return -ENXIO;
3187+
3188+ oirq.np = gic;
3189+ oirq.args_count = 1;
3190+ oirq.args[0] = ipi_base + (irq - beg);
3191+
3192+ ret = irq_create_of_mapping(&oirq);
3193+
3194+ of_node_put(gic);
3195+ return (!ret) ? -EINVAL : ret;
3196+}
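+
+/*
+ * Example "ipi-range" encoding (illustrative): the device tree property
+ *
+ *	ipi-range = <8 15 8>;
+ *
+ * declares Trusty IRQs 8..15 as IPIs mapped onto parent (GIC) interrupts
+ * starting at 8, i.e. Trusty IRQ n resolves to parent interrupt
+ * 8 + (n - 8).
+ */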
3197+
3198+static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq)
3199+{
3200+ int ret;
3201+ int index;
3202+ u32 irq_pos;
3203+ u32 templ_idx;
3204+ u32 range_base;
3205+ u32 range_end;
3206+ struct of_phandle_args oirq;
3207+
3208+ /* check if this is an IPI (inter-processor interrupt) */
3209+ ret = trusty_irq_map_ipi(is, irq);
3210+ if (ret != -ENODATA)
3211+ return ret;
3212+
3213+ /* check if "interrupt-ranges" property is present */
3214+ if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) {
3215+ /* fallback to old behavior to be backward compatible with
3216+ * systems that do not need IRQ domains.
3217+ */
3218+ return irq;
3219+ }
3220+
3221+ /* find irq range */
3222+ for (index = 0;; index += 3) {
3223+ ret = of_property_read_u32_index(is->dev->of_node,
3224+ "interrupt-ranges",
3225+ index, &range_base);
3226+ if (ret)
3227+ return ret;
3228+
3229+ ret = of_property_read_u32_index(is->dev->of_node,
3230+ "interrupt-ranges",
3231+ index + 1, &range_end);
3232+ if (ret)
3233+ return ret;
3234+
3235+ if (irq >= range_base && irq <= range_end)
3236+ break;
3237+ }
3238+
3239+ /* read the rest of range entry: template index and irq_pos */
3240+ ret = of_property_read_u32_index(is->dev->of_node,
3241+ "interrupt-ranges",
3242+ index + 2, &templ_idx);
3243+ if (ret)
3244+ return ret;
3245+
3246+ /* read irq template */
3247+ ret = of_parse_phandle_with_args(is->dev->of_node,
3248+ "interrupt-templates",
3249+ "#interrupt-cells",
3250+ templ_idx, &oirq);
3251+ if (ret)
3252+ return ret;
3253+
3254+ WARN_ON(!oirq.np);
3255+ WARN_ON(!oirq.args_count);
3256+
3257+ /*
3258+ * An IRQ template is a non-empty array of u32 values describing a group
3259+ * of interrupts that share common properties. The entry at index zero
3260+ * holds the position of the irq id within the interrupt specifier array;
3261+ * the remaining entries are the interrupt specifier itself with the
3262+ * irq id field omitted. To convert a template into a full specifier we
3263+ * therefore shift the first irq_pos entries down one slot and place the
3264+ * real irq id in the resulting gap.
3265+ */
3266+ irq_pos = oirq.args[0];
3267+
3268+ if (irq_pos >= oirq.args_count) {
3269+ dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos);
+ of_node_put(oirq.np);
3270+ return -EINVAL;
3271+ }
3272+
3273+ for (index = 1; index <= irq_pos; index++)
3274+ oirq.args[index - 1] = oirq.args[index];
3275+
3276+ oirq.args[irq_pos] = irq - range_base;
3277+
3278+ ret = irq_create_of_mapping(&oirq);
+ of_node_put(oirq.np);
3279+
3280+ return (!ret) ? -EINVAL : ret;
3281+}
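+
+/*
+ * Worked example (illustrative values): given
+ *
+ *	interrupt-ranges = <32 63 0>;
+ *	interrupt-templates = <&gic 1 GIC_SPI IRQ_TYPE_LEVEL_HIGH>;
+ *
+ * template 0 carries args {1, GIC_SPI, IRQ_TYPE_LEVEL_HIGH}: irq_pos is 1,
+ * so GIC_SPI shifts down to slot 0 and slot 1 receives the irq id.
+ * Trusty IRQ 40 falls in range <32 63>, producing the specifier
+ * <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, since 40 - 32 == 8.
+ */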
3282+
3283+static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq)
3284+{
3285+ int ret;
3286+ int irq;
3287+ unsigned long irq_flags;
3288+ struct trusty_irq *trusty_irq;
3289+
3290+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
3291+
3292+ irq = trusty_irq_create_irq_mapping(is, tirq);
3293+ if (irq < 0) {
3294+ dev_err(is->dev,
3295+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
3296+ return irq;
3297+ }
3298+
3299+ trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
3300+ if (!trusty_irq)
3301+ return -ENOMEM;
3302+
3303+ trusty_irq->is = is;
3304+ trusty_irq->irq = irq;
3305+ trusty_irq->enable = true;
3306+
3307+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3308+ hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
3309+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3310+
3311+ ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
3312+ "trusty", trusty_irq);
3313+ if (ret) {
3314+ dev_err(is->dev, "request_irq failed %d\n", ret);
3315+ goto err_request_irq;
3316+ }
3317+ return 0;
3318+
3319+err_request_irq:
3320+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3321+ hlist_del(&trusty_irq->node);
3322+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3323+ kfree(trusty_irq);
3324+ return ret;
3325+}
3326+
3327+static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq,
3328+ unsigned int type)
3329+{
3330+ int ret;
3331+ int irq;
3332+ unsigned int cpu;
3333+ struct trusty_irq __percpu *trusty_irq_handler_data;
3334+
3335+ dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
3336+
3337+ irq = trusty_irq_create_irq_mapping(is, tirq);
3338+ if (irq <= 0) {
3339+ dev_err(is->dev,
3340+ "trusty_irq_create_irq_mapping failed (%d)\n", irq);
3341+ return irq;
3342+ }
3343+
3344+ trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
3345+ if (!trusty_irq_handler_data)
3346+ return -ENOMEM;
3347+
3348+ for_each_possible_cpu(cpu) {
3349+ struct trusty_irq *trusty_irq;
3350+ struct trusty_irq_irqset *irqset;
3351+
3352+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
3353+ irqset = per_cpu_ptr(is->percpu_irqs, cpu);
3354+
3355+ trusty_irq->is = is;
3356+ hlist_add_head(&trusty_irq->node, &irqset->inactive);
3357+ trusty_irq->irq = irq;
3358+ trusty_irq->percpu = true;
3359+ trusty_irq->doorbell = type == TRUSTY_IRQ_TYPE_DOORBELL;
3360+ trusty_irq->percpu_ptr = trusty_irq_handler_data;
3361+ }
3362+
3363+ ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
3364+ trusty_irq_handler_data);
3365+ if (ret) {
3366+ dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
3367+ goto err_request_percpu_irq;
3368+ }
3369+
3370+ return 0;
3371+
3372+err_request_percpu_irq:
3373+ for_each_possible_cpu(cpu) {
3374+ struct trusty_irq *trusty_irq;
3375+
3376+ trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
3377+ hlist_del(&trusty_irq->node);
3378+ }
3379+
3380+ free_percpu(trusty_irq_handler_data);
3381+ return ret;
3382+}
3383+
3384+static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
3385+ unsigned long min_irq, unsigned int type)
3386+{
3387+ return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
3388+ min_irq, type, 0);
3389+}
3390+
3391+static int trusty_irq_init_one(struct trusty_irq_state *is,
3392+ int irq, unsigned int type)
3393+{
3394+ int ret;
3395+
3396+ irq = trusty_smc_get_next_irq(is, irq, type);
3397+ if (irq < 0)
3398+ return irq;
3399+
3400+ if (type != TRUSTY_IRQ_TYPE_NORMAL)
3401+ ret = trusty_irq_init_per_cpu_irq(is, irq, type);
3402+ else
3403+ ret = trusty_irq_init_normal_irq(is, irq);
3404+
3405+ if (ret) {
3406+ dev_warn(is->dev,
3407+ "failed to initialize irq %d, irq will be ignored\n",
3408+ irq);
3409+ }
3410+
3411+ return irq + 1;
3412+}
3413+
3414+static void trusty_irq_free_irqs(struct trusty_irq_state *is)
3415+{
3416+ struct trusty_irq *irq;
3417+ struct hlist_node *n;
3418+ unsigned int cpu;
3419+
3420+ hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) {
3421+ dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq);
3422+ free_irq(irq->irq, irq);
3423+ hlist_del(&irq->node);
3424+ kfree(irq);
3425+ }
3426+ hlist_for_each_entry_safe(irq, n,
3427+ &this_cpu_ptr(is->percpu_irqs)->inactive,
3428+ node) {
3429+ struct trusty_irq __percpu *trusty_irq_handler_data;
3430+
3431+ dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq);
3432+ trusty_irq_handler_data = irq->percpu_ptr;
3433+ free_percpu_irq(irq->irq, trusty_irq_handler_data);
3434+ for_each_possible_cpu(cpu) {
3435+ struct trusty_irq *irq_tmp;
3436+
3437+ irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu);
3438+ hlist_del(&irq_tmp->node);
3439+ }
3440+ free_percpu(trusty_irq_handler_data);
3441+ }
3442+}
3443+
3444+static int trusty_irq_probe(struct platform_device *pdev)
3445+{
3446+ int ret;
3447+ int irq;
3448+ unsigned long irq_flags;
3449+ struct trusty_irq_state *is;
3450+
3451+ is = kzalloc(sizeof(*is), GFP_KERNEL);
3452+ if (!is) {
3453+ ret = -ENOMEM;
3454+ goto err_alloc_is;
3455+ }
3456+
3457+ is->dev = &pdev->dev;
3458+ is->trusty_dev = is->dev->parent;
3459+ spin_lock_init(&is->normal_irqs_lock);
3460+ is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset);
3461+ if (!is->percpu_irqs) {
3462+ ret = -ENOMEM;
3463+ goto err_alloc_pending_percpu_irqs;
3464+ }
3465+
3466+ platform_set_drvdata(pdev, is);
3467+
3468+ is->trusty_call_notifier.notifier_call = trusty_irq_call_notify;
3469+ ret = trusty_call_notifier_register(is->trusty_dev,
3470+ &is->trusty_call_notifier);
3471+ if (ret) {
3472+ dev_err(&pdev->dev,
3473+ "failed to register trusty call notifier\n");
3474+ goto err_trusty_call_notifier_register;
3475+ }
3476+
3477+ for (irq = 0; irq >= 0;)
3478+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_PER_CPU);
3479+ for (irq = 0; irq >= 0;)
3480+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_NORMAL);
3481+ for (irq = 0; irq >= 0;)
3482+ irq = trusty_irq_init_one(is, irq, TRUSTY_IRQ_TYPE_DOORBELL);
3483+
3484+ ret = cpuhp_state_add_instance(trusty_irq_cpuhp_slot, &is->cpuhp_node);
3485+ if (ret < 0) {
3486+ dev_err(&pdev->dev, "cpuhp_state_add_instance failed %d\n",
3487+ ret);
3488+ goto err_add_cpuhp_instance;
3489+ }
3490+
3491+ return 0;
3492+
3493+err_add_cpuhp_instance:
3494+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3495+ trusty_irq_disable_irqset(is, &is->normal_irqs);
3496+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3497+ trusty_irq_free_irqs(is);
3498+ trusty_call_notifier_unregister(is->trusty_dev,
3499+ &is->trusty_call_notifier);
3500+err_trusty_call_notifier_register:
3501+ free_percpu(is->percpu_irqs);
3502+err_alloc_pending_percpu_irqs:
3503+ kfree(is);
3504+err_alloc_is:
3505+ return ret;
3506+}
3507+
3508+static int trusty_irq_remove(struct platform_device *pdev)
3509+{
3510+ int ret;
3511+ unsigned long irq_flags;
3512+ struct trusty_irq_state *is = platform_get_drvdata(pdev);
3513+
3514+ ret = cpuhp_state_remove_instance(trusty_irq_cpuhp_slot,
3515+ &is->cpuhp_node);
3516+ if (WARN_ON(ret))
3517+ return ret;
3518+
3519+ spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
3520+ trusty_irq_disable_irqset(is, &is->normal_irqs);
3521+ spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
3522+
3523+ trusty_irq_free_irqs(is);
3524+
3525+ trusty_call_notifier_unregister(is->trusty_dev,
3526+ &is->trusty_call_notifier);
3527+ free_percpu(is->percpu_irqs);
3528+ kfree(is);
3529+
3530+ return 0;
3531+}
3532+
3533+static const struct of_device_id trusty_irq_of_match[] = {
3534+ { .compatible = "android,trusty-irq-v1", },
3535+ {},
3536+};
3537+
3538+MODULE_DEVICE_TABLE(of, trusty_irq_of_match);
3539+
3540+static struct platform_driver trusty_irq_driver = {
3541+ .probe = trusty_irq_probe,
3542+ .remove = trusty_irq_remove,
3543+ .driver = {
3544+ .name = "trusty-irq",
3545+ .of_match_table = trusty_irq_of_match,
3546+ },
3547+};
3548+
3549+static int __init trusty_irq_driver_init(void)
3550+{
3551+ int ret;
3552+
3553+ /* allocate dynamic cpuhp state slot */
3554+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
3555+ "trusty-irq:cpu:online",
3556+ trusty_irq_cpu_up,
3557+ trusty_irq_cpu_down);
3558+ if (ret < 0)
3559+ return ret;
3560+ trusty_irq_cpuhp_slot = ret;
3561+
3562+ /* Register platform driver */
3563+ ret = platform_driver_register(&trusty_irq_driver);
3564+ if (ret < 0)
3565+ goto err_driver_register;
3566+
3567+ return ret;
3568+
3569+err_driver_register:
3570+ /* undo cpuhp slot allocation */
3571+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
3572+ trusty_irq_cpuhp_slot = -1;
3573+
3574+ return ret;
3575+}
3576+
3577+static void __exit trusty_irq_driver_exit(void)
3578+{
3579+ platform_driver_unregister(&trusty_irq_driver);
3580+ cpuhp_remove_multi_state(trusty_irq_cpuhp_slot);
3581+ trusty_irq_cpuhp_slot = -1;
3582+}
3583+
3584+module_init(trusty_irq_driver_init);
3585+module_exit(trusty_irq_driver_exit);
3586+
3587+MODULE_LICENSE("GPL v2");
3588+MODULE_DESCRIPTION("Trusty IRQ driver");
3589diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c
3590new file mode 100644
3591index 000000000000..7b279fe63766
3592--- /dev/null
3593+++ b/drivers/trusty/trusty-log.c
3594@@ -0,0 +1,830 @@
3595+// SPDX-License-Identifier: GPL-2.0-only
3596+/*
3597+ * Copyright (C) 2015 Google, Inc.
3598+ */
3599+#include <linux/platform_device.h>
3600+#include <linux/trusty/smcall.h>
3601+#include <linux/trusty/trusty.h>
3602+#include <linux/notifier.h>
3603+#include <linux/scatterlist.h>
3604+#include <linux/slab.h>
3605+#include <linux/mm.h>
3606+#include <linux/mod_devicetable.h>
3607+#include <linux/module.h>
3608+#include <linux/moduleparam.h>
3609+#include <linux/log2.h>
3610+#include <linux/miscdevice.h>
3611+#include <linux/poll.h>
3612+#include <linux/seq_file.h>
3613+#include <asm/page.h>
3614+#include "trusty-log.h"
3615+
3616+/*
3617+ * Rationale for the chosen default log buffer size:
3618+ * - the log buffer shall be able to hold an unthrottled Trusty crash dump.
3619+ * - the register list portion of a crash dump is about 1KB
3620+ * - the memory-around-registers portion of a crash dump can be up to 12 KB
3621+ * - an average-size backtrace is about 1 KB
3622+ * - average length of non-crash trusty logs during boot is about 85 characters
3623+ * - a crash dump with 50 lines of context therefore requires up to 18 KB
3624+ * - buffer size needs to be power-of-two number of bytes
3625+ * - rounding up to power of two from 18 KB gives 32 KB
3626+ * The log size can be adjusted by setting the "trusty_log.log_size" parameter
3627+ * on the kernel command line. The specified value will be adjusted as needed.
3628+ */
3629+
3630+#define TRUSTY_LOG_DEFAULT_SIZE (32768)
3631+#define TRUSTY_LOG_MIN_SIZE (PAGE_SIZE / 2)
3632+#define TRUSTY_LOG_MAX_SIZE (1 * 1024 * 1024 * 1024)
3633+#define TRUSTY_LINE_BUFFER_SIZE (256)
3634+
3635+static size_t log_size_param = TRUSTY_LOG_DEFAULT_SIZE;
3636+
3637+static int trusty_log_size_set(const char *val, const struct kernel_param *kp)
3638+{
3639+ unsigned long long requested = memparse(val, NULL);
3640+
3641+ if (requested < TRUSTY_LOG_MIN_SIZE)
3642+ requested = TRUSTY_LOG_MIN_SIZE;
3643+ if (requested > TRUSTY_LOG_MAX_SIZE)
3644+ requested = TRUSTY_LOG_MAX_SIZE;
3645+ requested = rounddown_pow_of_two(requested);
3646+ log_size_param = requested;
3647+ return 0;
3648+}
3649+
3650+static int trusty_log_size_get(char *buffer, const struct kernel_param *kp)
3651+{
3652+ return sprintf(buffer, "%zu", log_size_param);
3654+}
3655+
3656+module_param_call(log_size, trusty_log_size_set, trusty_log_size_get, NULL,
3657+ 0644);
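+
+/*
+ * For example (illustrative), booting with "trusty_log.log_size=64K"
+ * requests a 65536-byte buffer: memparse() accepts K/M/G suffixes, and
+ * the value is then clamped to the min/max limits and rounded down to a
+ * power of two.
+ */
+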
3658+/*
3659+ * If we log too much and a UART or other slow source is connected, we can stall
3660+ * out another thread which is doing printk.
3661+ *
3662+ * Trusty crash logs are currently ~16 lines, so 100 should include context and
3663+ * the crash most of the time.
3664+ */
3665+static struct ratelimit_state trusty_log_rate_limit =
3666+ RATELIMIT_STATE_INIT("trusty_log", 1 * HZ, 100);
3667+
3668+/**
3669+ * struct trusty_log_sfile - trusty log misc device state
3670+ *
3671+ * @misc: misc device created for the trusty log virtual file
3672+ * @device_name: misc device name following the convention
3673+ * "trusty-<name><id>"
3674+ */
3675+struct trusty_log_sfile {
3676+ struct miscdevice misc;
3677+ char device_name[64];
3678+};
3679+
3680+/**
3681+ * struct trusty_log_sink_state - trusty log sink state
3682+ *
3683+ * @get: current read unwrapped index
3684+ * @trusty_panicked: trusty panic status at the start of the sink iteration
3685+ * (only used for kernel log sink)
3686+ * @sfile: seq_file used for sinking to a virtual file (misc device);
3687+ * set to NULL for the kernel log sink.
3688+ * @ignore_overflow: ignore_overflow used to coalesce overflow messages and
3689+ * avoid reporting an overflow when sinking the oldest
3690+ * line to the virtual file (only used for virtual file sink)
3691+ *
3692+ * A sink state structure is used for both the kernel log sink
3693+ * and the virtual device sink.
3694+ * An instance of the sink state structure is dynamically created
3695+ * for each read iteration of the trusty log virtual file (misc device).
3696+ *
3697+ */
3698+struct trusty_log_sink_state {
3699+ u32 get;
3700+ bool trusty_panicked;
3701+
3702+ /* virtual file sink specific attributes */
3703+ struct seq_file *sfile;
3704+ bool ignore_overflow;
3705+};
3706+
3707+struct trusty_log_state {
3708+ struct device *dev;
3709+ struct device *trusty_dev;
3710+ struct trusty_log_sfile log_sfile;
3711+
3712+ struct log_rb *log;
3713+ struct trusty_log_sink_state klog_sink;
3714+
3715+ u32 log_num_pages;
3716+ struct scatterlist *sg;
3717+ trusty_shared_mem_id_t log_pages_shared_mem_id;
3718+
3719+ struct notifier_block call_notifier;
3720+ struct notifier_block panic_notifier;
3721+ char line_buffer[TRUSTY_LINE_BUFFER_SIZE];
3722+ wait_queue_head_t poll_waiters;
3723+ /* this lock protects access to last_wake_put */
3724+ spinlock_t wake_up_lock;
3725+ u32 last_wake_put;
3726+};
3727+
3728+static inline u32 u32_add_overflow(u32 a, u32 b)
3729+{
3730+ u32 d;
3731+
3732+ if (check_add_overflow(a, b, &d)) {
3733+ /*
3734+ * the overflow is intentional; only the wrapped
3735+ * (mod 2^32) sum matters in the log buffer context
3737+ */
3738+ }
3739+ return d;
3740+}
3741+
3742+static inline u32 u32_sub_overflow(u32 a, u32 b)
3743+{
3744+ u32 d;
3745+
3746+ if (check_sub_overflow(a, b, &d)) {
3747+ /*
3748+ * the overflow is intentional; only the wrapped
3749+ * (mod 2^32) difference matters in the log buffer context
3751+ */
3752+ }
3753+ return d;
3754+}
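+
+/*
+ * Example (illustrative): wrapping arithmetic keeps distances correct
+ * across a u32 wraparound. If get == 0xfffffff0 and put has wrapped to
+ * 0x10, then u32_sub_overflow(put, get) == 0x20, exactly the number of
+ * bytes between the two unwrapped indices.
+ */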
3755+
3756+static int log_read_line(struct trusty_log_state *s, u32 put, u32 get)
3757+{
3758+ struct log_rb *log = s->log;
3759+ int i;
3760+ char c = '\0';
3761+ size_t max_to_read =
3762+ min_t(size_t,
3763+ u32_sub_overflow(put, get),
3764+ sizeof(s->line_buffer) - 1);
3765+ size_t mask = log->sz - 1;
3766+
3767+ for (i = 0; i < max_to_read && c != '\n';) {
3768+ c = log->data[get & mask];
3769+ s->line_buffer[i++] = c;
3770+ get = u32_add_overflow(get, 1);
3771+ }
3772+ s->line_buffer[i] = '\0';
3773+
3774+ return i;
3775+}
3776+
3777+/**
3778+ * trusty_log_has_data() - returns true when more data is available to sink
3779+ * @s: Current log state.
3780+ * @sink: trusty_log_sink_state holding the get index on a given sink
3781+ *
3782+ * Return: true if data is available.
3783+ */
3784+static bool trusty_log_has_data(struct trusty_log_state *s,
3785+ struct trusty_log_sink_state *sink)
3786+{
3787+ struct log_rb *log = s->log;
3788+
3789+ return (log->put != sink->get);
3790+}
3791+
3792+/**
3793+ * trusty_log_start() - initialize the sink iteration either to kernel log
3794+ * or to secondary log_sfile
3795+ * @s: Current log state.
3796+ * @sink: trusty_log_sink_state holding the get index on a given sink
3797+ * @index: Unwrapped ring buffer index from where iteration shall start
3798+ *
3799+ * Return: 0 if successful, negative error code otherwise
3800+ */
3801+static int trusty_log_start(struct trusty_log_state *s,
3802+ struct trusty_log_sink_state *sink,
3803+ u32 index)
3804+{
3805+ struct log_rb *log;
3806+
3807+ if (WARN_ON(!s))
3808+ return -EINVAL;
3809+
3810+ log = s->log;
3811+ if (WARN_ON(!is_power_of_2(log->sz)))
3812+ return -EINVAL;
3813+
3814+ sink->get = index;
3815+ return 0;
3816+}
3817+
3818+/**
3819+ * trusty_log_show() - sink log entry at current iteration
3820+ * @s: Current log state.
3821+ * @sink: trusty_log_sink_state holding the get index on a given sink
3822+ */
3823+static void trusty_log_show(struct trusty_log_state *s,
3824+ struct trusty_log_sink_state *sink)
3825+{
3826+ struct log_rb *log = s->log;
3827+ u32 alloc, put, get;
3828+ int read_chars;
3829+
3830+ /*
3831+ * For this ring buffer, at any given point, alloc >= put >= get.
3832+ * The producer side of the buffer is not locked, so the put and alloc
3833+ * pointers must be read in a defined order (put before alloc) so
3834+ * that the above condition is maintained. A read barrier is needed
3835+ * to make sure the hardware and compiler keep the reads ordered.
3836+ */
3837+ get = sink->get;
3838+ put = log->put;
3839+
3840+ /* Make sure that the read of put occurs before the read of log data */
3841+ rmb();
3842+
3843+ /* Read a line from the log */
3844+ read_chars = log_read_line(s, put, get);
3845+
3846+ /* Force the loads from log_read_line to complete. */
3847+ rmb();
3848+ alloc = log->alloc;
3849+
3850+ /*
3851+ * Discard the line that was just read if the data could
3852+ * have been corrupted by the producer.
3853+ */
3854+ if (u32_sub_overflow(alloc, get) > log->sz) {
3855+ /*
3856+ * this condition is acceptable in the case of the sfile sink
3857+ * when attempting to read the oldest entry (at alloc-log->sz)
3858+ * which may be overrun by a new one when ring buffer write
3859+ * index wraps around.
3860+ * So the overrun is not reported in case the oldest line
3861+ * was being read.
3862+ */
3863+ if (sink->sfile) {
3864+ if (!sink->ignore_overflow)
3865+ seq_puts(sink->sfile, "log overflow.\n");
3866+ /* coalesce subsequent contiguous overflows. */
3867+ sink->ignore_overflow = true;
3868+ } else {
3869+ dev_err(s->dev, "log overflow.\n");
3870+ }
3871+ sink->get = u32_sub_overflow(alloc, log->sz);
3872+ return;
3873+ }
3874+ /* compute next line index */
3875+ sink->get = u32_add_overflow(get, read_chars);
3876+ /* once a line is valid, ignore_overflow must be disabled */
3877+ sink->ignore_overflow = false;
3878+ if (sink->sfile) {
3879+ seq_printf(sink->sfile, "%s", s->line_buffer);
3880+ } else {
3881+ if (sink->trusty_panicked ||
3882+ __ratelimit(&trusty_log_rate_limit)) {
3883+ dev_info(s->dev, "%s", s->line_buffer);
3884+ }
3885+ }
3886+}
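
A minimal stand-alone sketch of the overrun check used above, assuming wrapping u32 arithmetic equivalent to the driver's u32_sub_overflow helper (illustrative only, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Model of the producer-side indices in struct log_rb. */
    struct model_rb {
        uint32_t alloc; /* bytes reserved by the producer (may wrap) */
        uint32_t put;   /* bytes fully written (may wrap) */
        uint32_t sz;    /* power-of-two buffer size */
    };

    /* Wrapping subtraction, mirroring u32_sub_overflow. */
    static uint32_t u32_wrap_sub(uint32_t a, uint32_t b)
    {
        return a - b; /* unsigned arithmetic wraps by definition */
    }

    /* Nonzero when the producer may have lapped the consumer index. */
    static int overrun(const struct model_rb *rb, uint32_t get)
    {
        return u32_wrap_sub(rb->alloc, get) > rb->sz;
    }

    int main(void)
    {
        struct model_rb rb = { .alloc = 40, .put = 38, .sz = 16 };
        uint32_t get = 20; /* stale: 40 - 20 = 20 > 16 */

        assert(overrun(&rb, get));
        get = u32_wrap_sub(rb.alloc, rb.sz); /* resync to oldest entry */
        assert(!overrun(&rb, get));
        return 0;
    }
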
3887+
3888+static void *trusty_log_seq_start(struct seq_file *sfile, loff_t *pos)
3889+{
3890+ struct trusty_log_sfile *lb;
3891+ struct trusty_log_state *s;
3892+ struct log_rb *log;
3893+ struct trusty_log_sink_state *log_sfile_sink;
3894+ u32 index;
3895+ int rc;
3896+
3897+ if (WARN_ON(!pos))
3898+ return ERR_PTR(-EINVAL);
3899+
3900+ lb = sfile->private;
3901+ if (WARN_ON(!lb))
3902+ return ERR_PTR(-EINVAL);
3903+
3904+ log_sfile_sink = kzalloc(sizeof(*log_sfile_sink), GFP_KERNEL);
3905+ if (!log_sfile_sink)
3906+ return ERR_PTR(-ENOMEM);
3907+
3908+ s = container_of(lb, struct trusty_log_state, log_sfile);
3909+ log_sfile_sink->sfile = sfile;
3910+ log = s->log;
3911+ if (*pos == 0) {
3912+ /* start at the oldest line */
3913+ index = 0;
3914+ if (log->alloc > log->sz)
3915+ index = u32_sub_overflow(log->alloc, log->sz);
3916+ } else {
3917+ /*
3918+	 * '*pos > 0': pos holds the 32-bit unwrapped index from which
3919+	 * to start iterating
3920+ */
3921+ index = (u32)*pos;
3922+ }
3923+ pr_debug("%s start=%u\n", __func__, index);
3924+
3925+ log_sfile_sink->ignore_overflow = true;
3926+ rc = trusty_log_start(s, log_sfile_sink, index);
3927+ if (rc < 0)
3928+ goto free_sink;
3929+
3930+ if (!trusty_log_has_data(s, log_sfile_sink))
3931+ goto free_sink;
3932+
3933+ return log_sfile_sink;
3934+
3935+free_sink:
3936+ pr_debug("%s kfree\n", __func__);
3937+ kfree(log_sfile_sink);
3938+ return rc < 0 ? ERR_PTR(rc) : NULL;
3939+}
3940+
3941+static void *trusty_log_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
3942+{
3943+ struct trusty_log_sfile *lb;
3944+ struct trusty_log_state *s;
3945+ struct trusty_log_sink_state *log_sfile_sink = v;
3946+ int rc = 0;
3947+
3948+ if (WARN_ON(!log_sfile_sink))
3949+ return ERR_PTR(-EINVAL);
3950+
3951+ lb = sfile->private;
3952+ if (WARN_ON(!lb)) {
3953+ rc = -EINVAL;
3954+ goto end_of_iter;
3955+ }
3956+ s = container_of(lb, struct trusty_log_state, log_sfile);
3957+
3958+ if (WARN_ON(!pos)) {
3959+ rc = -EINVAL;
3960+ goto end_of_iter;
3961+ }
3962+ /*
3963+	 * When starting a virtual file sink, the start function is invoked
3964+	 * with a pos argument whose value is set to zero.
3965+	 * Subsequent starts are invoked with pos set to
3966+	 * the unwrapped read index (get).
3967+	 * Upon u32 wraparound, the get index could be reset to zero.
3968+	 * Thus bit 32 is set to distinguish a wrapped `get` value of zero
3969+	 * from the `start of file` zero value.
3970+ */
3971+	*pos = (1ULL << 32) + log_sfile_sink->get;
3972+ if (!trusty_log_has_data(s, log_sfile_sink))
3973+ goto end_of_iter;
3974+
3975+ return log_sfile_sink;
3976+
3977+end_of_iter:
3978+ pr_debug("%s kfree\n", __func__);
3979+ kfree(log_sfile_sink);
3980+ return rc < 0 ? ERR_PTR(rc) : NULL;
3981+}
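
A stand-alone sketch of that position encoding (illustrative only): bit 32 marks "resume from this get index", so a wrapped get of zero stays distinguishable from the start-of-file pos value of zero:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t encode_pos(uint32_t get)
    {
        return (1ULL << 32) | get; /* bit 32 = explicit resume point */
    }

    static uint32_t decode_get(uint64_t pos)
    {
        return (uint32_t)pos; /* truncation drops the marker bit */
    }

    int main(void)
    {
        assert(encode_pos(0) != 0); /* wrapped get of 0 != start of file */
        assert(decode_get(encode_pos(0)) == 0);
        assert(decode_get(encode_pos(0xdeadbeef)) == 0xdeadbeef);
        return 0;
    }
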
3982+
3983+static void trusty_log_seq_stop(struct seq_file *sfile, void *v)
3984+{
3985+ /*
3986+	 * When iteration completes or fails, the next callback frees
3987+	 * the sink structure and returns NULL or an error code,
3988+	 * so stop (invoked with void *v set to the last next
3989+	 * return value) is called with v == NULL or an error pointer.
3990+	 * When user space stops the iteration before the end
3991+	 * (because of a user-space memory allocation limit, for example)
3992+	 * the stop function receives a non-NULL sink pointer
3993+	 * and is in charge of freeing the sink structure.
3994+ */
3995+ struct trusty_log_sink_state *log_sfile_sink = v;
3996+
3997+ /* nothing to do - sink structure already freed */
3998+ if (IS_ERR_OR_NULL(log_sfile_sink))
3999+ return;
4000+
4001+ kfree(log_sfile_sink);
4002+
4003+ pr_debug("%s kfree\n", __func__);
4004+}
4005+
4006+static int trusty_log_seq_show(struct seq_file *sfile, void *v)
4007+{
4008+ struct trusty_log_sfile *lb;
4009+ struct trusty_log_state *s;
4010+ struct trusty_log_sink_state *log_sfile_sink = v;
4011+
4012+ if (WARN_ON(!log_sfile_sink))
4013+ return -EINVAL;
4014+
4015+ lb = sfile->private;
4016+ if (WARN_ON(!lb))
4017+ return -EINVAL;
4018+
4019+ s = container_of(lb, struct trusty_log_state, log_sfile);
4020+
4021+ trusty_log_show(s, log_sfile_sink);
4022+ return 0;
4023+}
4024+
4025+static void trusty_dump_logs(struct trusty_log_state *s)
4026+{
4027+ int rc;
4028+ /*
4029+	 * note: klog_sink.get is initialized to zero by kzalloc
4030+ */
4031+ s->klog_sink.trusty_panicked = trusty_get_panic_status(s->trusty_dev);
4032+
4033+ rc = trusty_log_start(s, &s->klog_sink, s->klog_sink.get);
4034+ if (rc < 0)
4035+ return;
4036+
4037+ while (trusty_log_has_data(s, &s->klog_sink))
4038+ trusty_log_show(s, &s->klog_sink);
4039+}
4040+
4041+static int trusty_log_call_notify(struct notifier_block *nb,
4042+ unsigned long action, void *data)
4043+{
4044+ struct trusty_log_state *s;
4045+ unsigned long flags;
4046+ u32 cur_put;
4047+
4048+ if (action != TRUSTY_CALL_RETURNED)
4049+ return NOTIFY_DONE;
4050+
4051+ s = container_of(nb, struct trusty_log_state, call_notifier);
4052+ spin_lock_irqsave(&s->wake_up_lock, flags);
4053+ cur_put = s->log->put;
4054+ if (cur_put != s->last_wake_put) {
4055+ s->last_wake_put = cur_put;
4056+ wake_up_all(&s->poll_waiters);
4057+ }
4058+ spin_unlock_irqrestore(&s->wake_up_lock, flags);
4059+ return NOTIFY_OK;
4060+}
4061+
4062+static int trusty_log_panic_notify(struct notifier_block *nb,
4063+ unsigned long action, void *data)
4064+{
4065+ struct trusty_log_state *s;
4066+
4067+ /*
4068+	 * Don't grab the spin lock, so as not to hold up the panic
4069+	 * notifier, even though this is racy.
4070+ */
4071+ s = container_of(nb, struct trusty_log_state, panic_notifier);
4072+ dev_info(s->dev, "panic notifier - trusty version %s",
4073+ trusty_version_str_get(s->trusty_dev));
4074+ trusty_dump_logs(s);
4075+ return NOTIFY_OK;
4076+}
4077+
4078+static const struct seq_operations trusty_log_seq_ops = {
4079+ .start = trusty_log_seq_start,
4080+ .stop = trusty_log_seq_stop,
4081+ .next = trusty_log_seq_next,
4082+ .show = trusty_log_seq_show,
4083+};
4084+
4085+static int trusty_log_sfile_dev_open(struct inode *inode, struct file *file)
4086+{
4087+ struct trusty_log_sfile *ls;
4088+ struct seq_file *sfile;
4089+ int rc;
4090+
4091+ /*
4092+	 * file->private_data contains a pointer to the miscdevice struct
4093+	 * passed to misc_register()
4094+ */
4095+ if (WARN_ON(!file->private_data))
4096+ return -EINVAL;
4097+
4098+ ls = container_of(file->private_data, struct trusty_log_sfile, misc);
4099+
4100+ /*
4101+ * seq_open uses file->private_data to store the seq_file associated
4102+ * with the struct file, but it must be NULL when seq_open is called
4103+ */
4104+ file->private_data = NULL;
4105+ rc = seq_open(file, &trusty_log_seq_ops);
4106+ if (rc < 0)
4107+ return rc;
4108+
4109+ sfile = file->private_data;
4110+ if (WARN_ON(!sfile))
4111+ return -EINVAL;
4112+
4113+ sfile->private = ls;
4114+ return 0;
4115+}
4116+
4117+static unsigned int trusty_log_sfile_dev_poll(struct file *filp,
4118+ struct poll_table_struct *wait)
4119+{
4120+ struct seq_file *sfile;
4121+ struct trusty_log_sfile *lb;
4122+ struct trusty_log_state *s;
4123+ struct log_rb *log;
4124+
4125+ /*
4126+ * trusty_log_sfile_dev_open() pointed filp->private_data to a
4127+ * seq_file, and that seq_file->private to the trusty_log_sfile
4128+ * field of a trusty_log_state
4129+ */
4130+ sfile = filp->private_data;
4131+ lb = sfile->private;
4132+ s = container_of(lb, struct trusty_log_state, log_sfile);
4133+ poll_wait(filp, &s->poll_waiters, wait);
4134+ log = s->log;
4135+
4136+ /*
4137+ * Userspace has read up to filp->f_pos so far. Update klog_sink
4138+ * to indicate that, so that we don't end up dumping the entire
4139+ * Trusty log in case of panic.
4140+ */
4141+ s->klog_sink.get = (u32)filp->f_pos;
4142+
4143+ if (log->put != (u32)filp->f_pos) {
4144+ /* data ready to read */
4145+ return EPOLLIN | EPOLLRDNORM;
4146+ }
4147+ /* no data available, go to sleep */
4148+ return 0;
4149+}
4150+
4151+static const struct file_operations log_sfile_dev_operations = {
4152+ .owner = THIS_MODULE,
4153+ .open = trusty_log_sfile_dev_open,
4154+ .poll = trusty_log_sfile_dev_poll,
4155+ .read = seq_read,
4156+ .release = seq_release,
4157+};
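
For reference, a minimal user-space consumer of this interface could look like the sketch below; the device node name is an assumption derived from the "trusty-log%d" template used by trusty_log_sfile_register() further down:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        /* Device name assumed from the "trusty-log%d" template. */
        struct pollfd pfd = {
            .fd = open("/dev/trusty-log0", O_RDONLY | O_NONBLOCK),
            .events = POLLIN,
        };

        if (pfd.fd < 0) {
            perror("open");
            return 1;
        }
        for (;;) {
            if (poll(&pfd, 1, -1) < 0) /* sleep until data arrives */
                break;
            while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, (size_t)n, stdout);
        }
        close(pfd.fd);
        return 0;
    }
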
4158+
4159+static int trusty_log_sfile_register(struct trusty_log_state *s)
4160+{
4161+ int ret;
4162+ struct trusty_log_sfile *ls = &s->log_sfile;
4163+
4164+ if (WARN_ON(!ls))
4165+ return -EINVAL;
4166+
4167+ snprintf(ls->device_name, sizeof(ls->device_name),
4168+ "trusty-log%d", s->dev->id);
4169+ ls->misc.minor = MISC_DYNAMIC_MINOR;
4170+ ls->misc.name = ls->device_name;
4171+ ls->misc.fops = &log_sfile_dev_operations;
4172+
4173+ ret = misc_register(&ls->misc);
4174+ if (ret) {
4175+ dev_err(s->dev,
4176+			"misc_register failed for log_sfile: ret=%d\n",
4177+ ret);
4178+ return ret;
4179+ }
4180+ dev_info(s->dev, "/dev/%s registered\n",
4181+ ls->device_name);
4182+ return 0;
4183+}
4184+
4185+static void trusty_log_sfile_unregister(struct trusty_log_state *s)
4186+{
4187+ struct trusty_log_sfile *ls = &s->log_sfile;
4188+
4189+ misc_deregister(&ls->misc);
4190+ if (s->dev) {
4191+ dev_info(s->dev, "/dev/%s unregistered\n",
4192+ ls->misc.name);
4193+ }
4194+}
4195+
4196+static bool trusty_supports_logging(struct device *device)
4197+{
4198+ int result;
4199+
4200+ result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION,
4201+ TRUSTY_LOG_API_VERSION, 0, 0);
4202+ if (result == SM_ERR_UNDEFINED_SMC) {
4203+ dev_info(device, "trusty-log not supported on secure side.\n");
4204+ return false;
4205+ } else if (result < 0) {
4206+ dev_err(device,
4207+ "trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n",
4208+ result);
4209+ return false;
4210+ }
4211+
4212+ if (result != TRUSTY_LOG_API_VERSION) {
4213+ dev_info(device, "unsupported api version: %d, supported: %d\n",
4214+ result, TRUSTY_LOG_API_VERSION);
4215+ return false;
4216+ }
4217+ return true;
4218+}
4219+
4220+static int trusty_log_init(struct platform_device *pdev)
4221+{
4222+ struct trusty_log_state *s;
4223+ struct scatterlist *sg;
4224+ unsigned char *mem;
4225+ int i;
4226+ int result;
4227+ trusty_shared_mem_id_t mem_id;
4228+ int log_size;
4229+
4230+ s = kzalloc(sizeof(*s), GFP_KERNEL);
4231+ if (!s) {
4232+ result = -ENOMEM;
4233+ goto error_alloc_state;
4234+ }
4235+
4236+ s->dev = &pdev->dev;
4237+ s->trusty_dev = s->dev->parent;
4238+
4239+ s->log_num_pages = DIV_ROUND_UP(log_size_param + sizeof(struct log_rb),
4240+ PAGE_SIZE);
4241+ s->sg = kcalloc(s->log_num_pages, sizeof(*s->sg), GFP_KERNEL);
4242+ if (!s->sg) {
4243+ result = -ENOMEM;
4244+ goto error_alloc_sg;
4245+ }
4246+
4247+ log_size = s->log_num_pages * PAGE_SIZE;
4248+ mem = vzalloc(log_size);
4249+ if (!mem) {
4250+ result = -ENOMEM;
4251+ goto error_alloc_log;
4252+ }
4253+
4254+ s->log = (struct log_rb *)mem;
4255+
4256+ sg_init_table(s->sg, s->log_num_pages);
4257+ for_each_sg(s->sg, sg, s->log_num_pages, i) {
4258+ struct page *pg = vmalloc_to_page(mem + (i * PAGE_SIZE));
4259+
4260+ if (!pg) {
4261+ result = -ENOMEM;
4262+ goto err_share_memory;
4263+ }
4264+ sg_set_page(sg, pg, PAGE_SIZE, 0);
4265+ }
4266+ /*
4267+	 * This will fail for Trusty API version < TRUSTY_API_VERSION_MEM_OBJ
4268+	 * if s->log_num_pages > 1.
4269+	 * Use trusty_share_memory_compat instead of trusty_share_memory in
4270+	 * case s->log_num_pages == 1 and API version < TRUSTY_API_VERSION_MEM_OBJ;
4271+	 * in that case SMC_SC_SHARED_LOG_ADD expects a different value than
4272+	 * what trusty_share_memory returns.
4273+ */
4274+ result = trusty_share_memory_compat(s->trusty_dev, &mem_id, s->sg,
4275+ s->log_num_pages, PAGE_KERNEL);
4276+ if (result) {
4277+ dev_err(s->dev, "trusty_share_memory failed: %d\n", result);
4278+ goto err_share_memory;
4279+ }
4280+ s->log_pages_shared_mem_id = mem_id;
4281+
4282+ result = trusty_std_call32(s->trusty_dev,
4283+ SMC_SC_SHARED_LOG_ADD,
4284+ (u32)(mem_id), (u32)(mem_id >> 32),
4285+ log_size);
4286+ if (result < 0) {
4287+ dev_err(s->dev,
4288+ "trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d 0x%llx\n",
4289+ result, mem_id);
4290+ goto error_std_call;
4291+ }
4292+
4293+ init_waitqueue_head(&s->poll_waiters);
4294+ spin_lock_init(&s->wake_up_lock);
4295+
4296+ s->call_notifier.notifier_call = trusty_log_call_notify;
4297+ result = trusty_call_notifier_register(s->trusty_dev,
4298+ &s->call_notifier);
4299+ if (result < 0) {
4300+ dev_err(&pdev->dev,
4301+ "failed to register trusty call notifier\n");
4302+ goto error_call_notifier;
4303+ }
4304+
4305+ s->panic_notifier.notifier_call = trusty_log_panic_notify;
4306+ result = atomic_notifier_chain_register(&panic_notifier_list,
4307+ &s->panic_notifier);
4308+ if (result < 0) {
4309+ dev_err(&pdev->dev,
4310+ "failed to register panic notifier\n");
4311+ goto error_panic_notifier;
4312+ }
4313+
4314+ result = trusty_log_sfile_register(s);
4315+ if (result < 0) {
4316+ dev_err(&pdev->dev, "failed to register log_sfile\n");
4317+ goto error_log_sfile;
4318+ }
4319+
4320+ platform_set_drvdata(pdev, s);
4321+
4322+ return 0;
4323+
4324+error_log_sfile:
4325+ atomic_notifier_chain_unregister(&panic_notifier_list,
4326+ &s->panic_notifier);
4327+error_panic_notifier:
4328+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
4329+error_call_notifier:
4330+ trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
4331+ (u32)mem_id, (u32)(mem_id >> 32), 0);
4332+error_std_call:
4333+ if (WARN_ON(trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
4334+ s->log_num_pages))) {
4335+		dev_err(&pdev->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
4336+			result, mem_id);
4337+		/*
4338+		 * It is not safe to free this memory if trusty_reclaim_memory
4339+ * fails. Leak it in that case.
4340+ */
4341+ } else {
4342+err_share_memory:
4343+ vfree(s->log);
4344+ }
4345+error_alloc_log:
4346+ kfree(s->sg);
4347+error_alloc_sg:
4348+ kfree(s);
4349+error_alloc_state:
4350+ return result;
4351+}
4352+
4353+static int trusty_log_probe(struct platform_device *pdev)
4354+{
4355+ int rc;
4356+
4357+ if (!trusty_supports_logging(pdev->dev.parent))
4358+ return -ENXIO;
4359+
4360+ rc = trusty_log_init(pdev);
4361+ if (rc && log_size_param > TRUSTY_LOG_MIN_SIZE) {
4362+ dev_warn(&pdev->dev, "init failed, retrying with 1-page log\n");
4363+ log_size_param = TRUSTY_LOG_MIN_SIZE;
4364+ rc = trusty_log_init(pdev);
4365+ }
4366+ return rc;
4367+}
4368+
4369+static int trusty_log_remove(struct platform_device *pdev)
4370+{
4371+ int result;
4372+ struct trusty_log_state *s = platform_get_drvdata(pdev);
4373+ trusty_shared_mem_id_t mem_id = s->log_pages_shared_mem_id;
4374+
4375+ trusty_log_sfile_unregister(s);
4376+ atomic_notifier_chain_unregister(&panic_notifier_list,
4377+ &s->panic_notifier);
4378+ trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier);
4379+
4380+ result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM,
4381+ (u32)mem_id, (u32)(mem_id >> 32), 0);
4382+ if (result) {
4383+ dev_err(&pdev->dev,
4384+ "trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n",
4385+ result);
4386+ }
4387+ result = trusty_reclaim_memory(s->trusty_dev, mem_id, s->sg,
4388+ s->log_num_pages);
4389+ if (WARN_ON(result)) {
4390+ dev_err(&pdev->dev,
4391+ "trusty failed to remove shared memory: %d\n", result);
4392+ } else {
4393+ /*
4394+		 * It is not safe to free this memory if trusty_reclaim_memory
4395+ * fails. Leak it in that case.
4396+ */
4397+ vfree(s->log);
4398+ }
4399+ kfree(s->sg);
4400+ kfree(s);
4401+
4402+ return 0;
4403+}
4404+
4405+static const struct of_device_id trusty_log_of_match[] = {
4406+ { .compatible = "android,trusty-log-v1", },
4407+ {},
4408+};
4409+
4410+MODULE_DEVICE_TABLE(of, trusty_log_of_match);
4411+
4412+static struct platform_driver trusty_log_driver = {
4413+ .probe = trusty_log_probe,
4414+ .remove = trusty_log_remove,
4415+ .driver = {
4416+ .name = "trusty-log",
4417+		.of_match_table = trusty_log_of_match,
4418+ },
4419+};
4420+
4421+module_platform_driver(trusty_log_driver);
4422+
4423+MODULE_LICENSE("GPL v2");
4424+MODULE_DESCRIPTION("Trusty logging driver");
4425diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h
4426new file mode 100644
4427index 000000000000..7b5e6096b51e
4428--- /dev/null
4429+++ b/drivers/trusty/trusty-log.h
4430@@ -0,0 +1,28 @@
4431+/* SPDX-License-Identifier: MIT */
4432+/*
4433+ * Copyright (c) 2015 Google, Inc.
4434+ *
4435+ * Trusty also has a copy of this header. Please keep the copies in sync.
4436+ */
4437+#ifndef _TRUSTY_LOG_H_
4438+#define _TRUSTY_LOG_H_
4439+
4440+/*
4441+ * Ring buffer that supports one secure producer thread and one
4442+ * linux side consumer thread.
4443+ */
4444+struct log_rb {
4445+ volatile uint32_t alloc;
4446+ volatile uint32_t put;
4447+ uint32_t sz;
4448+ volatile char data[];
4449+} __packed;
4450+
4451+#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0)
4452+#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1)
4453+#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2)
4454+
4455+#define TRUSTY_LOG_API_VERSION 1
4456+
4457+#endif
4458+
4459diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c
4460new file mode 100644
4461index 000000000000..8a360298e501
4462--- /dev/null
4463+++ b/drivers/trusty/trusty-mem.c
4464@@ -0,0 +1,139 @@
4465+// SPDX-License-Identifier: GPL-2.0-only
4466+/*
4467+ * Copyright (C) 2015 Google, Inc.
4468+ */
4469+
4470+#include <linux/types.h>
4471+#include <linux/printk.h>
4472+#include <linux/trusty/arm_ffa.h>
4473+#include <linux/trusty/trusty.h>
4474+#include <linux/trusty/smcall.h>
4475+
4476+#define MEM_ATTR_STRONGLY_ORDERED (0x00U)
4477+#define MEM_ATTR_DEVICE (0x04U)
4478+#define MEM_ATTR_NORMAL_NON_CACHEABLE (0x44U)
4479+#define MEM_ATTR_NORMAL_WRITE_THROUGH (0xAAU)
4480+#define MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE (0xEEU)
4481+#define MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE (0xFFU)
4482+
4483+#define ATTR_RDONLY (1U << 7)
4484+#define ATTR_INNER_SHAREABLE (3U << 8)
4485+
4486+static int get_mem_attr(struct page *page, pgprot_t pgprot)
4487+{
4488+#if defined(CONFIG_ARM64)
4489+ u64 mair;
4490+ unsigned int attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
4491+
4492+ asm ("mrs %0, mair_el1\n" : "=&r" (mair));
4493+ return (mair >> (attr_index * 8)) & 0xff;
4494+
4495+#elif defined(CONFIG_ARM_LPAE)
4496+ u32 mair;
4497+ unsigned int attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
4498+
4499+ if (attr_index >= 4) {
4500+ attr_index -= 4;
4501+ asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
4502+ } else {
4503+ asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
4504+ }
4505+ return (mair >> (attr_index * 8)) & 0xff;
4506+
4507+#elif defined(CONFIG_ARM)
4508+ /* check memory type */
4509+ switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
4510+ case L_PTE_MT_WRITEALLOC:
4511+ return MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE;
4512+
4513+ case L_PTE_MT_BUFFERABLE:
4514+ return MEM_ATTR_NORMAL_NON_CACHEABLE;
4515+
4516+ case L_PTE_MT_WRITEBACK:
4517+ return MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE;
4518+
4519+ case L_PTE_MT_WRITETHROUGH:
4520+ return MEM_ATTR_NORMAL_WRITE_THROUGH;
4521+
4522+ case L_PTE_MT_UNCACHED:
4523+ return MEM_ATTR_STRONGLY_ORDERED;
4524+
4525+ case L_PTE_MT_DEV_SHARED:
4526+ case L_PTE_MT_DEV_NONSHARED:
4527+ return MEM_ATTR_DEVICE;
4528+
4529+ default:
4530+ return -EINVAL;
4531+ }
4532+#else
4533+ return 0;
4534+#endif
4535+}
4536+
4537+int trusty_encode_page_info(struct ns_mem_page_info *inf,
4538+ struct page *page, pgprot_t pgprot)
4539+{
4540+ int mem_attr;
4541+ u64 pte;
4542+ u8 ffa_mem_attr;
4543+ u8 ffa_mem_perm = 0;
4544+
4545+ if (!inf || !page)
4546+ return -EINVAL;
4547+
4548+ /* get physical address */
4549+ pte = (u64)page_to_phys(page);
4550+
4551+ /* get memory attributes */
4552+ mem_attr = get_mem_attr(page, pgprot);
4553+ if (mem_attr < 0)
4554+ return mem_attr;
4555+
4556+ switch (mem_attr) {
4557+ case MEM_ATTR_STRONGLY_ORDERED:
4558+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRNE;
4559+ break;
4560+
4561+ case MEM_ATTR_DEVICE:
4562+ ffa_mem_attr = FFA_MEM_ATTR_DEVICE_NGNRE;
4563+ break;
4564+
4565+ case MEM_ATTR_NORMAL_NON_CACHEABLE:
4566+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED;
4567+ break;
4568+
4569+ case MEM_ATTR_NORMAL_WRITE_BACK_READ_ALLOCATE:
4570+ case MEM_ATTR_NORMAL_WRITE_BACK_WRITE_ALLOCATE:
4571+ ffa_mem_attr = FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB;
4572+ break;
4573+
4574+ default:
4575+ return -EINVAL;
4576+ }
4577+
4578+ inf->paddr = pte;
4579+
4580+ /* add other attributes */
4581+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE)
4582+ pte |= pgprot_val(pgprot);
4583+#elif defined(CONFIG_ARM)
4584+ if (pgprot_val(pgprot) & L_PTE_RDONLY)
4585+ pte |= ATTR_RDONLY;
4586+ if (pgprot_val(pgprot) & L_PTE_SHARED)
4587+		pte |= ATTR_INNER_SHAREABLE; /* inner shareable */
4588+#endif
4589+
4590+ if (!(pte & ATTR_RDONLY))
4591+ ffa_mem_perm |= FFA_MEM_PERM_RW;
4592+ else
4593+ ffa_mem_perm |= FFA_MEM_PERM_RO;
4594+
4595+ if ((pte & ATTR_INNER_SHAREABLE) == ATTR_INNER_SHAREABLE)
4596+ ffa_mem_attr |= FFA_MEM_ATTR_INNER_SHAREABLE;
4597+
4598+ inf->ffa_mem_attr = ffa_mem_attr;
4599+ inf->ffa_mem_perm = ffa_mem_perm;
4600+ inf->compat_attr = (pte & 0x0000FFFFFFFFFFFFull) |
4601+ ((u64)mem_attr << 48);
4602+ return 0;
4603+}
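
The compat_attr packing above can be illustrated stand-alone: the low 48 bits carry the PTE/address bits and the top 16 bits carry the MAIR-style memory-attribute byte (illustrative sketch, not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define COMPAT_PTE_MASK 0x0000FFFFFFFFFFFFull

    static uint64_t pack_compat_attr(uint64_t pte, uint8_t mem_attr)
    {
        return (pte & COMPAT_PTE_MASK) | ((uint64_t)mem_attr << 48);
    }

    int main(void)
    {
        /* 0xFF = normal write-back write-allocate, as defined above */
        uint64_t v = pack_compat_attr(0x80001000ull, 0xFF);

        assert((v >> 48) == 0xFF);
        assert((v & COMPAT_PTE_MASK) == 0x80001000ull);
        return 0;
    }
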
4604diff --git a/drivers/trusty/trusty-smc-arm.S b/drivers/trusty/trusty-smc-arm.S
4605new file mode 100644
4606index 000000000000..8ff83547d33f
4607--- /dev/null
4608+++ b/drivers/trusty/trusty-smc-arm.S
4609@@ -0,0 +1,41 @@
4610+/* SPDX-License-Identifier: GPL-2.0-only */
4611+/*
4612+ * Copyright (C) 2020 Google, Inc.
4613+ */
4614+
4615+#include <linux/linkage.h>
4616+
4617+.arch_extension sec
4618+
4619+ENTRY(trusty_smc8)
4620+ /* Save stack location where r3-r7 smc arguments are stored */
4621+ mov r12, sp
4622+
4623+ /* Save original r4-r7 values as caller expects these to be preserved */
4624+ push {r4-r7}
4625+
4626+ /* Save return value pointer and return address */
4627+ push {r0, lr}
4628+
4629+	/* the ARM ABI shifts arguments when returning a struct; shift them back */
4630+ mov r0, r1
4631+ mov r1, r2
4632+ mov r2, r3
4633+
4634+ /* Load stack based arguments */
4635+ ldmia r12, {r3-r7}
4636+
4637+ smc #0
4638+
4639+ /* Restore return address and get return value pointer */
4640+ pop {r12, lr}
4641+
4642+ /* Copy 8-register smc return value to struct smc_ret8 return value */
4643+ stmia r12, {r0-r7}
4644+
4645+ /* Restore original r4-r7 values */
4646+ pop {r4-r7}
4647+
4648+ /* Return */
4649+ bx lr
4650+ENDPROC(trusty_smc8)
4651diff --git a/drivers/trusty/trusty-smc-arm64.S b/drivers/trusty/trusty-smc-arm64.S
4652new file mode 100644
4653index 000000000000..14c8fed28a5e
4654--- /dev/null
4655+++ b/drivers/trusty/trusty-smc-arm64.S
4656@@ -0,0 +1,35 @@
4657+/* SPDX-License-Identifier: GPL-2.0-only */
4658+/*
4659+ * Copyright (C) 2020 Google, Inc.
4660+ */
4661+
4662+#include <linux/linkage.h>
4663+
4664+.macro push ra, rb
4665+stp \ra, \rb, [sp,#-16]!
4666+.endm
4667+
4668+.macro pop ra, rb
4669+ldp \ra, \rb, [sp], #16
4670+.endm
4671+
4672+lr .req x30
4673+
4674+SYM_FUNC_START(trusty_smc8)
4675+ /*
4676+ * Save x8 (return value ptr) and lr. The SMC calling convention says el3
4677+ * does not need to preserve x8. The normal ABI does not require either x8
4678+ * or lr to be preserved.
4679+ */
4680+ push x8, lr
4681+ smc #0
4682+ pop x8, lr
4683+
4684+ /* Copy 8-register smc return value to struct smc_ret8 return value */
4685+ stp x0, x1, [x8], #16
4686+ stp x2, x3, [x8], #16
4687+ stp x4, x5, [x8], #16
4688+ stp x6, x7, [x8], #16
4689+
4690+ ret
4691+SYM_FUNC_END(trusty_smc8)
4692diff --git a/drivers/trusty/trusty-smc.h b/drivers/trusty/trusty-smc.h
4693new file mode 100644
4694index 000000000000..b53e5abb4d05
4695--- /dev/null
4696+++ b/drivers/trusty/trusty-smc.h
4697@@ -0,0 +1,26 @@
4698+/* SPDX-License-Identifier: GPL-2.0-only */
4699+/*
4700+ * Copyright (C) 2020 Google, Inc.
4701+ */
4702+#ifndef _TRUSTY_SMC_H
4703+#define _TRUSTY_SMC_H
4704+
4705+#include <linux/types.h>
4706+
4707+struct smc_ret8 {
4708+ unsigned long r0;
4709+ unsigned long r1;
4710+ unsigned long r2;
4711+ unsigned long r3;
4712+ unsigned long r4;
4713+ unsigned long r5;
4714+ unsigned long r6;
4715+ unsigned long r7;
4716+};
4717+
4718+struct smc_ret8 trusty_smc8(unsigned long r0, unsigned long r1,
4719+ unsigned long r2, unsigned long r3,
4720+ unsigned long r4, unsigned long r5,
4721+ unsigned long r6, unsigned long r7);
4722+
4723+#endif /* _TRUSTY_SMC_H */
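
As a usage sketch (a hypothetical caller, not part of this patch), a driver-side wrapper around trusty_smc8() passes the SMC function number in r0 and reads the primary result back from smc_ret8.r0:

    #include "trusty-smc.h"

    /* Hypothetical helper: issue a 3-argument SMC and return r0. */
    static unsigned long trusty_smc_call3(unsigned long smcnr,
                                          unsigned long a0,
                                          unsigned long a1,
                                          unsigned long a2)
    {
        struct smc_ret8 ret;

        ret = trusty_smc8(smcnr, a0, a1, a2, 0, 0, 0, 0);
        return ret.r0; /* the remaining registers are ignored here */
    }
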
4724diff --git a/drivers/trusty/trusty-test.c b/drivers/trusty/trusty-test.c
4725new file mode 100644
4726index 000000000000..844868981fa5
4727--- /dev/null
4728+++ b/drivers/trusty/trusty-test.c
4729@@ -0,0 +1,440 @@
4730+// SPDX-License-Identifier: GPL-2.0-only
4731+/*
4732+ * Copyright (C) 2020 Google, Inc.
4733+ */
4734+
4735+#include <linux/ctype.h>
4736+#include <linux/list.h>
4737+#include <linux/platform_device.h>
4738+#include <linux/trusty/smcall.h>
4739+#include <linux/trusty/trusty.h>
4740+#include <linux/scatterlist.h>
4741+#include <linux/slab.h>
4742+#include <linux/mm.h>
4743+#include <linux/mod_devicetable.h>
4744+#include <linux/module.h>
4745+
4746+#include "trusty-test.h"
4747+
4748+struct trusty_test_state {
4749+ struct device *dev;
4750+ struct device *trusty_dev;
4751+};
4752+
4753+struct trusty_test_shmem_obj {
4754+ struct list_head node;
4755+ size_t page_count;
4756+ struct page **pages;
4757+ void *buf;
4758+ struct sg_table sgt;
4759+ trusty_shared_mem_id_t mem_id;
4760+};
4761+
4762+/*
4763+ * Allocate a test object with @page_count number of pages, map it and add it to
4764+ * @list.
4765+ * For multi-page allocations, order the pages so they are not contiguous.
4766+ */
4767+static int trusty_test_alloc_obj(struct trusty_test_state *s,
4768+ size_t page_count,
4769+ struct list_head *list)
4770+{
4771+ size_t i;
4772+ int ret = -ENOMEM;
4773+ struct trusty_test_shmem_obj *obj;
4774+
4775+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4776+ if (!obj)
4777+ goto err_alloc_obj;
4778+ obj->page_count = page_count;
4779+
4780+ obj->pages = kmalloc_array(page_count, sizeof(*obj->pages), GFP_KERNEL);
4781+ if (!obj->pages) {
4782+ ret = -ENOMEM;
4783+ dev_err(s->dev, "failed to allocate page array, count %zd\n",
4784+ page_count);
4785+ goto err_alloc_pages;
4786+ }
4787+
4788+ for (i = 0; i < page_count; i++) {
4789+ obj->pages[i] = alloc_page(GFP_KERNEL);
4790+ if (!obj->pages[i]) {
4791+ ret = -ENOMEM;
4792+ dev_err(s->dev, "failed to allocate page %zd/%zd\n",
4793+ i, page_count);
4794+ goto err_alloc_page;
4795+ }
4796+ if (i > 0 && obj->pages[i - 1] + 1 == obj->pages[i]) {
4797+			/* swap adjacent pages to increase fragmentation */
4798+ swap(obj->pages[i - 1], obj->pages[i]);
4799+ }
4800+ }
4801+
4802+ obj->buf = vmap(obj->pages, page_count, VM_MAP, PAGE_KERNEL);
4803+ if (!obj->buf) {
4804+ ret = -ENOMEM;
4805+ dev_err(s->dev, "failed to map test buffer page count %zd\n",
4806+ page_count);
4807+ goto err_map_pages;
4808+ }
4809+
4810+ ret = sg_alloc_table_from_pages(&obj->sgt, obj->pages, page_count,
4811+ 0, page_count * PAGE_SIZE, GFP_KERNEL);
4812+ if (ret) {
4813+ dev_err(s->dev, "sg_alloc_table_from_pages failed: %d\n", ret);
4814+ goto err_alloc_sgt;
4815+ }
4816+ list_add_tail(&obj->node, list);
4817+ dev_dbg(s->dev, "buffer has %d page runs\n", obj->sgt.nents);
4818+ return 0;
4819+
4820+err_alloc_sgt:
4821+ vunmap(obj->buf);
4822+err_map_pages:
4822+	i = page_count;
4822+	/*
4822+	 * On the alloc-failure path, i is the index of the page that
4822+	 * failed to allocate, so only pages [0, i) need to be freed.
4822+	 */
4823+err_alloc_page:
4824+	while (i > 0)
4825+		__free_page(obj->pages[--i]);
4828+ kfree(obj->pages);
4829+err_alloc_pages:
4830+ kfree(obj);
4831+err_alloc_obj:
4832+ return ret;
4833+}
4834+
4835+/* Unlink, unmap and free a test object and its pages */
4836+static void trusty_test_free_obj(struct trusty_test_state *s,
4837+ struct trusty_test_shmem_obj *obj)
4838+{
4839+ size_t i;
4840+
4841+ list_del(&obj->node);
4842+ sg_free_table(&obj->sgt);
4843+ vunmap(obj->buf);
4844+ for (i = obj->page_count; i > 0; i--)
4845+ __free_page(obj->pages[i - 1]);
4846+ kfree(obj->pages);
4847+ kfree(obj);
4848+}
4849+
4850+/*
4851+ * Share all the pages of all the test objects in @obj_list.
4852+ * If sharing a test object fails, free it so that every test object that
4853+ * remains in @obj_list has been shared when this function returns.
4854+ * Return an error if any test object failed to be shared.
4855+ */
4856+static int trusty_test_share_objs(struct trusty_test_state *s,
4857+ struct list_head *obj_list, size_t size)
4858+{
4859+ int ret = 0;
4860+ int tmpret;
4861+ struct trusty_test_shmem_obj *obj;
4862+ struct trusty_test_shmem_obj *next_obj;
4863+ ktime_t t1;
4864+ ktime_t t2;
4865+
4866+ list_for_each_entry_safe(obj, next_obj, obj_list, node) {
4867+ t1 = ktime_get();
4868+ tmpret = trusty_share_memory(s->trusty_dev, &obj->mem_id,
4869+ obj->sgt.sgl, obj->sgt.nents,
4870+ PAGE_KERNEL);
4871+ t2 = ktime_get();
4872+ if (tmpret) {
4873+ ret = tmpret;
4874+ dev_err(s->dev,
4875+ "trusty_share_memory failed: %d, size=%zd\n",
4876+ ret, size);
4877+
4878+ /*
4879+ * Free obj and continue, so we can revoke the
4880+ * whole list in trusty_test_reclaim_objs.
4881+ */
4882+ trusty_test_free_obj(s, obj);
4883+ }
4884+ dev_dbg(s->dev, "share id=0x%llx, size=%zu took %lld ns\n",
4885+ obj->mem_id, size,
4886+ ktime_to_ns(ktime_sub(t2, t1)));
4887+ }
4888+
4889+ return ret;
4890+}
4891+
4892+/* Reclaim memory shared with trusty for all test objects in @obj_list. */
4893+static int trusty_test_reclaim_objs(struct trusty_test_state *s,
4894+ struct list_head *obj_list, size_t size)
4895+{
4896+ int ret = 0;
4897+ int tmpret;
4898+ struct trusty_test_shmem_obj *obj;
4899+ struct trusty_test_shmem_obj *next_obj;
4900+ ktime_t t1;
4901+ ktime_t t2;
4902+
4903+ list_for_each_entry_safe(obj, next_obj, obj_list, node) {
4904+ t1 = ktime_get();
4905+ tmpret = trusty_reclaim_memory(s->trusty_dev, obj->mem_id,
4906+ obj->sgt.sgl, obj->sgt.nents);
4907+ t2 = ktime_get();
4908+ if (tmpret) {
4909+ ret = tmpret;
4910+ dev_err(s->dev,
4911+ "trusty_reclaim_memory failed: %d, id=0x%llx\n",
4912+ ret, obj->mem_id);
4913+
4914+ /*
4915+ * It is not safe to free this memory if
4916+ * trusty_reclaim_memory fails. Leak it in that
4917+ * case.
4918+ */
4919+ list_del(&obj->node);
4920+ }
4921+ dev_dbg(s->dev, "revoke id=0x%llx, size=%zu took %lld ns\n",
4922+ obj->mem_id, size,
4923+ ktime_to_ns(ktime_sub(t2, t1)));
4924+ }
4925+
4926+ return ret;
4927+}
4928+
4929+/*
4930+ * Exercise a test object. First, initialize the memory, then make a std call
4931+ * into trusty, which will read it and return an error if the initialized
4932+ * values do not match what it expects. If trusty reads the correct values, it
4933+ * will modify the memory and return 0. This function then checks that it can
4934+ * read back the correct modified values.
4935+ */
4936+static int trusty_test_rw(struct trusty_test_state *s,
4937+ struct trusty_test_shmem_obj *obj)
4938+{
4939+ size_t size = obj->page_count * PAGE_SIZE;
4940+ int ret;
4941+ size_t i;
4942+ u64 *buf = obj->buf;
4943+ ktime_t t1;
4944+ ktime_t t2;
4945+
4946+ for (i = 0; i < size / sizeof(*buf); i++)
4947+ buf[i] = i;
4948+
4949+ t1 = ktime_get();
4950+ ret = trusty_std_call32(s->trusty_dev, SMC_SC_TEST_SHARED_MEM_RW,
4951+ (u32)(obj->mem_id), (u32)(obj->mem_id >> 32),
4952+ size);
4953+ t2 = ktime_get();
4954+ if (ret < 0) {
4955+ dev_err(s->dev,
4956+ "trusty std call (SMC_SC_TEST_SHARED_MEM_RW) failed: %d 0x%llx\n",
4957+ ret, obj->mem_id);
4958+ return ret;
4959+ }
4960+
4961+ for (i = 0; i < size / sizeof(*buf); i++) {
4962+ if (buf[i] != size - i) {
4963+ dev_err(s->dev,
4964+ "input mismatch at %zd, got 0x%llx instead of 0x%zx\n",
4965+ i, buf[i], size - i);
4966+ return -EIO;
4967+ }
4968+ }
4969+
4970+ dev_dbg(s->dev, "rw id=0x%llx, size=%zu took %lld ns\n", obj->mem_id,
4971+ size, ktime_to_ns(ktime_sub(t2, t1)));
4972+
4973+ return 0;
4974+}
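
The read/write contract exercised above can be modeled host-side. The sketch below is illustrative only; fake_trusty_side() stands in for the secure side and shows the expected buf[i] = i in / buf[i] = size - i out transform:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    enum { SZ = 4096 };

    /* Fake secure side: verify the host pattern, then write the reply. */
    static void fake_trusty_side(uint64_t *buf, size_t size)
    {
        for (size_t i = 0; i < size / sizeof(*buf); i++) {
            assert(buf[i] == i);
            buf[i] = size - i;
        }
    }

    int main(void)
    {
        static uint64_t buf[SZ / sizeof(uint64_t)];

        for (size_t i = 0; i < SZ / sizeof(*buf); i++)
            buf[i] = i;
        fake_trusty_side(buf, SZ);
        for (size_t i = 0; i < SZ / sizeof(*buf); i++)
            assert(buf[i] == SZ - i);
        return 0;
    }
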
4975+
4976+/*
4977+ * Run test on every test object in @obj_list. Repeat @repeat_access times.
4978+ */
4979+static int trusty_test_rw_objs(struct trusty_test_state *s,
4980+ struct list_head *obj_list,
4981+ size_t repeat_access)
4982+{
4983+ int ret;
4984+ size_t i;
4985+ struct trusty_test_shmem_obj *obj;
4986+
4987+ for (i = 0; i < repeat_access; i++) {
4988+ /*
4989+		 * Repeat the test in case the memory attributes don't match
4990+		 * and either side sees old data.
4991+ */
4992+ list_for_each_entry(obj, obj_list, node) {
4993+ ret = trusty_test_rw(s, obj);
4994+ if (ret)
4995+ return ret;
4996+ }
4997+ }
4998+
4999+ return 0;
5000+}
5001+
5002+/*
5003+ * Allocate @obj_count test objects that each have @page_count pages. Share each
5004+ * object @repeat_share times, each time running tests on every object
5005+ * @repeat_access times.
5006+ */
5007+static int trusty_test_run(struct trusty_test_state *s, size_t page_count,
5008+ size_t obj_count, size_t repeat_share,
5009+ size_t repeat_access)
5010+{
5011+ int ret = 0;
5012+ int tmpret;
5013+ size_t i;
5014+ size_t size = page_count * PAGE_SIZE;
5015+ LIST_HEAD(obj_list);
5016+ struct trusty_test_shmem_obj *obj;
5017+ struct trusty_test_shmem_obj *next_obj;
5018+
5019+ for (i = 0; i < obj_count && !ret; i++)
5020+ ret = trusty_test_alloc_obj(s, page_count, &obj_list);
5021+
5022+ for (i = 0; i < repeat_share && !ret; i++) {
5023+ ret = trusty_test_share_objs(s, &obj_list, size);
5024+ if (ret) {
5025+ dev_err(s->dev,
5026+ "trusty_share_memory failed: %d, i=%zd/%zd, size=%zd\n",
5027+ ret, i, repeat_share, size);
5028+ } else {
5029+ ret = trusty_test_rw_objs(s, &obj_list, repeat_access);
5030+ if (ret)
5031+ dev_err(s->dev,
5032+ "test failed: %d, i=%zd/%zd, size=%zd\n",
5033+ ret, i, repeat_share, size);
5034+ }
5035+ tmpret = trusty_test_reclaim_objs(s, &obj_list, size);
5036+ if (tmpret) {
5037+ ret = tmpret;
5038+ dev_err(s->dev,
5039+ "trusty_reclaim_memory failed: %d, i=%zd/%zd\n",
5040+ ret, i, repeat_share);
5041+ }
5042+ }
5043+
5044+ list_for_each_entry_safe(obj, next_obj, &obj_list, node)
5045+ trusty_test_free_obj(s, obj);
5046+
5047+ dev_info(s->dev, "[ %s ] size %zd, obj_count %zd, repeat_share %zd, repeat_access %zd\n",
5048+ ret ? "FAILED" : "PASSED", size, obj_count, repeat_share,
5049+ repeat_access);
5050+
5051+ return ret;
5052+}
5053+
5054+/*
5055+ * Get an optional numeric argument from @buf, update @buf and return the value.
5056+ * If @buf does not start with ",", return @default_val instead.
5057+ */
5058+static size_t trusty_test_get_arg(const char **buf, size_t default_val)
5059+{
5060+ char *buf_next;
5061+ size_t ret;
5062+
5063+ if (**buf != ',')
5064+ return default_val;
5065+
5066+ (*buf)++;
5067+ ret = simple_strtoul(*buf, &buf_next, 0);
5068+ if (buf_next == *buf)
5069+ return default_val;
5070+
5071+ *buf = buf_next;
5072+
5073+ return ret;
5074+}
5075+
5076+/*
5077+ * Run tests described by a string in this format:
5078+ * <obj_size>,<obj_count=1>,<repeat_share=1>,<repeat_access=3>
5079+ */
5080+static ssize_t trusty_test_run_store(struct device *dev,
5081+ struct device_attribute *attr,
5082+ const char *buf, size_t count)
5083+{
5084+ struct platform_device *pdev = to_platform_device(dev);
5085+ struct trusty_test_state *s = platform_get_drvdata(pdev);
5086+ size_t size;
5087+ size_t obj_count;
5088+ size_t repeat_share;
5089+ size_t repeat_access;
5090+ int ret;
5091+ char *buf_next;
5092+
5093+ while (true) {
5094+ while (isspace(*buf))
5095+ buf++;
5096+ size = simple_strtoul(buf, &buf_next, 0);
5097+ if (buf_next == buf)
5098+ return count;
5099+ buf = buf_next;
5100+ obj_count = trusty_test_get_arg(&buf, 1);
5101+ repeat_share = trusty_test_get_arg(&buf, 1);
5102+ repeat_access = trusty_test_get_arg(&buf, 3);
5103+
5104+ ret = trusty_test_run(s, DIV_ROUND_UP(size, PAGE_SIZE),
5105+ obj_count, repeat_share, repeat_access);
5106+ if (ret)
5107+ return ret;
5108+ }
5109+}
5110+
5111+static DEVICE_ATTR_WO(trusty_test_run);
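
A user-space sketch of driving this attribute; the sysfs path is an assumption for illustration and depends on how the platform device is instantiated on a given system:

    #include <stdio.h>

    int main(void)
    {
        /* Path assumed for illustration; adjust to the real device. */
        const char *path =
            "/sys/bus/platform/devices/trusty-test/trusty_test_run";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* 16 KiB objects, 2 objects, shared 2 times, accessed 3 times */
        fprintf(f, "16384,2,2,3\n");
        return fclose(f) ? 1 : 0;
    }
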
5112+
5113+static struct attribute *trusty_test_attrs[] = {
5114+ &dev_attr_trusty_test_run.attr,
5115+ NULL,
5116+};
5117+ATTRIBUTE_GROUPS(trusty_test);
5118+
5119+static int trusty_test_probe(struct platform_device *pdev)
5120+{
5121+ struct trusty_test_state *s;
5122+ int ret;
5123+
5124+ ret = trusty_std_call32(pdev->dev.parent, SMC_SC_TEST_VERSION,
5125+ TRUSTY_STDCALLTEST_API_VERSION, 0, 0);
5126+ if (ret != TRUSTY_STDCALLTEST_API_VERSION)
5127+ return -ENOENT;
5128+
5129+ s = kzalloc(sizeof(*s), GFP_KERNEL);
5130+ if (!s)
5131+ return -ENOMEM;
5132+
5133+ s->dev = &pdev->dev;
5134+ s->trusty_dev = s->dev->parent;
5135+
5136+ platform_set_drvdata(pdev, s);
5137+
5138+ return 0;
5139+}
5140+
5141+static int trusty_test_remove(struct platform_device *pdev)
5142+{
5143+	struct trusty_test_state *s = platform_get_drvdata(pdev);
5144+
5145+ kfree(s);
5146+ return 0;
5147+}
5148+
5149+static const struct of_device_id trusty_test_of_match[] = {
5150+ { .compatible = "android,trusty-test-v1", },
5151+ {},
5152+};
5153+
5154+MODULE_DEVICE_TABLE(of, trusty_test_of_match);
5155+
5156+static struct platform_driver trusty_test_driver = {
5157+ .probe = trusty_test_probe,
5158+ .remove = trusty_test_remove,
5159+ .driver = {
5160+ .name = "trusty-test",
5161+ .of_match_table = trusty_test_of_match,
5162+ .dev_groups = trusty_test_groups,
5163+ },
5164+};
5165+
5166+module_platform_driver(trusty_test_driver);
5167+
5168+MODULE_LICENSE("GPL v2");
5169+MODULE_DESCRIPTION("Trusty test driver");
5170diff --git a/drivers/trusty/trusty-test.h b/drivers/trusty/trusty-test.h
5171new file mode 100644
5172index 000000000000..eea7beb96876
5173--- /dev/null
5174+++ b/drivers/trusty/trusty-test.h
5175@@ -0,0 +1,13 @@
5176+/* SPDX-License-Identifier: GPL-2.0-only */
5177+/*
5178+ * Copyright (c) 2020 Google, Inc.
5179+ */
5180+#ifndef _TRUSTY_TEST_H
5181+#define _TRUSTY_TEST_H
5182+
5183+#define SMC_SC_TEST_VERSION SMC_STDCALL_NR(SMC_ENTITY_TEST, 0)
5184+#define SMC_SC_TEST_SHARED_MEM_RW SMC_STDCALL_NR(SMC_ENTITY_TEST, 1)
5185+
5186+#define TRUSTY_STDCALLTEST_API_VERSION 1
5187+
5188+#endif /* _TRUSTY_TEST_H */
5189diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c
5190new file mode 100644
5191index 000000000000..fea59cd2e218
5192--- /dev/null
5193+++ b/drivers/trusty/trusty-virtio.c
5194@@ -0,0 +1,840 @@
5195+// SPDX-License-Identifier: GPL-2.0-only
5196+/*
5197+ * Trusty Virtio driver
5198+ *
5199+ * Copyright (C) 2015 Google, Inc.
5200+ */
5201+#include <linux/device.h>
5202+#include <linux/err.h>
5203+#include <linux/kernel.h>
5204+
5205+#include <linux/dma-map-ops.h>
5206+#include <linux/module.h>
5207+#include <linux/mutex.h>
5208+#include <linux/notifier.h>
5209+#include <linux/workqueue.h>
5210+#include <linux/remoteproc.h>
5211+#include <linux/slab.h>
5212+
5213+#include <linux/platform_device.h>
5214+#include <linux/trusty/smcall.h>
5215+#include <linux/trusty/trusty.h>
5216+#include <linux/trusty/trusty_ipc.h>
5217+
5218+#include <linux/virtio.h>
5219+#include <linux/virtio_config.h>
5220+#include <linux/virtio_ids.h>
5221+#include <linux/virtio_ring.h>
5222+
5223+#include <linux/atomic.h>
5224+
5225+#define RSC_DESCR_VER 1
5226+
5227+struct trusty_vdev;
5228+
5229+struct trusty_ctx {
5230+ struct device *dev;
5231+ void *shared_va;
5232+ struct scatterlist shared_sg;
5233+ trusty_shared_mem_id_t shared_id;
5234+ size_t shared_sz;
5235+ struct work_struct check_vqs;
5236+ struct work_struct kick_vqs;
5237+ struct notifier_block call_notifier;
5238+ struct list_head vdev_list;
5239+ struct mutex mlock; /* protects vdev_list */
5240+ struct workqueue_struct *kick_wq;
5241+ struct workqueue_struct *check_wq;
5242+};
5243+
5244+struct trusty_vring {
5245+ void *vaddr;
5246+ struct scatterlist sg;
5247+ trusty_shared_mem_id_t shared_mem_id;
5248+ size_t size;
5249+ unsigned int align;
5250+ unsigned int elem_num;
5251+ u32 notifyid;
5252+ atomic_t needs_kick;
5253+ struct fw_rsc_vdev_vring *vr_descr;
5254+ struct virtqueue *vq;
5255+ struct trusty_vdev *tvdev;
5256+ struct trusty_nop kick_nop;
5257+};
5258+
5259+struct trusty_vdev {
5260+ struct list_head node;
5261+ struct virtio_device vdev;
5262+ struct trusty_ctx *tctx;
5263+ u32 notifyid;
5264+ unsigned int config_len;
5265+ void *config;
5266+ struct fw_rsc_vdev *vdev_descr;
5267+ unsigned int vring_num;
5268+ struct trusty_vring vrings[];
5269+};
5270+
5271+#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev)
5272+
5273+static void check_all_vqs(struct work_struct *work)
5274+{
5275+ unsigned int i;
5276+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
5277+ check_vqs);
5278+ struct trusty_vdev *tvdev;
5279+
5280+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
5281+ for (i = 0; i < tvdev->vring_num; i++)
5282+ if (tvdev->vrings[i].vq)
5283+ vring_interrupt(0, tvdev->vrings[i].vq);
5284+ }
5285+}
5286+
5287+static int trusty_call_notify(struct notifier_block *nb,
5288+ unsigned long action, void *data)
5289+{
5290+ struct trusty_ctx *tctx;
5291+
5292+ if (action != TRUSTY_CALL_RETURNED)
5293+ return NOTIFY_DONE;
5294+
5295+ tctx = container_of(nb, struct trusty_ctx, call_notifier);
5296+ queue_work(tctx->check_wq, &tctx->check_vqs);
5297+
5298+ return NOTIFY_OK;
5299+}
5300+
5301+static void kick_vq(struct trusty_ctx *tctx,
5302+ struct trusty_vdev *tvdev,
5303+ struct trusty_vring *tvr)
5304+{
5305+ int ret;
5306+
5307+ dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n",
5308+ __func__, tvdev->notifyid, tvr->notifyid);
5309+
5310+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ,
5311+ tvdev->notifyid, tvr->notifyid, 0);
5312+ if (ret) {
5313+ dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n",
5314+ tvdev->notifyid, tvr->notifyid, ret);
5315+ }
5316+}
5317+
5318+static void kick_vqs(struct work_struct *work)
5319+{
5320+ unsigned int i;
5321+ struct trusty_vdev *tvdev;
5322+ struct trusty_ctx *tctx = container_of(work, struct trusty_ctx,
5323+ kick_vqs);
5324+ mutex_lock(&tctx->mlock);
5325+ list_for_each_entry(tvdev, &tctx->vdev_list, node) {
5326+ for (i = 0; i < tvdev->vring_num; i++) {
5327+ struct trusty_vring *tvr = &tvdev->vrings[i];
5328+
5329+ if (atomic_xchg(&tvr->needs_kick, 0))
5330+ kick_vq(tctx, tvdev, tvr);
5331+ }
5332+ }
5333+ mutex_unlock(&tctx->mlock);
5334+}
5335+
5336+static bool trusty_virtio_notify(struct virtqueue *vq)
5337+{
5338+ struct trusty_vring *tvr = vq->priv;
5339+ struct trusty_vdev *tvdev = tvr->tvdev;
5340+ struct trusty_ctx *tctx = tvdev->tctx;
5341+ u32 api_ver = trusty_get_api_version(tctx->dev->parent);
5342+
5343+ if (api_ver < TRUSTY_API_VERSION_SMP_NOP) {
5344+ atomic_set(&tvr->needs_kick, 1);
5345+ queue_work(tctx->kick_wq, &tctx->kick_vqs);
5346+ } else {
5347+ trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop);
5348+ }
5349+
5350+ return true;
5351+}
5352+
5353+static int trusty_load_device_descr(struct trusty_ctx *tctx,
5354+ trusty_shared_mem_id_t id, size_t sz)
5355+{
5356+ int ret;
5357+
5358+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5359+
5360+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_GET_DESCR,
5361+ (u32)id, id >> 32, sz);
5362+ if (ret < 0) {
5363+ dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n",
5364+ __func__, ret);
5365+ return -ENODEV;
5366+ }
5367+ return ret;
5368+}
5369+
5370+static void trusty_virtio_stop(struct trusty_ctx *tctx,
5371+ trusty_shared_mem_id_t id, size_t sz)
5372+{
5373+ int ret;
5374+
5375+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5376+
5377+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_STOP,
5378+ (u32)id, id >> 32, sz);
5379+ if (ret) {
5380+		dev_err(tctx->dev, "%s: virtio stop returned (%d)\n",
5381+ __func__, ret);
5382+ return;
5383+ }
5384+}
5385+
5386+static int trusty_virtio_start(struct trusty_ctx *tctx,
5387+ trusty_shared_mem_id_t id, size_t sz)
5388+{
5389+ int ret;
5390+
5391+ dev_dbg(tctx->dev, "%s: %zu bytes @ id %llu\n", __func__, sz, id);
5392+
5393+ ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VIRTIO_START,
5394+ (u32)id, id >> 32, sz);
5395+ if (ret) {
5396+ dev_err(tctx->dev, "%s: virtio start returned (%d)\n",
5397+ __func__, ret);
5398+ return -ENODEV;
5399+ }
5400+ return 0;
5401+}
5402+
5403+static void trusty_virtio_reset(struct virtio_device *vdev)
5404+{
5405+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5406+ struct trusty_ctx *tctx = tvdev->tctx;
5407+
5408+ dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid);
5409+ trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET,
5410+ tvdev->notifyid, 0, 0);
5411+}
5412+
5413+static u64 trusty_virtio_get_features(struct virtio_device *vdev)
5414+{
5415+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5416+
5417+ return tvdev->vdev_descr->dfeatures |
5418+ (1ULL << VIRTIO_F_ACCESS_PLATFORM);
5419+}
5420+
5421+static int trusty_virtio_finalize_features(struct virtio_device *vdev)
5422+{
5423+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5424+ u64 features = vdev->features;
5425+
5426+ /*
5427+ * We set VIRTIO_F_ACCESS_PLATFORM to enable the dma mapping hooks.
5428+ * The other side does not need to know.
5429+ */
5430+ features &= ~(1ULL << VIRTIO_F_ACCESS_PLATFORM);
5431+
5432+ /* Make sure we don't have any features > 32 bits! */
5433+ if (WARN_ON((u32)vdev->features != features))
5434+ return -EINVAL;
5435+
5436+ tvdev->vdev_descr->gfeatures = vdev->features;
5437+ return 0;
5438+}
5439+
5440+static void trusty_virtio_get_config(struct virtio_device *vdev,
5441+ unsigned int offset, void *buf,
5442+ unsigned int len)
5443+{
5444+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5445+
5446+ dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n",
5447+ __func__, len, offset);
5448+
5449+ if (tvdev->config) {
5450+ if (offset + len <= tvdev->config_len)
5451+ memcpy(buf, tvdev->config + offset, len);
5452+ }
5453+}
5454+
5455+static void trusty_virtio_set_config(struct virtio_device *vdev,
5456+ unsigned int offset, const void *buf,
5457+ unsigned int len)
5458+{
5459+}
5460+
5461+static u8 trusty_virtio_get_status(struct virtio_device *vdev)
5462+{
5463+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5464+
5465+ return tvdev->vdev_descr->status;
5466+}
5467+
5468+static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status)
5469+{
5470+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5471+
5472+ tvdev->vdev_descr->status = status;
5473+}
5474+
5475+static void _del_vqs(struct virtio_device *vdev)
5476+{
5477+ unsigned int i;
5478+ int ret;
5479+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5480+ struct trusty_vring *tvr = &tvdev->vrings[0];
5481+
5482+ for (i = 0; i < tvdev->vring_num; i++, tvr++) {
5483+ /* dequeue kick_nop */
5484+ trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop);
5485+
5486+ /* delete vq */
5487+ if (tvr->vq) {
5488+ vring_del_virtqueue(tvr->vq);
5489+ tvr->vq = NULL;
5490+ }
5491+ /* delete vring */
5492+ if (tvr->vaddr) {
5493+ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
5494+ tvr->shared_mem_id,
5495+ &tvr->sg, 1);
5496+ if (WARN_ON(ret)) {
5497+ dev_err(&vdev->dev,
5498+					"trusty_reclaim_memory failed: %d 0x%llx\n",
5499+					ret, tvr->shared_mem_id);
5500+				/*
5501+				 * It is not safe to free this memory if
5502+				 * trusty_reclaim_memory fails. Leak it in that
5503+ * case.
5504+ */
5505+ } else {
5506+ free_pages_exact(tvr->vaddr, tvr->size);
5507+ }
5508+ tvr->vaddr = NULL;
5509+ }
5510+ }
5511+}
5512+
5513+static void trusty_virtio_del_vqs(struct virtio_device *vdev)
5514+{
5515+ _del_vqs(vdev);
5516+}
5517+
5518+
5519+static struct virtqueue *_find_vq(struct virtio_device *vdev,
5520+ unsigned int id,
5521+ void (*callback)(struct virtqueue *vq),
5522+ const char *name,
5523+ bool ctx)
5524+{
5525+ struct trusty_vring *tvr;
5526+ struct trusty_vdev *tvdev = vdev_to_tvdev(vdev);
5527+ phys_addr_t pa;
5528+ int ret;
5529+
5530+ if (!name)
5531+ return ERR_PTR(-EINVAL);
5532+
5533+ if (id >= tvdev->vring_num)
5534+ return ERR_PTR(-EINVAL);
5535+
5536+ tvr = &tvdev->vrings[id];
5537+
5538+ /* actual size of vring (in bytes) */
5539+ tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align));
5540+
5541+ /* allocate memory for the vring. */
5542+ tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO);
5543+ if (!tvr->vaddr) {
5544+ dev_err(&vdev->dev, "vring alloc failed\n");
5545+ return ERR_PTR(-ENOMEM);
5546+ }
5547+
5548+ sg_init_one(&tvr->sg, tvr->vaddr, tvr->size);
5549+ ret = trusty_share_memory_compat(tvdev->tctx->dev->parent,
5550+ &tvr->shared_mem_id, &tvr->sg, 1,
5551+ PAGE_KERNEL);
5552+ if (ret) {
5553+ pa = virt_to_phys(tvr->vaddr);
5554+ dev_err(&vdev->dev, "trusty_share_memory failed: %d %pa\n",
5555+ ret, &pa);
5556+ goto err_share_memory;
5557+ }
5558+
5559+ /* save vring address to shared structure */
5560+ tvr->vr_descr->da = (u32)tvr->shared_mem_id;
5561+
5562+	/* The da field is only 32 bits wide. Use the previously unused
5563+	 * 'reserved' field (pa) to store the top 32 bits of the 64-bit
5564+	 * shared_mem_id.
5564+ */
5565+ tvr->vr_descr->pa = (u32)(tvr->shared_mem_id >> 32);
5566+
5567+ dev_info(&vdev->dev, "vring%d: va(id) %p(%llx) qsz %d notifyid %d\n",
5568+ id, tvr->vaddr, (u64)tvr->shared_mem_id, tvr->elem_num,
5569+ tvr->notifyid);
5570+
5571+ tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align,
5572+ vdev, true, ctx, tvr->vaddr,
5573+ trusty_virtio_notify, callback, name);
5574+ if (!tvr->vq) {
5575+ dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n",
5576+ name);
5577+ goto err_new_virtqueue;
5578+ }
5579+
5580+ tvr->vq->priv = tvr;
5581+
5582+ return tvr->vq;
5583+
5584+err_new_virtqueue:
5585+ ret = trusty_reclaim_memory(tvdev->tctx->dev->parent,
5586+ tvr->shared_mem_id, &tvr->sg, 1);
5587+ if (WARN_ON(ret)) {
5588+		dev_err(&vdev->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
5589+			ret, tvr->shared_mem_id);
5590+		/*
5591+		 * It is not safe to free this memory if trusty_reclaim_memory
5592+ * fails. Leak it in that case.
5593+ */
5594+ } else {
5595+err_share_memory:
5596+ free_pages_exact(tvr->vaddr, tvr->size);
5597+ }
5598+ tvr->vaddr = NULL;
5599+ return ERR_PTR(-ENOMEM);
5600+}
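
A stand-alone sketch of the 64-bit id split used above (illustrative only): the 32-bit da field carries the low half and the repurposed 'reserved'/pa field carries the high half:

    #include <assert.h>
    #include <stdint.h>

    static void split_mem_id(uint64_t id, uint32_t *da, uint32_t *pa)
    {
        *da = (uint32_t)id;         /* low 32 bits */
        *pa = (uint32_t)(id >> 32); /* high 32 bits */
    }

    static uint64_t join_mem_id(uint32_t da, uint32_t pa)
    {
        return ((uint64_t)pa << 32) | da;
    }

    int main(void)
    {
        uint32_t da, pa;

        split_mem_id(0x1122334455667788ull, &da, &pa);
        assert(da == 0x55667788u && pa == 0x11223344u);
        assert(join_mem_id(da, pa) == 0x1122334455667788ull);
        return 0;
    }
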
5601+
5602+static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
5603+ struct virtqueue *vqs[],
5604+ vq_callback_t *callbacks[],
5605+ const char * const names[],
5606+ const bool *ctxs,
5607+ struct irq_affinity *desc)
5608+{
5609+ unsigned int i;
5610+ int ret;
5611+ bool ctx = false;
5612+
5613+ for (i = 0; i < nvqs; i++) {
5614+ ctx = false;
5615+ if (ctxs)
5616+ ctx = ctxs[i];
5617+ vqs[i] = _find_vq(vdev, i, callbacks[i], names[i], ctx);
5618+ if (IS_ERR(vqs[i])) {
5619+ ret = PTR_ERR(vqs[i]);
5620+ _del_vqs(vdev);
5621+ return ret;
5622+ }
5623+ }
5624+ return 0;
5625+}
5626+
5627+static const char *trusty_virtio_bus_name(struct virtio_device *vdev)
5628+{
5629+ return "trusty-virtio";
5630+}
5631+
5632+/* The ops structure which hooks everything together. */
5633+static const struct virtio_config_ops trusty_virtio_config_ops = {
5634+ .get_features = trusty_virtio_get_features,
5635+ .finalize_features = trusty_virtio_finalize_features,
5636+ .get = trusty_virtio_get_config,
5637+ .set = trusty_virtio_set_config,
5638+ .get_status = trusty_virtio_get_status,
5639+ .set_status = trusty_virtio_set_status,
5640+ .reset = trusty_virtio_reset,
5641+ .find_vqs = trusty_virtio_find_vqs,
5642+ .del_vqs = trusty_virtio_del_vqs,
5643+ .bus_name = trusty_virtio_bus_name,
5644+};
5645+
5646+static int trusty_virtio_add_device(struct trusty_ctx *tctx,
5647+ struct fw_rsc_vdev *vdev_descr,
5648+ struct fw_rsc_vdev_vring *vr_descr,
5649+ void *config)
5650+{
5651+ int i, ret;
5652+ struct trusty_vdev *tvdev;
5653+
5654+ tvdev = kzalloc(struct_size(tvdev, vrings, vdev_descr->num_of_vrings),
5655+ GFP_KERNEL);
5656+ if (!tvdev)
5657+ return -ENOMEM;
5658+
5659+ /* setup vdev */
5660+ tvdev->tctx = tctx;
5661+ tvdev->vdev.dev.parent = tctx->dev;
5662+ tvdev->vdev.id.device = vdev_descr->id;
5663+ tvdev->vdev.config = &trusty_virtio_config_ops;
5664+ tvdev->vdev_descr = vdev_descr;
5665+ tvdev->notifyid = vdev_descr->notifyid;
5666+
5667+ /* setup config */
5668+ tvdev->config = config;
5669+ tvdev->config_len = vdev_descr->config_len;
5670+
5671+ /* setup vrings and vdev resource */
5672+ tvdev->vring_num = vdev_descr->num_of_vrings;
5673+
5674+ for (i = 0; i < tvdev->vring_num; i++, vr_descr++) {
5675+ struct trusty_vring *tvr = &tvdev->vrings[i];
5676+
5677+ tvr->tvdev = tvdev;
5678+ tvr->vr_descr = vr_descr;
5679+ tvr->align = vr_descr->align;
5680+ tvr->elem_num = vr_descr->num;
5681+ tvr->notifyid = vr_descr->notifyid;
5682+ trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ,
5683+ tvdev->notifyid, tvr->notifyid);
5684+ }
5685+
5686+ /* register device */
5687+ ret = register_virtio_device(&tvdev->vdev);
5688+ if (ret) {
5689+ dev_err(tctx->dev,
5690+ "Failed (%d) to register device dev type %u\n",
5691+ ret, vdev_descr->id);
5692+ goto err_register;
5693+ }
5694+
5695+ /* add it to tracking list */
5696+ list_add_tail(&tvdev->node, &tctx->vdev_list);
5697+
5698+ return 0;
5699+
5700+err_register:
5701+ kfree(tvdev);
5702+ return ret;
5703+}
5704+
5705+static int trusty_parse_device_descr(struct trusty_ctx *tctx,
5706+ void *descr_va, size_t descr_sz)
5707+{
5708+ u32 i;
5709+ struct resource_table *descr = descr_va;
5710+
5711+ if (descr_sz < sizeof(*descr)) {
5712+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
5713+ (int)descr_sz);
5714+ return -ENODEV;
5715+ }
5716+
5717+ if (descr->ver != RSC_DESCR_VER) {
5718+ dev_err(tctx->dev, "unexpected descr ver (0x%x)\n",
5719+ (int)descr->ver);
5720+ return -ENODEV;
5721+ }
5722+
5723+ if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) {
5724+ dev_err(tctx->dev, "descr table is too small (0x%x)\n",
5725+			(int)descr_sz);
5726+ return -ENODEV;
5727+ }
5728+
5729+ for (i = 0; i < descr->num; i++) {
5730+ struct fw_rsc_hdr *hdr;
5731+ struct fw_rsc_vdev *vd;
5732+ struct fw_rsc_vdev_vring *vr;
5733+ void *cfg;
5734+ size_t vd_sz;
5735+
5736+ u32 offset = descr->offset[i];
5737+
5738+ if (offset >= descr_sz) {
5739+ dev_err(tctx->dev, "offset is out of bounds (%u)\n",
5740+ offset);
5741+ return -ENODEV;
5742+ }
5743+
5744+ /* check space for rsc header */
5745+ if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) {
5746+ dev_err(tctx->dev, "no space for rsc header (%u)\n",
5747+ offset);
5748+ return -ENODEV;
5749+ }
5750+ hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset);
5751+ offset += sizeof(struct fw_rsc_hdr);
5752+
5753+ /* check type */
5754+ if (hdr->type != RSC_VDEV) {
5755+ dev_err(tctx->dev, "unsupported rsc type (%u)\n",
5756+ hdr->type);
5757+ continue;
5758+ }
5759+
5760+ /* got vdev: check space for vdev */
5761+ if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) {
5762+ dev_err(tctx->dev, "no space for vdev descr (%u)\n",
5763+ offset);
5764+ return -ENODEV;
5765+ }
5766+ vd = (struct fw_rsc_vdev *)((u8 *)descr + offset);
5767+
5768+ /* check space for vrings and config area */
5769+ vd_sz = sizeof(struct fw_rsc_vdev) +
5770+ vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) +
5771+ vd->config_len;
5772+
5773+ if ((descr_sz - offset) < vd_sz) {
5774+ dev_err(tctx->dev, "no space for vdev (%u)\n", offset);
5775+ return -ENODEV;
5776+ }
5777+ vr = (struct fw_rsc_vdev_vring *)vd->vring;
5778+ cfg = (void *)(vr + vd->num_of_vrings);
5779+
5780+ trusty_virtio_add_device(tctx, vd, vr, cfg);
5781+ }
5782+
5783+ return 0;
5784+}
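
The bounds-checking pattern above generalizes to any offset-table blob. A simplified stand-alone model (illustrative only, with a reduced header in place of the real resource_table/fw_rsc_* structures):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reduced model of struct resource_table: version, count, offsets. */
    struct tbl_hdr {
        uint32_t ver;
        uint32_t num;
        uint32_t reserved[2];
        uint32_t offset[];
    };

    static int walk(const uint8_t *blob, size_t sz)
    {
        const struct tbl_hdr *h = (const struct tbl_hdr *)blob;

        if (sz < sizeof(*h) || h->ver != 1)
            return -1;
        /* The offset array must fit before it is dereferenced. */
        if (sz < sizeof(*h) + (size_t)h->num * sizeof(uint32_t))
            return -1;
        for (uint32_t i = 0; i < h->num; i++) {
            if (h->offset[i] >= sz) /* entry must start in bounds */
                return -1;
            printf("entry %u at offset %u\n", i, h->offset[i]);
        }
        return 0;
    }

    int main(void)
    {
        uint8_t blob[64] = { 0 };
        struct tbl_hdr h = { .ver = 1, .num = 1 };
        uint32_t off = 24;

        memcpy(blob, &h, sizeof(h));
        memcpy(blob + sizeof(h), &off, sizeof(off));
        return walk(blob, sizeof(blob)) ? 1 : 0;
    }
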
5785+
5786+static void _remove_devices_locked(struct trusty_ctx *tctx)
5787+{
5788+ struct trusty_vdev *tvdev, *next;
5789+
5790+ list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) {
5791+ list_del(&tvdev->node);
5792+ unregister_virtio_device(&tvdev->vdev);
5793+ kfree(tvdev);
5794+ }
5795+}
5796+
5797+static void trusty_virtio_remove_devices(struct trusty_ctx *tctx)
5798+{
5799+ mutex_lock(&tctx->mlock);
5800+ _remove_devices_locked(tctx);
5801+ mutex_unlock(&tctx->mlock);
5802+}
5803+
5804+static int trusty_virtio_add_devices(struct trusty_ctx *tctx)
5805+{
5806+ int ret;
5807+ int ret_tmp;
5808+ void *descr_va;
5809+ trusty_shared_mem_id_t descr_id;
5810+ size_t descr_sz;
5811+ size_t descr_buf_sz;
5812+
5813+ /* allocate buffer to load device descriptor into */
5814+ descr_buf_sz = PAGE_SIZE;
5815+ descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO);
5816+ if (!descr_va) {
5817+ dev_err(tctx->dev, "Failed to allocate shared area\n");
5818+ return -ENOMEM;
5819+ }
5820+
5821+ sg_init_one(&tctx->shared_sg, descr_va, descr_buf_sz);
5822+ ret = trusty_share_memory(tctx->dev->parent, &descr_id,
5823+ &tctx->shared_sg, 1, PAGE_KERNEL);
5824+ if (ret) {
5825+ dev_err(tctx->dev, "trusty_share_memory failed: %d\n", ret);
5826+ goto err_share_memory;
5827+ }
5828+
5829+ /* load device descriptors */
5830+ ret = trusty_load_device_descr(tctx, descr_id, descr_buf_sz);
5831+ if (ret < 0) {
5832+ dev_err(tctx->dev, "failed (%d) to load device descr\n", ret);
5833+ goto err_load_descr;
5834+ }
5835+
5836+ descr_sz = (size_t)ret;
5837+
5838+ mutex_lock(&tctx->mlock);
5839+
5840+ /* parse device descriptor and add virtio devices */
5841+ ret = trusty_parse_device_descr(tctx, descr_va, descr_sz);
5842+ if (ret) {
5843+ dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret);
5844+ goto err_parse_descr;
5845+ }
5846+
5847+ /* register call notifier */
5848+ ret = trusty_call_notifier_register(tctx->dev->parent,
5849+ &tctx->call_notifier);
5850+ if (ret) {
5851+ dev_err(tctx->dev, "%s: failed (%d) to register notifier\n",
5852+ __func__, ret);
5853+ goto err_register_notifier;
5854+ }
5855+
5856+ /* start virtio */
5857+ ret = trusty_virtio_start(tctx, descr_id, descr_sz);
5858+ if (ret) {
5859+ dev_err(tctx->dev, "failed (%d) to start virtio\n", ret);
5860+ goto err_start_virtio;
5861+ }
5862+
5863+ /* attach shared area */
5864+ tctx->shared_va = descr_va;
5865+ tctx->shared_id = descr_id;
5866+ tctx->shared_sz = descr_buf_sz;
5867+
5868+ mutex_unlock(&tctx->mlock);
5869+
5870+ return 0;
5871+
5872+err_start_virtio:
5873+ trusty_call_notifier_unregister(tctx->dev->parent,
5874+ &tctx->call_notifier);
5875+ cancel_work_sync(&tctx->check_vqs);
5876+err_register_notifier:
5877+err_parse_descr:
5878+ _remove_devices_locked(tctx);
5879+ mutex_unlock(&tctx->mlock);
5880+ cancel_work_sync(&tctx->kick_vqs);
5881+ trusty_virtio_stop(tctx, descr_id, descr_sz);
5882+err_load_descr:
5883+ ret_tmp = trusty_reclaim_memory(tctx->dev->parent, descr_id,
5884+ &tctx->shared_sg, 1);
5885+ if (WARN_ON(ret_tmp)) {
5886+		dev_err(tctx->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
5887+			ret_tmp, descr_id);
5888+		/*
5889+		 * It is not safe to free this memory if trusty_reclaim_memory
5890+ * fails. Leak it in that case.
5891+ */
5892+ } else {
5893+err_share_memory:
5894+ free_pages_exact(descr_va, descr_buf_sz);
5895+ }
5896+ return ret;
5897+}
5898+
5899+static dma_addr_t trusty_virtio_dma_map_page(struct device *dev,
5900+ struct page *page,
5901+ unsigned long offset, size_t size,
5902+ enum dma_data_direction dir,
5903+ unsigned long attrs)
5904+{
5905+ struct tipc_msg_buf *buf = page_to_virt(page) + offset;
5906+
5907+ return buf->buf_id;
5908+}
5909+
5910+static const struct dma_map_ops trusty_virtio_dma_map_ops = {
5911+ .map_page = trusty_virtio_dma_map_page,
5912+};
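+
+/*
+ * The map_page override above is what lets generic virtio ring code run over
+ * Trusty shared memory: the "DMA address" written into each vring descriptor
+ * is the tipc_msg_buf buf_id (a trusty_shared_mem_id_t) that the secure side
+ * resolves, not a bus address. A hedged sketch of the effect (not driver
+ * code; vdev_dev, buf, off, len and dir are illustrative):
+ *
+ *   dma_addr_t a = dma_map_page(vdev_dev, virt_to_page(buf), off, len, dir);
+ *   // a == buf->buf_id rather than a CPU-physical or IOMMU address
+ */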
5913+
5914+static int trusty_virtio_probe(struct platform_device *pdev)
5915+{
5916+ int ret;
5917+ struct trusty_ctx *tctx;
5918+
5919+ tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
5920+ if (!tctx)
5921+ return -ENOMEM;
5922+
5923+ tctx->dev = &pdev->dev;
5924+ tctx->call_notifier.notifier_call = trusty_call_notify;
5925+ mutex_init(&tctx->mlock);
5926+ INIT_LIST_HEAD(&tctx->vdev_list);
5927+ INIT_WORK(&tctx->check_vqs, check_all_vqs);
5928+ INIT_WORK(&tctx->kick_vqs, kick_vqs);
5929+ platform_set_drvdata(pdev, tctx);
5930+
5931+ set_dma_ops(&pdev->dev, &trusty_virtio_dma_map_ops);
5932+
5933+ tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0);
5934+ if (!tctx->check_wq) {
5935+ ret = -ENODEV;
5936+		dev_err(&pdev->dev, "Failed to create trusty-check-wq\n");
5937+ goto err_create_check_wq;
5938+ }
5939+
5940+ tctx->kick_wq = alloc_workqueue("trusty-kick-wq",
5941+ WQ_UNBOUND | WQ_CPU_INTENSIVE, 0);
5942+ if (!tctx->kick_wq) {
5943+ ret = -ENODEV;
5944+		dev_err(&pdev->dev, "Failed to create trusty-kick-wq\n");
5945+ goto err_create_kick_wq;
5946+ }
5947+
5948+ ret = trusty_virtio_add_devices(tctx);
5949+ if (ret) {
5950+ dev_err(&pdev->dev, "Failed to add virtio devices\n");
5951+ goto err_add_devices;
5952+ }
5953+
5954+	dev_info(&pdev->dev, "initialization done\n");
5955+ return 0;
5956+
5957+err_add_devices:
5958+ destroy_workqueue(tctx->kick_wq);
5959+err_create_kick_wq:
5960+ destroy_workqueue(tctx->check_wq);
5961+err_create_check_wq:
5962+ kfree(tctx);
5963+ return ret;
5964+}
5965+
5966+static int trusty_virtio_remove(struct platform_device *pdev)
5967+{
5968+ struct trusty_ctx *tctx = platform_get_drvdata(pdev);
5969+ int ret;
5970+
5971+ /* unregister call notifier and wait until workqueue is done */
5972+ trusty_call_notifier_unregister(tctx->dev->parent,
5973+ &tctx->call_notifier);
5974+ cancel_work_sync(&tctx->check_vqs);
5975+
5976+ /* remove virtio devices */
5977+ trusty_virtio_remove_devices(tctx);
5978+ cancel_work_sync(&tctx->kick_vqs);
5979+
5980+ /* destroy workqueues */
5981+ destroy_workqueue(tctx->kick_wq);
5982+ destroy_workqueue(tctx->check_wq);
5983+
5984+ /* notify remote that shared area goes away */
5985+ trusty_virtio_stop(tctx, tctx->shared_id, tctx->shared_sz);
5986+
5987+ /* free shared area */
5988+ ret = trusty_reclaim_memory(tctx->dev->parent, tctx->shared_id,
5989+ &tctx->shared_sg, 1);
5990+ if (WARN_ON(ret)) {
5991+		dev_err(tctx->dev, "trusty_reclaim_memory failed: %d 0x%llx\n",
5992+			ret, tctx->shared_id);
5993+		/*
5994+		 * It is not safe to free this memory if trusty_reclaim_memory
5995+ * fails. Leak it in that case.
5996+ */
5997+ } else {
5998+ free_pages_exact(tctx->shared_va, tctx->shared_sz);
5999+ }
6000+
6001+ /* free context */
6002+ kfree(tctx);
6003+ return 0;
6004+}
6005+
6006+static const struct of_device_id trusty_of_match[] = {
6007+ {
6008+ .compatible = "android,trusty-virtio-v1",
6009+ },
6010+ {},
6011+};
6012+
6013+MODULE_DEVICE_TABLE(of, trusty_of_match);
6014+
6015+static struct platform_driver trusty_virtio_driver = {
6016+ .probe = trusty_virtio_probe,
6017+ .remove = trusty_virtio_remove,
6018+ .driver = {
6019+ .name = "trusty-virtio",
6020+ .of_match_table = trusty_of_match,
6021+ },
6022+};
6023+
6024+module_platform_driver(trusty_virtio_driver);
6025+
6026+MODULE_LICENSE("GPL v2");
6027+MODULE_DESCRIPTION("Trusty virtio driver");
6028+/*
6029+ * TODO(b/168322325): trusty-virtio and trusty-ipc should be independent.
6030+ * However, trusty-virtio is not completely generic and is aware of trusty-ipc.
6031+ * See header includes. Particularly, trusty-virtio.ko can't be loaded before
6032+ * trusty-ipc.ko.
6033+ */
6034+MODULE_SOFTDEP("pre: trusty-ipc");
6035diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
6036new file mode 100644
6037index 000000000000..265eab52aea0
6038--- /dev/null
6039+++ b/drivers/trusty/trusty.c
6040@@ -0,0 +1,981 @@
6041+// SPDX-License-Identifier: GPL-2.0-only
6042+/*
6043+ * Copyright (C) 2013 Google, Inc.
6044+ */
6045+
6046+#include <linux/delay.h>
6047+#include <linux/module.h>
6048+#include <linux/of.h>
6049+#include <linux/of_platform.h>
6050+#include <linux/platform_device.h>
6051+#include <linux/slab.h>
6052+#include <linux/stat.h>
6053+#include <linux/string.h>
6054+#include <linux/trusty/arm_ffa.h>
6055+#include <linux/trusty/smcall.h>
6056+#include <linux/trusty/sm_err.h>
6057+#include <linux/trusty/trusty.h>
6058+
6059+#include <linux/scatterlist.h>
6060+#include <linux/dma-mapping.h>
6061+
6062+#include "trusty-smc.h"
6063+
6064+struct trusty_state;
6065+static struct platform_driver trusty_driver;
6066+
6067+struct trusty_work {
6068+ struct trusty_state *ts;
6069+ struct work_struct work;
6070+};
6071+
6072+struct trusty_state {
6073+ struct mutex smc_lock;
6074+ struct atomic_notifier_head notifier;
6075+ struct completion cpu_idle_completion;
6076+ char *version_str;
6077+ u32 api_version;
6078+ bool trusty_panicked;
6079+ struct device *dev;
6080+ struct workqueue_struct *nop_wq;
6081+ struct trusty_work __percpu *nop_works;
6082+ struct list_head nop_queue;
6083+ spinlock_t nop_lock; /* protects nop_queue */
6084+ struct device_dma_parameters dma_parms;
6085+ void *ffa_tx;
6086+ void *ffa_rx;
6087+ u16 ffa_local_id;
6088+ u16 ffa_remote_id;
6089+ struct mutex share_memory_msg_lock; /* protects share_memory_msg */
6090+};
6091+
6092+static inline unsigned long smc(unsigned long r0, unsigned long r1,
6093+ unsigned long r2, unsigned long r3)
6094+{
6095+ return trusty_smc8(r0, r1, r2, r3, 0, 0, 0, 0).r0;
6096+}
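+
+/*
+ * smc() forwards only r0-r3 and zeroes r4-r7, returning r0 of the result
+ * register set. For example (a sketch; the real callers below add locking
+ * and retry handling):
+ *
+ *   long r = smc(SMC_FC_API_VERSION, TRUSTY_API_VERSION_CURRENT, 0, 0);
+ */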
6097+
6098+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
6099+{
6100+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6101+
6102+ if (WARN_ON(!s))
6103+ return SM_ERR_INVALID_PARAMETERS;
6104+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
6105+ return SM_ERR_INVALID_PARAMETERS;
6106+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
6107+ return SM_ERR_INVALID_PARAMETERS;
6108+
6109+ return smc(smcnr, a0, a1, a2);
6110+}
6111+EXPORT_SYMBOL(trusty_fast_call32);
6112+
6113+#ifdef CONFIG_64BIT
6114+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
6115+{
6116+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6117+
6118+ if (WARN_ON(!s))
6119+ return SM_ERR_INVALID_PARAMETERS;
6120+ if (WARN_ON(!SMC_IS_FASTCALL(smcnr)))
6121+ return SM_ERR_INVALID_PARAMETERS;
6122+ if (WARN_ON(!SMC_IS_SMC64(smcnr)))
6123+ return SM_ERR_INVALID_PARAMETERS;
6124+
6125+ return smc(smcnr, a0, a1, a2);
6126+}
6127+EXPORT_SYMBOL(trusty_fast_call64);
6128+#endif
6129+
6130+static unsigned long trusty_std_call_inner(struct device *dev,
6131+ unsigned long smcnr,
6132+ unsigned long a0, unsigned long a1,
6133+ unsigned long a2)
6134+{
6135+ unsigned long ret;
6136+ int retry = 5;
6137+
6138+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
6139+ __func__, smcnr, a0, a1, a2);
6140+ while (true) {
6141+ ret = smc(smcnr, a0, a1, a2);
6142+ while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
6143+ ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
6144+ if ((int)ret != SM_ERR_BUSY || !retry)
6145+ break;
6146+
6147+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
6148+ __func__, smcnr, a0, a1, a2);
6149+ retry--;
6150+ }
6151+
6152+ return ret;
6153+}
6154+
6155+static unsigned long trusty_std_call_helper(struct device *dev,
6156+ unsigned long smcnr,
6157+ unsigned long a0, unsigned long a1,
6158+ unsigned long a2)
6159+{
6160+ unsigned long ret;
6161+ int sleep_time = 1;
6162+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6163+
6164+ while (true) {
6165+ local_irq_disable();
6166+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
6167+ NULL);
6168+ ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
6169+ if (ret == SM_ERR_PANIC) {
6170+ s->trusty_panicked = true;
6171+ if (IS_ENABLED(CONFIG_TRUSTY_CRASH_IS_PANIC))
6172+ panic("trusty crashed");
6173+ else
6174+ WARN_ONCE(1, "trusty crashed");
6175+ }
6176+
6177+ atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
6178+ NULL);
6179+ if (ret == SM_ERR_INTERRUPTED) {
6180+ /*
6181+ * Make sure this cpu will eventually re-enter trusty
6182+ * even if the std_call resumes on another cpu.
6183+ */
6184+ trusty_enqueue_nop(dev, NULL);
6185+ }
6186+ local_irq_enable();
6187+
6188+ if ((int)ret != SM_ERR_BUSY)
6189+ break;
6190+
6191+ if (sleep_time == 256)
6192+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
6193+ __func__, smcnr, a0, a1, a2);
6194+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
6195+ __func__, smcnr, a0, a1, a2, sleep_time);
6196+
6197+ msleep(sleep_time);
6198+ if (sleep_time < 1000)
6199+ sleep_time <<= 1;
6200+
6201+ dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
6202+ __func__, smcnr, a0, a1, a2);
6203+ }
6204+
6205+ if (sleep_time > 256)
6206+ dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
6207+ __func__, smcnr, a0, a1, a2);
6208+
6209+ return ret;
6210+}
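+
+/*
+ * On SM_ERR_BUSY the helper above backs off exponentially: it sleeps 1, 2,
+ * 4, ... ms, doubling until the delay passes 1000 ms (settling at 1024 ms).
+ * It warns once when the wait first reaches 256 ms and, if the wait grew
+ * past 256 ms, logs again once the busy condition clears.
+ */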
6211+
6212+static void trusty_std_call_cpu_idle(struct trusty_state *s)
6213+{
6214+ int ret;
6215+
6216+ ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
6217+ if (!ret) {
6218+ dev_warn(s->dev,
6219+ "%s: timed out waiting for cpu idle to clear, retry anyway\n",
6220+ __func__);
6221+ }
6222+}
6223+
6224+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
6225+{
6226+ int ret;
6227+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6228+
6229+ if (WARN_ON(SMC_IS_FASTCALL(smcnr)))
6230+ return SM_ERR_INVALID_PARAMETERS;
6231+
6232+ if (WARN_ON(SMC_IS_SMC64(smcnr)))
6233+ return SM_ERR_INVALID_PARAMETERS;
6234+
6235+ if (s->trusty_panicked) {
6236+ /*
6237+ * Avoid calling the notifiers if trusty has panicked as they
6238+ * can trigger more calls.
6239+ */
6240+ return SM_ERR_PANIC;
6241+ }
6242+
6243+ if (smcnr != SMC_SC_NOP) {
6244+ mutex_lock(&s->smc_lock);
6245+ reinit_completion(&s->cpu_idle_completion);
6246+ }
6247+
6248+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
6249+ __func__, smcnr, a0, a1, a2);
6250+
6251+ ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
6252+ while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
6253+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
6254+ __func__, smcnr, a0, a1, a2);
6255+ if (ret == SM_ERR_CPU_IDLE)
6256+ trusty_std_call_cpu_idle(s);
6257+ ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
6258+ }
6259+ dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
6260+ __func__, smcnr, a0, a1, a2, ret);
6261+
6262+ if (smcnr == SMC_SC_NOP)
6263+ complete(&s->cpu_idle_completion);
6264+ else
6265+ mutex_unlock(&s->smc_lock);
6266+
6267+ return ret;
6268+}
6269+EXPORT_SYMBOL(trusty_std_call32);
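+
+/*
+ * Note on concurrency: SMC_SC_NOP deliberately bypasses smc_lock so nop
+ * calls can run on several CPUs at once, while all other std calls are
+ * serialized. A std call parked with SM_ERR_CPU_IDLE waits on
+ * cpu_idle_completion, which the next completing nop call signals.
+ */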
6270+
6271+int trusty_share_memory(struct device *dev, u64 *id,
6272+ struct scatterlist *sglist, unsigned int nents,
6273+ pgprot_t pgprot)
6274+{
6275+ return trusty_transfer_memory(dev, id, sglist, nents, pgprot, 0,
6276+ false);
6277+}
6278+EXPORT_SYMBOL(trusty_share_memory);
6279+
6280+int trusty_transfer_memory(struct device *dev, u64 *id,
6281+ struct scatterlist *sglist, unsigned int nents,
6282+ pgprot_t pgprot, u64 tag, bool lend)
6283+{
6284+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6285+ int ret;
6286+ struct ns_mem_page_info pg_inf;
6287+ struct scatterlist *sg;
6288+ size_t count;
6289+ size_t i;
6290+ size_t len;
6291+ u64 ffa_handle = 0;
6292+ size_t total_len;
6293+ size_t endpoint_count = 1;
6294+ struct ffa_mtd *mtd = s->ffa_tx;
6295+ size_t comp_mrd_offset = offsetof(struct ffa_mtd, emad[endpoint_count]);
6296+ struct ffa_comp_mrd *comp_mrd = s->ffa_tx + comp_mrd_offset;
6297+ struct ffa_cons_mrd *cons_mrd = comp_mrd->address_range_array;
6298+ size_t cons_mrd_offset = (void *)cons_mrd - s->ffa_tx;
6299+ struct smc_ret8 smc_ret;
6300+ u32 cookie_low;
6301+ u32 cookie_high;
6302+
6303+ if (WARN_ON(dev->driver != &trusty_driver.driver))
6304+ return -EINVAL;
6305+
6306+ if (WARN_ON(nents < 1))
6307+ return -EINVAL;
6308+
6309+ if (nents != 1 && s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6310+ dev_err(s->dev, "%s: old trusty version does not support non-contiguous memory objects\n",
6311+ __func__);
6312+ return -EOPNOTSUPP;
6313+ }
6314+
6315+ count = dma_map_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6316+ if (count != nents) {
6317+ dev_err(s->dev, "failed to dma map sg_table\n");
6318+ return -EINVAL;
6319+ }
6320+
6321+ sg = sglist;
6322+ ret = trusty_encode_page_info(&pg_inf, phys_to_page(sg_dma_address(sg)),
6323+ pgprot);
6324+ if (ret) {
6325+ dev_err(s->dev, "%s: trusty_encode_page_info failed\n",
6326+ __func__);
6327+ goto err_encode_page_info;
6328+ }
6329+
6330+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6331+ *id = pg_inf.compat_attr;
6332+ return 0;
6333+ }
6334+
6335+ len = 0;
6336+ for_each_sg(sglist, sg, nents, i)
6337+ len += sg_dma_len(sg);
6338+
6339+ mutex_lock(&s->share_memory_msg_lock);
6340+
6341+ mtd->sender_id = s->ffa_local_id;
6342+ mtd->memory_region_attributes = pg_inf.ffa_mem_attr;
6343+ mtd->reserved_3 = 0;
6344+ mtd->flags = 0;
6345+ mtd->handle = 0;
6346+ mtd->tag = tag;
6347+ mtd->reserved_24_27 = 0;
6348+ mtd->emad_count = endpoint_count;
6349+ for (i = 0; i < endpoint_count; i++) {
6350+ struct ffa_emad *emad = &mtd->emad[i];
6351+ /* TODO: support stream ids */
6352+ emad->mapd.endpoint_id = s->ffa_remote_id;
6353+ emad->mapd.memory_access_permissions = pg_inf.ffa_mem_perm;
6354+ emad->mapd.flags = 0;
6355+ emad->comp_mrd_offset = comp_mrd_offset;
6356+ emad->reserved_8_15 = 0;
6357+ }
6358+ comp_mrd->total_page_count = len / PAGE_SIZE;
6359+ comp_mrd->address_range_count = nents;
6360+ comp_mrd->reserved_8_15 = 0;
6361+
6362+ total_len = cons_mrd_offset + nents * sizeof(*cons_mrd);
6363+ sg = sglist;
6364+ while (count) {
6365+ size_t lcount =
6366+ min_t(size_t, count, (PAGE_SIZE - cons_mrd_offset) /
6367+ sizeof(*cons_mrd));
6368+ size_t fragment_len = lcount * sizeof(*cons_mrd) +
6369+ cons_mrd_offset;
6370+
6371+ for (i = 0; i < lcount; i++) {
6372+ cons_mrd[i].address = sg_dma_address(sg);
6373+ cons_mrd[i].page_count = sg_dma_len(sg) / PAGE_SIZE;
6374+ cons_mrd[i].reserved_12_15 = 0;
6375+ sg = sg_next(sg);
6376+ }
6377+ count -= lcount;
6378+ if (cons_mrd_offset) {
6379+ u32 smc = lend ? SMC_FC_FFA_MEM_LEND :
6380+ SMC_FC_FFA_MEM_SHARE;
6381+ /* First fragment */
6382+ smc_ret = trusty_smc8(smc, total_len,
6383+ fragment_len, 0, 0, 0, 0, 0);
6384+ } else {
6385+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_FRAG_TX,
6386+ cookie_low, cookie_high,
6387+ fragment_len, 0, 0, 0, 0);
6388+ }
6389+ if (smc_ret.r0 == SMC_FC_FFA_MEM_FRAG_RX) {
6390+ cookie_low = smc_ret.r1;
6391+ cookie_high = smc_ret.r2;
6392+ dev_dbg(s->dev, "cookie %x %x", cookie_low,
6393+ cookie_high);
6394+ if (!count) {
6395+ /*
6396+ * We have sent all our descriptors. Expected
6397+ * SMC_FC_FFA_SUCCESS, not a request to send
6398+ * another fragment.
6399+ */
6400+ dev_err(s->dev, "%s: fragment_len %zd/%zd, unexpected SMC_FC_FFA_MEM_FRAG_RX\n",
6401+ __func__, fragment_len, total_len);
6402+ ret = -EIO;
6403+ break;
6404+ }
6405+ } else if (smc_ret.r0 == SMC_FC_FFA_SUCCESS) {
6406+ ffa_handle = smc_ret.r2 | (u64)smc_ret.r3 << 32;
6407+ dev_dbg(s->dev, "%s: fragment_len %zu/%zu, got handle 0x%llx\n",
6408+ __func__, fragment_len, total_len,
6409+ ffa_handle);
6410+ if (count) {
6411+ /*
6412+ * We have not sent all our descriptors.
6413+ * Expected SMC_FC_FFA_MEM_FRAG_RX not
6414+ * SMC_FC_FFA_SUCCESS.
6415+ */
6416+ dev_err(s->dev, "%s: fragment_len %zu/%zu, unexpected SMC_FC_FFA_SUCCESS, count %zu != 0\n",
6417+ __func__, fragment_len, total_len,
6418+ count);
6419+ ret = -EIO;
6420+ break;
6421+ }
6422+ } else {
6423+ dev_err(s->dev, "%s: fragment_len %zu/%zu, SMC_FC_FFA_MEM_SHARE failed 0x%lx 0x%lx 0x%lx",
6424+ __func__, fragment_len, total_len,
6425+ smc_ret.r0, smc_ret.r1, smc_ret.r2);
6426+ ret = -EIO;
6427+ break;
6428+ }
6429+
6430+ cons_mrd = s->ffa_tx;
6431+ cons_mrd_offset = 0;
6432+ }
6433+
6434+ mutex_unlock(&s->share_memory_msg_lock);
6435+
6436+ if (!ret) {
6437+ *id = ffa_handle;
6438+ dev_dbg(s->dev, "%s: done\n", __func__);
6439+ return 0;
6440+ }
6441+
6442+ dev_err(s->dev, "%s: failed %d", __func__, ret);
6443+
6444+err_encode_page_info:
6445+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6446+ return ret;
6447+}
6448+EXPORT_SYMBOL(trusty_transfer_memory);
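+
+/*
+ * Worked example of the fragmentation math above, assuming 4 KiB pages and
+ * the single-emad layout used here: ffa_mtd (32 bytes) plus one ffa_emad
+ * (16 bytes) puts the ffa_comp_mrd at offset 48, so the first ffa_cons_mrd
+ * starts at byte 64 and the first fragment carries (4096 - 64) / 16 = 252
+ * address ranges. Follow-on SMC_FC_FFA_MEM_FRAG_TX fragments restart at
+ * offset 0 and carry 4096 / 16 = 256 ranges each.
+ */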
6449+
6450+/*
6451+ * trusty_share_memory_compat - trusty_share_memory wrapper for old apis
6452+ *
6453+ * Call trusty_share_memory and filter out memory attributes if trusty version
6454+ * is old. Used by clients that used to pass just a physical address to trusty
6455+ * instead of a physical address plus memory attributes value.
6456+ */
6457+int trusty_share_memory_compat(struct device *dev, u64 *id,
6458+ struct scatterlist *sglist, unsigned int nents,
6459+ pgprot_t pgprot)
6460+{
6461+ int ret;
6462+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6463+
6464+ ret = trusty_share_memory(dev, id, sglist, nents, pgprot);
6465+ if (!ret && s->api_version < TRUSTY_API_VERSION_PHYS_MEM_OBJ)
6466+ *id &= 0x0000FFFFFFFFF000ull;
6467+
6468+ return ret;
6469+}
6470+EXPORT_SYMBOL(trusty_share_memory_compat);
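+
+/*
+ * The 0x0000FFFFFFFFF000 mask keeps bits [47:12], i.e. the 4 KiB-aligned
+ * physical address, stripping the low bits that the pre-mem-obj encoding
+ * packs into [11:0] (as this mask implies) along with anything above bit 47,
+ * for Trusty versions that expect a bare physical address.
+ */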
6471+
6472+int trusty_reclaim_memory(struct device *dev, u64 id,
6473+ struct scatterlist *sglist, unsigned int nents)
6474+{
6475+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6476+ int ret = 0;
6477+ struct smc_ret8 smc_ret;
6478+
6479+ if (WARN_ON(dev->driver != &trusty_driver.driver))
6480+ return -EINVAL;
6481+
6482+ if (WARN_ON(nents < 1))
6483+ return -EINVAL;
6484+
6485+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ) {
6486+ if (nents != 1) {
6487+ dev_err(s->dev, "%s: not supported\n", __func__);
6488+ return -EOPNOTSUPP;
6489+ }
6490+
6491+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6492+
6493+ dev_dbg(s->dev, "%s: done\n", __func__);
6494+ return 0;
6495+ }
6496+
6497+ mutex_lock(&s->share_memory_msg_lock);
6498+
6499+ smc_ret = trusty_smc8(SMC_FC_FFA_MEM_RECLAIM, (u32)id, id >> 32, 0, 0,
6500+ 0, 0, 0);
6501+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6502+ dev_err(s->dev, "%s: SMC_FC_FFA_MEM_RECLAIM failed 0x%lx 0x%lx 0x%lx",
6503+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6504+ if (smc_ret.r0 == SMC_FC_FFA_ERROR &&
6505+ smc_ret.r2 == FFA_ERROR_DENIED)
6506+ ret = -EBUSY;
6507+ else
6508+ ret = -EIO;
6509+ }
6510+
6511+ mutex_unlock(&s->share_memory_msg_lock);
6512+
6513+ if (ret != 0)
6514+ return ret;
6515+
6516+ dma_unmap_sg(dev, sglist, nents, DMA_BIDIRECTIONAL);
6517+
6518+ dev_dbg(s->dev, "%s: done\n", __func__);
6519+ return 0;
6520+}
6521+EXPORT_SYMBOL(trusty_reclaim_memory);
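+
+/*
+ * A hedged sketch of the share/reclaim pairing as trusty-virtio above uses
+ * it (error handling omitted; trusty_dev and buf are illustrative, buf being
+ * a page-sized, page-aligned buffer):
+ *
+ *   struct scatterlist sg;
+ *   trusty_shared_mem_id_t id;
+ *
+ *   sg_init_one(&sg, buf, PAGE_SIZE);
+ *   trusty_share_memory(trusty_dev, &id, &sg, 1, PAGE_KERNEL);
+ *   // ... Trusty uses the buffer ...
+ *   trusty_reclaim_memory(trusty_dev, id, &sg, 1);
+ */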
6522+
6523+int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
6524+{
6525+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6526+
6527+ return atomic_notifier_chain_register(&s->notifier, n);
6528+}
6529+EXPORT_SYMBOL(trusty_call_notifier_register);
6530+
6531+int trusty_call_notifier_unregister(struct device *dev,
6532+ struct notifier_block *n)
6533+{
6534+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6535+
6536+ return atomic_notifier_chain_unregister(&s->notifier, n);
6537+}
6538+EXPORT_SYMBOL(trusty_call_notifier_unregister);
6539+
6540+static int trusty_remove_child(struct device *dev, void *data)
6541+{
6542+ platform_device_unregister(to_platform_device(dev));
6543+ return 0;
6544+}
6545+
6546+static ssize_t trusty_version_show(struct device *dev,
6547+ struct device_attribute *attr, char *buf)
6548+{
6549+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6550+
6551+ return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str ?: "unknown");
6552+}
6553+
6554+static DEVICE_ATTR(trusty_version, 0400, trusty_version_show, NULL);
6555+
6556+static struct attribute *trusty_attrs[] = {
6557+ &dev_attr_trusty_version.attr,
6558+ NULL,
6559+};
6560+ATTRIBUTE_GROUPS(trusty);
6561+
6562+const char *trusty_version_str_get(struct device *dev)
6563+{
6564+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6565+
6566+ return s->version_str;
6567+}
6568+EXPORT_SYMBOL(trusty_version_str_get);
6569+
6570+static int trusty_init_msg_buf(struct trusty_state *s, struct device *dev)
6571+{
6572+ phys_addr_t tx_paddr;
6573+ phys_addr_t rx_paddr;
6574+ int ret;
6575+ struct smc_ret8 smc_ret;
6576+
6577+ if (s->api_version < TRUSTY_API_VERSION_MEM_OBJ)
6578+ return 0;
6579+
6580+ /* Get supported FF-A version and check if it is compatible */
6581+ smc_ret = trusty_smc8(SMC_FC_FFA_VERSION, FFA_CURRENT_VERSION, 0, 0,
6582+ 0, 0, 0, 0);
6583+ if (FFA_VERSION_TO_MAJOR(smc_ret.r0) != FFA_CURRENT_VERSION_MAJOR) {
6584+ dev_err(s->dev,
6585+ "%s: Unsupported FF-A version 0x%lx, expected 0x%x\n",
6586+ __func__, smc_ret.r0, FFA_CURRENT_VERSION);
6587+ ret = -EIO;
6588+ goto err_version;
6589+ }
6590+
6591+ /* Check that SMC_FC_FFA_MEM_SHARE is implemented */
6592+ smc_ret = trusty_smc8(SMC_FC_FFA_FEATURES, SMC_FC_FFA_MEM_SHARE, 0, 0,
6593+ 0, 0, 0, 0);
6594+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6595+ dev_err(s->dev,
6596+ "%s: SMC_FC_FFA_FEATURES(SMC_FC_FFA_MEM_SHARE) failed 0x%lx 0x%lx 0x%lx\n",
6597+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6598+ ret = -EIO;
6599+ goto err_features;
6600+ }
6601+
6602+ /*
6603+ * Set FF-A endpoint IDs.
6604+ *
6605+ * Hardcode 0x8000 for the secure os.
6606+ * TODO: Use FF-A call or device tree to configure this dynamically
6607+ */
6608+ smc_ret = trusty_smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
6609+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6610+ dev_err(s->dev,
6611+ "%s: SMC_FC_FFA_ID_GET failed 0x%lx 0x%lx 0x%lx\n",
6612+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6613+ ret = -EIO;
6614+ goto err_id_get;
6615+ }
6616+
6617+ s->ffa_local_id = smc_ret.r2;
6618+ s->ffa_remote_id = 0x8000;
6619+
6620+ s->ffa_tx = kmalloc(PAGE_SIZE, GFP_KERNEL);
6621+ if (!s->ffa_tx) {
6622+ ret = -ENOMEM;
6623+ goto err_alloc_tx;
6624+ }
6625+ tx_paddr = virt_to_phys(s->ffa_tx);
6626+ if (WARN_ON(tx_paddr & (PAGE_SIZE - 1))) {
6627+ ret = -EINVAL;
6628+ goto err_unaligned_tx_buf;
6629+ }
6630+
6631+ s->ffa_rx = kmalloc(PAGE_SIZE, GFP_KERNEL);
6632+ if (!s->ffa_rx) {
6633+ ret = -ENOMEM;
6634+ goto err_alloc_rx;
6635+ }
6636+ rx_paddr = virt_to_phys(s->ffa_rx);
6637+ if (WARN_ON(rx_paddr & (PAGE_SIZE - 1))) {
6638+ ret = -EINVAL;
6639+ goto err_unaligned_rx_buf;
6640+ }
6641+
6642+ smc_ret = trusty_smc8(SMC_FCZ_FFA_RXTX_MAP, tx_paddr, rx_paddr, 1, 0,
6643+ 0, 0, 0);
6644+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6645+ dev_err(s->dev, "%s: SMC_FCZ_FFA_RXTX_MAP failed 0x%lx 0x%lx 0x%lx\n",
6646+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6647+ ret = -EIO;
6648+ goto err_rxtx_map;
6649+ }
6650+
6651+ return 0;
6652+
6653+err_rxtx_map:
6654+err_unaligned_rx_buf:
6655+ kfree(s->ffa_rx);
6656+ s->ffa_rx = NULL;
6657+err_alloc_rx:
6658+err_unaligned_tx_buf:
6659+ kfree(s->ffa_tx);
6660+ s->ffa_tx = NULL;
6661+err_alloc_tx:
6662+err_id_get:
6663+err_features:
6664+err_version:
6665+ return ret;
6666+}
6667+
6668+static void trusty_free_msg_buf(struct trusty_state *s, struct device *dev)
6669+{
6670+ struct smc_ret8 smc_ret;
6671+
6672+ smc_ret = trusty_smc8(SMC_FC_FFA_RXTX_UNMAP, 0, 0, 0, 0, 0, 0, 0);
6673+ if (smc_ret.r0 != SMC_FC_FFA_SUCCESS) {
6674+ dev_err(s->dev, "%s: SMC_FC_FFA_RXTX_UNMAP failed 0x%lx 0x%lx 0x%lx\n",
6675+ __func__, smc_ret.r0, smc_ret.r1, smc_ret.r2);
6676+ } else {
6677+ kfree(s->ffa_rx);
6678+ kfree(s->ffa_tx);
6679+ }
6680+}
6681+
6682+static void trusty_init_version(struct trusty_state *s, struct device *dev)
6683+{
6684+ int ret;
6685+ int i;
6686+ int version_str_len;
6687+
6688+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
6689+ if (ret <= 0)
6690+ goto err_get_size;
6691+
6692+ version_str_len = ret;
6693+
6694+	s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
+	if (!s->version_str) {
+		ret = -ENOMEM;
+		goto err_get_size;
+	}
+
6695+ for (i = 0; i < version_str_len; i++) {
6696+ ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
6697+ if (ret < 0)
6698+ goto err_get_char;
6699+ s->version_str[i] = ret;
6700+ }
6701+ s->version_str[i] = '\0';
6702+
6703+ dev_info(dev, "trusty version: %s\n", s->version_str);
6704+ return;
6705+
6706+err_get_char:
6707+ kfree(s->version_str);
6708+ s->version_str = NULL;
6709+err_get_size:
6710+ dev_err(dev, "failed to get version: %d\n", ret);
6711+}
6712+
6713+u32 trusty_get_api_version(struct device *dev)
6714+{
6715+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6716+
6717+ return s->api_version;
6718+}
6719+EXPORT_SYMBOL(trusty_get_api_version);
6720+
6721+bool trusty_get_panic_status(struct device *dev)
6722+{
6723+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6724+ if (WARN_ON(dev->driver != &trusty_driver.driver))
6725+ return false;
6726+ return s->trusty_panicked;
6727+}
6728+EXPORT_SYMBOL(trusty_get_panic_status);
6729+
6730+static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
6731+{
6732+ u32 api_version;
6733+
6734+ api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
6735+ TRUSTY_API_VERSION_CURRENT, 0, 0);
6736+ if (api_version == SM_ERR_UNDEFINED_SMC)
6737+ api_version = 0;
6738+
6739+ if (api_version > TRUSTY_API_VERSION_CURRENT) {
6740+ dev_err(dev, "unsupported api version %u > %u\n",
6741+ api_version, TRUSTY_API_VERSION_CURRENT);
6742+ return -EINVAL;
6743+ }
6744+
6745+ dev_info(dev, "selected api version: %u (requested %u)\n",
6746+ api_version, TRUSTY_API_VERSION_CURRENT);
6747+ s->api_version = api_version;
6748+
6749+ return 0;
6750+}
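+
+/*
+ * Version negotiation in brief: the kernel offers TRUSTY_API_VERSION_CURRENT
+ * and Trusty answers with the highest version it supports, at most the
+ * offer. SM_ERR_UNDEFINED_SMC from a pre-versioning Trusty is treated as
+ * version 0; only an answer above the offer is rejected.
+ */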
6751+
6752+static bool dequeue_nop(struct trusty_state *s, u32 *args)
6753+{
6754+ unsigned long flags;
6755+ struct trusty_nop *nop = NULL;
6756+
6757+ spin_lock_irqsave(&s->nop_lock, flags);
6758+ if (!list_empty(&s->nop_queue)) {
6759+ nop = list_first_entry(&s->nop_queue,
6760+ struct trusty_nop, node);
6761+ list_del_init(&nop->node);
6762+ args[0] = nop->args[0];
6763+ args[1] = nop->args[1];
6764+ args[2] = nop->args[2];
6765+ } else {
6766+ args[0] = 0;
6767+ args[1] = 0;
6768+ args[2] = 0;
6769+ }
6770+ spin_unlock_irqrestore(&s->nop_lock, flags);
6771+ return nop;
6772+}
6773+
6774+static void locked_nop_work_func(struct work_struct *work)
6775+{
6776+ int ret;
6777+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
6778+ struct trusty_state *s = tw->ts;
6779+
6780+ ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
6781+ if (ret != 0)
6782+ dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d",
6783+ __func__, ret);
6784+
6785+ dev_dbg(s->dev, "%s: done\n", __func__);
6786+}
6787+
6788+static void nop_work_func(struct work_struct *work)
6789+{
6790+ int ret;
6791+ bool next;
6792+ u32 args[3];
6793+ u32 last_arg0;
6794+ struct trusty_work *tw = container_of(work, struct trusty_work, work);
6795+ struct trusty_state *s = tw->ts;
6796+
6797+ dequeue_nop(s, args);
6798+ do {
6799+ dev_dbg(s->dev, "%s: %x %x %x\n",
6800+ __func__, args[0], args[1], args[2]);
6801+
6802+ last_arg0 = args[0];
6803+ ret = trusty_std_call32(s->dev, SMC_SC_NOP,
6804+ args[0], args[1], args[2]);
6805+
6806+ next = dequeue_nop(s, args);
6807+
6808+ if (ret == SM_ERR_NOP_INTERRUPTED) {
6809+ next = true;
6810+ } else if (ret != SM_ERR_NOP_DONE) {
6811+ dev_err(s->dev, "%s: SMC_SC_NOP %x failed %d",
6812+ __func__, last_arg0, ret);
6813+ if (last_arg0) {
6814+ /*
6815+ * Don't break out of the loop if a non-default
6816+ * nop-handler returns an error.
6817+ */
6818+ next = true;
6819+ }
6820+ }
6821+ } while (next);
6822+
6823+ dev_dbg(s->dev, "%s: done\n", __func__);
6824+}
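+
+/*
+ * Each CPU owns one trusty_work; nop_work_func drains the shared nop_queue,
+ * re-entering Trusty once per dequeued nop (or with zeroed args when the
+ * queue is empty) and looping while Trusty reports SM_ERR_NOP_INTERRUPTED,
+ * i.e. while secure-side work remains pending.
+ */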
6825+
6826+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
6827+{
6828+ unsigned long flags;
6829+ struct trusty_work *tw;
6830+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6831+
6832+ preempt_disable();
6833+ tw = this_cpu_ptr(s->nop_works);
6834+ if (nop) {
6835+ WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);
6836+
6837+ spin_lock_irqsave(&s->nop_lock, flags);
6838+ if (list_empty(&nop->node))
6839+ list_add_tail(&nop->node, &s->nop_queue);
6840+ spin_unlock_irqrestore(&s->nop_lock, flags);
6841+ }
6842+ queue_work(s->nop_wq, &tw->work);
6843+ preempt_enable();
6844+}
6845+EXPORT_SYMBOL(trusty_enqueue_nop);
6846+
6847+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
6848+{
6849+ unsigned long flags;
6850+ struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));
6851+
6852+ if (WARN_ON(!nop))
6853+ return;
6854+
6855+ spin_lock_irqsave(&s->nop_lock, flags);
6856+ if (!list_empty(&nop->node))
6857+ list_del_init(&nop->node);
6858+ spin_unlock_irqrestore(&s->nop_lock, flags);
6859+}
6860+EXPORT_SYMBOL(trusty_dequeue_nop);
6861+
6862+static int trusty_probe(struct platform_device *pdev)
6863+{
6864+ int ret;
6865+ unsigned int cpu;
6866+ work_func_t work_func;
6867+ struct trusty_state *s;
6868+ struct device_node *node = pdev->dev.of_node;
6869+
6870+ if (!node) {
6871+ dev_err(&pdev->dev, "of_node required\n");
6872+ return -EINVAL;
6873+ }
6874+
6875+ s = kzalloc(sizeof(*s), GFP_KERNEL);
6876+ if (!s) {
6877+ ret = -ENOMEM;
6878+ goto err_allocate_state;
6879+ }
6880+
6881+ s->dev = &pdev->dev;
6882+ spin_lock_init(&s->nop_lock);
6883+ INIT_LIST_HEAD(&s->nop_queue);
6884+ mutex_init(&s->smc_lock);
6885+ mutex_init(&s->share_memory_msg_lock);
6886+ ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
6887+ init_completion(&s->cpu_idle_completion);
6888+
6889+ s->dev->dma_parms = &s->dma_parms;
6890+ dma_set_max_seg_size(s->dev, 0xfffff000); /* dma_parms limit */
6891+ /*
6892+ * Set dma mask to 48 bits. This is the current limit of
6893+ * trusty_encode_page_info.
6894+ */
6895+ dma_coerce_mask_and_coherent(s->dev, DMA_BIT_MASK(48));
6896+
6897+ platform_set_drvdata(pdev, s);
6898+
6899+ trusty_init_version(s, &pdev->dev);
6900+
6901+ ret = trusty_init_api_version(s, &pdev->dev);
6902+ if (ret < 0)
6903+ goto err_api_version;
6904+
6905+ ret = trusty_init_msg_buf(s, &pdev->dev);
6906+ if (ret < 0)
6907+ goto err_init_msg_buf;
6908+
6909+ s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0);
6910+ if (!s->nop_wq) {
6911+ ret = -ENODEV;
6912+		dev_err(&pdev->dev, "Failed to create trusty-nop-wq\n");
6913+ goto err_create_nop_wq;
6914+ }
6915+
6916+ s->nop_works = alloc_percpu(struct trusty_work);
6917+ if (!s->nop_works) {
6918+ ret = -ENOMEM;
6919+ dev_err(&pdev->dev, "Failed to allocate works\n");
6920+ goto err_alloc_works;
6921+ }
6922+
6923+ if (s->api_version < TRUSTY_API_VERSION_SMP)
6924+ work_func = locked_nop_work_func;
6925+ else
6926+ work_func = nop_work_func;
6927+
6928+ for_each_possible_cpu(cpu) {
6929+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6930+
6931+ tw->ts = s;
6932+ INIT_WORK(&tw->work, work_func);
6933+ }
6934+
6935+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
6936+ if (ret < 0) {
6937+ dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
6938+ goto err_add_children;
6939+ }
6940+
6941+ return 0;
6942+
6943+err_add_children:
6944+ for_each_possible_cpu(cpu) {
6945+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6946+
6947+ flush_work(&tw->work);
6948+ }
6949+ free_percpu(s->nop_works);
6950+err_alloc_works:
6951+ destroy_workqueue(s->nop_wq);
6952+err_create_nop_wq:
6953+ trusty_free_msg_buf(s, &pdev->dev);
6954+err_init_msg_buf:
6955+err_api_version:
6956+ s->dev->dma_parms = NULL;
6957+ kfree(s->version_str);
6958+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
6959+ mutex_destroy(&s->share_memory_msg_lock);
6960+ mutex_destroy(&s->smc_lock);
6961+ kfree(s);
6962+err_allocate_state:
6963+ return ret;
6964+}
6965+
6966+static int trusty_remove(struct platform_device *pdev)
6967+{
6968+ unsigned int cpu;
6969+ struct trusty_state *s = platform_get_drvdata(pdev);
6970+
6971+ device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
6972+
6973+ for_each_possible_cpu(cpu) {
6974+ struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);
6975+
6976+ flush_work(&tw->work);
6977+ }
6978+ free_percpu(s->nop_works);
6979+ destroy_workqueue(s->nop_wq);
6980+
6981+ mutex_destroy(&s->share_memory_msg_lock);
6982+ mutex_destroy(&s->smc_lock);
6983+ trusty_free_msg_buf(s, &pdev->dev);
6984+ s->dev->dma_parms = NULL;
6985+ kfree(s->version_str);
6986+ kfree(s);
6987+ return 0;
6988+}
6989+
6990+static const struct of_device_id trusty_of_match[] = {
6991+ { .compatible = "android,trusty-smc-v1", },
6992+ {},
6993+};
6994+
6995+MODULE_DEVICE_TABLE(of, trusty_of_match);
6996+
6997+static struct platform_driver trusty_driver = {
6998+ .probe = trusty_probe,
6999+ .remove = trusty_remove,
7000+ .driver = {
7001+ .name = "trusty",
7002+ .of_match_table = trusty_of_match,
7003+ .dev_groups = trusty_groups,
7004+ },
7005+};
7006+
7007+static int __init trusty_driver_init(void)
7008+{
7009+ return platform_driver_register(&trusty_driver);
7010+}
7011+
7012+static void __exit trusty_driver_exit(void)
7013+{
7014+ platform_driver_unregister(&trusty_driver);
7015+}
7016+
7017+subsys_initcall(trusty_driver_init);
7018+module_exit(trusty_driver_exit);
7019+
7020+MODULE_LICENSE("GPL v2");
7021+MODULE_DESCRIPTION("Trusty core driver");
7022diff --git a/include/linux/trusty/arm_ffa.h b/include/linux/trusty/arm_ffa.h
7023new file mode 100644
7024index 000000000000..ab7b2afb794c
7025--- /dev/null
7026+++ b/include/linux/trusty/arm_ffa.h
7027@@ -0,0 +1,590 @@
7028+/* SPDX-License-Identifier: MIT */
7029+/*
7030+ * Copyright (C) 2020 Google, Inc.
7031+ *
7032+ * Trusty and TF-A also have a copy of this header.
7033+ * Please keep the copies in sync.
7034+ */
7035+#ifndef __LINUX_TRUSTY_ARM_FFA_H
7036+#define __LINUX_TRUSTY_ARM_FFA_H
7037+
7038+/*
7039+ * Subset of Arm PSA Firmware Framework for Arm v8-A 1.0 EAC 1_0
7040+ * (https://developer.arm.com/docs/den0077/a) needed for shared memory.
7041+ */
7042+
7043+#include "smcall.h"
7044+
7045+#ifndef STATIC_ASSERT
7046+#define STATIC_ASSERT(e) _Static_assert(e, #e)
7047+#endif
7048+
7049+#define FFA_CURRENT_VERSION_MAJOR (1U)
7050+#define FFA_CURRENT_VERSION_MINOR (0U)
7051+
7052+#define FFA_VERSION_TO_MAJOR(version) ((version) >> 16)
7053+#define FFA_VERSION_TO_MINOR(version) ((version) & (0xffff))
7054+#define FFA_VERSION(major, minor) (((major) << 16) | (minor))
7055+#define FFA_CURRENT_VERSION \
7056+ FFA_VERSION(FFA_CURRENT_VERSION_MAJOR, FFA_CURRENT_VERSION_MINOR)
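+
+/*
+ * For example, FFA_VERSION(1, 0) == 0x10000, so FFA_CURRENT_VERSION is
+ * 0x10000, FFA_VERSION_TO_MAJOR(0x10000) == 1 and
+ * FFA_VERSION_TO_MINOR(0x10000) == 0.
+ */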
7057+
7058+#define SMC_ENTITY_SHARED_MEMORY 4
7059+
7060+#define SMC_FASTCALL_NR_SHARED_MEMORY(nr) \
7061+ SMC_FASTCALL_NR(SMC_ENTITY_SHARED_MEMORY, nr)
7062+#define SMC_FASTCALL64_NR_SHARED_MEMORY(nr) \
7063+ SMC_FASTCALL64_NR(SMC_ENTITY_SHARED_MEMORY, nr)
7064+
7065+/**
7066+ * typedef ffa_endpoint_id16_t - Endpoint ID
7067+ *
7068+ * The current implementation only supports VMIDs. The FF-A spec also
7069+ * supports stream endpoint IDs.
7070+ */
7071+typedef uint16_t ffa_endpoint_id16_t;
7072+
7073+/**
7074+ * struct ffa_cons_mrd - Constituent memory region descriptor
7075+ * @address:
7076+ * Start address of contiguous memory region. Must be 4K page aligned.
7077+ * @page_count:
7078+ * Number of 4K pages in region.
7079+ * @reserved_12_15:
7080+ * Reserve bytes 12-15 to pad struct size to 16 bytes.
7081+ */
7082+struct ffa_cons_mrd {
7083+ uint64_t address;
7084+ uint32_t page_count;
7085+ uint32_t reserved_12_15;
7086+};
7087+STATIC_ASSERT(sizeof(struct ffa_cons_mrd) == 16);
7088+
7089+/**
7090+ * struct ffa_comp_mrd - Composite memory region descriptor
7091+ * @total_page_count:
7092+ * Number of 4k pages in memory region. Must match sum of
7093+ * @address_range_array[].page_count.
7094+ * @address_range_count:
7095+ * Number of entries in @address_range_array.
7096+ * @reserved_8_15:
7097+ * Reserve bytes 8-15 to pad struct size to 16 byte alignment and
7098+ * make @address_range_array 16 byte aligned.
7099+ * @address_range_array:
7100+ * Array of &struct ffa_cons_mrd entries.
7101+ */
7102+struct ffa_comp_mrd {
7103+ uint32_t total_page_count;
7104+ uint32_t address_range_count;
7105+ uint64_t reserved_8_15;
7106+ struct ffa_cons_mrd address_range_array[];
7107+};
7108+STATIC_ASSERT(sizeof(struct ffa_comp_mrd) == 16);
7109+
7110+/**
7111+ * typedef ffa_mem_attr8_t - Memory region attributes
7112+ *
7113+ * * @FFA_MEM_ATTR_DEVICE_NGNRNE:
7114+ * Device-nGnRnE.
7115+ * * @FFA_MEM_ATTR_DEVICE_NGNRE:
7116+ * Device-nGnRE.
7117+ * * @FFA_MEM_ATTR_DEVICE_NGRE:
7118+ * Device-nGRE.
7119+ * * @FFA_MEM_ATTR_DEVICE_GRE:
7120+ * Device-GRE.
7121+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
7122+ *     Normal memory. Non-cacheable.
7123+ * * @FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB:
7124+ *     Normal memory. Write-back cached.
7125+ * * @FFA_MEM_ATTR_NON_SHAREABLE:
7126+ *     Non-shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7127+ * * @FFA_MEM_ATTR_OUTER_SHAREABLE:
7128+ *     Outer Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7129+ * * @FFA_MEM_ATTR_INNER_SHAREABLE:
7130+ *     Inner Shareable. Combine with FFA_MEM_ATTR_NORMAL_MEMORY_*.
7131+ */
7132+typedef uint8_t ffa_mem_attr8_t;
7133+#define FFA_MEM_ATTR_DEVICE_NGNRNE ((1U << 4) | (0x0U << 2))
7134+#define FFA_MEM_ATTR_DEVICE_NGNRE ((1U << 4) | (0x1U << 2))
7135+#define FFA_MEM_ATTR_DEVICE_NGRE ((1U << 4) | (0x2U << 2))
7136+#define FFA_MEM_ATTR_DEVICE_GRE ((1U << 4) | (0x3U << 2))
7137+#define FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED ((2U << 4) | (0x1U << 2))
7138+#define FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB ((2U << 4) | (0x3U << 2))
7139+#define FFA_MEM_ATTR_NON_SHAREABLE (0x0U << 0)
7140+#define FFA_MEM_ATTR_OUTER_SHAREABLE (0x2U << 0)
7141+#define FFA_MEM_ATTR_INNER_SHAREABLE (0x3U << 0)
7142+
7143+/**
7144+ * typedef ffa_mem_perm8_t - Memory access permissions
7145+ *
7146+ * * @FFA_MEM_PERM_RO:
7147+ *     Request or specify read-only mapping.
7148+ * * @FFA_MEM_PERM_RW:
7149+ *     Request or allow read-write mapping.
7150+ * * @FFA_MEM_PERM_NX:
7151+ *     Deny executable mapping.
7152+ * * @FFA_MEM_PERM_X:
7153+ *     Request executable mapping.
7154+ */
7155+typedef uint8_t ffa_mem_perm8_t;
7156+#define FFA_MEM_PERM_RO (1U << 0)
7157+#define FFA_MEM_PERM_RW (1U << 1)
7158+#define FFA_MEM_PERM_NX (1U << 2)
7159+#define FFA_MEM_PERM_X (1U << 3)
7160+
7161+/**
7162+ * typedef ffa_mem_flag8_t - Endpoint memory flags
7163+ *
7164+ * * @FFA_MEM_FLAG_OTHER:
7165+ * Other borrower. Memory region must not be or was not retrieved on behalf
7166+ * of this endpoint.
7167+ */
7168+typedef uint8_t ffa_mem_flag8_t;
7169+#define FFA_MEM_FLAG_OTHER (1U << 0)
7170+
7171+/**
7172+ * typedef ffa_mtd_flag32_t - Memory transaction descriptor flags
7173+ *
7174+ * * @FFA_MTD_FLAG_ZERO_MEMORY:
7175+ *     Zero memory after unmapping from sender (must be 0 for share).
7176+ * * @FFA_MTD_FLAG_TIME_SLICING:
7177+ *     Not supported by this implementation.
7178+ * * @FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH:
7179+ *     Zero memory after unmapping from borrowers (must be 0 for share).
7180+ * * @FFA_MTD_FLAG_TYPE_MASK:
7181+ *     Bit-mask to extract the memory management transaction type from flags.
7182+ * * @FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
7183+ *     Share memory transaction flag.
7184+ *     Used by @SMC_FC_FFA_MEM_RETRIEVE_RESP to indicate that memory came from
7185+ *     @SMC_FC_FFA_MEM_SHARE, and by @SMC_FC_FFA_MEM_RETRIEVE_REQ to require
7186+ *     that it did.
7187+ * * @FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK:
7188+ *     Not supported by this implementation.
7189+ */
7190+typedef uint32_t ffa_mtd_flag32_t;
7191+#define FFA_MTD_FLAG_ZERO_MEMORY (1U << 0)
7192+#define FFA_MTD_FLAG_TIME_SLICING (1U << 1)
7193+#define FFA_MTD_FLAG_ZERO_MEMORY_AFTER_RELINQUISH (1U << 2)
7194+#define FFA_MTD_FLAG_TYPE_MASK (3U << 3)
7195+#define FFA_MTD_FLAG_TYPE_SHARE_MEMORY (1U << 3)
7196+#define FFA_MTD_FLAG_ADDRESS_RANGE_ALIGNMENT_HINT_MASK (0x1FU << 5)
7197+
7198+/**
7199+ * struct ffa_mapd - Memory access permissions descriptor
7200+ * @endpoint_id:
7201+ * Endpoint id that @memory_access_permissions and @flags apply to.
7202+ * (&typedef ffa_endpoint_id16_t).
7203+ * @memory_access_permissions:
7204+ * FFA_MEM_PERM_* values or'ed together (&typedef ffa_mem_perm8_t).
7205+ * @flags:
7206+ * FFA_MEM_FLAG_* values or'ed together (&typedef ffa_mem_flag8_t).
7207+ */
7208+struct ffa_mapd {
7209+ ffa_endpoint_id16_t endpoint_id;
7210+ ffa_mem_perm8_t memory_access_permissions;
7211+ ffa_mem_flag8_t flags;
7212+};
7213+STATIC_ASSERT(sizeof(struct ffa_mapd) == 4);
7214+
7215+/**
7216+ * struct ffa_emad - Endpoint memory access descriptor.
7217+ * @mapd: &struct ffa_mapd.
7218+ * @comp_mrd_offset:
7219+ *	Offset of &struct ffa_comp_mrd from the start of &struct ffa_mtd.
7220+ * @reserved_8_15:
7221+ * Reserved bytes 8-15. Must be 0.
7222+ */
7223+struct ffa_emad {
7224+ struct ffa_mapd mapd;
7225+ uint32_t comp_mrd_offset;
7226+ uint64_t reserved_8_15;
7227+};
7228+STATIC_ASSERT(sizeof(struct ffa_emad) == 16);
7229+
7230+/**
7231+ * struct ffa_mtd - Memory transaction descriptor.
7232+ * @sender_id:
7233+ * Sender endpoint id.
7234+ * @memory_region_attributes:
7235+ * FFA_MEM_ATTR_* values or'ed together (&typedef ffa_mem_attr8_t).
7236+ * @reserved_3:
7237+ *	Reserved byte 3. Must be 0.
7238+ * @flags:
7239+ * FFA_MTD_FLAG_* values or'ed together (&typedef ffa_mtd_flag32_t).
7240+ * @handle:
7241+ *	ID of shared memory object. Must be 0 for MEM_SHARE.
7242+ * @tag: Client allocated tag. Must match original value.
7243+ * @reserved_24_27:
7244+ * Reserved bytes 24-27. Must be 0.
7245+ * @emad_count:
7246+ * Number of entries in @emad. Must be 1 in current implementation.
7247+ * FFA spec allows more entries.
7248+ * @emad:
7249+ *	Endpoint memory access descriptor array (see &struct ffa_emad).
7250+ */
7251+struct ffa_mtd {
7252+ ffa_endpoint_id16_t sender_id;
7253+ ffa_mem_attr8_t memory_region_attributes;
7254+ uint8_t reserved_3;
7255+ ffa_mtd_flag32_t flags;
7256+ uint64_t handle;
7257+ uint64_t tag;
7258+ uint32_t reserved_24_27;
7259+ uint32_t emad_count;
7260+ struct ffa_emad emad[];
7261+};
7262+STATIC_ASSERT(sizeof(struct ffa_mtd) == 32);
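+
+/*
+ * With emad_count == 1 (the only case this implementation supports), the
+ * transmit buffer therefore begins:
+ *
+ *   bytes  0-31  struct ffa_mtd
+ *   bytes 32-47  emad[0] (struct ffa_emad), with comp_mrd_offset == 48
+ *   bytes 48-63  struct ffa_comp_mrd
+ *   bytes 64-    struct ffa_cons_mrd address_range_array[]
+ */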
7263+
7264+/**
7265+ * struct ffa_mem_relinquish_descriptor - Relinquish request descriptor.
7266+ * @handle:
7267+ *	ID of shared memory object to relinquish.
7268+ * @flags:
7269+ * If bit 0 is set clear memory after unmapping from borrower. Must be 0
7270+ * for share. Bit[1]: Time slicing. Not supported, must be 0. All other
7271+ * bits are reserved 0.
7272+ * @endpoint_count:
7273+ * Number of entries in @endpoint_array.
7274+ * @endpoint_array:
7275+ * Array of endpoint ids.
7276+ */
7277+struct ffa_mem_relinquish_descriptor {
7278+ uint64_t handle;
7279+ uint32_t flags;
7280+ uint32_t endpoint_count;
7281+ ffa_endpoint_id16_t endpoint_array[];
7282+};
7283+STATIC_ASSERT(sizeof(struct ffa_mem_relinquish_descriptor) == 16);
7284+
7285+/**
7286+ * enum ffa_error - FF-A error code
7287+ * @FFA_ERROR_NOT_SUPPORTED:
7288+ * Operation contained possibly valid parameters not supported by the
7289+ * current implementation. Does not match FF-A 1.0 EAC 1_0 definition.
7290+ * @FFA_ERROR_INVALID_PARAMETERS:
7291+ * Invalid parameters. Conditions function specific.
7292+ * @FFA_ERROR_NO_MEMORY:
7293+ * Not enough memory.
7294+ * @FFA_ERROR_DENIED:
7295+ * Operation not allowed. Conditions function specific.
7296+ *
7297+ * FF-A 1.0 EAC 1_0 defines other error codes as well but the current
7298+ * implementation does not use them.
7299+ */
7300+enum ffa_error {
7301+ FFA_ERROR_NOT_SUPPORTED = -1,
7302+ FFA_ERROR_INVALID_PARAMETERS = -2,
7303+ FFA_ERROR_NO_MEMORY = -3,
7304+ FFA_ERROR_DENIED = -6,
7305+};
7306+
7307+/**
7308+ * SMC_FC32_FFA_MIN - First 32 bit SMC opcode reserved for FFA
7309+ */
7310+#define SMC_FC32_FFA_MIN SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
7311+
7312+/**
7313+ * SMC_FC32_FFA_MAX - Last 32 bit SMC opcode reserved for FFA
7314+ */
7315+#define SMC_FC32_FFA_MAX SMC_FASTCALL_NR_SHARED_MEMORY(0x7F)
7316+
7317+/**
7318+ * SMC_FC64_FFA_MIN - First 64 bit SMC opcode reserved for FFA
7319+ */
7320+#define SMC_FC64_FFA_MIN SMC_FASTCALL64_NR_SHARED_MEMORY(0x60)
7321+
7322+/**
7323+ * SMC_FC64_FFA_MAX - Last 64 bit SMC opcode reserved for FFA
7324+ */
7325+#define SMC_FC64_FFA_MAX SMC_FASTCALL64_NR_SHARED_MEMORY(0x7F)
7326+
7327+/**
7328+ * SMC_FC_FFA_ERROR - SMC error return opcode
7329+ *
7330+ * Register arguments:
7331+ *
7332+ * * w1: VMID in [31:16], vCPU in [15:0]
7333+ * * w2: Error code (&enum ffa_error)
7334+ */
7335+#define SMC_FC_FFA_ERROR SMC_FASTCALL_NR_SHARED_MEMORY(0x60)
7336+
7337+/**
7338+ * SMC_FC_FFA_SUCCESS - 32 bit SMC success return opcode
7339+ *
7340+ * Register arguments:
7341+ *
7342+ * * w1: VMID in [31:16], vCPU in [15:0]
7343+ * * w2-w7: Function specific
7344+ */
7345+#define SMC_FC_FFA_SUCCESS SMC_FASTCALL_NR_SHARED_MEMORY(0x61)
7346+
7347+/**
7348+ * SMC_FC64_FFA_SUCCESS - 64 bit SMC success return opcode
7349+ *
7350+ * Register arguments:
7351+ *
7352+ * * w1: VMID in [31:16], vCPU in [15:0]
7353+ * * w2/x2-w7/x7: Function specific
7354+ */
7355+#define SMC_FC64_FFA_SUCCESS SMC_FASTCALL64_NR_SHARED_MEMORY(0x61)
7356+
7357+/**
7358+ * SMC_FC_FFA_VERSION - SMC opcode to return supported FF-A version
7359+ *
7360+ * Register arguments:
7361+ *
7362+ * * w1: Major version bit[30:16] and minor version in bit[15:0] supported
7363+ * by caller. Bit[31] must be 0.
7364+ *
7365+ * Return:
7366+ * * w0: &SMC_FC_FFA_SUCCESS
7367+ * * w2: Major version bit[30:16], minor version in bit[15:0], bit[31] must
7368+ * be 0.
7369+ *
7370+ * or
7371+ *
7372+ * * w0: SMC_FC_FFA_ERROR
7373+ * * w2: FFA_ERROR_NOT_SUPPORTED if major version passed in is less than the
7374+ * minimum major version supported.
7375+ */
7376+#define SMC_FC_FFA_VERSION SMC_FASTCALL_NR_SHARED_MEMORY(0x63)
7377+
7378+/**
7379+ * SMC_FC_FFA_FEATURES - SMC opcode to check optional feature support
7380+ *
7381+ * Register arguments:
7382+ *
7383+ * * w1: FF-A function ID
7384+ *
7385+ * Return:
7386+ * * w0: &SMC_FC_FFA_SUCCESS
7387+ * * w2: Bit[0]: Supports custom buffers for memory transactions.
7388+ * Bit[1:0]: For RXTX_MAP min buffer size and alignment boundary.
7389+ * Other bits must be 0.
7390+ * * w3: For FFA_MEM_RETRIEVE_REQ, bit[7:0]: Number of times the receiver can
7391+ *     retrieve each memory region before relinquishing it, specified as
7392+ *     ((1U << (value + 1)) - 1), i.e. value = bits in reference count - 1.
7393+ * For all other bits and commands: must be 0.
7394+ * or
7395+ *
7396+ * * w0: SMC_FC_FFA_ERROR
7397+ * * w2: FFA_ERROR_NOT_SUPPORTED if function is not implemented, or
7398+ * FFA_ERROR_INVALID_PARAMETERS if function id is not valid.
7399+ */
7400+#define SMC_FC_FFA_FEATURES SMC_FASTCALL_NR_SHARED_MEMORY(0x64)
7401+
7402+/**
7403+ * SMC_FC_FFA_RXTX_MAP - 32 bit SMC opcode to map message buffers
7404+ *
7405+ * Register arguments:
7406+ *
7407+ * * w1: TX address
7408+ * * w2: RX address
7409+ * * w3: RX/TX page count in bit[5:0]
7410+ *
7411+ * Return:
7412+ * * w0: &SMC_FC_FFA_SUCCESS
7413+ */
7414+#define SMC_FC_FFA_RXTX_MAP SMC_FASTCALL_NR_SHARED_MEMORY(0x66)
7415+
7416+/**
7417+ * SMC_FC64_FFA_RXTX_MAP - 64 bit SMC opcode to map message buffers
7418+ *
7419+ * Register arguments:
7420+ *
7421+ * * x1: TX address
7422+ * * x2: RX address
7423+ * * x3: RX/TX page count in bit[5:0]
7424+ *
7425+ * Return:
7426+ * * w0: &SMC_FC_FFA_SUCCESS
7427+ */
7428+#define SMC_FC64_FFA_RXTX_MAP SMC_FASTCALL64_NR_SHARED_MEMORY(0x66)
7429+#ifdef CONFIG_64BIT
7430+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC64_FFA_RXTX_MAP
7431+#else
7432+#define SMC_FCZ_FFA_RXTX_MAP SMC_FC_FFA_RXTX_MAP
7433+#endif
7434+
7435+/**
7436+ * SMC_FC_FFA_RXTX_UNMAP - SMC opcode to unmap message buffers
7437+ *
7438+ * Register arguments:
7439+ *
7440+ * * w1: ID in [31:16]
7441+ *
7442+ * Return:
7443+ * * w0: &SMC_FC_FFA_SUCCESS
7444+ */
7445+#define SMC_FC_FFA_RXTX_UNMAP SMC_FASTCALL_NR_SHARED_MEMORY(0x67)
7446+
7447+/**
7448+ * SMC_FC_FFA_ID_GET - SMC opcode to get endpoint id of caller
7449+ *
7450+ * Return:
7451+ * * w0: &SMC_FC_FFA_SUCCESS
7452+ * * w2: ID in bit[15:0], bit[31:16] must be 0.
7453+ */
7454+#define SMC_FC_FFA_ID_GET SMC_FASTCALL_NR_SHARED_MEMORY(0x69)
7455+
7456+/**
7457+ * SMC_FC_FFA_MEM_DONATE - 32 bit SMC opcode to donate memory
7458+ *
7459+ * Not supported.
7460+ */
7461+#define SMC_FC_FFA_MEM_DONATE SMC_FASTCALL_NR_SHARED_MEMORY(0x71)
7462+
7463+/**
7464+ * SMC_FC_FFA_MEM_LEND - 32 bit SMC opcode to lend memory
7465+ *
7466+ * Not currently supported.
7467+ */
7468+#define SMC_FC_FFA_MEM_LEND SMC_FASTCALL_NR_SHARED_MEMORY(0x72)
7469+
7470+/**
7471+ * SMC_FC_FFA_MEM_SHARE - 32 bit SMC opcode to share memory
7472+ *
7473+ * Register arguments:
7474+ *
7475+ * * w1: Total length
7476+ * * w2: Fragment length
7477+ * * w3: Address
7478+ * * w4: Page count
7479+ *
7480+ * Return:
7481+ * * w0: &SMC_FC_FFA_SUCCESS
7482+ * * w2/w3: Handle
7483+ *
7484+ * or
7485+ *
7486+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
7487+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
7488+ *
7489+ * or
7490+ *
7491+ * * w0: SMC_FC_FFA_ERROR
7492+ * * w2: Error code (&enum ffa_error)
7493+ */
7494+#define SMC_FC_FFA_MEM_SHARE SMC_FASTCALL_NR_SHARED_MEMORY(0x73)
7495+
7496+/**
7497+ * SMC_FC64_FFA_MEM_SHARE - 64 bit SMC opcode to share memory
7498+ *
7499+ * Register arguments:
7500+ *
7501+ * * w1: Total length
7502+ * * w2: Fragment length
7503+ * * x3: Address
7504+ * * w4: Page count
7505+ *
7506+ * Return:
7507+ * * w0: &SMC_FC_FFA_SUCCESS
7508+ * * w2/w3: Handle
7509+ *
7510+ * or
7511+ *
7512+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX
7513+ * * w1-: See &SMC_FC_FFA_MEM_FRAG_RX
7514+ *
7515+ * or
7516+ *
7517+ * * w0: SMC_FC_FFA_ERROR
7518+ * * w2: Error code (&enum ffa_error)
7519+ */
7520+#define SMC_FC64_FFA_MEM_SHARE SMC_FASTCALL64_NR_SHARED_MEMORY(0x73)
7521+
7522+/**
7523+ * SMC_FC_FFA_MEM_RETRIEVE_REQ - 32 bit SMC opcode to retrieve shared memory
7524+ *
7525+ * Register arguments:
7526+ *
7527+ * * w1: Total length
7528+ * * w2: Fragment length
7529+ * * w3: Address
7530+ * * w4: Page count
7531+ *
7532+ * Return:
7533+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
7534+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
7535+ */
7536+#define SMC_FC_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL_NR_SHARED_MEMORY(0x74)
7537+
7538+/**
7539+ * SMC_FC64_FFA_MEM_RETRIEVE_REQ - 64 bit SMC opcode to retrieve shared memory
7540+ *
7541+ * Register arguments:
7542+ *
7543+ * * w1: Total length
7544+ * * w2: Fragment length
7545+ * * x3: Address
7546+ * * w4: Page count
7547+ *
7548+ * Return:
7549+ * * w0: &SMC_FC_FFA_MEM_RETRIEVE_RESP
7550+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_RETRIEVE_RESP
7551+ */
7552+#define SMC_FC64_FFA_MEM_RETRIEVE_REQ SMC_FASTCALL64_NR_SHARED_MEMORY(0x74)
7553+
7554+/**
7555+ * SMC_FC_FFA_MEM_RETRIEVE_RESP - Retrieve 32 bit SMC return opcode
7556+ *
7557+ * Register arguments:
7558+ *
7559+ * * w1: Total length
7560+ * * w2: Fragment length
7561+ */
7562+#define SMC_FC_FFA_MEM_RETRIEVE_RESP SMC_FASTCALL_NR_SHARED_MEMORY(0x75)
7563+
7564+/**
7565+ * SMC_FC_FFA_MEM_RELINQUISH - SMC opcode to relinquish shared memory
7566+ *
7567+ * Input in &struct ffa_mem_relinquish_descriptor format in message buffer.
7568+ *
7569+ * Return:
7570+ * * w0: &SMC_FC_FFA_SUCCESS
7571+ */
7572+#define SMC_FC_FFA_MEM_RELINQUISH SMC_FASTCALL_NR_SHARED_MEMORY(0x76)
7573+
7574+/**
7575+ * SMC_FC_FFA_MEM_RECLAIM - SMC opcode to reclaim shared memory
7576+ *
7577+ * Register arguments:
7578+ *
7579+ * * w1/w2: Handle
7580+ * * w3: Flags
7581+ *
7582+ * Return:
7583+ * * w0: &SMC_FC_FFA_SUCCESS
7584+ */
7585+#define SMC_FC_FFA_MEM_RECLAIM SMC_FASTCALL_NR_SHARED_MEMORY(0x77)
7586+
7587+/**
7588+ * SMC_FC_FFA_MEM_FRAG_RX - SMC opcode to request next fragment.
7589+ *
7590+ * Register arguments:
7591+ *
7592+ * * w1/w2: Cookie
7593+ * * w3: Fragment offset.
7594+ * * w4: Endpoint ID in bit[31:16], if the client is a hypervisor.
7595+ *
7596+ * Return:
7597+ * * w0: &SMC_FC_FFA_MEM_FRAG_TX
7598+ * * w1/x1-w5/x5: See &SMC_FC_FFA_MEM_FRAG_TX
7599+ */
7600+#define SMC_FC_FFA_MEM_FRAG_RX SMC_FASTCALL_NR_SHARED_MEMORY(0x7A)
7601+
7602+/**
7603+ * SMC_FC_FFA_MEM_FRAG_TX - SMC opcode to transmit next fragment
7604+ *
7605+ * Register arguments:
7606+ *
7607+ * * w1/w2: Cookie
7608+ * * w3: Fragment length.
7609+ * * w4: Sender endpoint ID in bit[31:16], if the client is a hypervisor.
7610+ *
7611+ * Return:
7612+ * * w0: &SMC_FC_FFA_MEM_FRAG_RX or &SMC_FC_FFA_SUCCESS.
7613+ * * w1/x1-w5/x5: See opcode in w0.
7614+ */
7615+#define SMC_FC_FFA_MEM_FRAG_TX SMC_FASTCALL_NR_SHARED_MEMORY(0x7B)
7616+
7617+#endif /* __LINUX_TRUSTY_ARM_FFA_H */
7618diff --git a/include/linux/trusty/sm_err.h b/include/linux/trusty/sm_err.h
7619new file mode 100644
7620index 000000000000..f6504448c6c3
7621--- /dev/null
7622+++ b/include/linux/trusty/sm_err.h
7623@@ -0,0 +1,28 @@
7624+/* SPDX-License-Identifier: MIT */
7625+/*
7626+ * Copyright (c) 2013 Google Inc. All rights reserved
7627+ *
7628+ * Trusty and TF-A also have a copy of this header.
7629+ * Please keep the copies in sync.
7630+ */
7631+#ifndef __LINUX_TRUSTY_SM_ERR_H
7632+#define __LINUX_TRUSTY_SM_ERR_H
7633+
7634+/* Errors from the secure monitor */
7635+#define SM_ERR_UNDEFINED_SMC 0xFFFFFFFF /* Unknown SMC (defined by ARM DEN 0028A (0.9.0)) */
7636+#define SM_ERR_INVALID_PARAMETERS -2
7637+#define SM_ERR_INTERRUPTED -3 /* Got interrupted. Call back with restart SMC */
7638+#define SM_ERR_UNEXPECTED_RESTART	-4	/* Got a restart SMC when we didn't expect it */
7639+#define SM_ERR_BUSY -5 /* Temporarily busy. Call back with original args */
7640+#define SM_ERR_INTERLEAVED_SMC -6 /* Got a trusted_service SMC when a restart SMC is required */
7641+#define SM_ERR_INTERNAL_FAILURE -7 /* Unknown error */
7642+#define SM_ERR_NOT_SUPPORTED -8
7643+#define SM_ERR_NOT_ALLOWED -9 /* SMC call not allowed */
7644+#define SM_ERR_END_OF_INPUT -10
7645+#define SM_ERR_PANIC -11 /* Secure OS crashed */
7646+#define SM_ERR_FIQ_INTERRUPTED -12 /* Got interrupted by FIQ. Call back with SMC_SC_RESTART_FIQ on same CPU */
7647+#define SM_ERR_CPU_IDLE -13 /* SMC call waiting for another CPU */
7648+#define SM_ERR_NOP_INTERRUPTED -14 /* Got interrupted. Call back with new SMC_SC_NOP */
7649+#define SM_ERR_NOP_DONE		-15	/* CPU idle after SMC_SC_NOP (not an error) */
7650+
7651+#endif
7652diff --git a/include/linux/trusty/smcall.h b/include/linux/trusty/smcall.h
7653new file mode 100644
7654index 000000000000..aea3f6068593
7655--- /dev/null
7656+++ b/include/linux/trusty/smcall.h
7657@@ -0,0 +1,124 @@
7658+/* SPDX-License-Identifier: MIT */
7659+/*
7660+ * Copyright (c) 2013-2014 Google Inc. All rights reserved
7661+ *
7662+ * Trusty and TF-A also have a copy of this header.
7663+ * Please keep the copies in sync.
7664+ */
7665+#ifndef __LINUX_TRUSTY_SMCALL_H
7666+#define __LINUX_TRUSTY_SMCALL_H
7667+
7668+#define SMC_NUM_ENTITIES 64
7669+#define SMC_NUM_ARGS 4
7670+#define SMC_NUM_PARAMS (SMC_NUM_ARGS - 1)
7671+
7672+#define SMC_IS_FASTCALL(smc_nr) ((smc_nr) & 0x80000000)
7673+#define SMC_IS_SMC64(smc_nr) ((smc_nr) & 0x40000000)
7674+#define SMC_ENTITY(smc_nr) (((smc_nr) & 0x3F000000) >> 24)
7675+#define SMC_FUNCTION(smc_nr) ((smc_nr) & 0x0000FFFF)
7676+
7677+#define SMC_NR(entity, fn, fastcall, smc64) ((((fastcall) & 0x1U) << 31) | \
7678+ (((smc64) & 0x1U) << 30) | \
7679+ (((entity) & 0x3FU) << 24) | \
7680+ ((fn) & 0xFFFFU) \
7681+ )
7682+
7683+#define SMC_FASTCALL_NR(entity, fn) SMC_NR((entity), (fn), 1, 0)
7684+#define SMC_STDCALL_NR(entity, fn) SMC_NR((entity), (fn), 0, 0)
7685+#define SMC_FASTCALL64_NR(entity, fn) SMC_NR((entity), (fn), 1, 1)
7686+#define SMC_STDCALL64_NR(entity, fn) SMC_NR((entity), (fn), 0, 1)
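
As a worked example of this encoding, the fast call SMC_FC_API_VERSION defined later in this header expands as follows:

/*
 * SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11):
 *   fastcall bit: 1 << 31           = 0x80000000
 *   smc64 bit:    0 << 30           = 0x00000000
 *   entity:       (60 & 0x3F) << 24 = 0x3C000000
 *   function:     11 & 0xFFFF       = 0x0000000B
 *   -------------------------------------------
 *   SMC number                      = 0xBC00000B
 */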
7687+
7688+#define SMC_ENTITY_ARCH 0 /* ARM Architecture calls */
7689+#define SMC_ENTITY_CPU 1 /* CPU Service calls */
7690+#define SMC_ENTITY_SIP 2 /* SIP Service calls */
7691+#define SMC_ENTITY_OEM 3 /* OEM Service calls */
7692+#define SMC_ENTITY_STD 4 /* Standard Service calls */
7693+#define SMC_ENTITY_RESERVED 5 /* Reserved for future use */
7694+#define SMC_ENTITY_TRUSTED_APP 48 /* Trusted Application calls */
7695+#define SMC_ENTITY_TRUSTED_OS 50 /* Trusted OS calls */
7696+#define SMC_ENTITY_LOGGING 51 /* Used for secure -> nonsecure logging */
7697+#define SMC_ENTITY_TEST 52 /* Used for secure -> nonsecure tests */
7698+#define SMC_ENTITY_SECURE_MONITOR 60 /* Trusted OS calls internal to secure monitor */
7699+
7700+/* FC = Fast call, SC = Standard call */
7701+#define SMC_SC_RESTART_LAST SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7702+#define SMC_SC_LOCKED_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
7703+
7704+/**
7705+ * SMC_SC_RESTART_FIQ - Re-enter trusty after it was interrupted by an FIQ
7706+ *
7707+ * No arguments, no return value.
7708+ *
7709+ * Re-enter trusty after returning to the non-secure world to process an FIQ.
7710+ * Must be called if and only if trusty returns SM_ERR_FIQ_INTERRUPTED.
7711+ *
7712+ * Enable by selecting API version TRUSTY_API_VERSION_RESTART_FIQ (1) or later.
7713+ */
7714+#define SMC_SC_RESTART_FIQ SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
7715+
7716+/**
7717+ * SMC_SC_NOP - Enter trusty to run pending work.
7718+ *
7719+ * No arguments.
7720+ *
7721+ * Returns SM_ERR_NOP_INTERRUPTED or SM_ERR_NOP_DONE.
7722+ * If SM_ERR_NOP_INTERRUPTED is returned, the call must be repeated.
7723+ *
7724+ * Enable by selecting API version TRUSTY_API_VERSION_SMP (2) or later.
7725+ */
7726+#define SMC_SC_NOP SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
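
A minimal sketch of the repeat-until-done contract above, again assuming a hypothetical smc_std_call() helper:

static void trusty_pump_nops(u32 arg0, u32 arg1, u32 arg2)
{
	long ret = smc_std_call(SMC_SC_NOP, arg0, arg1, arg2);

	/* SM_ERR_NOP_INTERRUPTED means more work is pending. */
	while (ret == SM_ERR_NOP_INTERRUPTED)
		ret = smc_std_call(SMC_SC_NOP, 0, 0, 0);
}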
7727+
7728+/*
7729+ * Return from secure os to non-secure os with return value in r1
7730+ */
7731+#define SMC_SC_NS_RETURN SMC_STDCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7732+
7733+#define SMC_FC_RESERVED SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 0)
7734+#define SMC_FC_FIQ_EXIT SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 1)
7735+#define SMC_FC_REQUEST_FIQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 2)
7736+
7737+#define TRUSTY_IRQ_TYPE_NORMAL (0)
7738+#define TRUSTY_IRQ_TYPE_PER_CPU (1)
7739+#define TRUSTY_IRQ_TYPE_DOORBELL (2)
7740+#define SMC_FC_GET_NEXT_IRQ SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 3)
7741+
7742+#define SMC_FC_CPU_SUSPEND SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 7)
7743+#define SMC_FC_CPU_RESUME SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 8)
7744+
7745+#define SMC_FC_AARCH_SWITCH SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 9)
7746+#define SMC_FC_GET_VERSION_STR SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 10)
7747+
7748+/**
7749+ * SMC_FC_API_VERSION - Find and select supported API version.
7750+ *
7751+ * @r1: Version supported by client.
7752+ *
7753+ * Returns version supported by trusty.
7754+ *
7755+ * If multiple versions are supported, the client should start by calling
7756+ * SMC_FC_API_VERSION with the largest version it supports. Trusty will then
7757+ * return a version it supports. If the client does not support the version
7758+ * returned by trusty and the version returned is less than the version
7759+ * requested, repeat the call with the largest supported version less than the
7760+ * last returned version.
7761+ *
7762+ * This call must be made before any calls that are affected by the API version.
7763+ */
7764+#define TRUSTY_API_VERSION_RESTART_FIQ (1)
7765+#define TRUSTY_API_VERSION_SMP (2)
7766+#define TRUSTY_API_VERSION_SMP_NOP (3)
7767+#define TRUSTY_API_VERSION_PHYS_MEM_OBJ (4)
7768+#define TRUSTY_API_VERSION_MEM_OBJ (5)
7769+#define TRUSTY_API_VERSION_CURRENT (5)
7770+#define SMC_FC_API_VERSION SMC_FASTCALL_NR(SMC_ENTITY_SECURE_MONITOR, 11)
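
A sketch of that negotiation, assuming hypothetical helpers client_supports() and largest_supported_version_below() plus an smc_fast_call() stand-in:

static u32 trusty_negotiate_api_version(void)
{
	u32 want = TRUSTY_API_VERSION_CURRENT;
	u32 got = smc_fast_call(SMC_FC_API_VERSION, want, 0, 0);

	while (got < want && !client_supports(got)) {
		/* Retry with the largest supported version below 'got'. */
		want = largest_supported_version_below(got);
		got = smc_fast_call(SMC_FC_API_VERSION, want, 0, 0);
	}
	return got;
}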
7771+
7772+/* TRUSTED_OS entity calls */
7773+#define SMC_SC_VIRTIO_GET_DESCR SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
7774+#define SMC_SC_VIRTIO_START SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
7775+#define SMC_SC_VIRTIO_STOP SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
7776+
7777+#define SMC_SC_VDEV_RESET SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
7778+#define SMC_SC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
7779+#define SMC_NC_VDEV_KICK_VQ SMC_STDCALL_NR(SMC_ENTITY_TRUSTED_OS, 25)
7780+
7781+#endif /* __LINUX_TRUSTY_SMCALL_H */
7782diff --git a/include/linux/trusty/trusty.h b/include/linux/trusty/trusty.h
7783new file mode 100644
7784index 000000000000..efbb36999a8b
7785--- /dev/null
7786+++ b/include/linux/trusty/trusty.h
7787@@ -0,0 +1,131 @@
7788+/* SPDX-License-Identifier: GPL-2.0-only */
7789+/*
7790+ * Copyright (C) 2013 Google, Inc.
7791+ */
7792+#ifndef __LINUX_TRUSTY_TRUSTY_H
7793+#define __LINUX_TRUSTY_TRUSTY_H
7794+
7795+#include <linux/kernel.h>
7796+#include <linux/trusty/sm_err.h>
7797+#include <linux/types.h>
7798+#include <linux/device.h>
7799+#include <linux/pagemap.h>
7800+
7801+
7802+#if IS_ENABLED(CONFIG_TRUSTY)
7803+s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
7804+s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2);
7805+#ifdef CONFIG_64BIT
7806+s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2);
7807+#endif
7808+#else
7809+static inline s32 trusty_std_call32(struct device *dev, u32 smcnr,
7810+ u32 a0, u32 a1, u32 a2)
7811+{
7812+ return SM_ERR_UNDEFINED_SMC;
7813+}
7814+static inline s32 trusty_fast_call32(struct device *dev, u32 smcnr,
7815+ u32 a0, u32 a1, u32 a2)
7816+{
7817+ return SM_ERR_UNDEFINED_SMC;
7818+}
7819+#ifdef CONFIG_64BIT
7820+static inline s64 trusty_fast_call64(struct device *dev,
7821+ u64 smcnr, u64 a0, u64 a1, u64 a2)
7822+{
7823+ return SM_ERR_UNDEFINED_SMC;
7824+}
7825+#endif
7826+#endif
7827+
7828+struct notifier_block;
7829+enum {
7830+ TRUSTY_CALL_PREPARE,
7831+ TRUSTY_CALL_RETURNED,
7832+};
7833+int trusty_call_notifier_register(struct device *dev,
7834+ struct notifier_block *n);
7835+int trusty_call_notifier_unregister(struct device *dev,
7836+ struct notifier_block *n);
7837+const char *trusty_version_str_get(struct device *dev);
7838+u32 trusty_get_api_version(struct device *dev);
7839+bool trusty_get_panic_status(struct device *dev);
7840+
7841+struct ns_mem_page_info {
7842+ u64 paddr;
7843+ u8 ffa_mem_attr;
7844+ u8 ffa_mem_perm;
7845+ u64 compat_attr;
7846+};
7847+
7848+int trusty_encode_page_info(struct ns_mem_page_info *inf,
7849+ struct page *page, pgprot_t pgprot);
7850+
7851+struct scatterlist;
7852+typedef u64 trusty_shared_mem_id_t;
7853+int trusty_share_memory(struct device *dev, trusty_shared_mem_id_t *id,
7854+ struct scatterlist *sglist, unsigned int nents,
7855+ pgprot_t pgprot);
7856+int trusty_share_memory_compat(struct device *dev, trusty_shared_mem_id_t *id,
7857+ struct scatterlist *sglist, unsigned int nents,
7858+ pgprot_t pgprot);
7859+int trusty_transfer_memory(struct device *dev, u64 *id,
7860+ struct scatterlist *sglist, unsigned int nents,
7861+ pgprot_t pgprot, u64 tag, bool lend);
7862+int trusty_reclaim_memory(struct device *dev, trusty_shared_mem_id_t id,
7863+ struct scatterlist *sglist, unsigned int nents);
7864+
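A minimal usage sketch of this sharing API, assuming a lowmem page so that page_address() is valid (error handling elided; sg_init_one() comes from linux/scatterlist.h):

static void share_page_example(struct device *trusty_dev, struct page *page)
{
	trusty_shared_mem_id_t id;
	struct scatterlist sg;

	sg_init_one(&sg, page_address(page), PAGE_SIZE);
	/* tag = 0, lend = true: trusty gets exclusive access. */
	if (!trusty_transfer_memory(trusty_dev, &id, &sg, 1, PAGE_KERNEL,
				    0, true))
		trusty_reclaim_memory(trusty_dev, id, &sg, 1);
}
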
7865+struct dma_buf;
7866+#ifdef CONFIG_TRUSTY_DMA_BUF_FFA_TAG
7867+u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf);
7868+#else
7869+static inline u64 trusty_dma_buf_get_ffa_tag(struct dma_buf *dma_buf)
7870+{
7871+ return 0;
7872+}
7873+#endif
7874+
7875+/* Invalid handle value is defined by FF-A spec */
7876+#ifdef CONFIG_TRUSTY_DMA_BUF_SHARED_MEM_ID
7877+/**
7878+ * trusty_dma_buf_get_shared_mem_id() - Get memory ID corresponding to a dma_buf
7879+ * @dma_buf: DMA buffer
7880+ * @id: Pointer to output trusty_shared_mem_id_t
7881+ *
7882+ * Sets @id to trusty_shared_mem_id_t corresponding to the given @dma_buf.
7883+ * @dma_buf "owns" the ID, i.e. is responsible for allocating/releasing it.
7884+ * @dma_buf with an allocated @id must be in secure memory and should only be
7885+ * sent to Trusty using TRUSTY_SEND_SECURE.
7886+ *
7887+ * Return:
7888+ * * 0 - success
7889+ * * -ENODATA - @dma_buf does not own a trusty_shared_mem_id_t
7890+ * * ... - @dma_buf should not be lent or shared
7891+ */
7892+int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
7893+ trusty_shared_mem_id_t *id);
7894+#else
7895+static inline int trusty_dma_buf_get_shared_mem_id(struct dma_buf *dma_buf,
7896+ trusty_shared_mem_id_t *id)
7897+{
7898+ return -ENODATA;
7899+}
7900+#endif
7901+
7902+struct trusty_nop {
7903+ struct list_head node;
7904+ u32 args[3];
7905+};
7906+
7907+static inline void trusty_nop_init(struct trusty_nop *nop,
7908+ u32 arg0, u32 arg1, u32 arg2) {
7909+ INIT_LIST_HEAD(&nop->node);
7910+ nop->args[0] = arg0;
7911+ nop->args[1] = arg1;
7912+ nop->args[2] = arg2;
7913+}
7914+
7915+void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop);
7916+void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop);
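
A brief sketch of the nop queue; the caller owns the struct trusty_nop storage, which presumably must stay valid until the nop is dequeued:

static void kick_trusty(struct device *trusty_dev, struct trusty_nop *nop)
{
	trusty_nop_init(nop, 0, 0, 0);
	trusty_enqueue_nop(trusty_dev, nop);	/* trusty runs pending work */
}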
7917+
7918+#endif
7919diff --git a/include/linux/trusty/trusty_ipc.h b/include/linux/trusty/trusty_ipc.h
7920new file mode 100644
7921index 000000000000..9386392f3a64
7922--- /dev/null
7923+++ b/include/linux/trusty/trusty_ipc.h
7924@@ -0,0 +1,89 @@
7925+/* SPDX-License-Identifier: GPL-2.0-only */
7926+/*
7927+ * Copyright (C) 2015 Google, Inc.
7928+ */
7929+#ifndef __LINUX_TRUSTY_TRUSTY_IPC_H
7930+#define __LINUX_TRUSTY_TRUSTY_IPC_H
7931+
7932+#include <linux/list.h>
7933+#include <linux/scatterlist.h>
7934+#include <linux/trusty/trusty.h>
7935+#include <linux/types.h>
7936+
7937+struct tipc_chan;
7938+
7939+struct tipc_msg_buf {
7940+ void *buf_va;
7941+ struct scatterlist sg;
7942+ trusty_shared_mem_id_t buf_id;
7943+ size_t buf_sz;
7944+ size_t wpos;
7945+ size_t rpos;
7946+ size_t shm_cnt;
7947+ struct list_head node;
7948+};
7949+
7950+enum tipc_chan_event {
7951+ TIPC_CHANNEL_CONNECTED = 1,
7952+ TIPC_CHANNEL_DISCONNECTED,
7953+ TIPC_CHANNEL_SHUTDOWN,
7954+};
7955+
7956+struct tipc_chan_ops {
7957+ void (*handle_event)(void *cb_arg, int event);
7958+ struct tipc_msg_buf *(*handle_msg)(void *cb_arg,
7959+ struct tipc_msg_buf *mb);
7960+ void (*handle_release)(void *cb_arg);
7961+};
7962+
7963+struct tipc_chan *tipc_create_channel(struct device *dev,
7964+ const struct tipc_chan_ops *ops,
7965+ void *cb_arg);
7966+
7967+int tipc_chan_connect(struct tipc_chan *chan, const char *port);
7968+
7969+int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb);
7970+
7971+int tipc_chan_shutdown(struct tipc_chan *chan);
7972+
7973+void tipc_chan_destroy(struct tipc_chan *chan);
7974+
7975+struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan);
7976+
7977+void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
7978+
7979+struct tipc_msg_buf *
7980+tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, long timeout);
7981+
7982+void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb);
7983+
7984+static inline size_t mb_avail_space(struct tipc_msg_buf *mb)
7985+{
7986+ return mb->buf_sz - mb->wpos;
7987+}
7988+
7989+static inline size_t mb_avail_data(struct tipc_msg_buf *mb)
7990+{
7991+ return mb->wpos - mb->rpos;
7992+}
7993+
7994+static inline void *mb_put_data(struct tipc_msg_buf *mb, size_t len)
7995+{
7996+ void *pos = (u8 *)mb->buf_va + mb->wpos;
7997+
7998+ BUG_ON(mb->wpos + len > mb->buf_sz);
7999+ mb->wpos += len;
8000+ return pos;
8001+}
8002+
8003+static inline void *mb_get_data(struct tipc_msg_buf *mb, size_t len)
8004+{
8005+ void *pos = (u8 *)mb->buf_va + mb->rpos;
8006+
8007+ BUG_ON(mb->rpos + len > mb->wpos);
8008+ mb->rpos += len;
8009+ return pos;
8010+}
8011+
8012+#endif /* __LINUX_TRUSTY_TRUSTY_IPC_H */
8013+
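A sketch of an in-kernel client of this channel API; the callbacks, port name, and the ERR_PTR error convention are illustrative assumptions:

static struct tipc_msg_buf *demo_handle_msg(void *cb_arg,
					    struct tipc_msg_buf *mb)
{
	pr_info("tipc demo: got %zu bytes\n", mb_avail_data(mb));
	return mb;	/* hand the buffer back for rx reuse */
}

static void demo_handle_event(void *cb_arg, int event)
{
	if (event == TIPC_CHANNEL_CONNECTED)
		pr_info("tipc demo: connected\n");
}

static void demo_handle_release(void *cb_arg)
{
}

static const struct tipc_chan_ops demo_ops = {
	.handle_event = demo_handle_event,
	.handle_msg = demo_handle_msg,
	.handle_release = demo_handle_release,
};

static int demo_connect(struct device *dev)
{
	struct tipc_chan *chan = tipc_create_channel(dev, &demo_ops, NULL);

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* assumes ERR_PTR on failure */
	return tipc_chan_connect(chan, "com.android.trusty.demo-port");
}
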
8014diff --git a/include/uapi/linux/trusty/ipc.h b/include/uapi/linux/trusty/ipc.h
8015new file mode 100644
8016index 000000000000..af91035484f1
8017--- /dev/null
8018+++ b/include/uapi/linux/trusty/ipc.h
8019@@ -0,0 +1,65 @@
8020+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
8021+
8022+#ifndef _UAPI_LINUX_TRUSTY_IPC_H_
8023+#define _UAPI_LINUX_TRUSTY_IPC_H_
8024+
8025+#include <linux/ioctl.h>
8026+#include <linux/types.h>
8027+#include <linux/uio.h>
8028+
8029+/**
8030+ * enum transfer_kind - How to send an fd to Trusty
8031+ * @TRUSTY_SHARE: Memory will be accessible by Linux and Trusty. On ARM it
8032+ * will be mapped as nonsecure. Suitable for shared memory.
8033+ * The paired fd must be a "dma_buf".
8034+ * @TRUSTY_LEND: Memory will be accessible only to Trusty. On ARM it will
8035+ * be transitioned to "Secure" memory if Trusty is in
8036+ * TrustZone. This transfer kind is suitable for donating
8037+ * video buffers or other similar resources. The paired fd
8038+ * may need to come from a platform-specific allocator for
8039+ * memory that may be transitioned to "Secure".
8040+ * @TRUSTY_SEND_SECURE: Send memory that is already "Secure". Memory will be
8041+ * accessible only to Trusty. The paired fd may need to
8042+ * come from a platform-specific allocator that returns
8043+ * "Secure" buffers.
8044+ *
8045+ * Describes how the user would like the resource in question to be sent to
8046+ * Trusty. Options may be valid only for certain kinds of fds.
8047+ */
8048+enum transfer_kind {
8049+ TRUSTY_SHARE = 0,
8050+ TRUSTY_LEND = 1,
8051+ TRUSTY_SEND_SECURE = 2,
8052+};
8053+
8054+/**
8055+ * struct trusty_shm - Describes a transfer of memory to Trusty
8056+ * @fd: The fd to transfer
8057+ * @transfer: How to transfer it - see &enum transfer_kind
8058+ */
8059+struct trusty_shm {
8060+ __s32 fd;
8061+ __u32 transfer;
8062+};
8063+
8064+/**
8065+ * struct tipc_send_msg_req - Request struct for @TIPC_IOC_SEND_MSG
8066+ * @iov: Pointer to an array of &struct iovec describing data to be sent
8067+ * @shm: Pointer to an array of &struct trusty_shm describing any file
8068+ * descriptors to be transferred.
8069+ * @iov_cnt: Number of elements in the @iov array
8070+ * @shm_cnt: Number of elements in the @shm array
8071+ */
8072+struct tipc_send_msg_req {
8073+ __u64 iov;
8074+ __u64 shm;
8075+ __u64 iov_cnt;
8076+ __u64 shm_cnt;
8077+};
8078+
8079+#define TIPC_IOC_MAGIC 'r'
8080+#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *)
8081+#define TIPC_IOC_SEND_MSG _IOW(TIPC_IOC_MAGIC, 0x81, \
8082+ struct tipc_send_msg_req)
8083+
8084+#endif
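
A user-space sketch of these ioctls; the device node path and port name are assumptions (the trusty-ipc driver in this patch creates nodes such as /dev/trusty-ipc-dev0):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/trusty/ipc.h>

static int tipc_send(const char *port, const char *msg)
{
	int ret = -1;
	int fd = open("/dev/trusty-ipc-dev0", O_RDWR);
	struct iovec iov = {
		.iov_base = (void *)msg,
		.iov_len = strlen(msg),
	};
	struct tipc_send_msg_req req = {
		.iov = (__u64)(uintptr_t)&iov,
		.iov_cnt = 1,
		/* no fds transferred, so .shm / .shm_cnt stay zero */
	};

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIPC_IOC_CONNECT, port) == 0 &&
	    ioctl(fd, TIPC_IOC_SEND_MSG, &req) >= 0)
		ret = 0;
	close(fd);
	return ret;
}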
8085diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
8086index b052355ac7a3..cf6b95d9a1ec 100644
8087--- a/include/uapi/linux/virtio_ids.h
8088+++ b/include/uapi/linux/virtio_ids.h
8089@@ -39,6 +39,7 @@
8090 #define VIRTIO_ID_9P 9 /* 9p virtio console */
8091 #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */
8092 #define VIRTIO_ID_CAIF 12 /* Virtio caif */
8093+#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */
8094 #define VIRTIO_ID_GPU 16 /* virtio GPU */
8095 #define VIRTIO_ID_INPUT 18 /* virtio input */
8096 #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
8097--
80982.30.2
8099