From 953cab73b8bc487da330aa454abd7f8c7466737e Mon Sep 17 00:00:00 2001
From: Madhurkiran Harikrishnan <madhurkiran.harikrishnan@xilinx.com>
Date: Mon, 24 Feb 2020 18:32:16 -0800
Subject: [LINUX][rel-v2020.1][PATCH v1 2/3] Support for vm_insert_pfn
 deprecated from kernel 4.20

From kernel 4.20 onwards, support for vm_insert_pfn is deprecated.
Hence, replace it with vmf_insert_pfn.

Signed-off-by: Madhurkiran Harikrishnan <madhurkiran.harikrishnan@xilinx.com>
---
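Context for the hunks that follow: vmf_insert_pfn() reports failure through
VM_FAULT_* bits in a vm_fault_t instead of a negative errno, which is why the
success test changes from "0 != ret" to "VM_FAULT_ERROR & ret". Every hunk
open-codes the same version guard; a minimal sketch of that guard as a single
compat wrapper might look like the snippet below, assuming only the in-kernel
vm_insert_pfn()/vmf_insert_pfn() APIs (the helper name mali_insert_pfn_compat
is hypothetical and does not exist in the driver):

  #include <linux/version.h>
  #include <linux/mm.h>

  /* Hypothetical wrapper, for illustration only: the patch repeats this
   * check at each call site instead of adding a shared helper. */
  static inline int mali_insert_pfn_compat(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           unsigned long pfn)
  {
  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
          /* vmf_insert_pfn() returns a vm_fault_t; failures are flagged in
           * the VM_FAULT_ERROR mask rather than as a negative errno. */
          vm_fault_t fault = vmf_insert_pfn(vma, addr, pfn);

          return unlikely(fault & VM_FAULT_ERROR) ? -EFAULT : 0;
  #else
          /* vm_insert_pfn() returns 0 on success or a negative errno. */
          return vm_insert_pfn(vma, addr, pfn);
  #endif
  }

Keeping the checks inline, as the patch does, leaves the existing -EBUSY
handling in the resize paths visible at the call sites.
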
 .../devicedrv/mali/linux/mali_memory_block_alloc.c | 6 +++++-
 driver/src/devicedrv/mali/linux/mali_memory_cow.c | 14 ++++++++++++--
 .../src/devicedrv/mali/linux/mali_memory_os_alloc.c | 20 +++++++++++++++++---
 driver/src/devicedrv/mali/linux/mali_memory_secure.c | 7 ++++++-
 4 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_block_alloc.c
index 0c5b6c3..e528699 100644
--- linux/mali_memory_block_alloc.c
+++ b/linux/mali_memory_block_alloc.c
@@ -309,9 +309,13 @@ int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *v
 
 	list_for_each_entry(m_page, &block_mem->pfns, list) {
 		MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
-
 		if (unlikely(0 != ret)) {
+#endif
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_cow.c b/driver/src/devicedrv/mali/linux/mali_memory_cow.c
index f1d44fe..1dae1d6 100644
--- linux/mali_memory_cow.c
+++ b/linux/mali_memory_cow.c
@@ -532,9 +532,14 @@ int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma
 		 * flush which makes it way slower than remap_pfn_range or vm_insert_pfn.
 		ret = vm_insert_page(vma, addr, page);
 		*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page));
-
 		if (unlikely(0 != ret)) {
+#endif
+
 			return ret;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -569,9 +574,14 @@ _mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bken
 
 	list_for_each_entry(m_page, &cow->pages, list) {
 		if ((count >= offset) && (count < offset + num)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page));
-
 			if (unlikely(0 != ret)) {
+#endif
+
 				if (count == offset) {
 					return _MALI_OSK_ERR_FAULT;
 				} else {
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c b/driver/src/devicedrv/mali/linux/mali_memory_os_alloc.c
index 3fb6f05..7de3920 100644
--- linux/mali_memory_os_alloc.c
+++ b/linux/mali_memory_os_alloc.c
@@ -378,9 +378,14 @@ int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma)
 		ret = vm_insert_page(vma, addr, page);
 		*/
 		page = m_page->page;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+		ret = vmf_insert_pfn(vma, addr, page_to_pfn(page));
+		if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 		ret = vm_insert_pfn(vma, addr, page_to_pfn(page));
-
 		if (unlikely(0 != ret)) {
+#endif
+
 			return -EFAULT;
 		}
 		addr += _MALI_OSK_MALI_PAGE_SIZE;
@@ -416,9 +421,13 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 
 			vm_end -= _MALI_OSK_MALI_PAGE_SIZE;
 			if (mapping_page_num > 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+				ret = vmf_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
+				if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 				ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page));
-
 				if (unlikely(0 != ret)) {
+#endif
 					/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 					if (-EBUSY == ret) {
 						break;
@@ -439,9 +448,14 @@ _mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bken
 		list_for_each_entry(m_page, &os_mem->pages, list) {
 			if (count >= offset) {
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+				ret = vmf_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
+				if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 				ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page));
-
 				if (unlikely(0 != ret)) {
+#endif
+
 					/*will return -EBUSY If the page has already been mapped into table, but it's OK*/
 					if (-EBUSY == ret) {
 						break;
diff --git a/driver/src/devicedrv/mali/linux/mali_memory_secure.c b/driver/src/devicedrv/mali/linux/mali_memory_secure.c
index 5546304..cebd1c8 100644
--- linux/mali_memory_secure.c
+++ b/linux/mali_memory_secure.c
@@ -132,9 +132,14 @@ int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *
 		MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE);
 
 		for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+			ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys));
+			if (unlikely(VM_FAULT_ERROR & ret)) {
+#else
 			ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys));
-
 			if (unlikely(0 != ret)) {
+#endif
+
 				return -EFAULT;
 			}
 			addr += _MALI_OSK_MALI_PAGE_SIZE;
-- 
2.7.4
