From 353fc92618ce0dc6bab4a3e8bff1c13c3b613110 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin <alexander.alekhin@intel.com>
Date: Wed, 23 Aug 2017 17:41:23 +0300
Subject: [PATCH 1/2] carotene: don't use __asm__ with aarch64

---
Upstream-Status: Backport

 3rdparty/carotene/src/channel_extract.cpp  |   4 +-
 3rdparty/carotene/src/channels_combine.cpp |   2 +-
 3rdparty/carotene/src/colorconvert.cpp     | 104 ++++++++++++++---------------
 3rdparty/carotene/src/convert.cpp          |  54 +++++++--------
 3rdparty/carotene/src/convert_scale.cpp    |  72 ++++++++++----------
 3rdparty/carotene/src/gaussian_blur.cpp    |   6 +-
 3rdparty/carotene/src/pyramid.cpp          |  20 +++---
 3rdparty/carotene/src/scharr.cpp           |   4 +-
 8 files changed, 133 insertions(+), 133 deletions(-)

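Every hunk below applies the same fix: the preprocessor guards that select the
32-bit ARM inline-assembly fast paths are extended with !defined(__aarch64__)
(or its negation for the intrinsics-only branches), so AArch64 builds always
take the NEON-intrinsics branch. The asm blocks use ARMv7 mnemonics (vld2.8,
vmov.8, ...) and d/q register constraints that are not valid AArch64 assembly.
A minimal illustrative sketch of the pattern, using a hypothetical copy8()
helper that is not part of the patch:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Hypothetical example only: copy 8 bytes, keeping the old ARMv7 __asm__
     * path for 32-bit ARM with GCC < 4.7 and falling back to NEON intrinsics
     * everywhere else, including AArch64. */
    static void copy8(const uint8_t *src, uint8_t *dst)
    {
    #if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
        __asm__ (
            "vld1.8 {d0}, [%[in]]  \n\t"   /* ARMv7 NEON mnemonic, d-register */
            "vst1.8 {d0}, [%[out]] \n\t"
            : /* no outputs */
            : [in] "r" (src), [out] "r" (dst)
            : "d0", "memory"
        );
    #else
        vst1_u8(dst, vld1_u8(src));        /* intrinsics build for ARM and AArch64 */
    #endif
    }
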
19diff --git a/3rdparty/carotene/src/channel_extract.cpp b/3rdparty/carotene/src/channel_extract.cpp
20index f663bc6005..8238a3ece8 100644
21--- a/3rdparty/carotene/src/channel_extract.cpp
22+++ b/3rdparty/carotene/src/channel_extract.cpp
23@@ -231,7 +231,7 @@ void extract4(const Size2D &size,
24 srcStride == dst2Stride && \
25 srcStride == dst3Stride &&
26
27-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
28+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
29
30 #define SPLIT_ASM2(sgn, bits) __asm__ ( \
31 "vld2." #bits " {d0, d2}, [%[in0]] \n\t" \
32@@ -351,7 +351,7 @@ void extract4(const Size2D &size,
33 } \
34 }
35
36-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
37+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
38
39 #define ALPHA_QUAD(sgn, bits) { \
40 internal::prefetch(src + sj); \
41diff --git a/3rdparty/carotene/src/channels_combine.cpp b/3rdparty/carotene/src/channels_combine.cpp
42index 157c8b8121..fc98fb9181 100644
43--- a/3rdparty/carotene/src/channels_combine.cpp
44+++ b/3rdparty/carotene/src/channels_combine.cpp
45@@ -77,7 +77,7 @@ namespace CAROTENE_NS {
46 dstStride == src2Stride && \
47 dstStride == src3Stride &&
48
49-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
50+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
51
52 #define MERGE_ASM2(sgn, bits) __asm__ ( \
53 "vld1." #bits " {d0-d1}, [%[in0]] \n\t" \
54diff --git a/3rdparty/carotene/src/colorconvert.cpp b/3rdparty/carotene/src/colorconvert.cpp
55index 3037fe672a..26ae54b15c 100644
56--- a/3rdparty/carotene/src/colorconvert.cpp
57+++ b/3rdparty/carotene/src/colorconvert.cpp
58@@ -97,7 +97,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
59 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
60 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
61
62-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
63+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
64 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
65 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
66 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
67@@ -116,7 +116,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
68 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
69 size_t sj = 0u, dj = 0u;
70
71-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
72+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
73 for (; dj < roiw8; sj += 24, dj += 8)
74 {
75 internal::prefetch(src + sj);
76@@ -198,7 +198,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
77 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
78 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
79
80-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
81+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
82 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
83 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
84 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
85@@ -217,7 +217,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
86 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
87 size_t sj = 0u, dj = 0u;
88
89-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
90+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
91 for (; dj < roiw8; sj += 32, dj += 8)
92 {
93 internal::prefetch(src + sj);
94@@ -300,7 +300,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
95 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
96 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
97
98-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
99+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
100 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
101 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
102 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
103@@ -319,7 +319,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
104 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
105 size_t sj = 0u, dj = 0u;
106
107-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
108+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
109 for (; dj < roiw8; sj += 24, dj += 8)
110 {
111 internal::prefetch(src + sj);
112@@ -402,7 +402,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
113 const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
114 const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;
115
116-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
117+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
118 register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
119 register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
120 register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
121@@ -421,7 +421,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
122 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
123 size_t sj = 0u, dj = 0u;
124
125-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
126+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
127 for (; dj < roiw8; sj += 32, dj += 8)
128 {
129 internal::prefetch(src + sj);
130@@ -512,7 +512,7 @@ void gray2rgb(const Size2D &size,
131 for (; sj < roiw16; sj += 16, dj += 48)
132 {
133 internal::prefetch(src + sj);
134-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
135+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
136 __asm__ (
137 "vld1.8 {d0-d1}, [%[in0]] \n\t"
138 "vmov.8 q1, q0 \n\t"
139@@ -538,7 +538,7 @@ void gray2rgb(const Size2D &size,
140
141 if (sj < roiw8)
142 {
143-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
144+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
145 __asm__ (
146 "vld1.8 {d0}, [%[in]] \n\t"
147 "vmov.8 d1, d0 \n\t"
148@@ -584,7 +584,7 @@ void gray2rgbx(const Size2D &size,
149 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
150 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
151
152-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
153+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
154 register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
155 #else
156 uint8x16x4_t vRgba;
157@@ -602,7 +602,7 @@ void gray2rgbx(const Size2D &size,
158 for (; sj < roiw16; sj += 16, dj += 64)
159 {
160 internal::prefetch(src + sj);
161-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
162+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
163 __asm__ (
164 "vld1.8 {d0-d1}, [%[in0]] \n\t"
165 "vmov.8 q1, q0 \n\t"
166@@ -628,7 +628,7 @@ void gray2rgbx(const Size2D &size,
167
168 if (sj < roiw8)
169 {
170-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
171+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
172 __asm__ (
173 "vld1.8 {d5}, [%[in]] \n\t"
174 "vmov.8 d6, d5 \n\t"
175@@ -672,7 +672,7 @@ void rgb2rgbx(const Size2D &size,
176 internal::assertSupportedConfiguration();
177 #ifdef CAROTENE_NEON
178 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
179-#if defined(__GNUC__) && defined(__arm__)
180+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
181 register uint8x8_t vc255_0 asm ("d3") = vmov_n_u8(255);
182 #else
183 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
184@@ -688,7 +688,7 @@ void rgb2rgbx(const Size2D &size,
185 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
186 size_t sj = 0u, dj = 0u, j = 0u;
187
188-#if defined(__GNUC__) && defined(__arm__)
189+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
190 for (; j < roiw8; sj += 24, dj += 32, j += 8)
191 {
192 internal::prefetch(src + sj);
193@@ -742,7 +742,7 @@ void rgbx2rgb(const Size2D &size,
194 internal::assertSupportedConfiguration();
195 #ifdef CAROTENE_NEON
196 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
197-#if !defined(__GNUC__) || !defined(__arm__)
198+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
199 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
200 union { uint8x16x4_t v4; uint8x16x3_t v3; } v_dst0;
201 union { uint8x8x4_t v4; uint8x8x3_t v3; } v_dst;
202@@ -754,7 +754,7 @@ void rgbx2rgb(const Size2D &size,
203 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
204 size_t sj = 0u, dj = 0u, j = 0u;
205
206-#if defined(__GNUC__) && defined(__arm__)
207+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
208 for (; j < roiw8; sj += 32, dj += 24, j += 8)
209 {
210 internal::prefetch(src + sj);
211@@ -805,7 +805,7 @@ void rgb2bgr(const Size2D &size,
212 {
213 internal::assertSupportedConfiguration();
214 #ifdef CAROTENE_NEON
215-#if !defined(__GNUC__) || !defined(__arm__)
216+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
217 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
218 #endif
219 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
220@@ -817,7 +817,7 @@ void rgb2bgr(const Size2D &size,
221 size_t sj = 0u, dj = 0u, j = 0u;
222
223
224-#if defined(__GNUC__) && defined(__arm__)
225+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
226 for (; j < roiw8; sj += 24, dj += 24, j += 8)
227 {
228 internal::prefetch(src + sj);
229@@ -874,7 +874,7 @@ void rgbx2bgrx(const Size2D &size,
230 {
231 internal::assertSupportedConfiguration();
232 #ifdef CAROTENE_NEON
233-#if !defined(__GNUC__) || !defined(__arm__)
234+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
235 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
236 #endif
237 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
238@@ -885,7 +885,7 @@ void rgbx2bgrx(const Size2D &size,
239 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
240 size_t sj = 0u, dj = 0u, j = 0u;
241
242-#if defined(__GNUC__) && defined(__arm__)
243+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
244 for (; j < roiw8; sj += 32, dj += 32, j += 8)
245 {
246 internal::prefetch(src + sj);
247@@ -943,7 +943,7 @@ void rgbx2bgr(const Size2D &size,
248 {
249 internal::assertSupportedConfiguration();
250 #ifdef CAROTENE_NEON
251-#if !defined(__GNUC__) || !defined(__arm__)
252+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
253 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
254 #endif
255 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
256@@ -954,7 +954,7 @@ void rgbx2bgr(const Size2D &size,
257 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
258 size_t sj = 0u, dj = 0u, j = 0u;
259
260-#if defined(__GNUC__) && defined(__arm__)
261+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
262 for (; j < roiw8; sj += 32, dj += 24, j += 8)
263 {
264 internal::prefetch(src + sj);
265@@ -1010,7 +1010,7 @@ void rgb2bgrx(const Size2D &size,
266 {
267 internal::assertSupportedConfiguration();
268 #ifdef CAROTENE_NEON
269-#if defined(__GNUC__) && defined(__arm__)
270+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
271 register uint8x8_t vc255 asm ("d3") = vmov_n_u8(255);
272 #else
273 union { uint8x16x4_t v4; uint8x16x3_t v3; } vals0;
274@@ -1019,7 +1019,7 @@ void rgb2bgrx(const Size2D &size,
275 vals8.v4.val[3] = vmov_n_u8(255);
276 #endif
277
278-#if !defined(__GNUC__) || !defined(__arm__)
279+#if !(!defined(__aarch64__) && defined(__GNUC__) && defined(__arm__))
280 size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
281 #endif
282 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
283@@ -1030,7 +1030,7 @@ void rgb2bgrx(const Size2D &size,
284 u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
285 size_t sj = 0u, dj = 0u, j = 0u;
286
287-#if defined(__GNUC__) && defined(__arm__)
288+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
289 for (; j < roiw8; sj += 24, dj += 32, j += 8)
290 {
291 internal::prefetch(src + sj);
292@@ -1409,7 +1409,7 @@ inline void convertToHSV(const s32 r, const s32 g, const s32 b,
293 "d24","d25","d26","d27","d28","d29","d30","d31" \
294 );
295
296-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
297+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
298
299 #define YCRCB_CONSTS \
300 register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \
301@@ -1555,7 +1555,7 @@ inline uint8x8x3_t convertToYCrCb( const int16x8_t& vR, const int16x8_t& vG, con
302 #define COEFF_G ( 8663)
303 #define COEFF_B (-17705)
304
305-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
306+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
307 #define YUV420ALPHA3_CONST
308 #define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
309 #define YUV420ALPHA3_CONVERT
310@@ -1852,7 +1852,7 @@ void rgb2hsv(const Size2D &size,
311 #ifdef CAROTENE_NEON
312 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
313 const s32 hsv_shift = 12;
314-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
315+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
316 register const f32 vsdiv_table = f32(255 << hsv_shift);
317 register f32 vhdiv_table = f32(hrange << hsv_shift);
318 register const s32 vhrange = hrange;
319@@ -1871,7 +1871,7 @@ void rgb2hsv(const Size2D &size,
320 for (; j < roiw8; sj += 24, dj += 24, j += 8)
321 {
322 internal::prefetch(src + sj);
323-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
324+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
325 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
326 #else
327 uint8x8x3_t vRgb = vld3_u8(src + sj);
328@@ -1904,7 +1904,7 @@ void rgbx2hsv(const Size2D &size,
329 #ifdef CAROTENE_NEON
330 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
331 const s32 hsv_shift = 12;
332-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
333+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
334 register const f32 vsdiv_table = f32(255 << hsv_shift);
335 register f32 vhdiv_table = f32(hrange << hsv_shift);
336 register const s32 vhrange = hrange;
337@@ -1923,7 +1923,7 @@ void rgbx2hsv(const Size2D &size,
338 for (; j < roiw8; sj += 32, dj += 24, j += 8)
339 {
340 internal::prefetch(src + sj);
341-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
342+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
343 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
344 #else
345 uint8x8x4_t vRgb = vld4_u8(src + sj);
346@@ -1956,7 +1956,7 @@ void bgr2hsv(const Size2D &size,
347 #ifdef CAROTENE_NEON
348 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
349 const s32 hsv_shift = 12;
350-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
351+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
352 register const f32 vsdiv_table = f32(255 << hsv_shift);
353 register f32 vhdiv_table = f32(hrange << hsv_shift);
354 register const s32 vhrange = hrange;
355@@ -1975,7 +1975,7 @@ void bgr2hsv(const Size2D &size,
356 for (; j < roiw8; sj += 24, dj += 24, j += 8)
357 {
358 internal::prefetch(src + sj);
359-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
360+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
361 CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
362 #else
363 uint8x8x3_t vRgb = vld3_u8(src + sj);
364@@ -2008,7 +2008,7 @@ void bgrx2hsv(const Size2D &size,
365 #ifdef CAROTENE_NEON
366 size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
367 const s32 hsv_shift = 12;
368-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
369+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
370 register const f32 vsdiv_table = f32(255 << hsv_shift);
371 register f32 vhdiv_table = f32(hrange << hsv_shift);
372 register const s32 vhrange = hrange;
373@@ -2027,7 +2027,7 @@ void bgrx2hsv(const Size2D &size,
374 for (; j < roiw8; sj += 32, dj += 24, j += 8)
375 {
376 internal::prefetch(src + sj);
377-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
378+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
379 CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
380 #else
381 uint8x8x4_t vRgb = vld4_u8(src + sj);
382@@ -2068,7 +2068,7 @@ void rgbx2bgr565(const Size2D &size,
383 for (; j < roiw16; sj += 64, dj += 32, j += 16)
384 {
385 internal::prefetch(src + sj);
386-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
387+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
388 __asm__ (
389 "vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
390 "vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
391@@ -2122,7 +2122,7 @@ void rgb2bgr565(const Size2D &size,
392 for (; j < roiw16; sj += 48, dj += 32, j += 16)
393 {
394 internal::prefetch(src + sj);
395-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
396+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
397 __asm__ (
398 "vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
399 "vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
400@@ -2176,7 +2176,7 @@ void rgbx2rgb565(const Size2D &size,
401 for (; j < roiw16; sj += 64, dj += 32, j += 16)
402 {
403 internal::prefetch(src + sj);
404-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
405+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
406 __asm__ (
407 "vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
408 "vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"
409@@ -2230,7 +2230,7 @@ void rgb2rgb565(const Size2D &size,
410 for (; j < roiw16; sj += 48, dj += 32, j += 16)
411 {
412 internal::prefetch(src + sj);
413-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
414+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
415 __asm__ (
416 "vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
417 "vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
418@@ -2285,7 +2285,7 @@ void rgb2ycrcb(const Size2D &size,
419 for (; j < roiw8; sj += 24, dj += 24, j += 8)
420 {
421 internal::prefetch(src + sj);
422-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
423+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
424 CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
425 #else
426 uint8x8x3_t vRgb = vld3_u8(src + sj);
427@@ -2329,7 +2329,7 @@ void rgbx2ycrcb(const Size2D &size,
428 for (; j < roiw8; sj += 32, dj += 24, j += 8)
429 {
430 internal::prefetch(src + sj);
431-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
432+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
433 CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
434 #else
435 uint8x8x4_t vRgba = vld4_u8(src + sj);
436@@ -2373,7 +2373,7 @@ void bgr2ycrcb(const Size2D &size,
437 for (; j < roiw8; sj += 24, dj += 24, j += 8)
438 {
439 internal::prefetch(src + sj);
440-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
441+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
442 CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
443 #else
444 uint8x8x3_t vBgr = vld3_u8(src + sj);
445@@ -2417,7 +2417,7 @@ void bgrx2ycrcb(const Size2D &size,
446 for (; j < roiw8; sj += 32, dj += 24, j += 8)
447 {
448 internal::prefetch(src + sj);
449-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
450+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
451 CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
452 #else
453 uint8x8x4_t vBgra = vld4_u8(src + sj);
454@@ -2499,7 +2499,7 @@ void yuv420sp2rgb(const Size2D &size,
455 internal::prefetch(uv + j);
456 internal::prefetch(y1 + j);
457 internal::prefetch(y2 + j);
458-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
459+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
460 CONVERTYUV420TORGB(3, d1, d0, q5, q6)
461 #else
462 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
463@@ -2545,7 +2545,7 @@ void yuv420sp2rgbx(const Size2D &size,
464 internal::prefetch(uv + j);
465 internal::prefetch(y1 + j);
466 internal::prefetch(y2 + j);
467-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
468+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
469 CONVERTYUV420TORGB(4, d1, d0, q5, q6)
470 #else
471 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
472@@ -2591,7 +2591,7 @@ void yuv420i2rgb(const Size2D &size,
473 internal::prefetch(uv + j);
474 internal::prefetch(y1 + j);
475 internal::prefetch(y2 + j);
476-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
477+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
478 CONVERTYUV420TORGB(3, d0, d1, q5, q6)
479 #else
480 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
481@@ -2637,7 +2637,7 @@ void yuv420i2rgbx(const Size2D &size,
482 internal::prefetch(uv + j);
483 internal::prefetch(y1 + j);
484 internal::prefetch(y2 + j);
485-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
486+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
487 CONVERTYUV420TORGB(4, d0, d1, q5, q6)
488 #else
489 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
490@@ -2683,7 +2683,7 @@ void yuv420sp2bgr(const Size2D &size,
491 internal::prefetch(uv + j);
492 internal::prefetch(y1 + j);
493 internal::prefetch(y2 + j);
494-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
495+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
496 CONVERTYUV420TORGB(3, d1, d0, q6, q5)
497 #else
498 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
499@@ -2729,7 +2729,7 @@ void yuv420sp2bgrx(const Size2D &size,
500 internal::prefetch(uv + j);
501 internal::prefetch(y1 + j);
502 internal::prefetch(y2 + j);
503-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
504+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
505 CONVERTYUV420TORGB(4, d1, d0, q6, q5)
506 #else
507 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
508@@ -2775,7 +2775,7 @@ void yuv420i2bgr(const Size2D &size,
509 internal::prefetch(uv + j);
510 internal::prefetch(y1 + j);
511 internal::prefetch(y2 + j);
512-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
513+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
514 CONVERTYUV420TORGB(3, d0, d1, q6, q5)
515 #else
516 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
517@@ -2821,7 +2821,7 @@ void yuv420i2bgrx(const Size2D &size,
518 internal::prefetch(uv + j);
519 internal::prefetch(y1 + j);
520 internal::prefetch(y2 + j);
521-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
522+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
523 CONVERTYUV420TORGB(4, d0, d1, q6, q5)
524 #else
525 convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
526diff --git a/3rdparty/carotene/src/convert.cpp b/3rdparty/carotene/src/convert.cpp
527index 403f16d86a..64b6db78ab 100644
528--- a/3rdparty/carotene/src/convert.cpp
529+++ b/3rdparty/carotene/src/convert.cpp
530@@ -101,7 +101,7 @@ CVT_FUNC(u8, s8, 16,
531 }
532 })
533
534-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
535+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
536 CVT_FUNC(u8, u16, 16,
537 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
538 {
539@@ -135,7 +135,7 @@ CVT_FUNC(u8, u16, 16,
540 })
541 #endif
542
543-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
544+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
545 CVT_FUNC(u8, s32, 16,
546 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
547 register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);
548@@ -173,7 +173,7 @@ CVT_FUNC(u8, s32, 16,
549 })
550 #endif
551
552-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
553+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
554 CVT_FUNC(u8, f32, 16,
555 ,
556 {
557@@ -248,7 +248,7 @@ CVT_FUNC(s8, u8, 16,
558 }
559 })
560
561-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
562+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
563 CVT_FUNC(s8, u16, 16,
564 register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
565 {
566@@ -284,7 +284,7 @@ CVT_FUNC(s8, u16, 16,
567 })
568 #endif
569
570-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
571+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
572 CVT_FUNC(s8, s16, 16,
573 ,
574 {
575@@ -323,7 +323,7 @@ CVT_FUNC(s8, s16, 16,
576 })
577 #endif
578
579-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
580+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
581 CVT_FUNC(s8, s32, 16,
582 ,
583 {
584@@ -377,7 +377,7 @@ CVT_FUNC(s8, s32, 16,
585 })
586 #endif
587
588-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
589+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
590 CVT_FUNC(s8, f32, 16,
591 ,
592 {
593@@ -440,7 +440,7 @@ CVT_FUNC(s8, f32, 16,
594 })
595 #endif
596
597-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
598+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
599 CVT_FUNC(u16, u8, 16,
600 ,
601 {
602@@ -479,7 +479,7 @@ CVT_FUNC(u16, u8, 16,
603 })
604 #endif
605
606-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
607+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
608 CVT_FUNC(u16, s8, 16,
609 register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
610 {
611@@ -522,7 +522,7 @@ CVT_FUNC(u16, s8, 16,
612 })
613 #endif
614
615-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
616+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
617 CVT_FUNC(u16, s16, 8,
618 register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
619 {
620@@ -555,7 +555,7 @@ CVT_FUNC(u16, s16, 8,
621 })
622 #endif
623
624-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
625+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
626 CVT_FUNC(u16, s32, 8,
627 register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
628 {
629@@ -589,7 +589,7 @@ CVT_FUNC(u16, s32, 8,
630 })
631 #endif
632
633-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
634+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
635 CVT_FUNC(u16, f32, 8,
636 ,
637 {
638@@ -633,7 +633,7 @@ CVT_FUNC(u16, f32, 8,
639 })
640 #endif
641
642-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
643+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
644 CVT_FUNC(s16, u8, 16,
645 ,
646 {
647@@ -672,7 +672,7 @@ CVT_FUNC(s16, u8, 16,
648 })
649 #endif
650
651-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
652+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
653 CVT_FUNC(s16, s8, 16,
654 ,
655 {
656@@ -711,7 +711,7 @@ CVT_FUNC(s16, s8, 16,
657 })
658 #endif
659
660-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
661+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
662 CVT_FUNC(s16, u16, 8,
663 register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
664 {
665@@ -747,7 +747,7 @@ CVT_FUNC(s16, u16, 8,
666 })
667 #endif
668
669-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
670+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
671 CVT_FUNC(s16, s32, 8,
672 ,
673 {
674@@ -786,7 +786,7 @@ CVT_FUNC(s16, s32, 8,
675 })
676 #endif
677
678-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
679+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
680 CVT_FUNC(s16, f32, 8,
681 ,
682 {
683@@ -829,7 +829,7 @@ CVT_FUNC(s16, f32, 8,
684 })
685 #endif
686
687-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
688+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
689 CVT_FUNC(s32, u8, 8,
690 ,
691 {
692@@ -870,7 +870,7 @@ CVT_FUNC(s32, u8, 8,
693 })
694 #endif
695
696-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
697+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
698 CVT_FUNC(s32, s8, 8,
699 ,
700 {
701@@ -911,7 +911,7 @@ CVT_FUNC(s32, s8, 8,
702 })
703 #endif
704
705-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
706+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
707 CVT_FUNC(s32, u16, 8,
708 ,
709 {
710@@ -950,7 +950,7 @@ CVT_FUNC(s32, u16, 8,
711 })
712 #endif
713
714-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
715+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
716 CVT_FUNC(s32, s16, 8,
717 ,
718 {
719@@ -989,7 +989,7 @@ CVT_FUNC(s32, s16, 8,
720 })
721 #endif
722
723-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
724+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
725 CVT_FUNC(s32, f32, 8,
726 ,
727 {
728@@ -1034,7 +1034,7 @@ CVT_FUNC(s32, f32, 8,
729 })
730 #endif
731
732-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
733+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
734 CVT_FUNC(f32, u8, 8,
735 register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
736 register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,
737@@ -1101,7 +1101,7 @@ CVT_FUNC(f32, u8, 8,
738 })
739 #endif
740
741-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
742+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
743 CVT_FUNC(f32, s8, 8,
744 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
745 {
746@@ -1153,7 +1153,7 @@ CVT_FUNC(f32, s8, 8,
747 })
748 #endif
749
750-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
751+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
752 CVT_FUNC(f32, u16, 8,
753 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
754 {
755@@ -1212,7 +1212,7 @@ CVT_FUNC(f32, u16, 8,
756 })
757 #endif
758
759-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
760+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
761 CVT_FUNC(f32, s16, 8,
762 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
763 {
764@@ -1271,7 +1271,7 @@ CVT_FUNC(f32, s16, 8,
765 })
766 #endif
767
768-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
769+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
770 CVT_FUNC(f32, s32, 8,
771 register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
772 {
773diff --git a/3rdparty/carotene/src/convert_scale.cpp b/3rdparty/carotene/src/convert_scale.cpp
774index 0a14a8035c..ae41a985c8 100644
775--- a/3rdparty/carotene/src/convert_scale.cpp
776+++ b/3rdparty/carotene/src/convert_scale.cpp
777@@ -135,7 +135,7 @@ namespace CAROTENE_NS {
778
779 #endif
780
781-#if defined(__GNUC__) && defined(__arm__)
782+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
783 CVTS_FUNC1(u8, 16,
784 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
785 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
786@@ -220,7 +220,7 @@ CVTS_FUNC1(u8, 16,
787 })
788 #endif
789
790-#if defined(__GNUC__) && defined(__arm__)
791+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
792 CVTS_FUNC(u8, s8, 16,
793 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
794 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
795@@ -305,7 +305,7 @@ CVTS_FUNC(u8, s8, 16,
796 })
797 #endif
798
799-#if defined(__GNUC__) && defined(__arm__)
800+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
801 CVTS_FUNC(u8, u16, 16,
802 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
803 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
804@@ -389,7 +389,7 @@ CVTS_FUNC(u8, u16, 16,
805 })
806 #endif
807
808-#if defined(__GNUC__) && defined(__arm__)
809+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
810 CVTS_FUNC(u8, s16, 16,
811 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
812 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
813@@ -473,7 +473,7 @@ CVTS_FUNC(u8, s16, 16,
814 })
815 #endif
816
817-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
818+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
819 CVTS_FUNC(u8, s32, 16,
820 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
821 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
822@@ -562,7 +562,7 @@ CVTS_FUNC(u8, s32, 16,
823 })
824 #endif
825
826-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
827+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
828 CVTS_FUNC(u8, f32, 16,
829 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
830 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
831@@ -643,7 +643,7 @@ CVTS_FUNC(u8, f32, 16,
832 })
833 #endif
834
835-#if defined(__GNUC__) && defined(__arm__)
836+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
837 CVTS_FUNC(s8, u8, 16,
838 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
839 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
840@@ -728,7 +728,7 @@ CVTS_FUNC(s8, u8, 16,
841 })
842 #endif
843
844-#if defined(__GNUC__) && defined(__arm__)
845+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
846 CVTS_FUNC1(s8, 16,
847 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
848 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
849@@ -813,7 +813,7 @@ CVTS_FUNC1(s8, 16,
850 })
851 #endif
852
853-#if defined(__GNUC__) && defined(__arm__)
854+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
855 CVTS_FUNC(s8, u16, 16,
856 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
857 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
858@@ -899,7 +899,7 @@ CVTS_FUNC(s8, u16, 16,
859 })
860 #endif
861
862-#if defined(__GNUC__) && defined(__arm__)
863+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
864 CVTS_FUNC(s8, s16, 16,
865 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
866 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
867@@ -985,7 +985,7 @@ CVTS_FUNC(s8, s16, 16,
868 })
869 #endif
870
871-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
872+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
873 CVTS_FUNC(s8, s32, 16,
874 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
875 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
876@@ -1074,7 +1074,7 @@ CVTS_FUNC(s8, s32, 16,
877 })
878 #endif
879
880-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
881+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
882 CVTS_FUNC(s8, f32, 16,
883 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
884 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
885@@ -1155,7 +1155,7 @@ CVTS_FUNC(s8, f32, 16,
886 })
887 #endif
888
889-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
890+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
891 CVTS_FUNC(u16, u8, 16,
892 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
893 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
894@@ -1214,7 +1214,7 @@ CVTS_FUNC(u16, u8, 16,
895 })
896 #endif
897
898-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
899+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
900 CVTS_FUNC(u16, s8, 16,
901 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
902 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
903@@ -1273,7 +1273,7 @@ CVTS_FUNC(u16, s8, 16,
904 })
905 #endif
906
907-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
908+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
909 CVTS_FUNC1(u16, 16,
910 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
911 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
912@@ -1330,7 +1330,7 @@ CVTS_FUNC1(u16, 16,
913 })
914 #endif
915
916-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
917+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
918 CVTS_FUNC(u16, s16, 8,
919 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
920 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
921@@ -1387,7 +1387,7 @@ CVTS_FUNC(u16, s16, 8,
922 })
923 #endif
924
925-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
926+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
927 CVTS_FUNC(u16, s32, 8,
928 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
929 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
930@@ -1443,7 +1443,7 @@ CVTS_FUNC(u16, s32, 8,
931 })
932 #endif
933
934-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
935+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
936 CVTS_FUNC(u16, f32, 8,
937 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
938 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
939@@ -1495,7 +1495,7 @@ CVTS_FUNC(u16, f32, 8,
940 })
941 #endif
942
943-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
944+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
945 CVTS_FUNC(s16, u8, 16,
946 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
947 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
948@@ -1554,7 +1554,7 @@ CVTS_FUNC(s16, u8, 16,
949 })
950 #endif
951
952-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
953+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
954 CVTS_FUNC(s16, s8, 16,
955 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
956 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
957@@ -1613,7 +1613,7 @@ CVTS_FUNC(s16, s8, 16,
958 })
959 #endif
960
961-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
962+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
963 CVTS_FUNC(s16, u16, 8,
964 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
965 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
966@@ -1670,7 +1670,7 @@ CVTS_FUNC(s16, u16, 8,
967 })
968 #endif
969
970-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
971+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
972 CVTS_FUNC1(s16, 16,
973 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
974 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
975@@ -1727,7 +1727,7 @@ CVTS_FUNC1(s16, 16,
976 })
977 #endif
978
979-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
980+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
981 CVTS_FUNC(s16, s32, 8,
982 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
983 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
984@@ -1783,7 +1783,7 @@ CVTS_FUNC(s16, s32, 8,
985 })
986 #endif
987
988-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
989+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
990 CVTS_FUNC(s16, f32, 8,
991 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
992 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
993@@ -1835,7 +1835,7 @@ CVTS_FUNC(s16, f32, 8,
994 })
995 #endif
996
997-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
998+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
999 CVTS_FUNC(s32, u8, 8,
1000 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1001 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1002@@ -1893,7 +1893,7 @@ CVTS_FUNC(s32, u8, 8,
1003 })
1004 #endif
1005
1006-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1007+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1008 CVTS_FUNC(s32, s8, 8,
1009 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1010 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1011@@ -1951,7 +1951,7 @@ CVTS_FUNC(s32, s8, 8,
1012 })
1013 #endif
1014
1015-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1016+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1017 CVTS_FUNC(s32, u16, 8,
1018 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1019 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1020@@ -2007,7 +2007,7 @@ CVTS_FUNC(s32, u16, 8,
1021 })
1022 #endif
1023
1024-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1025+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1026 CVTS_FUNC(s32, s16, 8,
1027 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1028 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1029@@ -2063,7 +2063,7 @@ CVTS_FUNC(s32, s16, 8,
1030 })
1031 #endif
1032
1033-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1034+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1035 CVTS_FUNC1(s32, 8,
1036 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1037 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1038@@ -2118,7 +2118,7 @@ CVTS_FUNC1(s32, 8,
1039 })
1040 #endif
1041
1042-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1043+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1044 CVTS_FUNC(s32, f32, 8,
1045 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1046 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
1047@@ -2169,7 +2169,7 @@ CVTS_FUNC(s32, f32, 8,
1048 })
1049 #endif
1050
1051-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1052+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1053 CVTS_FUNC(f32, u8, 8,
1054 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
1055 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));
1056@@ -2239,7 +2239,7 @@ CVTS_FUNC(f32, u8, 8,
1057 })
1058 #endif
1059
1060-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1061+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1062 CVTS_FUNC(f32, s8, 8,
1063 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1064 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1065@@ -2293,7 +2293,7 @@ CVTS_FUNC(f32, s8, 8,
1066 })
1067 #endif
1068
1069-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1070+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1071 CVTS_FUNC(f32, u16, 8,
1072 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1073 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1074@@ -2345,7 +2345,7 @@ CVTS_FUNC(f32, u16, 8,
1075 })
1076 #endif
1077
1078-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1079+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1080 CVTS_FUNC(f32, s16, 8,
1081 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1082 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1083@@ -2397,7 +2397,7 @@ CVTS_FUNC(f32, s16, 8,
1084 })
1085 #endif
1086
1087-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1088+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1089 CVTS_FUNC(f32, s32, 8,
1090 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1091 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
1092@@ -2448,7 +2448,7 @@ CVTS_FUNC(f32, s32, 8,
1093 })
1094 #endif
1095
1096-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1097+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1098 CVTS_FUNC1(f32, 8,
1099 register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
1100 register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
1101diff --git a/3rdparty/carotene/src/gaussian_blur.cpp b/3rdparty/carotene/src/gaussian_blur.cpp
1102index 1b5399436f..f7b5f18d79 100644
1103--- a/3rdparty/carotene/src/gaussian_blur.cpp
1104+++ b/3rdparty/carotene/src/gaussian_blur.cpp
1105@@ -327,7 +327,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1106 u16* lidx1 = lane + x - 1*2;
1107 u16* lidx3 = lane + x + 1*2;
1108 u16* lidx4 = lane + x + 2*2;
1109-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1110+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1111 __asm__ __volatile__ (
1112 "vld2.16 {d0, d2}, [%[in0]]! \n\t"
1113 "vld2.16 {d1, d3}, [%[in0]] \n\t"
1114@@ -398,7 +398,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1115 u16* lidx1 = lane + x - 1*3;
1116 u16* lidx3 = lane + x + 1*3;
1117 u16* lidx4 = lane + x + 2*3;
1118-#if defined(__GNUC__) && defined(__arm__)
1119+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1120 __asm__ __volatile__ (
1121 "vld3.16 {d0, d2, d4}, [%[in0]]! \n\t"
1122 "vld3.16 {d1, d3, d5}, [%[in0]] \n\t"
1123@@ -482,7 +482,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
1124 u16* lidx1 = lane + x - 1*4;
1125 u16* lidx3 = lane + x + 1*4;
1126 u16* lidx4 = lane + x + 2*4;
1127-#if defined(__GNUC__) && defined(__arm__)
1128+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1129 __asm__ __volatile__ (
1130 "vld4.16 {d0, d2, d4, d6}, [%[in0]]! \n\t"
1131 "vld4.16 {d1, d3, d5, d7}, [%[in0]] \n\t"
1132diff --git a/3rdparty/carotene/src/pyramid.cpp b/3rdparty/carotene/src/pyramid.cpp
1133index 8ef1268933..232ccf3efd 100644
1134--- a/3rdparty/carotene/src/pyramid.cpp
1135+++ b/3rdparty/carotene/src/pyramid.cpp
1136@@ -331,7 +331,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1137 for (; x < roiw8; x += 8)
1138 {
1139 internal::prefetch(lane + 2 * x);
1140-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1141+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1142 __asm__ (
1143 "vld2.16 {d0-d3}, [%[in0]] \n\t"
1144 "vld2.16 {d4-d7}, [%[in4]] \n\t"
1145@@ -538,7 +538,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1146 for (; x < roiw4; x += 4)
1147 {
1148 internal::prefetch(lane + 2 * x);
1149-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1150+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1151 __asm__ (
1152 "vld2.32 {d0-d3}, [%[in0]] \n\t"
1153 "vld2.32 {d4-d7}, [%[in4]] \n\t"
1154@@ -672,7 +672,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1155 std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
1156 f32* lane = internal::alignPtr(&_buf[2*cn], 32);
1157
1158-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1159+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1160 register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
1161 register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
1162
1163@@ -739,7 +739,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
1164 for (; x < roiw4; x += 4)
1165 {
1166 internal::prefetch(lane + 2 * x);
1167-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1168+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1169 __asm__ __volatile__ (
1170 "vld2.32 {d0-d3}, [%[in0]] \n\t"
1171 "vld2.32 {d8-d11}, [%[in4]] \n\t"
1172@@ -932,7 +932,7 @@ pyrUp8uHorizontalConvolution:
1173 for (; x < lim; x += 8)
1174 {
1175 internal::prefetch(lane + x);
1176-#if defined(__GNUC__) && defined(__arm__)
1177+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1178 __asm__ (
1179 "vld1.16 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1180 "vld1.16 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1181@@ -973,7 +973,7 @@ pyrUp8uHorizontalConvolution:
1182 for (; x < lim; x += 24)
1183 {
1184 internal::prefetch(lane + x);
1185-#if defined(__GNUC__) && defined(__arm__)
1186+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1187 __asm__ (
1188 "vmov.u16 q9, #6 \n\t"
1189 "vld3.16 {d0, d2, d4}, [%[in0]] /*v0*/ \n\t"
1190@@ -1064,7 +1064,7 @@ pyrUp8uHorizontalConvolution:
1191 for (; x < lim; x += 8)
1192 {
1193 internal::prefetch(lane + x);
1194-#if defined(__GNUC__) && defined(__arm__)
1195+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1196 __asm__ (
1197 "vld1.16 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1198 "vld1.16 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1199@@ -1210,7 +1210,7 @@ pyrUp16sHorizontalConvolution:
1200 for (; x < lim; x += 4)
1201 {
1202 internal::prefetch(lane + x);
1203-#if defined(__GNUC__) && defined(__arm__)
1204+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1205 __asm__ (
1206 "vld1.32 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1207 "vld1.32 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1208@@ -1251,7 +1251,7 @@ pyrUp16sHorizontalConvolution:
1209 for (; x < lim; x += 12)
1210 {
1211 internal::prefetch(lane + x + 3);
1212-#if defined(__GNUC__) && defined(__arm__)
1213+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1214 __asm__ (
1215 "vmov.s32 q9, #6 \n\t"
1216 "vld3.32 {d0, d2, d4}, [%[in0]] /*v0*/ \n\t"
1217@@ -1343,7 +1343,7 @@ pyrUp16sHorizontalConvolution:
1218 for (; x < lim; x += 4)
1219 {
1220 internal::prefetch(lane + x);
1221-#if defined(__GNUC__) && defined(__arm__)
1222+#if !defined(__aarch64__) && defined(__GNUC__) && defined(__arm__)
1223 __asm__ (
1224 "vld1.32 {d0-d1}, [%[in0]] /*q0 = v0*/ \n\t"
1225 "vld1.32 {d2-d3}, [%[in2]] /*q1 = v2*/ \n\t"
1226diff --git a/3rdparty/carotene/src/scharr.cpp b/3rdparty/carotene/src/scharr.cpp
1227index 5695804fe4..8d3b6328b1 100644
1228--- a/3rdparty/carotene/src/scharr.cpp
1229+++ b/3rdparty/carotene/src/scharr.cpp
1230@@ -109,7 +109,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
1231 internal::prefetch(srow0 + x);
1232 internal::prefetch(srow1 + x);
1233 internal::prefetch(srow2 + x);
1234-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1235+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
1236 __asm__ (
1237 "vld1.8 {d0}, [%[src0]] \n\t"
1238 "vld1.8 {d2}, [%[src2]] \n\t"
1239@@ -161,7 +161,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
1240 x = 0;
1241 for( ; x < roiw8; x += 8 )
1242 {
1243-#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
1244+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
1245 __asm__ (
1246 "vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
1247 "vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
--
2.14.1
