From 333f60165b6737588eb975a5e4393d847011a1cd Mon Sep 17 00:00:00 2001
From: Khem Raj <raj.khem@gmail.com>
Date: Tue, 19 Sep 2017 18:07:35 -0700
Subject: [PATCH 2/2] Do not enable asm with clang

clang pretends to be gcc 4.2.0, so the gcc version checks that guard
the inline asm paths also pass under clang. As a result clang uses the
inline asm for no reason instead of the NEON builtins it supports.
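
A minimal sketch of the guard's behaviour (illustration only, not part
of the submitted change; guard_sketch.c is a hypothetical standalone
file, not in carotene): clang defines __GNUC__ == 4 and
__GNUC_MINOR__ == 2 for gcc compatibility, so without the added
!defined(__clang__) term the "old gcc" test also passes under clang
and the inline asm branch is compiled:

    /* guard_sketch.c -- hypothetical standalone example */
    #include <stdio.h>

    int main(void)
    {
    #if !defined(__aarch64__) && defined(__GNUC__) && \
        __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
        /* Only a genuinely old 32-bit gcc ends up here now. */
        puts("inline asm path");
    #else
        /* clang (any version) now always takes this branch. */
        puts("NEON intrinsics path");
    #endif
        return 0;
    }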

Signed-off-by: Khem Raj <raj.khem@gmail.com>
---
Upstream-Status: Submitted
 3rdparty/carotene/src/channel_extract.cpp  |  4 +-
 3rdparty/carotene/src/channels_combine.cpp |  2 +-
 3rdparty/carotene/src/colorconvert.cpp     | 78 +++++++++++++++----------------
 3rdparty/carotene/src/convert.cpp          | 54 ++++++++++-----------
 3rdparty/carotene/src/convert_scale.cpp    | 56 ++++++++++-----------
 3rdparty/carotene/src/gaussian_blur.cpp    |  2 +-
 3rdparty/carotene/src/pyramid.cpp          |  8 +--
 3rdparty/carotene/src/scharr.cpp           |  4 +-
 8 files changed, 104 insertions(+), 104 deletions(-)

diff --git a/3rdparty/carotene/src/channel_extract.cpp b/3rdparty/carotene/src/channel_extract.cpp
index 8238a3ece8..ff4fb3770c 100644
--- a/3rdparty/carotene/src/channel_extract.cpp
+++ b/3rdparty/carotene/src/channel_extract.cpp
@@ -231,7 +231,7 @@ void extract4(const Size2D &size,
srcStride == dst2Stride && \
srcStride == dst3Stride &&

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)

#define SPLIT_ASM2(sgn, bits) __asm__ ( \
"vld2." #bits " {d0, d2}, [%[in0]] \n\t" \
@@ -351,7 +351,7 @@ void extract4(const Size2D &size,
} \
}

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)

#define ALPHA_QUAD(sgn, bits) { \
internal::prefetch(src + sj); \
diff --git a/3rdparty/carotene/src/channels_combine.cpp b/3rdparty/carotene/src/channels_combine.cpp
index fc98fb9181..5d9251d51c 100644
--- a/3rdparty/carotene/src/channels_combine.cpp
+++ b/3rdparty/carotene/src/channels_combine.cpp
@@ -77,7 +77,7 @@ namespace CAROTENE_NS {
dstStride == src2Stride && \
dstStride == src3Stride &&

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)

#define MERGE_ASM2(sgn, bits) __asm__ ( \
"vld1." #bits " {d0-d1}, [%[in0]] \n\t" \
diff --git a/3rdparty/carotene/src/colorconvert.cpp b/3rdparty/carotene/src/colorconvert.cpp
index 26ae54b15c..d3a40fe64e 100644
--- a/3rdparty/carotene/src/colorconvert.cpp
+++ b/3rdparty/carotene/src/colorconvert.cpp
@@ -97,7 +97,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
@@ -116,7 +116,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
for (; dj < roiw8; sj += 24, dj += 8)
{
internal::prefetch(src + sj);
@@ -198,7 +198,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
@@ -217,7 +217,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
for (; dj < roiw8; sj += 32, dj += 8)
{
internal::prefetch(src + sj);
@@ -300,7 +300,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
@@ -319,7 +319,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
for (; dj < roiw8; sj += 24, dj += 8)
{
internal::prefetch(src + sj);
@@ -402,7 +402,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);
@@ -421,7 +421,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
size_t sj = 0u, dj = 0u;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
for (; dj < roiw8; sj += 32, dj += 8)
{
internal::prefetch(src + sj);
@@ -512,7 +512,7 @@ void gray2rgb(const Size2D &size,
for (; sj < roiw16; sj += 16, dj += 48)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld1.8 {d0-d1}, [%[in0]] \n\t"
"vmov.8 q1, q0 \n\t"
@@ -538,7 +538,7 @@ void gray2rgb(const Size2D &size,

if (sj < roiw8)
{
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld1.8 {d0}, [%[in]] \n\t"
"vmov.8 d1, d0 \n\t"
@@ -584,7 +584,7 @@ void gray2rgbx(const Size2D &size,
size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
#else
uint8x16x4_t vRgba;
@@ -602,7 +602,7 @@ void gray2rgbx(const Size2D &size,
for (; sj < roiw16; sj += 16, dj += 64)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld1.8 {d0-d1}, [%[in0]] \n\t"
"vmov.8 q1, q0 \n\t"
@@ -628,7 +628,7 @@ void gray2rgbx(const Size2D &size,

if (sj < roiw8)
{
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld1.8 {d5}, [%[in]] \n\t"
"vmov.8 d6, d5 \n\t"
@@ -1409,7 +1409,7 @@ inline void convertToHSV(const s32 r, const s32 g, const s32 b,
"d24","d25","d26","d27","d28","d29","d30","d31" \
);

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)

#define YCRCB_CONSTS \
register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \
@@ -1555,7 +1555,7 @@ inline uint8x8x3_t convertToYCrCb( const int16x8_t& vR, const int16x8_t& vG, con
#define COEFF_G ( 8663)
#define COEFF_B (-17705)

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
#define YUV420ALPHA3_CONST
#define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
#define YUV420ALPHA3_CONVERT
@@ -1852,7 +1852,7 @@ void rgb2hsv(const Size2D &size,
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
@@ -1871,7 +1871,7 @@ void rgb2hsv(const Size2D &size,
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
@@ -1904,7 +1904,7 @@ void rgbx2hsv(const Size2D &size,
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
@@ -1923,7 +1923,7 @@ void rgbx2hsv(const Size2D &size,
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
#else
uint8x8x4_t vRgb = vld4_u8(src + sj);
@@ -1956,7 +1956,7 @@ void bgr2hsv(const Size2D &size,
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
@@ -1975,7 +1975,7 @@ void bgr2hsv(const Size2D &size,
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
@@ -2008,7 +2008,7 @@ void bgrx2hsv(const Size2D &size,
#ifdef CAROTENE_NEON
size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
const s32 hsv_shift = 12;
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register const f32 vsdiv_table = f32(255 << hsv_shift);
register f32 vhdiv_table = f32(hrange << hsv_shift);
register const s32 vhrange = hrange;
@@ -2027,7 +2027,7 @@ void bgrx2hsv(const Size2D &size,
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
#else
uint8x8x4_t vRgb = vld4_u8(src + sj);
@@ -2068,7 +2068,7 @@ void rgbx2bgr565(const Size2D &size,
for (; j < roiw16; sj += 64, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
"vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
@@ -2122,7 +2122,7 @@ void rgb2bgr565(const Size2D &size,
for (; j < roiw16; sj += 48, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
"vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
@@ -2176,7 +2176,7 @@ void rgbx2rgb565(const Size2D &size,
for (; j < roiw16; sj += 64, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
"vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"
@@ -2230,7 +2230,7 @@ void rgb2rgb565(const Size2D &size,
for (; j < roiw16; sj += 48, dj += 32, j += 16)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
"vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"
@@ -2285,7 +2285,7 @@ void rgb2ycrcb(const Size2D &size,
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
#else
uint8x8x3_t vRgb = vld3_u8(src + sj);
@@ -2329,7 +2329,7 @@ void rgbx2ycrcb(const Size2D &size,
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
#else
uint8x8x4_t vRgba = vld4_u8(src + sj);
@@ -2373,7 +2373,7 @@ void bgr2ycrcb(const Size2D &size,
for (; j < roiw8; sj += 24, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
#else
uint8x8x3_t vBgr = vld3_u8(src + sj);
@@ -2417,7 +2417,7 @@ void bgrx2ycrcb(const Size2D &size,
for (; j < roiw8; sj += 32, dj += 24, j += 8)
{
internal::prefetch(src + sj);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
#else
uint8x8x4_t vBgra = vld4_u8(src + sj);
@@ -2499,7 +2499,7 @@ void yuv420sp2rgb(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(3, d1, d0, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2545,7 +2545,7 @@ void yuv420sp2rgbx(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(4, d1, d0, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2591,7 +2591,7 @@ void yuv420i2rgb(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(3, d0, d1, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2637,7 +2637,7 @@ void yuv420i2rgbx(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(4, d0, d1, q5, q6)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2683,7 +2683,7 @@ void yuv420sp2bgr(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(3, d1, d0, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2729,7 +2729,7 @@ void yuv420sp2bgrx(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(4, d1, d0, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2775,7 +2775,7 @@ void yuv420i2bgr(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(3, d0, d1, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
@@ -2821,7 +2821,7 @@ void yuv420i2bgrx(const Size2D &size,
internal::prefetch(uv + j);
internal::prefetch(y1 + j);
internal::prefetch(y2 + j);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CONVERTYUV420TORGB(4, d0, d1, q6, q5)
#else
convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
diff --git a/3rdparty/carotene/src/convert.cpp b/3rdparty/carotene/src/convert.cpp
index 64b6db78ab..f0c2d153f2 100644
--- a/3rdparty/carotene/src/convert.cpp
+++ b/3rdparty/carotene/src/convert.cpp
@@ -101,7 +101,7 @@ CVT_FUNC(u8, s8, 16,
}
})

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(u8, u16, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
{
@@ -135,7 +135,7 @@ CVT_FUNC(u8, u16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(u8, s32, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);
@@ -173,7 +173,7 @@ CVT_FUNC(u8, s32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(u8, f32, 16,
,
{
@@ -248,7 +248,7 @@ CVT_FUNC(s8, u8, 16,
}
})

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(s8, u16, 16,
register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
{
@@ -284,7 +284,7 @@ CVT_FUNC(s8, u16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s8, s16, 16,
,
{
@@ -323,7 +323,7 @@ CVT_FUNC(s8, s16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(s8, s32, 16,
,
{
@@ -377,7 +377,7 @@ CVT_FUNC(s8, s32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s8, f32, 16,
,
{
@@ -440,7 +440,7 @@ CVT_FUNC(s8, f32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(u16, u8, 16,
,
{
@@ -479,7 +479,7 @@ CVT_FUNC(u16, u8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(u16, s8, 16,
register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
{
@@ -522,7 +522,7 @@ CVT_FUNC(u16, s8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(u16, s16, 8,
register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
{
@@ -555,7 +555,7 @@ CVT_FUNC(u16, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(u16, s32, 8,
register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
{
@@ -589,7 +589,7 @@ CVT_FUNC(u16, s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(u16, f32, 8,
,
{
@@ -633,7 +633,7 @@ CVT_FUNC(u16, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s16, u8, 16,
,
{
@@ -672,7 +672,7 @@ CVT_FUNC(s16, u8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s16, s8, 16,
,
{
@@ -711,7 +711,7 @@ CVT_FUNC(s16, s8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVT_FUNC(s16, u16, 8,
register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
{
@@ -747,7 +747,7 @@ CVT_FUNC(s16, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s16, s32, 8,
,
{
@@ -786,7 +786,7 @@ CVT_FUNC(s16, s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s16, f32, 8,
,
{
@@ -829,7 +829,7 @@ CVT_FUNC(s16, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s32, u8, 8,
,
{
@@ -870,7 +870,7 @@ CVT_FUNC(s32, u8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s32, s8, 8,
,
{
@@ -911,7 +911,7 @@ CVT_FUNC(s32, s8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s32, u16, 8,
,
{
@@ -950,7 +950,7 @@ CVT_FUNC(s32, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s32, s16, 8,
,
{
@@ -989,7 +989,7 @@ CVT_FUNC(s32, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(s32, f32, 8,
,
{
@@ -1034,7 +1034,7 @@ CVT_FUNC(s32, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(f32, u8, 8,
register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,
@@ -1101,7 +1101,7 @@ CVT_FUNC(f32, u8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(f32, s8, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
@@ -1153,7 +1153,7 @@ CVT_FUNC(f32, s8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(f32, u16, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
@@ -1212,7 +1212,7 @@ CVT_FUNC(f32, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(f32, s16, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
@@ -1271,7 +1271,7 @@ CVT_FUNC(f32, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
CVT_FUNC(f32, s32, 8,
register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
{
diff --git a/3rdparty/carotene/src/convert_scale.cpp b/3rdparty/carotene/src/convert_scale.cpp
index ae41a985c8..d599d24c1e 100644
--- a/3rdparty/carotene/src/convert_scale.cpp
+++ b/3rdparty/carotene/src/convert_scale.cpp
@@ -473,7 +473,7 @@ CVTS_FUNC(u8, s16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u8, s32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -562,7 +562,7 @@ CVTS_FUNC(u8, s32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u8, f32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
@@ -985,7 +985,7 @@ CVTS_FUNC(s8, s16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s8, s32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1074,7 +1074,7 @@ CVTS_FUNC(s8, s32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s8, f32, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
@@ -1155,7 +1155,7 @@ CVTS_FUNC(s8, f32, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u16, u8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1214,7 +1214,7 @@ CVTS_FUNC(u16, u8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u16, s8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1273,7 +1273,7 @@ CVTS_FUNC(u16, s8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC1(u16, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1330,7 +1330,7 @@ CVTS_FUNC1(u16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u16, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1387,7 +1387,7 @@ CVTS_FUNC(u16, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u16, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1443,7 +1443,7 @@ CVTS_FUNC(u16, s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(u16, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
@@ -1495,7 +1495,7 @@ CVTS_FUNC(u16, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s16, u8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1554,7 +1554,7 @@ CVTS_FUNC(s16, u8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s16, s8, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1613,7 +1613,7 @@ CVTS_FUNC(s16, s8, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s16, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1670,7 +1670,7 @@ CVTS_FUNC(s16, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC1(s16, 16,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1727,7 +1727,7 @@ CVTS_FUNC1(s16, 16,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s16, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1783,7 +1783,7 @@ CVTS_FUNC(s16, s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s16, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
@@ -1835,7 +1835,7 @@ CVTS_FUNC(s16, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s32, u8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1893,7 +1893,7 @@ CVTS_FUNC(s32, u8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s32, s8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -1951,7 +1951,7 @@ CVTS_FUNC(s32, s8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s32, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2007,7 +2007,7 @@ CVTS_FUNC(s32, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s32, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2063,7 +2063,7 @@ CVTS_FUNC(s32, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC1(s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2118,7 +2118,7 @@ CVTS_FUNC1(s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(s32, f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
@@ -2169,7 +2169,7 @@ CVTS_FUNC(s32, f32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(f32, u8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));
@@ -2239,7 +2239,7 @@ CVTS_FUNC(f32, u8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(f32, s8, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2293,7 +2293,7 @@ CVTS_FUNC(f32, s8, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(f32, u16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2345,7 +2345,7 @@ CVTS_FUNC(f32, u16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(f32, s16, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2397,7 +2397,7 @@ CVTS_FUNC(f32, s16, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC(f32, s32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,
@@ -2448,7 +2448,7 @@ CVTS_FUNC(f32, s32, 8,
})
#endif

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
CVTS_FUNC1(f32, 8,
register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
diff --git a/3rdparty/carotene/src/gaussian_blur.cpp b/3rdparty/carotene/src/gaussian_blur.cpp
index f7b5f18d79..e5aa8fc75b 100644
--- a/3rdparty/carotene/src/gaussian_blur.cpp
+++ b/3rdparty/carotene/src/gaussian_blur.cpp
@@ -327,7 +327,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
u16* lidx1 = lane + x - 1*2;
u16* lidx3 = lane + x + 1*2;
u16* lidx4 = lane + x + 2*2;
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ __volatile__ (
"vld2.16 {d0, d2}, [%[in0]]! \n\t"
"vld2.16 {d1, d3}, [%[in0]] \n\t"
diff --git a/3rdparty/carotene/src/pyramid.cpp b/3rdparty/carotene/src/pyramid.cpp
index 232ccf3efd..d4e32ea50f 100644
--- a/3rdparty/carotene/src/pyramid.cpp
+++ b/3rdparty/carotene/src/pyramid.cpp
@@ -331,7 +331,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
for (; x < roiw8; x += 8)
{
internal::prefetch(lane + 2 * x);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld2.16 {d0-d3}, [%[in0]] \n\t"
"vld2.16 {d4-d7}, [%[in4]] \n\t"
@@ -538,7 +538,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
for (; x < roiw4; x += 4)
{
internal::prefetch(lane + 2 * x);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld2.32 {d0-d3}, [%[in0]] \n\t"
"vld2.32 {d4-d7}, [%[in4]] \n\t"
@@ -672,7 +672,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
f32* lane = internal::alignPtr(&_buf[2*cn], 32);

-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4

@@ -739,7 +739,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
for (; x < roiw4; x += 4)
{
internal::prefetch(lane + 2 * x);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ __volatile__ (
"vld2.32 {d0-d3}, [%[in0]] \n\t"
"vld2.32 {d8-d11}, [%[in4]] \n\t"
diff --git a/3rdparty/carotene/src/scharr.cpp b/3rdparty/carotene/src/scharr.cpp
index 8d3b6328b1..36f6b2276e 100644
--- a/3rdparty/carotene/src/scharr.cpp
+++ b/3rdparty/carotene/src/scharr.cpp
@@ -109,7 +109,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
internal::prefetch(srow0 + x);
internal::prefetch(srow1 + x);
internal::prefetch(srow2 + x);
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 && !defined(__clang__)
__asm__ (
"vld1.8 {d0}, [%[src0]] \n\t"
"vld1.8 {d2}, [%[src2]] \n\t"
@@ -161,7 +161,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
x = 0;
for( ; x < roiw8; x += 8 )
{
-#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
+#if !defined(__aarch64__) && defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6 && !defined(__clang__)
__asm__ (
"vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
"vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
--
2.14.1
