From cd66c0e584c6d692bc8347b5e72723d02b8a8ada Mon Sep 17 00:00:00 2001
2From: Andrew Senkevich <andrew.n.senkevich@gmail.com>
3Date: Fri, 23 Mar 2018 16:19:45 +0100
4Subject: [PATCH] Fix i386 memmove issue (bug 22644).
5
6 [BZ #22644]
7 * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
8 branch conditions.
9 * string/test-memmove.c (do_test2): New testcase.
10
11Upstream-Status: Backport
12CVE: CVE-2017-18269
13Signed-off-by: Zhixiong Chi <zhixiong.chi@windriver.com>
14---
15 ChangeLog | 8 +++
16 string/test-memmove.c | 58 ++++++++++++++++++++++
17 .../i386/i686/multiarch/memcpy-sse2-unaligned.S | 12 ++---
18 3 files changed, 72 insertions(+), 6 deletions(-)
19
20diff --git a/ChangeLog b/ChangeLog
21index 18ed09e..afdb766 100644
22--- a/ChangeLog
23+++ b/ChangeLog
24@@ -1,3 +1,11 @@
25+2018-03-23 Andrew Senkevich <andrew.senkevich@intel.com>
26+ Max Horn <max@quendi.de>
27+
28+ [BZ #22644]
29+ * sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
30+ branch conditions.
31+ * string/test-memmove.c (do_test2): New testcase.
32+
33 2018-02-22 Andrew Waterman <andrew@sifive.com>
34
35 [BZ # 22884]
36diff --git a/string/test-memmove.c b/string/test-memmove.c
37index edc7a4c..64e3651 100644
38--- a/string/test-memmove.c
39+++ b/string/test-memmove.c
40@@ -24,6 +24,7 @@
41 # define TEST_NAME "memmove"
42 #endif
43 #include "test-string.h"
44+#include <support/test-driver.h>
45
46 char *simple_memmove (char *, const char *, size_t);
47
48@@ -245,6 +246,60 @@ do_random_tests (void)
49 }
50 }
51
52+static void
53+do_test2 (void)
54+{
55+ size_t size = 0x20000000;
56+ uint32_t * large_buf;
57+
58+ large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
59+ MAP_PRIVATE | MAP_ANON, -1, 0);
60+
61+ if (large_buf == MAP_FAILED)
62+ error (EXIT_UNSUPPORTED, errno, "Large mmap failed");
63+
64+ if ((uintptr_t) large_buf > 0x80000000 - 128
65+ || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
66+ {
67+ error (0, 0, "Large mmap allocated improperly");
68+ ret = EXIT_UNSUPPORTED;
69+ munmap ((void *) large_buf, size);
70+ return;
71+ }
72+
73+ size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
74+ size_t arr_size = bytes_move / sizeof (uint32_t);
75+ size_t i;
76+
77+ FOR_EACH_IMPL (impl, 0)
78+ {
79+ for (i = 0; i < arr_size; i++)
80+ large_buf[i] = (uint32_t) i;
81+
82+ uint32_t * dst = &large_buf[33];
83+
84+#ifdef TEST_BCOPY
85+ CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
86+#else
87+ CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
88+#endif
89+
90+ for (i = 0; i < arr_size; i++)
91+ {
92+ if (dst[i] != (uint32_t) i)
93+ {
94+ error (0, 0,
95+ "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
96+ impl->name, dst, large_buf, i);
97+ ret = 1;
98+ break;
99+ }
100+ }
101+ }
102+
103+ munmap ((void *) large_buf, size);
104+}
105+
106 int
107 test_main (void)
108 {
109@@ -284,6 +339,9 @@ test_main (void)
110 }
111
112 do_random_tests ();
113+
114+ do_test2 ();
115+
116 return ret;
117 }
118
119diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
120index 9c3bbe7..9aa17de 100644
121--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
122+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
123@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
124 cmp %edx, %eax
125
126 # ifdef USE_AS_MEMMOVE
127- jg L(check_forward)
128+ ja L(check_forward)
129
130 L(mm_len_0_or_more_backward):
131 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
132@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
133 jbe L(mm_len_0_16_bytes_backward)
134
135 cmpl $32, %ecx
136- jg L(mm_len_32_or_more_backward)
137+ ja L(mm_len_32_or_more_backward)
138
139 /* Copy [0..32] and return. */
140 movdqu (%eax), %xmm0
141@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
142
143 L(mm_len_32_or_more_backward):
144 cmpl $64, %ecx
145- jg L(mm_len_64_or_more_backward)
146+ ja L(mm_len_64_or_more_backward)
147
148 /* Copy [0..64] and return. */
149 movdqu (%eax), %xmm0
150@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
151
152 L(mm_len_64_or_more_backward):
153 cmpl $128, %ecx
154- jg L(mm_len_128_or_more_backward)
155+ ja L(mm_len_128_or_more_backward)
156
157 /* Copy [0..128] and return. */
158 movdqu (%eax), %xmm0
159@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
160 add %ecx, %eax
161 cmp %edx, %eax
162 movl SRC(%esp), %eax
163- jle L(forward)
164+ jbe L(forward)
165 PUSH (%esi)
166 PUSH (%edi)
167 PUSH (%ebx)
168@@ -269,7 +269,7 @@ L(check_forward):
169 add %edx, %ecx
170 cmp %eax, %ecx
171 movl LEN(%esp), %ecx
172- jle L(forward)
173+ jbe L(forward)
174
175 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
176 separately. */
177--
1782.9.3