From 1d1fa95c8ff7697e46343385a79a8f7e5c514a87 Mon Sep 17 00:00:00 2001
From: Zheng Xu <zheng.xu@linaro.org>
Date: Fri, 2 Sep 2016 17:40:05 +0800
Subject: [PATCH] Bug 1143022 - Manually mmap on arm64 to ensure high 17 bits
 are clear. r=ehoogeveen

There might be a 48-bit VA on arm64, depending on the kernel configuration.
Manually mmap heap memory to align with the assumption made by the JS engine.

Change-Id: Ic5d2b2fe4b758b3c87cc0688348af7e71a991146

Upstream-Status: Backport

---
 js/src/gc/Memory.cpp | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 71 insertions(+), 2 deletions(-)

diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
index e5ad018..4149adf 100644
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -309,6 +309,75 @@ InitMemorySubsystem()
 #endif
 }
 
+static inline void *
+MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
+{
+#if defined(__ia64__)
+    /*
+     * The JS engine assumes that all allocated pointers have their high 17 bits clear,
+     * which ia64's mmap doesn't support directly. However, we can emulate it by passing
+     * mmap an "addr" parameter with those bits clear. The mmap will return that address,
+     * or the nearest available memory above that address, providing a near-guarantee
+     * that those bits are clear. If they are not, we return NULL below to indicate
+     * out-of-memory.
+     *
+     * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
+     * address space.
+     *
+     * See Bug 589735 for more information.
+     */
+    void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
+    if (region == MAP_FAILED)
+        return MAP_FAILED;
+    /*
+     * If the allocated memory doesn't have its upper 17 bits clear, consider it
+     * as out of memory.
+     */
+    if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
+        JS_ALWAYS_TRUE(0 == munmap(region, length));
+        return MAP_FAILED;
+    }
+    return region;
+#elif defined(__aarch64__)
+    /*
+     * There might be similar virtual address issue on arm64 which depends on
+     * hardware and kernel configurations. But the work around is slightly
+     * different due to the different mmap behavior.
+     *
+     * TODO: Merge with the above code block if this implementation works for
+     * ia64 and sparc64.
+     */
+    const uintptr_t start = UINT64_C(0x0000070000000000);
+    const uintptr_t end   = UINT64_C(0x0000800000000000);
+    const uintptr_t step  = ChunkSize;
+    /*
+     * Optimization options if there are too many retries in practice:
+     * 1. Examine /proc/self/maps to find an available address. This file is
+     *    not always available, however. In addition, even if we examine
+     *    /proc/self/maps, we may still need to retry several times due to
+     *    racing with other threads.
+     * 2. Use a global/static variable with lock to track the addresses we have
+     *    allocated or tried.
+     */
+    uintptr_t hint;
+    void* region = MAP_FAILED;
+    for (hint = start; region == MAP_FAILED && hint + length <= end; hint += step) {
+        region = mmap((void*)hint, length, prot, flags, fd, offset);
+        if (region != MAP_FAILED) {
+            if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
+                if (munmap(region, length)) {
+                    MOZ_ASSERT(errno == ENOMEM);
+                }
+                region = MAP_FAILED;
+            }
+        }
+    }
+    return region == MAP_FAILED ? NULL : region;
+#else
+    return mmap(NULL, length, prot, flags, fd, offset);
+#endif
+}
+
 void *
 MapAlignedPages(size_t size, size_t alignment)
 {
@@ -322,12 +391,12 @@ MapAlignedPages(size_t size, size_t alignment)
 
     /* Special case: If we want page alignment, no further work is needed. */
     if (alignment == PageSize) {
-        return mmap(NULL, size, prot, flags, -1, 0);
+        return MapMemory(size, prot, flags, -1, 0);
     }
 
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
-    void *region = mmap(NULL, reqSize, prot, flags, -1, 0);
+    void *region = MapMemory(reqSize, prot, flags, -1, 0);
     if (region == MAP_FAILED)
         return NULL;
 
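
A standalone illustration of the hint-scan technique, kept separate from the patch itself:
the sketch below walks mmap() hints upward from 0x0000070000000000 and keeps a mapping
only if its last byte still lies below 2^47 (0x0000800000000000), i.e. only if the high
17 bits tested by the mask 0xffff800000000000 are clear. It is a minimal sketch assuming
Linux mmap() semantics; CHUNK_STEP and map_below_47_bits() are made-up names standing in
for the engine's ChunkSize and MapMemory(), and are not part of the patch.

/* Minimal sketch (not from the patch): retry mmap() hints until the mapping's
 * high 17 bits are clear, mirroring the aarch64 branch added above. */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define CHUNK_STEP ((uintptr_t)1 << 20)         /* stand-in for ChunkSize */
#define HINT_START UINT64_C(0x0000070000000000)
#define HINT_END   UINT64_C(0x0000800000000000) /* 2^47 */
#define HIGH_BITS  UINT64_C(0xffff800000000000) /* bits 47..63 */

static void *map_below_47_bits(size_t length)
{
    void *region = MAP_FAILED;
    uintptr_t hint;
    for (hint = HINT_START; region == MAP_FAILED && hint + length <= HINT_END; hint += CHUNK_STEP) {
        region = mmap((void *)hint, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region != MAP_FAILED &&
            (((uintptr_t)region + (length - 1)) & HIGH_BITS)) {
            /* The kernel ignored the hint and returned a high address; retry. */
            munmap(region, length);
            region = MAP_FAILED;
        }
    }
    return region == MAP_FAILED ? NULL : region;
}

int main(void)
{
    void *p = map_below_47_bits((size_t)1 << 20);
    printf("mapped at %p\n", p);
    return p == NULL;
}

The retry loop, rather than the single fixed hint used in the ia64 branch, is needed
because Linux mmap() treats the address only as a hint and may return any free range
when the hinted one is occupied, so the caller must verify the result and step to the
next chunk-aligned hint.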