blob: ad9524a304195d0c8eac60c4fcfa855b99162898 [file] [log] [blame]
Brad Bishop316dfdd2018-06-25 12:45:53 -04001From ad208b8b7e45fb2b7c572b86c61c26412609e82d Mon Sep 17 00:00:00 2001
2From: George Dunlap <george.dunlap@citrix.com>
3Date: Fri, 10 Nov 2017 16:53:54 +0000
4Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
5 worked
6
7The PoD zero-check functions speculatively remove memory from the p2m,
8then check to see if it's completely zeroed, before putting it in the
9cache.
10
11Unfortunately, the p2m_set_entry() calls may fail if the underlying
12pagetable structure needs to change and the domain has exhausted its
13p2m memory pool: for instance, if we're removing a 2MiB region out of
14a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
15region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
16case); and the return value is not checked.
17
18The underlying mfn will then be added into the PoD cache, and at some
19point mapped into another location in the p2m. If the guest
20afterwards balloons out this memory, it will be freed to the hypervisor
21and potentially reused by another domain, in spite of the fact that
22the original domain still has writable mappings to it.
23
24There are several places where p2m_set_entry() shouldn't be able to
25fail, as it is guaranteed to write an entry of the same order that
26succeeded before. Add a backstop of crashing the domain just in case,
27and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
28builds.
29
30While we're here, use PAGE_ORDER_2M rather than a magic constant.
31
32This is part of XSA-247.
33
34Reported-by: George Dunlap <george.dunlap@citrix.com>
35Signed-off-by: George Dunlap <george.dunlap@citrix.com>
36Reviewed-by: Jan Beulich <jbeulich@suse.com>
37---
38v4:
39- Removed some trailing whitespace
40v3:
41- Reformat reset clause to be more compact
42- Make sure to set map[i] = NULL when unmapping in case we need to bail
43v2:
44- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
45---
46 xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
47 1 file changed, 61 insertions(+), 16 deletions(-)
48
49diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
50index 730a48f928..f2ed751892 100644
51--- a/xen/arch/x86/mm/p2m-pod.c
52+++ b/xen/arch/x86/mm/p2m-pod.c
53@@ -752,8 +752,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
54 }
55
56 /* Try to remove the page, restoring old mapping if it fails. */
57- p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
58- p2m_populate_on_demand, p2m->default_access);
59+ if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,
60+ p2m_populate_on_demand, p2m->default_access) )
61+ goto out;
62+
63 p2m_tlb_flush_sync(p2m);
64
65 /* Make none of the MFNs are used elsewhere... for example, mapped
66@@ -810,9 +812,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
67 ret = SUPERPAGE_PAGES;
68
69 out_reset:
70- if ( reset )
71- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
72-
73+ /*
74+ * This p2m_set_entry() call shouldn't be able to fail, since the same order
75+ * on the same gfn succeeded above. If that turns out to be false, crashing
76+ * the domain should be the safest way of making sure we don't leak memory.
77+ */
78+ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
79+ type0, p2m->default_access) )
80+ {
81+ ASSERT_UNREACHABLE();
82+ domain_crash(d);
83+ }
84+
85 out:
86 gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
87 return ret;
88@@ -869,19 +880,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
89 }
90
91 /* Try to remove the page, restoring old mapping if it fails. */
92- p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
93- p2m_populate_on_demand, p2m->default_access);
94+ if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,
95+ p2m_populate_on_demand, p2m->default_access) )
96+ goto skip;
97
98 /* See if the page was successfully unmapped. (Allow one refcount
99 * for being allocated to a domain.) */
100 if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
101 {
102+ /*
103+ * If the previous p2m_set_entry call succeeded, this one shouldn't
104+ * be able to fail. If it does, crashing the domain should be safe.
105+ */
106+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
107+ types[i], p2m->default_access) )
108+ {
109+ ASSERT_UNREACHABLE();
110+ domain_crash(d);
111+ goto out_unmap;
112+ }
113+
114+ skip:
115 unmap_domain_page(map[i]);
116 map[i] = NULL;
117
118- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
119- types[i], p2m->default_access);
120-
121 continue;
122 }
123 }
124@@ -900,12 +922,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
125
126 unmap_domain_page(map[i]);
127
128- /* See comment in p2m_pod_zero_check_superpage() re gnttab
129- * check timing. */
130- if ( j < PAGE_SIZE/sizeof(*map[i]) )
131+ map[i] = NULL;
132+
133+ /*
134+ * See comment in p2m_pod_zero_check_superpage() re gnttab
135+ * check timing.
136+ */
137+ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
138 {
139- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
140- types[i], p2m->default_access);
141+ /*
142+ * If the previous p2m_set_entry call succeeded, this one shouldn't
143+ * be able to fail. If it does, crashing the domain should be safe.
144+ */
145+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
146+ types[i], p2m->default_access) )
147+ {
148+ ASSERT_UNREACHABLE();
149+ domain_crash(d);
150+ goto out_unmap;
151+ }
152 }
153 else
154 {
155@@ -929,7 +964,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
156 p2m->pod.entry_count++;
157 }
158 }
159-
160+
161+ return;
162+
163+out_unmap:
164+ /*
165+ * Something went wrong, probably crashing the domain. Unmap
166+ * everything and return.
167+ */
168+ for ( i = 0; i < count; i++ )
169+ if ( map[i] )
170+ unmap_domain_page(map[i]);
171 }
172
173 #define POD_SWEEP_LIMIT 1024
174--
1752.15.0
176