blob: 6370a106254c84b6140da3fc12122af4fd188c4d [file] [log] [blame]
1From: Julien Grall <julien.grall@linaro.org>
2Subject: x86/pod: prevent infinite loop when shattering large pages
3
4When populating pages, the PoD may need to split large ones using
5p2m_set_entry and request the caller to retry (see ept_get_entry for
6instance).
7
8p2m_set_entry may fail to shatter if it is not possible to allocate
9memory for the new page table. However, the error is not propagated,
10resulting in the callers retrying the PoD operation infinitely.
11
12Prevent the infinite loop by returning false when it is not possible
13to shatter the large mapping.
14
15This is XSA-246.
16
17Signed-off-by: Julien Grall <julien.grall@linaro.org>
18Signed-off-by: Jan Beulich <jbeulich@suse.com>
19Reviewed-by: George Dunlap <george.dunlap@citrix.com>
20
21--- a/xen/arch/x86/mm/p2m-pod.c
22+++ b/xen/arch/x86/mm/p2m-pod.c
23@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
24 * NOTE: In a fine-grained p2m locking scenario this operation
25 * may need to promote its locking from gfn->1g superpage
26 */
27- p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
28- p2m_populate_on_demand, p2m->default_access);
29- return 0;
30+ return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
31+ p2m_populate_on_demand, p2m->default_access);
32 }
33
34 /* Only reclaim if we're in actual need of more cache. */
35@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
36
37 gfn_aligned = (gfn >> order) << order;
38
39- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
40- p2m->default_access);
41+ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
42+ p2m->default_access) )
43+ {
44+ p2m_pod_cache_add(p2m, p, order);
45+ goto out_fail;
46+ }
47
48 for( i = 0; i < (1UL << order); i++ )
49 {
50@@ -1150,13 +1153,18 @@ remap_and_retry:
51 BUG_ON(order != PAGE_ORDER_2M);
52 pod_unlock(p2m);
53
54- /* Remap this 2-meg region in singleton chunks */
55- /* NOTE: In a p2m fine-grained lock scenario this might
56- * need promoting the gfn lock from gfn->2M superpage */
57+ /*
58+ * Remap this 2-meg region in singleton chunks. See the comment on the
59+ * 1G page splitting path above for why a single call suffices.
60+ *
61+ * NOTE: In a p2m fine-grained lock scenario this might
62+ * need promoting the gfn lock from gfn->2M superpage.
63+ */
64 gfn_aligned = (gfn>>order)<<order;
65- for(i=0; i<(1<<order); i++)
66- p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
67- p2m_populate_on_demand, p2m->default_access);
68+ if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
69+ p2m_populate_on_demand, p2m->default_access) )
70+ return -1;
71+
72 if ( tb_init_done )
73 {
74 struct {