From: Julien Grall <julien.grall@linaro.org>
Subject: x86/pod: prevent infinite loop when shattering large pages

When populating pages, the PoD code may need to split large ones using
p2m_set_entry and request the caller to retry (see ept_get_entry for
instance).

p2m_set_entry may fail to shatter if it is not possible to allocate
memory for the new page table. However, the error is not propagated,
so the callers keep retrying the PoD operation indefinitely.
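
To make the failure mode concrete, here is a minimal standalone sketch (not
Xen code: set_entry(), demand_populate_buggy()/demand_populate_fixed() and
the entry_is_pod flag merely stand in for p2m_set_entry(),
p2m_pod_demand_populate() and the PoD entry state). The buggy pattern
swallows the shatter failure and always tells the caller to retry, while the
fixed pattern propagates the error so the caller can give up:

    #include <stdbool.h>
    #include <stdio.h>

    static bool entry_is_pod = true;   /* the large PoD mapping */

    /* Stand-in for p2m_set_entry(): shattering fails when no memory is
     * available for the new page table. */
    static int set_entry(bool alloc_ok)
    {
        if ( !alloc_ok )
            return -1;
        entry_is_pod = false;
        return 0;
    }

    /* Buggy pattern: the error is dropped, the caller is told to retry. */
    static int demand_populate_buggy(bool alloc_ok)
    {
        set_entry(alloc_ok);
        return 0;
    }

    /* Fixed pattern: propagate the error instead. */
    static int demand_populate_fixed(bool alloc_ok)
    {
        return set_entry(alloc_ok);
    }

    int main(void)
    {
        unsigned int tries = 0;

        /* The buggy variant would spin forever; cap it for the demo. */
        while ( entry_is_pod && tries < 5 )
        {
            demand_populate_buggy(false);
            tries++;
        }
        printf("buggy: entry still PoD after %u retries\n", tries);

        /* The fixed variant reports the failure on the first attempt. */
        printf("fixed: rc = %d\n", demand_populate_fixed(false));
        return 0;
    }

With the allocation failing, the buggy variant leaves the entry as PoD no
matter how often it is retried, while the fixed variant returns -1 on the
first call.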

Prevent the infinite loop by returning an error when it is not possible to
shatter the large mapping.

This is XSA-246.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1071,9 +1071,8 @@ p2m_pod_demand_populate(struct p2m_domai
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
-                      p2m_populate_on_demand, p2m->default_access);
-        return 0;
+        return p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,
+                             p2m_populate_on_demand, p2m->default_access);
     }
 
     /* Only reclaim if we're in actual need of more cache. */
@@ -1104,8 +1103,12 @@ p2m_pod_demand_populate(struct p2m_domai
 
     gfn_aligned = (gfn >> order) << order;
 
-    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-                  p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                       p2m->default_access) )
+    {
+        p2m_pod_cache_add(p2m, p, order);
+        goto out_fail;
+    }
 
     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1150,13 +1153,18 @@ remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
     pod_unlock(p2m);
 
-    /* Remap this 2-meg region in singleton chunks */
-    /* NOTE: In a p2m fine-grained lock scenario this might
-     * need promoting the gfn lock from gfn->2M superpage */
+    /*
+     * Remap this 2-meg region in singleton chunks. See the comment on the
+     * 1G page splitting path above for why a single call suffices.
+     *
+     * NOTE: In a p2m fine-grained lock scenario this might
+     * need promoting the gfn lock from gfn->2M superpage.
+     */
     gfn_aligned = (gfn>>order)<<order;
-    for(i=0; i<(1<<order); i++)
-        p2m_set_entry(p2m, gfn_aligned + i, INVALID_MFN, PAGE_ORDER_4K,
-                      p2m_populate_on_demand, p2m->default_access);
+    if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,
+                       p2m_populate_on_demand, p2m->default_access) )
+        return -1;
+
     if ( tb_init_done )
     {
         struct {