blob: 62376806c811eae7f93860a78b4c4a9eae95fdef [file] [log] [blame]
Andrew Geissler615f2f12022-07-15 14:00:58 -05001From 8d5da4d2a3d7d9173208f4e8dc7a709f0bfc9820 Mon Sep 17 00:00:00 2001
2From: Michael Jeanson <mjeanson@efficios.com>
3Date: Wed, 8 Jun 2022 12:56:36 -0400
4Subject: [PATCH 1/3] fix: mm/page_alloc: fix tracepoint
5 mm_page_alloc_zone_locked() (v5.19)
6
7See upstream commit :
8
9 commit 10e0f7530205799e7e971aba699a7cb3a47456de
10 Author: Wonhyuk Yang <vvghjk1234@gmail.com>
11 Date: Thu May 19 14:08:54 2022 -0700
12
13 mm/page_alloc: fix tracepoint mm_page_alloc_zone_locked()
14
15 Currently, trace point mm_page_alloc_zone_locked() doesn't show correct
16 information.
17
18 First, when alloc_flag has ALLOC_HARDER/ALLOC_CMA, page can be allocated
19 from MIGRATE_HIGHATOMIC/MIGRATE_CMA. Nevertheless, the tracepoint uses the
20 requested migration type, not MIGRATE_HIGHATOMIC or MIGRATE_CMA.
21
22 Second, after commit 44042b4498728 ("mm/page_alloc: allow high-order pages
23 to be stored on the per-cpu lists") percpu-list can store high order
24 pages. But the trace point determines whether it is a refill of the percpu-list by
25 comparing the requested order with 0.
26
27 To handle these problems, make mm_page_alloc_zone_locked() only be called
28 by __rmqueue_smallest with correct migration type. With a new argument
29 called percpu_refill, it can show roughly whether it is a refill of
30 percpu-list.
31
32Upstream-Status: Backport
33
34Change-Id: I2e4a57393757f12b9c5a4566c4d1102ee2474a09
35Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
36Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
37---
38 include/instrumentation/events/kmem.h | 45 +++++++++++++++++++++++++++
39 1 file changed, 45 insertions(+)
40
41diff --git a/include/instrumentation/events/kmem.h b/include/instrumentation/events/kmem.h
42index 29c0fb7f..8c19e962 100644
43--- a/include/instrumentation/events/kmem.h
44+++ b/include/instrumentation/events/kmem.h
45@@ -218,6 +218,50 @@ LTTNG_TRACEPOINT_EVENT_MAP(mm_page_alloc, kmem_mm_page_alloc,
46 )
47 )
48
49+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,19,0))
50+LTTNG_TRACEPOINT_EVENT_CLASS(kmem_mm_page,
51+
52+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
53+ int percpu_refill),
54+
55+ TP_ARGS(page, order, migratetype, percpu_refill),
56+
57+ TP_FIELDS(
58+ ctf_integer_hex(struct page *, page, page)
59+ ctf_integer(unsigned long, pfn,
60+ page ? page_to_pfn(page) : -1UL)
61+ ctf_integer(unsigned int, order, order)
62+ ctf_integer(int, migratetype, migratetype)
63+ ctf_integer(int, percpu_refill, percpu_refill)
64+ )
65+)
66+
67+LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_mm_page, mm_page_alloc_zone_locked,
68+
69+ kmem_mm_page_alloc_zone_locked,
70+
71+ TP_PROTO(struct page *page, unsigned int order, int migratetype,
72+ int percpu_refill),
73+
74+ TP_ARGS(page, order, migratetype, percpu_refill)
75+)
76+
77+LTTNG_TRACEPOINT_EVENT_MAP(mm_page_pcpu_drain,
78+
79+ kmem_mm_page_pcpu_drain,
80+
81+ TP_PROTO(struct page *page, unsigned int order, int migratetype),
82+
83+ TP_ARGS(page, order, migratetype),
84+
85+ TP_FIELDS(
86+ ctf_integer(unsigned long, pfn,
87+ page ? page_to_pfn(page) : -1UL)
88+ ctf_integer(unsigned int, order, order)
89+ ctf_integer(int, migratetype, migratetype)
90+ )
91+)
92+#else
93 LTTNG_TRACEPOINT_EVENT_CLASS(kmem_mm_page,
94
95 TP_PROTO(struct page *page, unsigned int order, int migratetype),
96@@ -250,6 +294,7 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(kmem_mm_page, mm_page_pcpu_drain,
97
98 TP_ARGS(page, order, migratetype)
99 )
100+#endif
101
102 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,2) \
103 || LTTNG_KERNEL_RANGE(3,14,36, 3,15,0) \
104--
1052.19.1
106