#
# SPDX-License-Identifier: GPL-2.0-only
#
# Implements system state sampling. Called by buildstats.bbclass.
# Because it is a real Python module, it can hold persistent state,
# like open log files and the time of the last sampling.

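#
# A rough usage sketch (illustrative only; the actual wiring lives in
# buildstats.bbclass and the event handlers that call into this module):
#
#   stats = SystemStats(d)            # created once in the cooker process
#   stats.sample(event, force=False)  # called on heartbeat and task events
#   stats.close()                     # closes the open log files
#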
import os
import time
import re
import bb.event

class SystemStats:
    def __init__(self, d):
        bn = d.getVar('BUILDNAME')
        bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
        bb.utils.mkdirhier(bsdir)

        self.proc_files = []
        for filename, handler in (
                ('diskstats', self._reduce_diskstats),
                ('meminfo', self._reduce_meminfo),
                ('stat', self._reduce_stat),
        ):
            # The corresponding /proc files might not exist on the host.
            # For example, /proc/diskstats is not available in virtualized
            # environments like Linux-VServer. Silently skip collecting
            # the data.
            if os.path.exists(os.path.join('/proc', filename)):
                # In practice, this class gets instantiated only once in
                # the bitbake cooker process. Therefore 'append' mode is
                # not strictly necessary, but using it makes the class
                # more robust should two processes ever write
                # concurrently.
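                # Log files land directly in bsdir; for example, the
                # /proc/meminfo samples end up in reduced_proc_meminfo.log,
                # since a reduction handler is registered for that file.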
                destfile = os.path.join(bsdir, '%sproc_%s.log' % ('reduced_' if handler else '', filename))
                self.proc_files.append((filename, open(destfile, 'ab'), handler))
        self.monitor_disk = open(os.path.join(bsdir, 'monitor_disk.log'), 'ab')
        # Last time that we sampled /proc data and recorded disk monitoring
        # data, respectively.
        self.last_proc = 0
        self.last_disk_monitor = 0
        # Minimum number of seconds between recording a sample. This
        # becomes relevant when we get called very often while many
        # short tasks get started. Sampling during quiet periods
        # depends on the heartbeat event, which fires less often.
        self.min_seconds = 1

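        # meminfo_regex picks out lines such as "MemTotal:  16303428 kB";
        # diskstats_regex matches whole-device names like "sda" or "mmcblk0"
        # (partitions such as "sda1" do not match because of the anchors).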
        self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
        self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
        self.diskstats_ltime = None
        self.diskstats_data = None
        self.stat_ltimes = None

    def close(self):
        self.monitor_disk.close()
        for _, output, _ in self.proc_files:
            output.close()

    def _reduce_meminfo(self, time, data):
        """
        Extracts 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'
        and writes their values into a single line, in that order.
        """
        values = {}
        for line in data.split(b'\n'):
            m = self.meminfo_regex.match(line)
            if m:
                values[m.group(1)] = m.group(2)
        if len(values) == 6:
            return (time,
                    b' '.join([values[x] for x in
                               (b'MemTotal', b'MemFree', b'Buffers', b'Cached', b'SwapTotal', b'SwapFree')]) + b'\n')

    def _diskstats_is_relevant_line(self, linetokens):
        if len(linetokens) != 14:
            return False
        disk = linetokens[2]
        return self.diskstats_regex.match(disk)

    def _reduce_diskstats(self, time, data):
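        # Each /proc/diskstats line has 14 whitespace-separated fields in the
        # classic layout; the loop below sums sectors read (field 6), sectors
        # written (field 10) and milliseconds spent doing I/O (field 13)
        # across all matching devices.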
        relevant_tokens = filter(self._diskstats_is_relevant_line, map(lambda x: x.split(), data.split(b'\n')))
        diskdata = [0] * 3
        reduced = None
        for tokens in relevant_tokens:
            # rsect
            diskdata[0] += int(tokens[5])
            # wsect
            diskdata[1] += int(tokens[9])
            # use
            diskdata[2] += int(tokens[12])
        if self.diskstats_ltime:
            # We need to compute information about the time interval
            # since the last sampling and record the result as the sample
            # for that point in the past.
            interval = time - self.diskstats_ltime
            if interval > 0:
                sums = [a - b for a, b in zip(diskdata, self.diskstats_data)]
                readTput = sums[0] / 2.0 * 100.0 / interval
                writeTput = sums[1] / 2.0 * 100.0 / interval
                util = float(sums[2]) / 10 / interval
                util = max(0.0, min(1.0, util))
                reduced = (self.diskstats_ltime, (readTput, writeTput, util))

        self.diskstats_ltime = time
        self.diskstats_data = diskdata
        return reduced

    def _reduce_nop(self, time, data):
        return (time, data)

    def _reduce_stat(self, time, data):
        if not data:
            return None
        # CPU times {user, nice, system, idle, io_wait, irq, softirq} from first line
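        # (i.e. the aggregate "cpu  ..." line at the top of /proc/stat).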
        tokens = data.split(b'\n', 1)[0].split()
        times = [int(token) for token in tokens[1:]]
        reduced = None
        if self.stat_ltimes:
            user = float((times[0] + times[1]) - (self.stat_ltimes[0] + self.stat_ltimes[1]))
            system = float((times[2] + times[5] + times[6]) - (self.stat_ltimes[2] + self.stat_ltimes[5] + self.stat_ltimes[6]))
            idle = float(times[3] - self.stat_ltimes[3])
            iowait = float(times[4] - self.stat_ltimes[4])

            aSum = max(user + system + idle + iowait, 1)
            reduced = (time, (user/aSum, system/aSum, iowait/aSum))

        self.stat_ltimes = times
        return reduced

    def sample(self, event, force):
        now = time.time()
        if (now - self.last_proc > self.min_seconds) or force:
            for filename, output, handler in self.proc_files:
                with open(os.path.join('/proc', filename), 'rb') as input:
                    data = input.read()
                if handler:
                    reduced = handler(now, data)
                else:
                    reduced = (now, data)
                if reduced:
                    if isinstance(reduced[1], bytes):
                        # Use as it is.
                        data = reduced[1]
                    else:
                        # Convert to a single line.
                        data = (' '.join([str(x) for x in reduced[1]]) + '\n').encode('ascii')
                    # Unbuffered raw write, less overhead and useful
                    # in case we end up with concurrent writes.
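                    # Each record is the sample timestamp on its own line,
                    # followed by the (possibly reduced) data and a blank
                    # separator line.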
                    os.write(output.fileno(),
                             ('%.0f\n' % reduced[0]).encode('ascii') +
                             data +
                             b'\n')
            self.last_proc = now

        if isinstance(event, bb.event.MonitorDiskEvent) and \
           ((now - self.last_disk_monitor > self.min_seconds) or force):
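            # One "<dev>: <bytes used>" line per entry in event.disk_usage,
            # preceded by the timestamp and followed by a blank separator line.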
            os.write(self.monitor_disk.fileno(),
                     ('%.0f\n' % now).encode('ascii') +
                     ''.join(['%s: %d\n' % (dev, sample.total_bytes - sample.free_bytes)
                              for dev, sample in event.disk_usage.items()]).encode('ascii') +
                     b'\n')
            self.last_disk_monitor = now