/**
 * Copyright © 2019 IBM Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "host_notifier.hpp"

#include <phosphor-logging/log.hpp>

namespace openpower::pels
{

const auto subscriptionName = "PELHostNotifier";
const size_t maxRetryAttempts = 15;

using namespace phosphor::logging;

HostNotifier::HostNotifier(Repository& repo, DataInterfaceBase& dataIface,
                           std::unique_ptr<HostInterface> hostIface) :
    _repo(repo),
    _dataIface(dataIface), _hostIface(std::move(hostIface)),
    _retryTimer(_hostIface->getEvent(),
                std::bind(std::mem_fn(&HostNotifier::retryTimerExpired), this))
{
    // Subscribe to be told about new PELs.
    _repo.subscribeToAdds(subscriptionName,
                          std::bind(std::mem_fn(&HostNotifier::newLogCallback),
                                    this, std::placeholders::_1));

    // Add any existing PELs to the queue to send them if necessary.
    _repo.for_each(std::bind(std::mem_fn(&HostNotifier::addPELToQueue), this,
                             std::placeholders::_1));

    // Subscribe to be told about host state changes.
    _dataIface.subscribeToHostStateChange(
        subscriptionName,
        std::bind(std::mem_fn(&HostNotifier::hostStateChange), this,
                  std::placeholders::_1));

    // Set the function to call when the async response is received.
    _hostIface->setResponseFunction(
        std::bind(std::mem_fn(&HostNotifier::commandResponse), this,
                  std::placeholders::_1));

    // Start sending logs if the host is running.
    if (!_pelQueue.empty() && _dataIface.isHostUp())
    {
        doNewLogNotify();
    }
}

HostNotifier::~HostNotifier()
{
    _repo.unsubscribeFromAdds(subscriptionName);
    _dataIface.unsubscribeFromHostStateChange(subscriptionName);
}

bool HostNotifier::addPELToQueue(const PEL& pel)
{
    if (enqueueRequired(pel.id()))
    {
        _pelQueue.push_back(pel.id());
    }

    // Return false so that Repo::for_each keeps going.
    return false;
}

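// A PEL only needs to be queued for the host if it hasn't already been
// acked or rejected by the host, isn't a hidden PEL that the HMC has
// already acked, and doesn't have the 'don't report to host' action
// flag set.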
bool HostNotifier::enqueueRequired(uint32_t id) const
{
    bool required = true;
    Repository::LogID i{Repository::LogID::Pel{id}};

    if (auto attributes = _repo.getPELAttributes(i); attributes)
    {
        auto a = attributes.value().get();

        if ((a.hostState == TransmissionState::acked) ||
            (a.hostState == TransmissionState::badPEL))
        {
            required = false;
        }
        else if (a.actionFlags.test(hiddenFlagBit) &&
                 (a.hmcState == TransmissionState::acked))
        {
            required = false;
        }
        else if (a.actionFlags.test(dontReportToHostFlagBit))
        {
            required = false;
        }
    }
    else
    {
        using namespace phosphor::logging;
        log<level::ERR>("Host Enqueue: Unable to find PEL ID in repository",
                        entry("PEL_ID=0x%X", id));
        required = false;
    }

    return required;
}

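// Re-checks, right before sending, that a queued PEL still needs to go to
// the host: it may have been acked or deleted while waiting in the queue,
// and hidden PELs are skipped once the HMC has acked them or the system
// is HMC managed.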
bool HostNotifier::notifyRequired(uint32_t id) const
{
    bool notify = true;
    Repository::LogID i{Repository::LogID::Pel{id}};

    if (auto attributes = _repo.getPELAttributes(i); attributes)
    {
        // If already acked by the host, don't send again.
        // (A safety check as it shouldn't get to this point.)
        auto a = attributes.value().get();
        if (a.hostState == TransmissionState::acked)
        {
            notify = false;
        }
        else if (a.actionFlags.test(hiddenFlagBit))
        {
            // If hidden and acked (or will be acked) by the HMC,
            // also don't send it. (HMC management can come and
            // go at any time.)
            if ((a.hmcState == TransmissionState::acked) ||
                _dataIface.isHMCManaged())
            {
                notify = false;
            }
        }
    }
    else
    {
        // Must have been deleted since it was put on the queue.
        notify = false;
    }

    return notify;
}

void HostNotifier::newLogCallback(const PEL& pel)
{
    if (!enqueueRequired(pel.id()))
    {
        return;
    }

    _pelQueue.push_back(pel.id());

    // TODO: Check if a send is needed now
}

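// Sends the next PEL in the queue to the host, if the host is up and a
// retry isn't already pending.  PELs that no longer need to be sent are
// skipped, the host interface's new-log command is issued with the PEL's
// ID and size, and the retry timer is armed if the send fails.  After
// maxRetryAttempts failures it gives up until the next new PEL arrives.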
void HostNotifier::doNewLogNotify()
{
    if (!_dataIface.isHostUp() || _retryTimer.isEnabled())
    {
        return;
    }

    if (_retryCount >= maxRetryAttempts)
    {
        // Give up until a new log comes in.
        if (_retryCount == maxRetryAttempts)
        {
            // If this were to really happen, the PLDM interface
            // would be down and isolating that shouldn't be left to
            // a logging daemon, so just trace. Also, this will start
            // trying again when the next new log comes in.
            log<level::ERR>(
                "PEL Host notifier hit max retry attempts. Giving up for now.",
                entry("PEL_ID=0x%X", _pelQueue.front()));
        }
        return;
    }

    bool doNotify = false;
    uint32_t id = 0;

    // Find the PEL to send.
    while (!doNotify && !_pelQueue.empty())
    {
        id = _pelQueue.front();
        _pelQueue.pop_front();

        if (notifyRequired(id))
        {
            doNotify = true;
        }
    }

    if (doNotify)
    {
        // Get the size using the repo attributes.
        Repository::LogID i{Repository::LogID::Pel{id}};
        if (auto attributes = _repo.getPELAttributes(i); attributes)
        {
            auto size = static_cast<size_t>(
                std::filesystem::file_size((*attributes).get().path));
            auto rc = _hostIface->sendNewLogCmd(id, size);

            if (rc == CmdStatus::success)
            {
                _inProgressPEL = id;
            }
            else
            {
                // It failed.  Retry.
                log<level::ERR>("PLDM send failed", entry("PEL_ID=0x%X", id));
                _pelQueue.push_front(id);
                _inProgressPEL = 0;
                _retryTimer.restartOnce(_hostIface->getSendRetryDelay());
            }
        }
        else
        {
            log<level::ERR>("PEL ID not in repository. Cannot notify host",
                            entry("PEL_ID=0x%X", id));
        }
    }
}

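// Called on host state changes.  When the host comes up, any queued PELs
// are sent; when it goes down, the in-progress command is stopped and any
// PELs that were sent but never acked are put back on the queue and marked
// 'new' so they'll be sent again on the next boot.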
void HostNotifier::hostStateChange(bool hostUp)
{
    _retryCount = 0;

    if (hostUp && !_pelQueue.empty())
    {
        doNewLogNotify();
    }
    else if (!hostUp)
    {
        stopCommand();

        // Reset the state on any PELs that were sent but not acked back
        // to new so they'll get sent again.
        for (auto id : _sentPELs)
        {
            _pelQueue.push_back(id);
            _repo.setPELHostTransState(id, TransmissionState::newPEL);
        }

        _sentPELs.clear();
    }
}

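// Handles the asynchronous response to the new-log command.  On success
// the PEL is marked as sent and the next queued PEL, if any, is started;
// on failure the PEL goes back on the front of the queue and the retry
// timer is armed with the receive retry delay.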
void HostNotifier::commandResponse(ResponseStatus status)
{
    auto id = _inProgressPEL;
    _inProgressPEL = 0;

    if (status == ResponseStatus::success)
    {
        _retryCount = 0;

        _sentPELs.push_back(id);

        _repo.setPELHostTransState(id, TransmissionState::sent);

        if (!_pelQueue.empty())
        {
            doNewLogNotify();
        }
    }
    else
    {
        log<level::ERR>("PLDM command response failure",
                        entry("PEL_ID=0x%X", id));
        // Retry
        _pelQueue.push_front(id);
        _retryTimer.restartOnce(_hostIface->getReceiveRetryDelay());
    }
}

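// Fires when the retry timer expires.  The retry count is only incremented
// here, so a send is retried at most maxRetryAttempts times, and only while
// the host is still up.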
void HostNotifier::retryTimerExpired()
{
    if (_dataIface.isHostUp())
    {
        log<level::INFO>("Attempting command retry",
                         entry("PEL_ID=0x%X", _pelQueue.front()));
        _retryCount++;
        doNewLogNotify();
    }
}

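// Cancels any notification currently in progress: the in-progress PEL, if
// any, is pushed back onto the front of the queue, the retry timer is
// disabled, and any outstanding host interface command is cancelled.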
void HostNotifier::stopCommand()
{
    _retryCount = 0;

    if (_inProgressPEL != 0)
    {
        _pelQueue.push_front(_inProgressPEL);
        _inProgressPEL = 0;
    }

    if (_retryTimer.isEnabled())
    {
        _retryTimer.setEnabled(false);
    }

    if (_hostIface->cmdInProgress())
    {
        _hostIface->cancelCmd();
    }
}

} // namespace openpower::pels