blob: 4efa83a9ed544abb753c996670a091094a270502 [file] [log] [blame]
Andrew Jeffery1e531af2018-08-07 13:32:57 +09301// SPDX-License-Identifier: Apache-2.0
2// Copyright (C) 2018 IBM Corp.
3#include "config.h"
4
5#include <errno.h>
6#include <stdint.h>
Evan Lojewskif1e547c2019-03-14 14:34:33 +10307#include <stdio.h>
Stewart Smithef0c8362018-11-19 13:49:46 +11008#include <unistd.h>
Andrew Jeffery1e531af2018-08-07 13:32:57 +09309
Evan Lojewskif1e547c2019-03-14 14:34:33 +103010#include "backend.h"
Andrew Jeffery5335f092018-08-09 14:56:08 +093011#include "common.h"
Andrew Jeffery1e531af2018-08-07 13:32:57 +093012#include "lpc.h"
Andrew Jefferycb935042019-03-15 09:54:33 +103013#include "mboxd.h"
14#include "protocol.h"
Andrew Jeffery1e531af2018-08-07 13:32:57 +093015#include "windows.h"
16
Patrick Williams68a24c92023-07-25 12:02:16 -050017#pragma GCC diagnostic push
18#pragma GCC diagnostic ignored "-Wpointer-arith"
19#pragma GCC diagnostic ignored "-Wunused-result"
Stewart Smithef0c8362018-11-19 13:49:46 +110020
Andrew Jeffery26558db2018-08-10 00:22:38 +093021#define BLOCK_SIZE_SHIFT_V1 12 /* 4K */
22
Andrew Jeffery0453aa42018-08-21 08:25:46 +093023static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
24{
25 if (context->version == API_VERSION_1) {
26 return BMC_EVENT_V1_MASK;
27 }
28
29 return BMC_EVENT_V2_MASK;
30}
31
Andrew Jeffery5335f092018-08-09 14:56:08 +093032/*
Andrew Jefferyfe0c9e82018-11-01 14:02:17 +103033 * protocol_events_put() - Push the full set/cleared state of BMC events on the
34 * provided transport
35 * @context: The mbox context pointer
36 * @ops: The operations struct for the transport of interest
37 *
38 * Return: 0 on success otherwise negative error code
39 */
40int protocol_events_put(struct mbox_context *context,
41 const struct transport_ops *ops)
42{
43 const uint8_t mask = protocol_get_bmc_event_mask(context);
44
45 return ops->put_events(context, mask);
46}
47
48/*
49 * protocol_events_set() - Update the set BMC events on the active transport
Andrew Jeffery5335f092018-08-09 14:56:08 +093050 * @context: The mbox context pointer
51 * @bmc_event: The bits to set
Andrew Jeffery5335f092018-08-09 14:56:08 +093052 *
53 * Return: 0 on success otherwise negative error code
54 */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +093055int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
Andrew Jeffery5335f092018-08-09 14:56:08 +093056{
Andrew Jeffery0453aa42018-08-21 08:25:46 +093057 const uint8_t mask = protocol_get_bmc_event_mask(context);
Andrew Jeffery5335f092018-08-09 14:56:08 +093058
Andrew Jeffery0453aa42018-08-21 08:25:46 +093059 /*
60 * Store the raw value, as we may up- or down- grade the protocol
61 * version and subsequently need to flush the appropriate set. Instead
62 * we pass the masked value through to the transport
63 */
64 context->bmc_events |= bmc_event;
Andrew Jeffery5335f092018-08-09 14:56:08 +093065
Andrew Jefferyf62601b2018-11-01 13:44:25 +103066 return context->transport->set_events(context, bmc_event, mask);
Andrew Jeffery5335f092018-08-09 14:56:08 +093067}
68
69/*
Andrew Jefferyfe0c9e82018-11-01 14:02:17 +103070 * protocol_events_clear() - Update the cleared BMC events on the active
71 * transport
Andrew Jeffery5335f092018-08-09 14:56:08 +093072 * @context: The mbox context pointer
73 * @bmc_event: The bits to clear
Andrew Jeffery5335f092018-08-09 14:56:08 +093074 *
75 * Return: 0 on success otherwise negative error code
76 */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +093077int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
Andrew Jeffery5335f092018-08-09 14:56:08 +093078{
Andrew Jeffery0453aa42018-08-21 08:25:46 +093079 const uint8_t mask = protocol_get_bmc_event_mask(context);
80
81 context->bmc_events &= ~bmc_event;
82
Andrew Jefferyf62601b2018-11-01 13:44:25 +103083 return context->transport->clear_events(context, bmc_event, mask);
Andrew Jeffery5335f092018-08-09 14:56:08 +093084}
85
Evan Lojewskif1e547c2019-03-14 14:34:33 +103086static int protocol_negotiate_version(struct mbox_context *context,
87 uint8_t requested);
88
Andrew Jefferycb935042019-03-15 09:54:33 +103089static int protocol_v1_reset(struct mbox_context *context)
Andrew Jefferyab666a52018-08-07 14:28:09 +093090{
Evan Lojewskif1e547c2019-03-14 14:34:33 +103091 return __protocol_reset(context);
Andrew Jefferyab666a52018-08-07 14:28:09 +093092}
93
Andrew Jefferycb935042019-03-15 09:54:33 +103094static int protocol_negotiate_version(struct mbox_context *context,
95 uint8_t requested);
96
/*
 * protocol_v1_get_info() - v1 handler for the GET_MBOX_INFO command
 * @context:	The mbox context pointer
 * @io:		GET_MBOX_INFO request/response parameters
 *
 * Negotiates the protocol version first; if the negotiated version differs
 * from the current one, the handler table is swapped by
 * protocol_negotiate_version() and the call is re-dispatched through the
 * new table (context->protocol->get_info). Otherwise the v1-specific state
 * is initialised: fixed 4K block size, dirty bytemap, window sizes.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary*/
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* Re-dispatch through the freshly-installed handler table */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required intialisation for v1 */
	context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	/* v1 reports window sizes in block units */
	io->resp.v1.read_window_size =
		context->windows.default_size >> context->backend.block_size_shift;
	io->resp.v1.write_window_size =
		context->windows.default_size >> context->backend.block_size_shift;

	return lpc_map_memory(context);
}
133
Andrew Jefferycb935042019-03-15 09:54:33 +1030134static int protocol_v1_get_flash_info(struct mbox_context *context,
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030135 struct protocol_get_flash_info *io)
Andrew Jeffery91a87452018-08-07 14:54:14 +0930136{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030137 io->resp.v1.flash_size = context->backend.flash_size;
138 io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;
Andrew Jeffery91a87452018-08-07 14:54:14 +0930139
140 return 0;
141}
142
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930143/*
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930144 * get_lpc_addr_shifted() - Get lpc address of the current window
145 * @context: The mbox context pointer
146 *
147 * Return: The lpc address to access that offset shifted by block size
148 */
149static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
150{
151 uint32_t lpc_addr, mem_offset;
152
153 /* Offset of the current window in the reserved memory region */
154 mem_offset = context->current->mem - context->mem;
155 /* Total LPC Address */
156 lpc_addr = context->lpc_base + mem_offset;
157
158 MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);
159
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030160 return lpc_addr >> context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930161}
162
Stewart Smithef0c8362018-11-19 13:49:46 +1100163static inline int64_t blktrace_gettime(void)
164{
165 struct timespec ts;
166 int64_t n;
167
168 clock_gettime(CLOCK_REALTIME, &ts);
169 n = (int64_t)(ts.tv_sec) * (int64_t)1000000000 + (int64_t)(ts.tv_nsec);
170
171 return n;
172}
173
/*
 * blktrace_flush_start() - Emit blktrace QUEUE + ISSUE records for a flush
 *			    of the current window
 * @context:	The mbox context pointer
 *
 * No-op unless a blktrace output fd is configured. write() results are
 * deliberately ignored — tracing is best-effort (see the -Wunused-result
 * pragma at the top of this file).
 */
static void blktrace_flush_start(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;
	/* NOTE(review): 'now' is written but never read — candidate for removal */
	struct timespec now;

	if (!context->blktracefd)
		return;

	/* Lazily anchor the trace timebase on first use */
	if (!context->blktrace_start) {
		clock_gettime(CLOCK_REALTIME, &now);
		context->blktrace_start = blktrace_gettime();
	}

	trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	/* blktrace sector unit is 512 bytes */
	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	if (context->current_is_write)
		trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_WRITE);
	else
		trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
	trace->pid = 0;
	trace->device = 0;
	trace->cpu = 0;
	trace->error = 0;
	trace->pdu_len = 0;
	write(context->blktracefd, trace, sizeof(*trace));
	/* Follow the QUEUE record immediately with the matching ISSUE */
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));
}
208
/*
 * blktrace_flush_done() - Emit the blktrace COMPLETE record matching a
 *			   preceding blktrace_flush_start()
 * @context:	The mbox context pointer
 *
 * Rewrites the action of the record left in context->trace from ISSUE to
 * COMPLETE and appends it to the trace fd. No-op without a trace fd.
 */
static void blktrace_flush_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}
222
/*
 * blktrace_window_start() - Prepare (but do not emit) a blktrace QUEUE
 *			     record for a window open
 * @context:	The mbox context pointer
 *
 * Fills in the timestamp/action fields of context->trace; sector and byte
 * count are not yet known here, so the record is only written out later by
 * blktrace_window_done() once the window is resolved. No-op without a
 * trace fd.
 */
static void blktrace_window_start(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	/* Lazily anchor the trace timebase on first use */
	if (!context->blktrace_start)
		context->blktrace_start = blktrace_gettime();

	trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
	trace->pid = 0;
	trace->device = 0;
	trace->cpu = 0;
	trace->error = 0;
	trace->pdu_len = 0;
}
243
/*
 * blktrace_window_done() - Emit the QUEUE/ISSUE/COMPLETE records for a
 *			    window open started by blktrace_window_start()
 * @context:	The mbox context pointer
 *
 * Completes the record prepared by blktrace_window_start() with the now
 * known window sector/size, then emits QUEUE (with the start timestamp),
 * ISSUE and COMPLETE. No-op without a trace fd; write() results ignored
 * (best-effort tracing).
 */
static void blktrace_window_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	/* blktrace sector unit is 512 bytes */
	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	write(context->blktracefd, trace, sizeof(*trace));
	trace->sequence++;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}
265
/*
 * protocol_v1_create_window() - Open a read or write window over flash
 * @context:	The mbox context pointer
 * @io:		CREATE_*_WINDOW request/response parameters (offset/size in
 *		block units; ro selects a read window)
 *
 * Validates the request with the backend, implicitly flushes and closes any
 * current write window, then reuses an existing cached window or maps a new
 * one. Also used by protocol_v2_create_window().
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	struct backend *backend = &context->backend;
	uint32_t offset;
	uint32_t size;
	int rc;

	/* Convert block-granule request to byte granule for the backend */
	offset = io->req.offset << backend->block_size_shift;
	size = io->req.size << backend->block_size_shift;
	rc = backend_validate(backend, offset, size, io->req.ro);
	if (rc < 0) {
		/* Backend does not allow window to be created. */
		return rc;
	}

	/* Close the current window if there is one */
	if (context->current) {
		/* There is an implicit flush if it was a write window
		 *
		 * protocol_v2_create_window() calls
		 * protocol_v1_create_window(), so use indirect call to
		 * write_flush() to make sure we pick the right one.
		 */
		if (context->current_is_write) {
			blktrace_flush_start(context);
			rc = context->protocol->flush(context, NULL);
			blktrace_flush_done(context);
			if (rc < 0) {
				MSG_ERR("Couldn't Flush Write Window\n");
				return rc;
			}
		}
		windows_close_current(context, FLAGS_NONE);
	}

	/* Offset the host has requested */
	MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
	/* Check if we have an existing window */
	blktrace_window_start(context);
	/* presumably the final argument requests an exact-offset match for
	 * v1 semantics — verify against windows_search() */
	context->current = windows_search(context, offset,
					  context->version == API_VERSION_1);

	if (!context->current) { /* No existing window */
		MSG_DBG("No existing window which maps that flash offset\n");
		rc = windows_create_map(context, &context->current,
				       offset,
				       context->version == API_VERSION_1);
		if (rc < 0) { /* Unable to map offset */
			MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
				offset);
			return rc;
		}
	}
	blktrace_window_done(context);

	context->current_is_write = !io->req.ro;

	MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	io->resp.lpc_address = get_lpc_addr_shifted(context);

	return 0;
}
332
/*
 * protocol_v1_mark_dirty() - Mark a region of the write window dirty
 * @context:	The mbox context pointer
 * @io:		MARK_WRITE_DIRTY parameters; for v1 the offset is relative to
 *		the start of flash (in blocks) and the size is in bytes
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	uint32_t offset = io->req.v1.offset;
	uint32_t size = io->req.v1.size;
	uint32_t off;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	/* For V1 offset given relative to flash - we want the window */
	off = offset - ((context->current->flash_offset) >>
			context->backend.block_size_shift);
	/* Unsigned subtraction wrapped around -> request precedes window */
	if (off > offset) { /* Underflow - before current window */
		MSG_ERR("Tried to mark dirty before start of window\n");
		MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
				offset << context->backend.block_size_shift,
				context->current->flash_offset);
		return -EINVAL;
	}
	offset = off;
	/*
	 * We only track dirty at the block level.
	 * For protocol V1 we can get away with just marking the whole
	 * block dirty.
	 */
	size = align_up(size, 1 << context->backend.block_size_shift);
	size >>= context->backend.block_size_shift;

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 offset << context->backend.block_size_shift,
		 size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, offset, size,
				  WINDOW_DIRTY);
}
371
/*
 * generic_flush() - Write back all dirty/erased blocks of the current window
 * @context:	The mbox context pointer
 *
 * Walks the window's per-block dirty bytemap, coalescing consecutive blocks
 * in the same state (dirty or erased) into runs and issuing one
 * window_flush() per run. On success the whole bytemap is reset to
 * WINDOW_CLEAN.
 *
 * Return: 0 on success otherwise negative error code
 */
static int generic_flush(struct mbox_context *context)
{
	int rc, offset, count;
	uint8_t prev;
	size_t i;

	offset = 0;
	count = 0;
	prev = WINDOW_CLEAN;

	MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	/*
	 * We look for streaks of the same type and keep a count, when the type
	 * (dirty/erased) changes we perform the required action on the backing
	 * store and update the current streak-type
	 */
	for (i = 0; i < (context->current->size >> context->backend.block_size_shift);
			i++) {
		uint8_t cur = context->current->dirty_bmap[i];
		if (cur != WINDOW_CLEAN) {
			if (cur == prev) { /* Same as previous block, incrmnt */
				count++;
			} else if (prev == WINDOW_CLEAN) { /* Start of run */
				offset = i;
				count++;
			} else { /* Change in streak type */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				offset = i;
				count = 1;
			}
		} else {
			if (prev != WINDOW_CLEAN) { /* End of a streak */
				rc = window_flush(context, offset, count,
						       prev);
				if (rc < 0) {
					return rc;
				}
				offset = 0;
				count = 0;
			}
		}
		prev = cur;
	}

	if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
		rc = window_flush(context, offset, count, prev);
		if (rc < 0) {
			return rc;
		}
	}

	/* Clear the dirty bytemap since we have written back all changes */
	return window_set_bytemap(context, context->current, 0,
				  context->current->size >>
				  context->backend.block_size_shift,
				  WINDOW_CLEAN);
}
436
/*
 * protocol_v1_flush() - v1 handler for the WRITE_FLUSH command
 * @context:	The mbox context pointer
 * @io:		The host's flush arguments, or NULL when called internally
 *		for an implicit flush (e.g. on window close)
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v1_flush(struct mbox_context *context,
			     struct protocol_flush *io)
{
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call flush without open write window\n");
		return -EPERM;
	}

	/*
	 * For V1 the Flush command acts much the same as the dirty command
	 * except with a flush as well. Only do this on an actual flush
	 * command not when we call flush because we've implicitly closed a
	 * window because we might not have the required args in req.
	 */
	if (io) {
		/* v1 flush request carries the same payload as mark-dirty */
		struct protocol_mark_dirty *mdio = (void *)io;
		rc = protocol_v1_mark_dirty(context, mdio);
		if (rc < 0) {
			return rc;
		}
	}

	return generic_flush(context);
}
463
Andrew Jefferycb935042019-03-15 09:54:33 +1030464static int protocol_v1_close(struct mbox_context *context,
465 struct protocol_close *io)
Andrew Jeffery093eda52018-08-07 23:10:43 +0930466{
467 int rc;
468
469 /* Close the current window if there is one */
470 if (!context->current) {
471 return 0;
472 }
473
474 /* There is an implicit flush if it was a write window */
475 if (context->current_is_write) {
476 rc = protocol_v1_flush(context, NULL);
477 if (rc < 0) {
478 MSG_ERR("Couldn't Flush Write Window\n");
479 return rc;
480 }
481 }
482
483 /* Host asked for it -> Don't set the BMC Event */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930484 windows_close_current(context, io->req.flags);
Andrew Jeffery093eda52018-08-07 23:10:43 +0930485
486 return 0;
487}
488
Andrew Jefferycb935042019-03-15 09:54:33 +1030489static int protocol_v1_ack(struct mbox_context *context,
490 struct protocol_ack *io)
Andrew Jefferyc5c83042018-08-07 23:22:05 +0930491{
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930492 return protocol_events_clear(context,
493 (io->req.flags & BMC_EVENT_ACK_MASK));
Andrew Jefferyc5c83042018-08-07 23:22:05 +0930494}
495
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930496/*
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930497 * get_suggested_timeout() - get the suggested timeout value in seconds
498 * @context: The mbox context pointer
499 *
500 * Return: Suggested timeout in seconds
501 */
502static uint16_t get_suggested_timeout(struct mbox_context *context)
503{
504 struct window_context *window = windows_find_largest(context);
505 uint32_t max_size_mb = window ? (window->size >> 20) : 0;
506 uint16_t ret;
507
508 ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;
509
510 MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
511 ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
512 return ret;
513}
514
/*
 * protocol_v2_get_info() - v2 handler for the GET_MBOX_INFO command
 * @context:	The mbox context pointer
 * @io:		GET_MBOX_INFO request/response parameters
 *
 * Negotiates the protocol version first; on a version change the handler
 * table is swapped and the call re-dispatched through the new table.
 * Otherwise v2 state is initialised: the backend's native block size is
 * reported (v2 does not force 4K), the dirty bytemap allocated and a
 * suggested timeout returned.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v2_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary*/
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		/* Re-dispatch through the freshly-installed handler table */
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required intialisation for v2 */

	/* Knowing blocksize we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	io->resp.v2.block_size_shift = context->backend.block_size_shift;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift, context->backend.block_size_shift);

	io->resp.v2.timeout = get_suggested_timeout(context);

	return lpc_map_memory(context);
}
549
Andrew Jefferycb935042019-03-15 09:54:33 +1030550static int protocol_v2_get_flash_info(struct mbox_context *context,
551 struct protocol_get_flash_info *io)
Andrew Jeffery91a87452018-08-07 14:54:14 +0930552{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030553 struct backend *backend = &context->backend;
554
Andrew Jeffery91a87452018-08-07 14:54:14 +0930555 io->resp.v2.flash_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030556 backend->flash_size >> backend->block_size_shift;
Andrew Jeffery91a87452018-08-07 14:54:14 +0930557 io->resp.v2.erase_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030558 ((1 << backend->erase_size_shift) >> backend->block_size_shift);
Andrew Jeffery91a87452018-08-07 14:54:14 +0930559
560 return 0;
561}
562
Andrew Jefferycb935042019-03-15 09:54:33 +1030563static int protocol_v2_create_window(struct mbox_context *context,
564 struct protocol_create_window *io)
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930565{
566 int rc;
567
Andrew Jeffery4bcec8e2018-08-07 15:33:41 +0930568 rc = protocol_v1_create_window(context, io);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930569 if (rc < 0)
570 return rc;
571
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030572 io->resp.size = context->current->size >> context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930573 io->resp.offset = context->current->flash_offset >>
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030574 context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930575
576 return 0;
577}
578
Andrew Jefferycb935042019-03-15 09:54:33 +1030579static int protocol_v2_mark_dirty(struct mbox_context *context,
580 struct protocol_mark_dirty *io)
Andrew Jefferya336e432018-08-07 16:00:40 +0930581{
582 if (!(context->current && context->current_is_write)) {
583 MSG_ERR("Tried to call mark dirty without open write window\n");
584 return -EPERM;
585 }
586
587 MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030588 io->req.v2.offset << context->backend.block_size_shift,
589 io->req.v2.size << context->backend.block_size_shift);
Andrew Jefferya336e432018-08-07 16:00:40 +0930590
591 return window_set_bytemap(context, context->current, io->req.v2.offset,
592 io->req.v2.size, WINDOW_DIRTY);
593}
594
/*
 * protocol_v2_erase() - v2 handler for the MARK_WRITE_ERASED command
 * @context:	The mbox context pointer
 * @io:		Erase parameters (window-relative, block granule)
 *
 * Marks the blocks erased in the bytemap and fills the corresponding region
 * of the window memory with 0xFF so RAM matches the post-erase flash state.
 *
 * Return: 0 on success otherwise negative error code
 */
static int protocol_v2_erase(struct mbox_context *context,
			     struct protocol_erase *io)
{
	size_t start, len;
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call erase without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
		 io->req.offset << context->backend.block_size_shift,
		 io->req.size << context->backend.block_size_shift);

	rc = window_set_bytemap(context, context->current, io->req.offset,
				io->req.size, WINDOW_ERASED);
	if (rc < 0) {
		return rc;
	}

	/* Write 0xFF to mem -> This ensures consistency between flash & ram */
	start = io->req.offset << context->backend.block_size_shift;
	len = io->req.size << context->backend.block_size_shift;
	memset(context->current->mem + start, 0xFF, len);

	return 0;
}
623
Patrick Williams68a24c92023-07-25 12:02:16 -0500624static int protocol_v2_flush(struct mbox_context *context __attribute__((unused)),
625 struct protocol_flush *io __attribute__((unused)))
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930626{
627 if (!(context->current && context->current_is_write)) {
628 MSG_ERR("Tried to call flush without open write window\n");
629 return -EPERM;
630 }
631
632 return generic_flush(context);
633}
634
Andrew Jefferycb935042019-03-15 09:54:33 +1030635static int protocol_v2_close(struct mbox_context *context,
636 struct protocol_close *io)
Andrew Jeffery093eda52018-08-07 23:10:43 +0930637{
638 int rc;
639
640 /* Close the current window if there is one */
641 if (!context->current) {
642 return 0;
643 }
644
645 /* There is an implicit flush if it was a write window */
646 if (context->current_is_write) {
647 rc = protocol_v2_flush(context, NULL);
648 if (rc < 0) {
649 MSG_ERR("Couldn't Flush Write Window\n");
650 return rc;
651 }
652 }
653
654 /* Host asked for it -> Don't set the BMC Event */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930655 windows_close_current(context, io->req.flags);
Andrew Jeffery093eda52018-08-07 23:10:43 +0930656
657 return 0;
658}
659
/* Handler table for protocol v1 */
static const struct protocol_ops protocol_ops_v1 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v1_get_info,
	.get_flash_info = protocol_v1_get_flash_info,
	.create_window = protocol_v1_create_window,
	.mark_dirty = protocol_v1_mark_dirty,
	.erase = NULL, /* v1 has no erase command */
	.flush = protocol_v1_flush,
	.close = protocol_v1_close,
	.ack = protocol_v1_ack,
};
671
/* Handler table for protocol v2; reset and ack are shared with v1 */
static const struct protocol_ops protocol_ops_v2 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v2_get_info,
	.get_flash_info = protocol_v2_get_flash_info,
	.create_window = protocol_v2_create_window,
	.mark_dirty = protocol_v2_mark_dirty,
	.erase = protocol_v2_erase,
	.flush = protocol_v2_flush,
	.close = protocol_v2_close,
	.ack = protocol_v1_ack,
};
683
/* Handler table indexed by negotiated protocol version; entry 0 is invalid */
static const struct protocol_ops *protocol_ops_map[] = {
	[0] = NULL,
	[1] = &protocol_ops_v1,
	[2] = &protocol_ops_v2,
};
689
690static int protocol_negotiate_version(struct mbox_context *context,
691 uint8_t requested)
692{
693 /* Check we support the version requested */
694 if (requested < API_MIN_VERSION)
695 return -EINVAL;
696
697 context->version = (requested > API_MAX_VERSION) ?
698 API_MAX_VERSION : requested;
699
700 context->protocol = protocol_ops_map[context->version];
701
702 return context->version;
703}
704
/*
 * protocol_init() - Initialise the protocol layer
 * @context:	The mbox context pointer
 *
 * Defaults to the highest supported version; the host may renegotiate via
 * the GET_MBOX_INFO handlers.
 *
 * Return: 0 (always succeeds; negotiation with API_MAX_VERSION cannot fail)
 */
int protocol_init(struct mbox_context *context)
{
	protocol_negotiate_version(context, API_MAX_VERSION);

	return 0;
}
711
/*
 * protocol_free() - Tear down the protocol layer
 * @context:	The mbox context pointer (unused)
 *
 * The protocol layer holds no dynamically-allocated state of its own, so
 * this is intentionally a no-op; kept for symmetry with protocol_init().
 */
void protocol_free(struct mbox_context *context __attribute__((unused)))
{
}
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030716
717/* Don't do any state manipulation, just perform the reset */
718int __protocol_reset(struct mbox_context *context)
719{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030720 enum backend_reset_mode mode;
721 int rc;
722
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030723 windows_reset_all(context);
724
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030725 rc = backend_reset(&context->backend, context->mem, context->mem_size);
726 if (rc < 0)
727 return rc;
728
729 mode = rc;
730 if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
731 return -EINVAL;
732
733 if (mode == reset_lpc_flash)
734 return lpc_map_flash(context);
735
736 assert(mode == reset_lpc_memory);
737 return lpc_map_memory(context);
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030738}
739
740/* Prevent the host from performing actions whilst reset takes place */
741int protocol_reset(struct mbox_context *context)
742{
743 int rc;
744
745 rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
746 if (rc < 0) {
747 MSG_ERR("Failed to clear daemon ready state, reset failed\n");
748 return rc;
749 }
750
751 rc = __protocol_reset(context);
752 if (rc < 0) {
753 MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
754 return rc;
755 }
756
757 rc = protocol_events_set(context,
758 BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
759 if (rc < 0) {
760 MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
761 return rc;
762 }
763
764 return 0;
765}
Patrick Williams68a24c92023-07-25 12:02:16 -0500766
767#pragma GCC diagnostic pop