/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#undef pr_fmt
#define pr_fmt(fmt) "core: " fmt

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-cmds.h"
#include "range.h"

/* Internal data structures */

enum mctp_bus_state {
	mctp_bus_state_constructed = 0,
	mctp_bus_state_tx_enabled,
	mctp_bus_state_tx_disabled,
};

struct mctp_bus {
	mctp_eid_t eid;
	struct mctp_binding *binding;
	enum mctp_bus_state state;

	struct mctp_pktbuf *tx_queue_head;
	struct mctp_pktbuf *tx_queue_tail;

	/* todo: routing */
};

struct mctp_msg_ctx {
	uint8_t src;
	uint8_t dest;
	uint8_t tag;
	uint8_t last_seq;
	void *buf;
	size_t buf_size;
	size_t buf_alloc_size;
	size_t fragment_size;
};

struct mctp {
	int n_busses;
	struct mctp_bus *busses;

	/* Message RX callback */
	mctp_rx_fn message_rx;
	void *message_rx_data;

	/* Packet capture callback */
	mctp_capture_fn capture;
	void *capture_data;

	/* Message reassembly.
	 * @todo: flexible context count
	 */
	struct mctp_msg_ctx msg_ctxs[16];

	enum {
		ROUTE_ENDPOINT,
		ROUTE_BRIDGE,
	} route_policy;
	size_t max_message_size;
};

#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x) \
	do { (void)sizeof(char[0-(!(x))]); } while (0)
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

/* 64 KiB should be sufficient for a single message. Applications
 * requiring larger messages can override this by setting max_message_size. */
#ifndef MCTP_MAX_MESSAGE_SIZE
#define MCTP_MAX_MESSAGE_SIZE 65536
#endif
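
/*
 * Usage note (illustrative): because of the #ifndef guard above, the limit
 * can be raised at build time, e.g. by adding -DMCTP_MAX_MESSAGE_SIZE=131072
 * to CFLAGS, or per context at run time:
 *
 *	mctp_set_max_message_size(mctp, 128 * 1024);
 */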

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, bool tag_owner,
				  uint8_t msg_tag, void *msg, size_t msg_len);

struct mctp_pktbuf *mctp_pktbuf_alloc(struct mctp_binding *binding, size_t len)
{
	struct mctp_pktbuf *buf;
	size_t size;

	size = binding->pkt_size + binding->pkt_header + binding->pkt_trailer;

	/* todo: pools */
	buf = __mctp_alloc(sizeof(*buf) + size);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->start = binding->pkt_header;
	buf->end = buf->start + len;
	buf->mctp_hdr_off = buf->start;
	buf->next = NULL;

	return buf;
}

void mctp_pktbuf_free(struct mctp_pktbuf *pkt)
{
	__mctp_free(pkt);
}

struct mctp_hdr *mctp_pktbuf_hdr(struct mctp_pktbuf *pkt)
{
	return (struct mctp_hdr *)(pkt->data + pkt->mctp_hdr_off);
}

void *mctp_pktbuf_data(struct mctp_pktbuf *pkt)
{
	return pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
}

size_t mctp_pktbuf_size(struct mctp_pktbuf *pkt)
{
	return pkt->end - pkt->start;
}

void *mctp_pktbuf_alloc_start(struct mctp_pktbuf *pkt, size_t size)
{
	assert(size <= pkt->start);
	pkt->start -= size;
	return pkt->data + pkt->start;
}

void *mctp_pktbuf_alloc_end(struct mctp_pktbuf *pkt, size_t size)
{
	void *buf;

	assert(size <= (pkt->size - pkt->end));
	buf = pkt->data + pkt->end;
	pkt->end += size;
	return buf;
}

int mctp_pktbuf_push(struct mctp_pktbuf *pkt, void *data, size_t len)
{
	void *p;

	if (pkt->end + len > pkt->size)
		return -1;

	p = pkt->data + pkt->end;

	pkt->end += len;
	memcpy(p, data, len);

	return 0;
}

void *mctp_pktbuf_pop(struct mctp_pktbuf *pkt, size_t len)
{
	if (len > mctp_pktbuf_size(pkt))
		return NULL;

	pkt->end -= len;
	return pkt->data + pkt->end;
}
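
/*
 * Usage sketch (illustrative, with assumed names): a hypothetical binding's
 * receive path built from the pktbuf helpers above. "example_binding_rx",
 * "raw" and "len" are assumptions for the example; a real binding follows
 * the same pattern with its own framing.
 *
 *	static void example_binding_rx(struct mctp_binding *b,
 *				       const void *raw, size_t len)
 *	{
 *		struct mctp_pktbuf *pkt;
 *
 *		pkt = mctp_pktbuf_alloc(b, 0);
 *		if (!pkt)
 *			return;
 *
 *		if (mctp_pktbuf_push(pkt, (void *)raw, len)) {
 *			mctp_pktbuf_free(pkt);
 *			return;
 *		}
 *
 *		mctp_bus_rx(b, pkt);
 *	}
 *
 * mctp_pktbuf_alloc(b, 0) reserves the binding's header/trailer space with
 * an empty body, mctp_pktbuf_push() appends the received packet (MCTP header
 * plus payload), and mctp_bus_rx() consumes and frees the packet.
 */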

/* Message reassembly */
static struct mctp_msg_ctx *mctp_msg_ctx_lookup(struct mctp *mctp,
		uint8_t src, uint8_t dest, uint8_t tag)
{
	unsigned int i;

	/* @todo: better lookup, if we add support for more outstanding
	 * message contexts */
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *ctx = &mctp->msg_ctxs[i];
		if (ctx->src == src && ctx->dest == dest && ctx->tag == tag)
			return ctx;
	}

	return NULL;
}

static struct mctp_msg_ctx *mctp_msg_ctx_create(struct mctp *mctp,
		uint8_t src, uint8_t dest, uint8_t tag)
{
	struct mctp_msg_ctx *ctx = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (!tmp->src) {
			ctx = tmp;
			break;
		}
	}

	if (!ctx)
		return NULL;

	ctx->src = src;
	ctx->dest = dest;
	ctx->tag = tag;
	ctx->buf_size = 0;

	return ctx;
}

static void mctp_msg_ctx_drop(struct mctp_msg_ctx *ctx)
{
	ctx->src = 0;
}

static void mctp_msg_ctx_reset(struct mctp_msg_ctx *ctx)
{
	ctx->buf_size = 0;
	ctx->fragment_size = 0;
}

static int mctp_msg_ctx_add_pkt(struct mctp_msg_ctx *ctx,
		struct mctp_pktbuf *pkt, size_t max_size)
{
	size_t len;

	len = mctp_pktbuf_size(pkt) - sizeof(struct mctp_hdr);

	/* Reject overflow of the accumulated buffer size */
	if (len + ctx->buf_size < ctx->buf_size) {
		return -1;
	}

	if (ctx->buf_size + len > ctx->buf_alloc_size) {
		size_t new_alloc_size;
		void *lbuf;

		/* @todo: finer-grained allocation */
		if (!ctx->buf_alloc_size) {
			new_alloc_size = MAX(len, 4096UL);
		} else {
			new_alloc_size = MAX(ctx->buf_alloc_size * 2,
					     len + ctx->buf_size);
		}

		/* Don't allow the reassembly buffer to grow beyond the limit */
		if (new_alloc_size > max_size)
			return -1;

		lbuf = __mctp_realloc(ctx->buf, new_alloc_size);
		if (lbuf) {
			ctx->buf = lbuf;
			ctx->buf_alloc_size = new_alloc_size;
		} else {
			/* Drop the partial buffer; clear the stale pointer so
			 * a reused context doesn't touch freed memory */
			__mctp_free(ctx->buf);
			ctx->buf = NULL;
			ctx->buf_size = 0;
			ctx->buf_alloc_size = 0;
			return -1;
		}
	}

	memcpy((uint8_t *)ctx->buf + ctx->buf_size, mctp_pktbuf_data(pkt), len);
	ctx->buf_size += len;

	return 0;
}
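
/*
 * Worked example of the growth policy in mctp_msg_ctx_add_pkt() (for
 * illustration): reassembling a 10000-byte message from 64-byte payloads
 * allocates 4096 bytes on the first fragment, grows to 8192 once that
 * fills, then to 16384, where it stays, provided max_size (the context's
 * max_message_size, 65536 by default) permits each step.
 */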

/* Core API functions */
struct mctp *mctp_init(void)
{
	struct mctp *mctp;

	mctp = __mctp_alloc(sizeof(*mctp));

	if (!mctp)
		return NULL;

	memset(mctp, 0, sizeof(*mctp));
	mctp->max_message_size = MCTP_MAX_MESSAGE_SIZE;

	return mctp;
}

void mctp_set_max_message_size(struct mctp *mctp, size_t message_size)
{
	mctp->max_message_size = message_size;
}

void mctp_set_capture_handler(struct mctp *mctp, mctp_capture_fn fn, void *user)
{
	mctp->capture = fn;
	mctp->capture_data = user;
}

static void mctp_bus_destroy(struct mctp_bus *bus)
{
	while (bus->tx_queue_head) {
		struct mctp_pktbuf *curr = bus->tx_queue_head;

		bus->tx_queue_head = curr->next;
		mctp_pktbuf_free(curr);
	}
}

void mctp_destroy(struct mctp *mctp)
{
	size_t i;

	/* Cleanup message assembly contexts */
	BUILD_ASSERT(ARRAY_SIZE(mctp->msg_ctxs) < SIZE_MAX);
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (tmp->buf)
			__mctp_free(tmp->buf);
	}

	while (mctp->n_busses--)
		mctp_bus_destroy(&mctp->busses[mctp->n_busses]);

	__mctp_free(mctp->busses);
	__mctp_free(mctp);
}

int mctp_set_rx_all(struct mctp *mctp, mctp_rx_fn fn, void *data)
{
	mctp->message_rx = fn;
	mctp->message_rx_data = data;
	return 0;
}

static struct mctp_bus *find_bus_for_eid(struct mctp *mctp,
		mctp_eid_t dest __attribute__((unused)))
{
	if (mctp->n_busses == 0)
		return NULL;

	/* for now, just use the first bus. For full routing support,
	 * we will need a table of neighbours */
	return &mctp->busses[0];
}

int mctp_register_bus(struct mctp *mctp,
		struct mctp_binding *binding,
		mctp_eid_t eid)
{
	int rc = 0;

	/* todo: multiple busses */
	assert(mctp->n_busses == 0);

	mctp->busses = __mctp_alloc(sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;

	/* only count the bus once the allocation has succeeded */
	mctp->n_busses = 1;
	memset(mctp->busses, 0, sizeof(struct mctp_bus));
	mctp->busses[0].binding = binding;
	mctp->busses[0].eid = eid;
	binding->bus = &mctp->busses[0];
	binding->mctp = mctp;
	mctp->route_policy = ROUTE_ENDPOINT;

	if (binding->start) {
		rc = binding->start(binding);
		if (rc < 0) {
			mctp_prerr("Failed to start binding: %d", rc);
			binding->bus = NULL;
			__mctp_free(mctp->busses);
			mctp->busses = NULL;
			mctp->n_busses = 0;
		}
	}

	return rc;
}
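
/*
 * Usage sketch (illustrative, with assumed names): a typical endpoint pairs
 * one binding with one bus. "example_binding_init", the EIDs and the message
 * buffer are assumptions for the example; a real application uses one of the
 * hardware bindings and its own addressing.
 *
 *	static void example_rx(uint8_t src, bool tag_owner, uint8_t msg_tag,
 *			       void *data, void *msg, size_t len)
 *	{
 *		printf("message from EID %d, %zu bytes\n", src, len);
 *	}
 *
 *	struct mctp *mctp = mctp_init();
 *	struct mctp_binding *binding = example_binding_init();
 *
 *	mctp_set_rx_all(mctp, example_rx, NULL);
 *	mctp_register_bus(mctp, binding, 8);
 *
 *	mctp_message_tx(mctp, 9, true, 0, buf, buf_len);
 *
 * The receive callback's argument order mirrors the message_rx invocation in
 * mctp_rx() below; the final call sends a tag-owning request with tag 0 to
 * EID 9.
 */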

void mctp_unregister_bus(struct mctp *mctp, struct mctp_binding *binding)
{
	/*
	 * We only support one bus right now; once the call completes we will
	 * have no more busses.
	 */
	mctp->n_busses = 0;
	binding->mctp = NULL;
	binding->bus = NULL;
	__mctp_free(mctp->busses);
	mctp->busses = NULL;
}

int mctp_bridge_busses(struct mctp *mctp,
		struct mctp_binding *b1, struct mctp_binding *b2)
{
	int rc = 0;

	assert(mctp->n_busses == 0);
	mctp->busses = __mctp_alloc(2 * sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;
	memset(mctp->busses, 0, 2 * sizeof(struct mctp_bus));
	mctp->n_busses = 2;
	mctp->busses[0].binding = b1;
	b1->bus = &mctp->busses[0];
	b1->mctp = mctp;
	mctp->busses[1].binding = b2;
	b2->bus = &mctp->busses[1];
	b2->mctp = mctp;

	mctp->route_policy = ROUTE_BRIDGE;

	if (b1->start) {
		rc = b1->start(b1);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b1->name, rc);
			goto done;
		}
	}

	if (b2->start) {
		rc = b2->start(b2);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b2->name, rc);
			goto done;
		}
	}

done:
	return rc;
}

static inline bool mctp_ctrl_cmd_is_transport(struct mctp_ctrl_msg_hdr *hdr)
{
	return ((hdr->command_code >= MCTP_CTRL_CMD_FIRST_TRANSPORT) &&
		(hdr->command_code <= MCTP_CTRL_CMD_LAST_TRANSPORT));
}

static bool mctp_ctrl_handle_msg(struct mctp_bus *bus, mctp_eid_t src,
				 uint8_t msg_tag, bool tag_owner, void *buffer,
				 size_t length)
{
	struct mctp_ctrl_msg_hdr *msg_hdr = buffer;

	/*
	 * A control message has been received. If a transport control message
	 * handler is provided, it will be called. If there is no dedicated
	 * handler, this function returns false and the data can be handled by
	 * the generic message handler. The transport control message handler
	 * is given messages in the command range 0xF0 - 0xFF.
	 */
	if (mctp_ctrl_cmd_is_transport(msg_hdr)) {
		if (bus->binding->control_rx != NULL) {
			/* MCTP bus binding handler */
			bus->binding->control_rx(src, msg_tag, tag_owner,
						 bus->binding->control_rx_data,
						 buffer, length);
			return true;
		}
	}

	/*
	 * The command was not handled, due to the lack of a dedicated
	 * callback. It will be passed to the regular message_rx handler.
	 */
	return false;
}

static inline bool mctp_rx_dest_is_local(struct mctp_bus *bus, mctp_eid_t dest)
{
	return dest == bus->eid || dest == MCTP_EID_NULL ||
	       dest == MCTP_EID_BROADCAST;
}

static inline bool mctp_ctrl_cmd_is_request(struct mctp_ctrl_msg_hdr *hdr)
{
	return hdr->ic_msg_type == MCTP_CTRL_HDR_MSG_TYPE &&
	       hdr->rq_dgram_inst & MCTP_CTRL_HDR_FLAG_REQUEST;
}

/*
 * Receive the complete MCTP message and route it.
 * Asserts:
 *     'buf' is not NULL.
 */
static void mctp_rx(struct mctp *mctp, struct mctp_bus *bus, mctp_eid_t src,
		    mctp_eid_t dest, bool tag_owner, uint8_t msg_tag, void *buf,
		    size_t len)
{
	assert(buf != NULL);

	if (mctp->route_policy == ROUTE_ENDPOINT &&
	    mctp_rx_dest_is_local(bus, dest)) {
		/* Handle MCTP Control Messages: */
		if (len >= sizeof(struct mctp_ctrl_msg_hdr)) {
			struct mctp_ctrl_msg_hdr *msg_hdr = buf;

			/*
			 * Identify if this is a control request message.
			 * See DSP0236 v1.3.0 sec. 11.5.
			 */
			if (mctp_ctrl_cmd_is_request(msg_hdr)) {
				bool handled;
				handled = mctp_ctrl_handle_msg(
					bus, src, msg_tag, tag_owner, buf, len);
				if (handled)
					return;
			}
		}

		if (mctp->message_rx)
			mctp->message_rx(src, tag_owner, msg_tag,
					 mctp->message_rx_data, buf, len);
	}

	if (mctp->route_policy == ROUTE_BRIDGE) {
		int i;

		for (i = 0; i < mctp->n_busses; i++) {
			struct mctp_bus *dest_bus = &mctp->busses[i];
			if (dest_bus == bus)
				continue;

			mctp_message_tx_on_bus(dest_bus, src, dest, tag_owner,
					       msg_tag, buf, len);
		}
	}
}

void mctp_bus_rx(struct mctp_binding *binding, struct mctp_pktbuf *pkt)
{
	struct mctp_bus *bus = binding->bus;
	struct mctp *mctp = binding->mctp;
	uint8_t flags, exp_seq, seq, tag;
	struct mctp_msg_ctx *ctx;
	struct mctp_hdr *hdr;
	bool tag_owner;
	size_t len;
	void *p;
	int rc;

	assert(bus);

	/* Drop the packet if it is smaller than the MCTP header */
	if (mctp_pktbuf_size(pkt) <= sizeof(struct mctp_hdr))
		goto out;

	if (mctp->capture)
		mctp->capture(pkt, mctp->capture_data);

	hdr = mctp_pktbuf_hdr(pkt);

	/* small optimisation: don't bother with reassembly if we're going to
	 * drop the packet in mctp_rx anyway */
	if (mctp->route_policy == ROUTE_ENDPOINT && hdr->dest != bus->eid)
		goto out;

	flags = hdr->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
	seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
	tag_owner =
		(hdr->flags_seq_tag >> MCTP_HDR_TO_SHIFT) & MCTP_HDR_TO_MASK;

	switch (flags) {
	case MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM:
		/* single-packet message - send straight up to the rx function,
		 * no need to create a message context */
		len = pkt->end - pkt->mctp_hdr_off - sizeof(struct mctp_hdr);
		p = pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
		mctp_rx(mctp, bus, hdr->src, hdr->dest, tag_owner, tag, p, len);
		break;

	case MCTP_HDR_FLAG_SOM:
		/* start of a new message - create a context for future
		 * message reception. If a context for this src/dest/tag
		 * already exists, drop any partial message and reuse it. */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (ctx) {
			mctp_msg_ctx_reset(ctx);
		} else {
			ctx = mctp_msg_ctx_create(mctp,
						  hdr->src, hdr->dest, tag);
			/* If context creation fails due to exhaustion of the
			 * contexts we can support, drop the packet */
			if (!ctx) {
				mctp_prdebug("Context buffers exhausted.");
				goto out;
			}
		}

		/* Save the fragment size; subsequent middle fragments
		 * should be of the same size */
		ctx->fragment_size = mctp_pktbuf_size(pkt);

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
		} else {
			ctx->last_seq = seq;
		}

		break;

	case MCTP_HDR_FLAG_EOM:
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;

		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len > ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected"
				     " less than %zu, received = %zu",
				     ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (!rc)
			mctp_rx(mctp, bus, ctx->src, ctx->dest, tag_owner, tag,
				ctx->buf, ctx->buf_size);

		mctp_msg_ctx_drop(ctx);
		break;

	case 0:
		/* Neither SOM nor EOM: a middle fragment */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;
		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len != ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected = %zu "
				     "received = %zu",
				     ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
			goto out;
		}
		ctx->last_seq = seq;

		break;
	}
out:
	mctp_pktbuf_free(pkt);
}
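
/*
 * Reassembly walk-through (illustrative): with 68-byte fragments (64 bytes
 * of payload after the 4-byte MCTP header), a 200-byte message arrives as
 * four packets: SOM with sequence number 0, two middle fragments with
 * sequence numbers 1 and 2, and an EOM with sequence number 3 carrying the
 * final 8 bytes. The SOM fixes ctx->fragment_size; middle fragments must
 * match it exactly, the EOM may be smaller, and any sequence-number mismatch
 * drops the whole context.
 */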

static int mctp_packet_tx(struct mctp_bus *bus,
		struct mctp_pktbuf *pkt)
{
	struct mctp *mctp = bus->binding->mctp;

	if (bus->state != mctp_bus_state_tx_enabled)
		return -1;

	if (mctp->capture)
		mctp->capture(pkt, mctp->capture_data);

	return bus->binding->tx(bus->binding, pkt);
}

static void mctp_send_tx_queue(struct mctp_bus *bus)
{
	struct mctp_pktbuf *pkt;

	while ((pkt = bus->tx_queue_head)) {
		int rc;

		rc = mctp_packet_tx(bus, pkt);
		switch (rc) {
		/* If transmission succeeded, or */
		case 0:
		/* if the packet is somehow too large, */
		case -EMSGSIZE:
			/* drop the packet */
			bus->tx_queue_head = pkt->next;
			mctp_pktbuf_free(pkt);
			break;

		/* If the binding was busy, or */
		case -EBUSY:
		/* some other unknown error occurred, */
		default:
			/* make sure the tail pointer is consistent and retry
			 * later */
			goto cleanup_tail;
		}
	}

cleanup_tail:
	if (!bus->tx_queue_head)
		bus->tx_queue_tail = NULL;
}

void mctp_binding_set_tx_enabled(struct mctp_binding *binding, bool enable)
{
	struct mctp_bus *bus = binding->bus;

	switch (bus->state) {
	case mctp_bus_state_constructed:
		if (!enable)
			return;

		if (binding->pkt_size < MCTP_PACKET_SIZE(MCTP_BTU)) {
			mctp_prerr("Cannot start %s binding with invalid MTU: %zu",
				   binding->name,
				   MCTP_BODY_SIZE(binding->pkt_size));
			return;
		}

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prinfo("%s binding started", binding->name);
		return;
	case mctp_bus_state_tx_enabled:
		if (enable)
			return;

		bus->state = mctp_bus_state_tx_disabled;
		mctp_prdebug("%s binding Tx disabled", binding->name);
		return;
	case mctp_bus_state_tx_disabled:
		if (!enable)
			return;

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prdebug("%s binding Tx enabled", binding->name);
		mctp_send_tx_queue(bus);
		return;
	}
}
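
/*
 * Flow-control sketch (illustrative, with assumed names): a binding whose
 * hardware cannot accept a packet can return -EBUSY from its tx handler,
 * leaving the packet queued in mctp_send_tx_queue(), and pause the core
 * until space frees up. "example_hw_full" and "example_hw_send" are
 * assumptions for the example.
 *
 *	static int example_binding_tx(struct mctp_binding *b,
 *				      struct mctp_pktbuf *pkt)
 *	{
 *		if (example_hw_full()) {
 *			mctp_binding_set_tx_enabled(b, false);
 *			return -EBUSY;
 *		}
 *
 *		return example_hw_send(mctp_pktbuf_hdr(pkt),
 *				       mctp_pktbuf_size(pkt));
 *	}
 *
 * When the hardware drains, the binding calls
 * mctp_binding_set_tx_enabled(binding, true), which flushes the queued
 * packets via mctp_send_tx_queue().
 */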

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, bool tag_owner,
				  uint8_t msg_tag, void *msg, size_t msg_len)
{
	size_t max_payload_len, payload_len, p;
	struct mctp_pktbuf *pkt;
	struct mctp_hdr *hdr;
	int i;

	if (bus->state == mctp_bus_state_constructed)
		return -ENXIO;

	if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag)
		return -EINVAL;

	max_payload_len = MCTP_BODY_SIZE(bus->binding->pkt_size);

	{
		const bool valid_mtu = max_payload_len >= MCTP_BTU;
		assert(valid_mtu);
		if (!valid_mtu)
			return -EINVAL;
	}

	mctp_prdebug("%s: Generating packets for transmission of %zu byte message from %hhu to %hhu",
		     __func__, msg_len, src, dest);

	/* queue up packets, each carrying at most the binding's maximum
	 * payload size */
	for (p = 0, i = 0; p < msg_len; i++) {
		payload_len = msg_len - p;
		if (payload_len > max_payload_len)
			payload_len = max_payload_len;

		pkt = mctp_pktbuf_alloc(bus->binding,
					payload_len + sizeof(*hdr));
		if (!pkt)
			return -ENOMEM;

		hdr = mctp_pktbuf_hdr(pkt);

		hdr->ver = bus->binding->version & 0xf;
		hdr->dest = dest;
		hdr->src = src;
		hdr->flags_seq_tag = (tag_owner << MCTP_HDR_TO_SHIFT) |
				     (msg_tag << MCTP_HDR_TAG_SHIFT);

		if (i == 0)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_SOM;
		if (p + payload_len >= msg_len)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_EOM;
		hdr->flags_seq_tag |=
			(i & MCTP_HDR_SEQ_MASK) << MCTP_HDR_SEQ_SHIFT;

		memcpy(mctp_pktbuf_data(pkt), (uint8_t *)msg + p, payload_len);

		/* add to tx queue */
		if (bus->tx_queue_tail)
			bus->tx_queue_tail->next = pkt;
		else
			bus->tx_queue_head = pkt;
		bus->tx_queue_tail = pkt;

		p += payload_len;
	}

	mctp_prdebug("%s: Enqueued %d packets", __func__, i);

	mctp_send_tx_queue(bus);

	return 0;
}
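
/*
 * Fragmentation walk-through (illustrative): with the baseline MTU the
 * binding's pkt_size yields a 64-byte payload per packet, so a 200-byte
 * message is queued as four packets with payloads of 64, 64, 64 and 8 bytes,
 * sequence numbers 0 through 3, SOM set on the first and EOM on the last.
 * The tag-owner bit and message tag are carried unchanged in every fragment.
 */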

int mctp_message_tx(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
		    uint8_t msg_tag, void *msg, size_t msg_len)
{
	struct mctp_bus *bus;

	/* TODO: Protect against the same tag being used across
	 * different callers */
	if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag) {
		mctp_prerr("Incorrect message tag %u passed.", msg_tag);
		return -EINVAL;
	}

	bus = find_bus_for_eid(mctp, eid);
	if (!bus)
		return 0;

	return mctp_message_tx_on_bus(bus, bus->eid, eid, tag_owner, msg_tag,
				      msg, msg_len);
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800846}