/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#undef pr_fmt
#define pr_fmt(fmt) "core: " fmt

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-cmds.h"
#include "range.h"

21/* Internal data structures */
22
Andrew Jefferyc61501c2021-01-27 23:24:18 +103023enum mctp_bus_state {
24 mctp_bus_state_constructed = 0,
25 mctp_bus_state_tx_enabled,
26 mctp_bus_state_tx_disabled,
27};
Jeremy Kerr4cdc2002019-02-07 16:49:12 +080028
Andrew Jefferyc61501c2021-01-27 23:24:18 +103029struct mctp_bus {
30 mctp_eid_t eid;
31 struct mctp_binding *binding;
32 enum mctp_bus_state state;
33
34 struct mctp_pktbuf *tx_queue_head;
35 struct mctp_pktbuf *tx_queue_tail;
Jeremy Kerrcc2458d2019-03-01 08:23:33 +080036
Jeremy Kerr4cdc2002019-02-07 16:49:12 +080037 /* todo: routing */
38};
39
Jeremy Kerr24db71f2019-02-07 21:37:35 +080040struct mctp_msg_ctx {
Patrick Williamsa721c2d2022-12-04 14:30:26 -060041 uint8_t src;
42 uint8_t dest;
43 uint8_t tag;
44 uint8_t last_seq;
45 void *buf;
46 size_t buf_size;
47 size_t buf_alloc_size;
48 size_t fragment_size;
Jeremy Kerr24db71f2019-02-07 21:37:35 +080049};
50
struct mctp {
	int n_busses;
	struct mctp_bus *busses;

	/* Message RX callback */
	mctp_rx_fn message_rx;
	void *message_rx_data;

	/* Packet capture callback */
	mctp_capture_fn capture;
	void *capture_data;

	/* Message reassembly.
	 * @todo: flexible context count
	 */
	struct mctp_msg_ctx msg_ctxs[16];

	enum {
		ROUTE_ENDPOINT,
		ROUTE_BRIDGE,
	} route_policy;
	size_t max_message_size;
};

#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x)                                                        \
	do {                                                                   \
		(void)sizeof(char[0 - (!(x))]);                                \
	} while (0)
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

/* 64 KiB should be sufficient for a single message. Applications
 * requiring larger sizes can override this by setting max_message_size. */
#ifndef MCTP_MAX_MESSAGE_SIZE
#define MCTP_MAX_MESSAGE_SIZE 65536
#endif

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, bool tag_owner,
				  uint8_t msg_tag, void *msg, size_t msg_len);

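/* Allocate a packet buffer sized for the binding's largest packet
 * (binding->pkt_size plus its header and trailer), with 'len' bytes
 * reserved for the MCTP header and payload. Returns NULL if 'len'
 * exceeds that maximum or if allocation fails. */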
struct mctp_pktbuf *mctp_pktbuf_alloc(struct mctp_binding *binding, size_t len)
{
	struct mctp_pktbuf *buf;
	size_t size;

	size = binding->pkt_size + binding->pkt_header + binding->pkt_trailer;
	if (len > size) {
		return NULL;
	}

	/* todo: pools */
	buf = __mctp_alloc(sizeof(*buf) + size);

	if (!buf)
		return NULL;

	buf->size = size;
	buf->start = binding->pkt_header;
	buf->end = buf->start + len;
	buf->mctp_hdr_off = buf->start;
	buf->next = NULL;

	return buf;
}

void mctp_pktbuf_free(struct mctp_pktbuf *pkt)
{
	__mctp_free(pkt);
}

struct mctp_hdr *mctp_pktbuf_hdr(struct mctp_pktbuf *pkt)
{
	return (struct mctp_hdr *)(pkt->data + pkt->mctp_hdr_off);
}

void *mctp_pktbuf_data(struct mctp_pktbuf *pkt)
{
	return pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
}

size_t mctp_pktbuf_size(struct mctp_pktbuf *pkt)
{
	return pkt->end - pkt->start;
}

void *mctp_pktbuf_alloc_start(struct mctp_pktbuf *pkt, size_t size)
{
	assert(size <= pkt->start);
	pkt->start -= size;
	return pkt->data + pkt->start;
}

void *mctp_pktbuf_alloc_end(struct mctp_pktbuf *pkt, size_t size)
{
	void *buf;

	assert(size <= (pkt->size - pkt->end));
	buf = pkt->data + pkt->end;
	pkt->end += size;
	return buf;
}

int mctp_pktbuf_push(struct mctp_pktbuf *pkt, void *data, size_t len)
{
	void *p;

	if (pkt->end + len > pkt->size)
		return -1;

	p = pkt->data + pkt->end;

	pkt->end += len;
	memcpy(p, data, len);

	return 0;
}

void *mctp_pktbuf_pop(struct mctp_pktbuf *pkt, size_t len)
{
	if (len > mctp_pktbuf_size(pkt))
		return NULL;

	pkt->end -= len;
	return pkt->data + pkt->end;
}

/* Message reassembly */
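/* A message context tracks reassembly state for one (src, dest, tag)
 * tuple. A context whose src field is zero is considered free; see
 * mctp_msg_ctx_create() and mctp_msg_ctx_drop(). */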
static struct mctp_msg_ctx *mctp_msg_ctx_lookup(struct mctp *mctp, uint8_t src,
						uint8_t dest, uint8_t tag)
{
	unsigned int i;

	/* @todo: better lookup, if we add support for more outstanding
	 * message contexts */
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *ctx = &mctp->msg_ctxs[i];
		if (ctx->src == src && ctx->dest == dest && ctx->tag == tag)
			return ctx;
	}

	return NULL;
}

static struct mctp_msg_ctx *mctp_msg_ctx_create(struct mctp *mctp, uint8_t src,
						uint8_t dest, uint8_t tag)
{
	struct mctp_msg_ctx *ctx = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (!tmp->src) {
			ctx = tmp;
			break;
		}
	}

	if (!ctx)
		return NULL;

	ctx->src = src;
	ctx->dest = dest;
	ctx->tag = tag;
	ctx->buf_size = 0;

	return ctx;
}

static void mctp_msg_ctx_drop(struct mctp_msg_ctx *ctx)
{
	ctx->src = 0;
}

static void mctp_msg_ctx_reset(struct mctp_msg_ctx *ctx)
{
	ctx->buf_size = 0;
	ctx->fragment_size = 0;
}

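/* Append the payload of 'pkt' to the context's reassembly buffer,
 * growing the buffer (roughly doubling, starting at 4 KiB) as needed.
 * Fails if the size calculation overflows or the buffer would exceed
 * 'max_size'. */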
static int mctp_msg_ctx_add_pkt(struct mctp_msg_ctx *ctx,
				struct mctp_pktbuf *pkt, size_t max_size)
{
	size_t len;

	len = mctp_pktbuf_size(pkt) - sizeof(struct mctp_hdr);

	if (len + ctx->buf_size < ctx->buf_size) {
		return -1;
	}

	if (ctx->buf_size + len > ctx->buf_alloc_size) {
		size_t new_alloc_size;
		void *lbuf;

		/* @todo: finer-grained allocation */
		if (!ctx->buf_alloc_size) {
			new_alloc_size = MAX(len, 4096UL);
		} else {
			new_alloc_size = MAX(ctx->buf_alloc_size * 2,
					     len + ctx->buf_size);
		}

		/* Don't allow heap to grow beyond a limit */
		if (new_alloc_size > max_size)
			return -1;

		lbuf = __mctp_realloc(ctx->buf, new_alloc_size);
		if (lbuf) {
			ctx->buf = lbuf;
			ctx->buf_alloc_size = new_alloc_size;
		} else {
			__mctp_free(ctx->buf);
			return -1;
		}
	}

	memcpy((uint8_t *)ctx->buf + ctx->buf_size, mctp_pktbuf_data(pkt), len);
	ctx->buf_size += len;

	return 0;
}

/* Core API functions */
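/* Allocate and zero-initialise a core MCTP context. The caller owns
 * the returned pointer and releases it with mctp_destroy(). */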
struct mctp *mctp_init(void)
{
	struct mctp *mctp;

	mctp = __mctp_alloc(sizeof(*mctp));

	if (!mctp)
		return NULL;

	memset(mctp, 0, sizeof(*mctp));
	mctp->max_message_size = MCTP_MAX_MESSAGE_SIZE;

	return mctp;
}

void mctp_set_max_message_size(struct mctp *mctp, size_t message_size)
{
	mctp->max_message_size = message_size;
}

void mctp_set_capture_handler(struct mctp *mctp, mctp_capture_fn fn, void *user)
{
	mctp->capture = fn;
	mctp->capture_data = user;
}

static void mctp_bus_destroy(struct mctp_bus *bus)
{
	while (bus->tx_queue_head) {
		struct mctp_pktbuf *curr = bus->tx_queue_head;

		bus->tx_queue_head = curr->next;
		mctp_pktbuf_free(curr);
	}
}

void mctp_destroy(struct mctp *mctp)
{
	size_t i;

	/* Cleanup message assembly contexts */
	BUILD_ASSERT(ARRAY_SIZE(mctp->msg_ctxs) < SIZE_MAX);
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (tmp->buf)
			__mctp_free(tmp->buf);
	}

	while (mctp->n_busses--)
		mctp_bus_destroy(&mctp->busses[mctp->n_busses]);

	__mctp_free(mctp->busses);
	__mctp_free(mctp);
}

int mctp_set_rx_all(struct mctp *mctp, mctp_rx_fn fn, void *data)
{
	mctp->message_rx = fn;
	mctp->message_rx_data = data;
	return 0;
}

static struct mctp_bus *find_bus_for_eid(struct mctp *mctp, mctp_eid_t dest
					 __attribute__((unused)))
{
	if (mctp->n_busses == 0)
		return NULL;

	/* for now, just use the first bus. For full routing support,
	 * we will need a table of neighbours */
	return &mctp->busses[0];
}

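/* Register 'binding' as the (currently single) bus for this MCTP
 * context, assign it the local EID and the endpoint route policy, and
 * start the binding if it provides a start handler. On start failure
 * the bus is torn down again and the binding's error code returned. */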
int mctp_register_bus(struct mctp *mctp, struct mctp_binding *binding,
		      mctp_eid_t eid)
{
	int rc = 0;

	/* todo: multiple busses */
	assert(mctp->n_busses == 0);
	mctp->n_busses = 1;

	mctp->busses = __mctp_alloc(sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;

	memset(mctp->busses, 0, sizeof(struct mctp_bus));
	mctp->busses[0].binding = binding;
	mctp->busses[0].eid = eid;
	binding->bus = &mctp->busses[0];
	binding->mctp = mctp;
	mctp->route_policy = ROUTE_ENDPOINT;

	if (binding->start) {
		rc = binding->start(binding);
		if (rc < 0) {
			mctp_prerr("Failed to start binding: %d", rc);
			binding->bus = NULL;
			__mctp_free(mctp->busses);
			mctp->busses = NULL;
			mctp->n_busses = 0;
		}
	}

	return rc;
}

void mctp_unregister_bus(struct mctp *mctp, struct mctp_binding *binding)
{
	/*
	 * We only support one bus right now; once the call completes we will
	 * have no more busses
	 */
	mctp->n_busses = 0;
	binding->mctp = NULL;
	binding->bus = NULL;
	__mctp_free(mctp->busses);
}

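/* Set up two bindings as a bridge: packets received on one bus are
 * retransmitted on the other (route policy ROUTE_BRIDGE). Both
 * bindings are started if they provide start handlers. */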
int mctp_bridge_busses(struct mctp *mctp, struct mctp_binding *b1,
		       struct mctp_binding *b2)
{
	int rc = 0;

	assert(mctp->n_busses == 0);
	mctp->busses = __mctp_alloc(2 * sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;
	memset(mctp->busses, 0, 2 * sizeof(struct mctp_bus));
	mctp->n_busses = 2;
	mctp->busses[0].binding = b1;
	b1->bus = &mctp->busses[0];
	b1->mctp = mctp;
	mctp->busses[1].binding = b2;
	b2->bus = &mctp->busses[1];
	b2->mctp = mctp;

	mctp->route_policy = ROUTE_BRIDGE;

	if (b1->start) {
		rc = b1->start(b1);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b1->name, rc);
			goto done;
		}
	}

	if (b2->start) {
		rc = b2->start(b2);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b2->name, rc);
			goto done;
		}
	}

done:
	return rc;
}

static inline bool mctp_ctrl_cmd_is_transport(struct mctp_ctrl_msg_hdr *hdr)
{
	return ((hdr->command_code >= MCTP_CTRL_CMD_FIRST_TRANSPORT) &&
		(hdr->command_code <= MCTP_CTRL_CMD_LAST_TRANSPORT));
}

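/* Pass a transport-range control message to the binding's control_rx
 * handler, if one is registered. Returns true if the message was
 * consumed, false if it should fall through to the generic handler. */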
static bool mctp_ctrl_handle_msg(struct mctp_bus *bus, mctp_eid_t src,
				 uint8_t msg_tag, bool tag_owner, void *buffer,
				 size_t length)
{
	struct mctp_ctrl_msg_hdr *msg_hdr = buffer;

	/*
	 * A control message has been received. If a transport control message
	 * handler is provided, it will be called. If there is no dedicated
	 * handler, this function returns false and the data can be handled by
	 * the generic message handler. The transport control message handler
	 * will be provided with messages in the command range 0xF0 - 0xFF.
	 */
	if (mctp_ctrl_cmd_is_transport(msg_hdr)) {
		if (bus->binding->control_rx != NULL) {
			/* MCTP bus binding handler */
			bus->binding->control_rx(src, msg_tag, tag_owner,
						 bus->binding->control_rx_data,
						 buffer, length);
			return true;
		}
	}

	/*
	 * The command was not handled, due to the lack of a specific callback.
	 * It will be passed to the regular message_rx handler.
	 */
	return false;
}

static inline bool mctp_rx_dest_is_local(struct mctp_bus *bus, mctp_eid_t dest)
{
	return dest == bus->eid || dest == MCTP_EID_NULL ||
	       dest == MCTP_EID_BROADCAST;
}

static inline bool mctp_ctrl_cmd_is_request(struct mctp_ctrl_msg_hdr *hdr)
{
	return hdr->ic_msg_type == MCTP_CTRL_HDR_MSG_TYPE &&
	       hdr->rq_dgram_inst & MCTP_CTRL_HDR_FLAG_REQUEST;
}

/*
 * Receive the complete MCTP message and route it.
 * Asserts:
 *     'buf' is not NULL.
 */
static void mctp_rx(struct mctp *mctp, struct mctp_bus *bus, mctp_eid_t src,
		    mctp_eid_t dest, bool tag_owner, uint8_t msg_tag, void *buf,
		    size_t len)
{
	assert(buf != NULL);

	if (mctp->route_policy == ROUTE_ENDPOINT &&
	    mctp_rx_dest_is_local(bus, dest)) {
		/* Handle MCTP Control Messages: */
		if (len >= sizeof(struct mctp_ctrl_msg_hdr)) {
			struct mctp_ctrl_msg_hdr *msg_hdr = buf;

			/*
			 * Identify if this is a control request message.
			 * See DSP0236 v1.3.0 sec. 11.5.
			 */
			if (mctp_ctrl_cmd_is_request(msg_hdr)) {
				bool handled;
				handled = mctp_ctrl_handle_msg(
					bus, src, msg_tag, tag_owner, buf, len);
				if (handled)
					return;
			}
		}

		if (mctp->message_rx)
			mctp->message_rx(src, tag_owner, msg_tag,
					 mctp->message_rx_data, buf, len);
	}

	if (mctp->route_policy == ROUTE_BRIDGE) {
		int i;

		for (i = 0; i < mctp->n_busses; i++) {
			struct mctp_bus *dest_bus = &mctp->busses[i];
			if (dest_bus == bus)
				continue;

			mctp_message_tx_on_bus(dest_bus, src, dest, tag_owner,
					       msg_tag, buf, len);
		}
	}
}

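/* Entry point for bindings delivering a received packet. Performs
 * per-packet validation, sequence checking and message reassembly,
 * then hands complete messages to mctp_rx(). Always consumes (frees)
 * 'pkt'. */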
void mctp_bus_rx(struct mctp_binding *binding, struct mctp_pktbuf *pkt)
{
	struct mctp_bus *bus = binding->bus;
	struct mctp *mctp = binding->mctp;
	uint8_t flags, exp_seq, seq, tag;
	struct mctp_msg_ctx *ctx;
	struct mctp_hdr *hdr;
	bool tag_owner;
	size_t len;
	void *p;
	int rc;

	assert(bus);

	/* Drop the packet if it carries no more than an MCTP header */
	if (mctp_pktbuf_size(pkt) <= sizeof(struct mctp_hdr))
		goto out;

	if (mctp->capture)
		mctp->capture(pkt, MCTP_MESSAGE_CAPTURE_INCOMING,
			      mctp->capture_data);

	hdr = mctp_pktbuf_hdr(pkt);

	/* small optimisation: don't bother with reassembly if we're going to
	 * drop the packet in mctp_rx anyway */
	if (mctp->route_policy == ROUTE_ENDPOINT && hdr->dest != bus->eid)
		goto out;

	flags = hdr->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
	seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
	tag_owner = (hdr->flags_seq_tag >> MCTP_HDR_TO_SHIFT) &
		    MCTP_HDR_TO_MASK;

	switch (flags) {
	case MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM:
		/* single-packet message - send straight up to rx function,
		 * no need to create a message context */
		len = pkt->end - pkt->mctp_hdr_off - sizeof(struct mctp_hdr);
		p = pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
		mctp_rx(mctp, bus, hdr->src, hdr->dest, tag_owner, tag, p, len);
		break;

	case MCTP_HDR_FLAG_SOM:
		/* start of a new message - start the new context for
		 * future message reception. If an existing context is
		 * already present, drop it. */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (ctx) {
			mctp_msg_ctx_reset(ctx);
		} else {
			ctx = mctp_msg_ctx_create(mctp, hdr->src, hdr->dest,
						  tag);
			/* If context creation fails due to exhaustion of the
			 * contexts we can support, drop the packet */
			if (!ctx) {
				mctp_prdebug("Context buffers exhausted.");
				goto out;
			}
		}

		/* Save the fragment size; subsequent middle fragments
		 * should be of the same size */
		ctx->fragment_size = mctp_pktbuf_size(pkt);

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
		} else {
			ctx->last_seq = seq;
		}

		break;

	case MCTP_HDR_FLAG_EOM:
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;

		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len > ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected"
				     " less than %zu, received = %zu",
				     ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (!rc)
			mctp_rx(mctp, bus, ctx->src, ctx->dest, tag_owner, tag,
				ctx->buf, ctx->buf_size);

		mctp_msg_ctx_drop(ctx);
		break;

	case 0:
		/* Neither SOM nor EOM */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;
		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len != ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected = %zu "
				     "received = %zu",
				     ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
			goto out;
		}
		ctx->last_seq = seq;

		break;
	}
out:
	mctp_pktbuf_free(pkt);
}

static int mctp_packet_tx(struct mctp_bus *bus, struct mctp_pktbuf *pkt)
{
	struct mctp *mctp = bus->binding->mctp;

	if (bus->state != mctp_bus_state_tx_enabled)
		return -1;

	if (mctp->capture)
		mctp->capture(pkt, MCTP_MESSAGE_CAPTURE_OUTGOING,
			      mctp->capture_data);

	return bus->binding->tx(bus->binding, pkt);
}

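/* Drain the bus transmit queue. Packets are freed on successful
 * transmission (or when oversized); on -EBUSY or any other error the
 * remaining packets stay queued for a later retry. */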
static void mctp_send_tx_queue(struct mctp_bus *bus)
{
	struct mctp_pktbuf *pkt;

	while ((pkt = bus->tx_queue_head)) {
		int rc;

		rc = mctp_packet_tx(bus, pkt);
		switch (rc) {
		/* If transmission succeeded, or */
		case 0:
		/* If the packet is somehow too large */
		case -EMSGSIZE:
			/* Drop the packet */
			bus->tx_queue_head = pkt->next;
			mctp_pktbuf_free(pkt);
			break;

		/* If the binding was busy, or */
		case -EBUSY:
		/* Some other unknown error occurred */
		default:
			/* Make sure the tail pointer is consistent and retry later */
			goto cleanup_tail;
		};
	}

cleanup_tail:
	if (!bus->tx_queue_head)
		bus->tx_queue_tail = NULL;
}

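/* Called by bindings to signal whether they can currently transmit.
 * Moves the bus through the constructed / tx_enabled / tx_disabled
 * states and flushes any queued packets when transmission is
 * re-enabled. */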
void mctp_binding_set_tx_enabled(struct mctp_binding *binding, bool enable)
{
	struct mctp_bus *bus = binding->bus;

	switch (bus->state) {
	case mctp_bus_state_constructed:
		if (!enable)
			return;

		if (binding->pkt_size < MCTP_PACKET_SIZE(MCTP_BTU)) {
			mctp_prerr(
				"Cannot start %s binding with invalid MTU: %zu",
				binding->name,
				MCTP_BODY_SIZE(binding->pkt_size));
			return;
		}

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prinfo("%s binding started", binding->name);
		return;
	case mctp_bus_state_tx_enabled:
		if (enable)
			return;

		bus->state = mctp_bus_state_tx_disabled;
		mctp_prdebug("%s binding Tx disabled", binding->name);
		return;
	case mctp_bus_state_tx_disabled:
		if (!enable)
			return;

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prdebug("%s binding Tx enabled", binding->name);
		mctp_send_tx_queue(bus);
		return;
	}
}

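/* Fragment 'msg' into packets no larger than the binding's MTU, apply
 * the MCTP header (SOM/EOM flags, sequence number, tag and TO bit),
 * queue the packets on the bus and attempt to transmit them. */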
static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, bool tag_owner,
				  uint8_t msg_tag, void *msg, size_t msg_len)
{
	size_t max_payload_len, payload_len, p;
	struct mctp_pktbuf *pkt;
	struct mctp_hdr *hdr;
	int i;

	if (bus->state == mctp_bus_state_constructed)
		return -ENXIO;

	if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag)
		return -EINVAL;

	max_payload_len = MCTP_BODY_SIZE(bus->binding->pkt_size);

	{
		const bool valid_mtu = max_payload_len >= MCTP_BTU;
		assert(valid_mtu);
		if (!valid_mtu)
			return -EINVAL;
	}

	mctp_prdebug(
		"%s: Generating packets for transmission of %zu byte message from %hhu to %hhu",
		__func__, msg_len, src, dest);

	/* queue up packets, each of max MCTP_MTU size */
	for (p = 0, i = 0; p < msg_len; i++) {
		payload_len = msg_len - p;
		if (payload_len > max_payload_len)
			payload_len = max_payload_len;

		pkt = mctp_pktbuf_alloc(bus->binding,
					payload_len + sizeof(*hdr));
		hdr = mctp_pktbuf_hdr(pkt);

		hdr->ver = bus->binding->version & 0xf;
		hdr->dest = dest;
		hdr->src = src;
		hdr->flags_seq_tag = (tag_owner << MCTP_HDR_TO_SHIFT) |
				     (msg_tag << MCTP_HDR_TAG_SHIFT);

		if (i == 0)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_SOM;
		if (p + payload_len >= msg_len)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_EOM;
		hdr->flags_seq_tag |= (i & MCTP_HDR_SEQ_MASK)
				      << MCTP_HDR_SEQ_SHIFT;

		memcpy(mctp_pktbuf_data(pkt), (uint8_t *)msg + p, payload_len);

		/* add to tx queue */
		if (bus->tx_queue_tail)
			bus->tx_queue_tail->next = pkt;
		else
			bus->tx_queue_head = pkt;
		bus->tx_queue_tail = pkt;

		p += payload_len;
	}

	mctp_prdebug("%s: Enqueued %d packets", __func__, i);

	mctp_send_tx_queue(bus);

	return 0;
}

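/* Public transmit entry point: validate the message tag, look up the
 * bus for the destination EID and send the message with the bus's own
 * EID as source. */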
int mctp_message_tx(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
		    uint8_t msg_tag, void *msg, size_t msg_len)
{
	struct mctp_bus *bus;

	/* TODO: Protect against same tag being used across
	 * different callers */
	if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag) {
		mctp_prerr("Incorrect message tag %u passed.", msg_tag);
		return -EINVAL;
	}

	bus = find_bus_for_eid(mctp, eid);
	if (!bus)
		return 0;

	return mctp_message_tx_on_bus(bus, bus->eid, eid, tag_owner, msg_tag,
				      msg, msg_len);
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800855}