/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#undef pr_fmt
#define pr_fmt(fmt) "core: " fmt

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-cmds.h"
#include "range.h"
#include "compiler.h"
#include "core-internal.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
                                  mctp_eid_t dest, bool tag_owner,
                                  uint8_t msg_tag, void *msg, size_t msg_len);
static void mctp_dealloc_tag(struct mctp_bus *bus, mctp_eid_t local,
                             mctp_eid_t remote, uint8_t tag);

struct mctp_pktbuf *mctp_pktbuf_alloc(struct mctp_binding *binding, size_t len)
{
        size_t size =
                binding->pkt_size + binding->pkt_header + binding->pkt_trailer;
        if (len > size) {
                return NULL;
        }

        void *storage = __mctp_alloc(size + sizeof(struct mctp_pktbuf));
        if (!storage) {
                return NULL;
        }
        struct mctp_pktbuf *pkt = mctp_pktbuf_init(binding, storage);
        pkt->alloc = true;
        pkt->end = pkt->start + len;
        return pkt;
}
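
/*
 * Illustrative receive-path sketch (hypothetical binding code, not part of
 * this file): a binding holding a raw MCTP packet of 'frame_len' bytes in
 * 'frame' could hand it to the core with:
 *
 *      struct mctp_pktbuf *pkt = mctp_pktbuf_alloc(binding, frame_len);
 *      if (pkt) {
 *              memcpy(mctp_pktbuf_hdr(pkt), frame, frame_len);
 *              mctp_bus_rx(binding, pkt);
 *              mctp_pktbuf_free(pkt);
 *      }
 */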

void mctp_pktbuf_free(struct mctp_pktbuf *pkt)
{
        if (pkt->alloc) {
                __mctp_free(pkt);
        } else {
                mctp_prdebug("pktbuf_free called for non-alloced");
        }
}

struct mctp_pktbuf *mctp_pktbuf_init(struct mctp_binding *binding,
                                     void *storage)
{
        size_t size =
                binding->pkt_size + binding->pkt_header + binding->pkt_trailer;
        struct mctp_pktbuf *buf = (struct mctp_pktbuf *)storage;
        buf->size = size;
        buf->start = binding->pkt_header;
        buf->end = buf->start;
        buf->mctp_hdr_off = buf->start;
        buf->alloc = false;

        return buf;
}
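
/*
 * Layout notes: within the flat 'data' array, 'start' is initially offset by
 * the binding's pkt_header so there is headroom for a medium-specific header,
 * 'mctp_hdr_off' records where the MCTP header sits, 'end' advances as
 * payload is appended, and 'size' is the total capacity including space for
 * the binding's trailer.
 */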

struct mctp_hdr *mctp_pktbuf_hdr(struct mctp_pktbuf *pkt)
{
        return (struct mctp_hdr *)(pkt->data + pkt->mctp_hdr_off);
}

void *mctp_pktbuf_data(struct mctp_pktbuf *pkt)
{
        return pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
}

size_t mctp_pktbuf_size(const struct mctp_pktbuf *pkt)
{
        return pkt->end - pkt->start;
}

void *mctp_pktbuf_alloc_start(struct mctp_pktbuf *pkt, size_t size)
{
        assert(size <= pkt->start);
        pkt->start -= size;
        return pkt->data + pkt->start;
}

void *mctp_pktbuf_alloc_end(struct mctp_pktbuf *pkt, size_t size)
{
        void *buf;

        assert(size <= (pkt->size - pkt->end));
        buf = pkt->data + pkt->end;
        pkt->end += size;
        return buf;
}

int mctp_pktbuf_push(struct mctp_pktbuf *pkt, const void *data, size_t len)
{
        void *p;

        if (pkt->end + len > pkt->size)
                return -1;

        p = pkt->data + pkt->end;

        pkt->end += len;
        memcpy(p, data, len);

        return 0;
}

void *mctp_pktbuf_pop(struct mctp_pktbuf *pkt, size_t len)
{
        if (len > mctp_pktbuf_size(pkt))
                return NULL;

        pkt->end -= len;
        return pkt->data + pkt->end;
}
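
/*
 * The helpers above treat a pktbuf as a packet with reserved headroom and
 * tailroom: mctp_pktbuf_alloc_start() grows the packet downwards into the
 * headroom (for example, so a binding can prepend its medium header), while
 * mctp_pktbuf_alloc_end() and mctp_pktbuf_push() append to the tail (for
 * example, trailer or checksum bytes) and mctp_pktbuf_pop() trims bytes back
 * off the tail.
 */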

/* Allocate a duplicate of the message and copy it */
static void *mctp_msg_dup(const void *msg, size_t msg_len, struct mctp *mctp)
{
        void *copy = __mctp_msg_alloc(msg_len, mctp);
        if (!copy) {
                mctp_prdebug("msg dup len %zu failed", msg_len);
                return NULL;
        }

        memcpy(copy, msg, msg_len);
        return copy;
}

/* Message reassembly */
static struct mctp_msg_ctx *mctp_msg_ctx_lookup(struct mctp *mctp, uint8_t src,
                                                uint8_t dest, uint8_t tag)
{
        unsigned int i;

        /* @todo: better lookup, if we add support for more outstanding
         * message contexts */
        for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
                struct mctp_msg_ctx *ctx = &mctp->msg_ctxs[i];
                if (ctx->buf && ctx->src == src && ctx->dest == dest &&
                    ctx->tag == tag)
                        return ctx;
        }

        return NULL;
}

static struct mctp_msg_ctx *mctp_msg_ctx_create(struct mctp *mctp, uint8_t src,
                                                uint8_t dest, uint8_t tag)
{
        struct mctp_msg_ctx *ctx = NULL;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
                struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
                if (!tmp->buf) {
                        ctx = tmp;
                        break;
                }
        }

        if (!ctx)
                return NULL;

        ctx->src = src;
        ctx->dest = dest;
        ctx->tag = tag;

        ctx->buf_size = 0;
        ctx->buf_alloc_size = mctp->max_message_size;
        ctx->buf = __mctp_msg_alloc(ctx->buf_alloc_size, mctp);
        if (!ctx->buf) {
                return NULL;
        }

        return ctx;
}

static void mctp_msg_ctx_drop(struct mctp_bus *bus, struct mctp_msg_ctx *ctx)
{
        /* Free and mark as unused */
        __mctp_msg_free(ctx->buf, bus->mctp);
        ctx->buf = NULL;
}

static void mctp_msg_ctx_reset(struct mctp_msg_ctx *ctx)
{
        ctx->buf_size = 0;
        ctx->fragment_size = 0;
}

static int mctp_msg_ctx_add_pkt(struct mctp_msg_ctx *ctx,
                                struct mctp_pktbuf *pkt)
{
        size_t len;

        len = mctp_pktbuf_size(pkt) - sizeof(struct mctp_hdr);

        if (len + ctx->buf_size < ctx->buf_size) {
                return -1;
        }

        if (ctx->buf_size + len > ctx->buf_alloc_size) {
                return -1;
        }

        memcpy((uint8_t *)ctx->buf + ctx->buf_size, mctp_pktbuf_data(pkt), len);
        ctx->buf_size += len;

        return 0;
}

/* Core API functions */
struct mctp *mctp_init(void)
{
        struct mctp *mctp;

        mctp = __mctp_alloc(sizeof(*mctp));

        if (!mctp)
                return NULL;

        mctp_setup(mctp, sizeof(*mctp));
        return mctp;
}

int mctp_setup(struct mctp *mctp, size_t struct_mctp_size)
{
        if (struct_mctp_size < sizeof(struct mctp)) {
                mctp_prdebug("Mismatching struct mctp");
                return -EINVAL;
        }
        memset(mctp, 0, sizeof(*mctp));
        mctp->max_message_size = MCTP_MAX_MESSAGE_SIZE;
        return 0;
}
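
/*
 * mctp_init() allocates the context from the heap and then runs mctp_setup();
 * mctp_setup() can also be called directly on caller-provided storage. The
 * struct_mctp_size check guards against a caller that was compiled against a
 * smaller, mismatched struct mctp layout.
 */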

void mctp_set_max_message_size(struct mctp *mctp, size_t message_size)
{
        mctp->max_message_size = message_size;
}

void mctp_set_capture_handler(struct mctp *mctp, mctp_capture_fn fn, void *user)
{
        mctp->capture = fn;
        mctp->capture_data = user;
}
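
/*
 * The capture callback registered above is invoked for every packet in both
 * directions: from mctp_bus_rx() with MCTP_MESSAGE_CAPTURE_INCOMING and from
 * mctp_packet_tx() with MCTP_MESSAGE_CAPTURE_OUTGOING.
 */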

static void mctp_bus_destroy(struct mctp_bus *bus, struct mctp *mctp)
{
        if (bus->tx_msg) {
                __mctp_msg_free(bus->tx_msg, mctp);
                bus->tx_msg = NULL;
        }
}

void mctp_cleanup(struct mctp *mctp)
{
        size_t i;

        /* Cleanup message assembly contexts */
        static_assert(ARRAY_SIZE(mctp->msg_ctxs) < SIZE_MAX, "size");
        for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
                struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
                if (tmp->buf)
                        __mctp_msg_free(tmp->buf, mctp);
        }

        while (mctp->n_busses--)
                mctp_bus_destroy(&mctp->busses[mctp->n_busses], mctp);
}

void mctp_destroy(struct mctp *mctp)
{
        mctp_cleanup(mctp);
        __mctp_free(mctp);
}

int mctp_set_rx_all(struct mctp *mctp, mctp_rx_fn fn, void *data)
{
        mctp->message_rx = fn;
        mctp->message_rx_data = data;
        return 0;
}

static struct mctp_bus *find_bus_for_eid(struct mctp *mctp, mctp_eid_t dest
                                         __attribute__((unused)))
{
        if (mctp->n_busses == 0)
                return NULL;

        /* for now, just use the first bus. For full routing support,
         * we will need a table of neighbours */
        return &mctp->busses[0];
}

int mctp_register_bus(struct mctp *mctp, struct mctp_binding *binding,
                      mctp_eid_t eid)
{
        int rc = 0;

        /* todo: multiple busses */
        static_assert(MCTP_MAX_BUSSES >= 1, "need a bus");
        assert(mctp->n_busses == 0);
        mctp->n_busses = 1;

        assert(binding->tx_storage);

        memset(mctp->busses, 0, sizeof(struct mctp_bus));
        mctp->busses[0].mctp = mctp;
        mctp->busses[0].binding = binding;
        mctp->busses[0].eid = eid;
        binding->bus = &mctp->busses[0];
        binding->mctp = mctp;
        mctp->route_policy = ROUTE_ENDPOINT;

        if (binding->start) {
                rc = binding->start(binding);
                if (rc < 0) {
                        mctp_prerr("Failed to start binding: %d", rc);
                        binding->bus = NULL;
                        mctp->n_busses = 0;
                }
        }

        return rc;
}
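
/*
 * Typical endpoint bring-up sketch (hypothetical caller code; 'binding' is a
 * concrete hardware binding and 'rx_handler' an application callback, neither
 * defined here):
 *
 *      struct mctp *mctp = mctp_init();
 *      mctp_set_rx_all(mctp, rx_handler, NULL);
 *      mctp_register_bus(mctp, binding, local_eid);
 *      ...
 *      mctp_message_tx(mctp, dest_eid, true, 0, msg, msg_len);
 *      ...
 *      mctp_destroy(mctp);
 */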

void mctp_unregister_bus(struct mctp *mctp, struct mctp_binding *binding)
{
        /*
         * We only support one bus right now; once the call completes we will
         * have no more busses
         */
        mctp->n_busses = 0;
        binding->mctp = NULL;
        binding->bus = NULL;
}

int mctp_bridge_busses(struct mctp *mctp, struct mctp_binding *b1,
                       struct mctp_binding *b2)
{
        int rc = 0;

        assert(b1->tx_storage);
        assert(b2->tx_storage);

        assert(mctp->n_busses == 0);
        assert(MCTP_MAX_BUSSES >= 2);
        memset(mctp->busses, 0, 2 * sizeof(struct mctp_bus));
        mctp->n_busses = 2;
        mctp->busses[0].binding = b1;
        b1->bus = &mctp->busses[0];
        b1->mctp = mctp;
        mctp->busses[1].binding = b2;
        b2->bus = &mctp->busses[1];
        b2->mctp = mctp;

        mctp->route_policy = ROUTE_BRIDGE;

        if (b1->start) {
                rc = b1->start(b1);
                if (rc < 0) {
                        mctp_prerr("Failed to start bridged bus %s: %d",
                                   b1->name, rc);
                        goto done;
                }
        }

        if (b2->start) {
                rc = b2->start(b2);
                if (rc < 0) {
                        mctp_prerr("Failed to start bridged bus %s: %d",
                                   b2->name, rc);
                        goto done;
                }
        }

done:
        return rc;
}

static inline bool mctp_ctrl_cmd_is_transport(struct mctp_ctrl_msg_hdr *hdr)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wtype-limits"
        return ((hdr->command_code >= MCTP_CTRL_CMD_FIRST_TRANSPORT) &&
                (hdr->command_code <= MCTP_CTRL_CMD_LAST_TRANSPORT));
#pragma GCC diagnostic pop
}

static bool mctp_ctrl_handle_msg(struct mctp_bus *bus, mctp_eid_t src,
                                 uint8_t msg_tag, bool tag_owner, void *buffer,
                                 size_t length)
{
        struct mctp_ctrl_msg_hdr *msg_hdr = buffer;

        /*
         * Control message is received. If a transport control message handler
         * is provided, it will be called. If there is no dedicated handler,
         * this function returns false and the data can be handled by the
         * generic message handler. The transport control message handler will
         * be provided with messages in the command range 0xF0 - 0xFF.
         */
419 if (mctp_ctrl_cmd_is_transport(msg_hdr)) {
420 if (bus->binding->control_rx != NULL) {
421 /* MCTP bus binding handler */
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530422 bus->binding->control_rx(src, msg_tag, tag_owner,
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100423 bus->binding->control_rx_data,
424 buffer, length);
425 return true;
426 }
427 }
428
429 /*
430 * Command was not handled, due to lack of specific callback.
431 * It will be passed to regular message_rx handler.
432 */
433 return false;
434}
435
436static inline bool mctp_rx_dest_is_local(struct mctp_bus *bus, mctp_eid_t dest)
437{
438 return dest == bus->eid || dest == MCTP_EID_NULL ||
439 dest == MCTP_EID_BROADCAST;
440}
441
442static inline bool mctp_ctrl_cmd_is_request(struct mctp_ctrl_msg_hdr *hdr)
443{
444 return hdr->ic_msg_type == MCTP_CTRL_HDR_MSG_TYPE &&
445 hdr->rq_dgram_inst & MCTP_CTRL_HDR_FLAG_REQUEST;
446}
447
448/*
449 * Receive the complete MCTP message and route it.
450 * Asserts:
451 * 'buf' is not NULL.
452 */
453static void mctp_rx(struct mctp *mctp, struct mctp_bus *bus, mctp_eid_t src,
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530454 mctp_eid_t dest, bool tag_owner, uint8_t msg_tag, void *buf,
455 size_t len)
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100456{
457 assert(buf != NULL);
458
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800459 if (mctp->route_policy == ROUTE_ENDPOINT &&
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100460 mctp_rx_dest_is_local(bus, dest)) {
Matt Johnston61c95992024-09-16 16:50:35 +0800461 /* Note responses to allocated tags */
462 if (!tag_owner) {
463 mctp_dealloc_tag(bus, dest, src, msg_tag);
464 }
465
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100466 /* Handle MCTP Control Messages: */
467 if (len >= sizeof(struct mctp_ctrl_msg_hdr)) {
468 struct mctp_ctrl_msg_hdr *msg_hdr = buf;
469
470 /*
471 * Identify if this is a control request message.
472 * See DSP0236 v1.3.0 sec. 11.5.
473 */
474 if (mctp_ctrl_cmd_is_request(msg_hdr)) {
475 bool handled;
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530476 handled = mctp_ctrl_handle_msg(
477 bus, src, msg_tag, tag_owner, buf, len);
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100478 if (handled)
479 return;
480 }
481 }
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530482
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100483 if (mctp->message_rx)
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530484 mctp->message_rx(src, tag_owner, msg_tag,
485 mctp->message_rx_data, buf, len);
Wiktor GoĊ‚gowskiba6727e2020-03-13 18:25:01 +0100486 }
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800487
488 if (mctp->route_policy == ROUTE_BRIDGE) {
489 int i;
490
491 for (i = 0; i < mctp->n_busses; i++) {
492 struct mctp_bus *dest_bus = &mctp->busses[i];
493 if (dest_bus == bus)
494 continue;
495
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800496 void *copy = mctp_msg_dup(buf, len, mctp);
497 if (!copy) {
498 return;
499 }
500
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530501 mctp_message_tx_on_bus(dest_bus, src, dest, tag_owner,
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800502 msg_tag, copy, len);
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800503 }
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800504 }
505}
506
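/*
 * Packet ingress and reassembly: a packet with both SOM and EOM set is
 * delivered to mctp_rx() directly; SOM alone starts a reassembly context
 * keyed by (src, dest, tag); middle and EOM fragments must arrive with
 * consecutive sequence numbers (modulo 4) and with sizes consistent with the
 * first fragment, otherwise the context is dropped.
 */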
void mctp_bus_rx(struct mctp_binding *binding, struct mctp_pktbuf *pkt)
{
        struct mctp_bus *bus = binding->bus;
        struct mctp *mctp = binding->mctp;
        uint8_t flags, exp_seq, seq, tag;
        struct mctp_msg_ctx *ctx;
        struct mctp_hdr *hdr;
        bool tag_owner;
        size_t len;
        void *p;
        int rc;

        assert(bus);

        /* Drop packet if it was smaller than mctp hdr size */
        if (mctp_pktbuf_size(pkt) < sizeof(struct mctp_hdr))
                goto out;

        if (mctp->capture)
                mctp->capture(pkt, MCTP_MESSAGE_CAPTURE_INCOMING,
                              mctp->capture_data);

        hdr = mctp_pktbuf_hdr(pkt);

        /* small optimisation: don't bother with reassembly if we're going to
         * drop the packet in mctp_rx anyway */
        if (mctp->route_policy == ROUTE_ENDPOINT &&
            !mctp_rx_dest_is_local(bus, hdr->dest))
                goto out;

        flags = hdr->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
        tag = (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
        seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
        tag_owner = (hdr->flags_seq_tag >> MCTP_HDR_TO_SHIFT) &
                    MCTP_HDR_TO_MASK;

        switch (flags) {
        case MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM:
                /* single-packet message - send straight up to rx function,
                 * no need to create a message context */
                len = pkt->end - pkt->mctp_hdr_off - sizeof(struct mctp_hdr);
                p = mctp_msg_dup(pkt->data + pkt->mctp_hdr_off +
                                         sizeof(struct mctp_hdr),
                                 len, mctp);
                if (p) {
                        mctp_rx(mctp, bus, hdr->src, hdr->dest, tag_owner, tag,
                                p, len);
                        __mctp_msg_free(p, mctp);
                }
                break;

        case MCTP_HDR_FLAG_SOM:
                /* start of a new message - start the new context for
                 * future message reception. If an existing context is
                 * already present, drop it. */
                ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
                if (ctx) {
                        mctp_msg_ctx_reset(ctx);
                } else {
                        ctx = mctp_msg_ctx_create(mctp, hdr->src, hdr->dest,
                                                  tag);
                        /* If context creation fails due to exhaustion of
                         * the contexts we can support, drop the packet */
                        if (!ctx) {
                                mctp_prdebug("Context buffers exhausted.");
                                goto out;
                        }
                }

                /* Save the fragment size; subsequent middle fragments
                 * should be of the same size */
578 ctx->fragment_size = mctp_pktbuf_size(pkt);
579
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800580 rc = mctp_msg_ctx_add_pkt(ctx, pkt);
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800581 if (rc) {
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800582 mctp_msg_ctx_drop(bus, ctx);
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800583 } else {
584 ctx->last_seq = seq;
585 }
586
587 break;
588
589 case MCTP_HDR_FLAG_EOM:
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800590 ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800591 if (!ctx)
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800592 goto out;
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800593
Ed Tanousc2def9f2019-02-21 08:33:08 -0800594 exp_seq = (ctx->last_seq + 1) % 4;
595
596 if (exp_seq != seq) {
597 mctp_prdebug(
598 "Sequence number %d does not match expected %d",
599 seq, exp_seq);
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800600 mctp_msg_ctx_drop(bus, ctx);
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800601 goto out;
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800602 }
603
Sumanth Bhat69f545f2021-05-18 09:16:43 +0000604 len = mctp_pktbuf_size(pkt);
605
606 if (len > ctx->fragment_size) {
Patrick Williamsa721c2d2022-12-04 14:30:26 -0600607 mctp_prdebug("Unexpected fragment size. Expected"
608 " less than %zu, received = %zu",
609 ctx->fragment_size, len);
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800610 mctp_msg_ctx_drop(bus, ctx);
Sumanth Bhat69f545f2021-05-18 09:16:43 +0000611 goto out;
612 }
613
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800614 rc = mctp_msg_ctx_add_pkt(ctx, pkt);
Jeremy Kerr1a4ec3c2019-09-03 11:01:50 +0800615 if (!rc)
Sumanth Bhatf39c3852022-01-10 17:04:10 +0530616 mctp_rx(mctp, bus, ctx->src, ctx->dest, tag_owner, tag,
617 ctx->buf, ctx->buf_size);
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800618
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800619 mctp_msg_ctx_drop(bus, ctx);
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800620 break;
Ed Tanousc2def9f2019-02-21 08:33:08 -0800621
622 case 0:
623 /* Neither SOM nor EOM */
Patrick Williamsa721c2d2022-12-04 14:30:26 -0600624 ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
Ed Tanousc2def9f2019-02-21 08:33:08 -0800625 if (!ctx)
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800626 goto out;
Ed Tanousc2def9f2019-02-21 08:33:08 -0800627
628 exp_seq = (ctx->last_seq + 1) % 4;
629 if (exp_seq != seq) {
630 mctp_prdebug(
631 "Sequence number %d does not match expected %d",
632 seq, exp_seq);
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800633 mctp_msg_ctx_drop(bus, ctx);
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800634 goto out;
Ed Tanousc2def9f2019-02-21 08:33:08 -0800635 }
636
Sumanth Bhat69f545f2021-05-18 09:16:43 +0000637 len = mctp_pktbuf_size(pkt);
638
639 if (len != ctx->fragment_size) {
Patrick Williamsa721c2d2022-12-04 14:30:26 -0600640 mctp_prdebug("Unexpected fragment size. Expected = %zu "
641 "received = %zu",
642 ctx->fragment_size, len);
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800643 mctp_msg_ctx_drop(bus, ctx);
Sumanth Bhat69f545f2021-05-18 09:16:43 +0000644 goto out;
645 }
646
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800647 rc = mctp_msg_ctx_add_pkt(ctx, pkt);
Ed Tanousc2def9f2019-02-21 08:33:08 -0800648 if (rc) {
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800649 mctp_msg_ctx_drop(bus, ctx);
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800650 goto out;
Ed Tanousc2def9f2019-02-21 08:33:08 -0800651 }
652 ctx->last_seq = seq;
653
654 break;
Jeremy Kerr24db71f2019-02-07 21:37:35 +0800655 }
Jeremy Kerrc1693af2019-08-05 14:30:59 +0800656out:
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800657 return;
Jeremy Kerr4cdc2002019-02-07 16:49:12 +0800658}
659
Patrick Williamsa721c2d2022-12-04 14:30:26 -0600660static int mctp_packet_tx(struct mctp_bus *bus, struct mctp_pktbuf *pkt)
Jeremy Kerr4cdc2002019-02-07 16:49:12 +0800661{
Andrew Jeffery5d3d4e62021-08-20 16:44:40 +0930662 struct mctp *mctp = bus->binding->mctp;
663
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800664 if (bus->state != mctp_bus_state_tx_enabled) {
665 mctp_prdebug("tx with bus disabled");
Jeremy Kerr1cd31182019-02-27 18:01:00 +0800666 return -1;
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800667 }
Jeremy Kerr1cd31182019-02-27 18:01:00 +0800668
Andrew Jeffery5d3d4e62021-08-20 16:44:40 +0930669 if (mctp->capture)
Rashmica Guptaf2988972022-11-09 12:26:44 +1100670 mctp->capture(pkt, MCTP_MESSAGE_CAPTURE_OUTGOING,
671 mctp->capture_data);
Andrew Jeffery5d3d4e62021-08-20 16:44:40 +0930672
Jeremy Kerr4cdc2002019-02-07 16:49:12 +0800673 return bus->binding->tx(bus->binding, pkt);
674}
675
Matt Johnston4a09e1d2024-09-13 14:55:58 +0800676/* Returns a pointer to the binding's tx_storage */
677static struct mctp_pktbuf *mctp_next_tx_pkt(struct mctp_bus *bus)
678{
679 if (!bus->tx_msg) {
680 return NULL;
681 }
682
683 size_t p = bus->tx_msgpos;
684 size_t msg_len = bus->tx_msglen;
685 size_t payload_len = msg_len - p;
686 size_t max_payload_len = MCTP_BODY_SIZE(bus->binding->pkt_size);
687 if (payload_len > max_payload_len)
688 payload_len = max_payload_len;
689
690 struct mctp_pktbuf *pkt =
691 mctp_pktbuf_init(bus->binding, bus->binding->tx_storage);
692 struct mctp_hdr *hdr = mctp_pktbuf_hdr(pkt);
693
694 hdr->ver = bus->binding->version & 0xf;
695 hdr->dest = bus->tx_dest;
696 hdr->src = bus->tx_src;
697 hdr->flags_seq_tag = (bus->tx_to << MCTP_HDR_TO_SHIFT) |
698 (bus->tx_tag << MCTP_HDR_TAG_SHIFT);
699
700 if (p == 0)
701 hdr->flags_seq_tag |= MCTP_HDR_FLAG_SOM;
702 if (p + payload_len >= msg_len)
703 hdr->flags_seq_tag |= MCTP_HDR_FLAG_EOM;
704 hdr->flags_seq_tag |= bus->tx_seq << MCTP_HDR_SEQ_SHIFT;
705
706 memcpy(mctp_pktbuf_data(pkt), (uint8_t *)bus->tx_msg + p, payload_len);
707 pkt->end = pkt->start + sizeof(*hdr) + payload_len;
708 bus->tx_pktlen = payload_len;
709
710 mctp_prdebug(
711 "tx dst %d tag %d payload len %zu seq %d. msg pos %zu len %zu",
712 hdr->dest, bus->tx_tag, payload_len, bus->tx_seq, p, msg_len);
713
714 return pkt;
715}
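
/*
 * Each call above packetises the next chunk of the pending message into the
 * binding's tx_storage: the payload is capped at MCTP_BODY_SIZE(pkt_size),
 * SOM is set on the first packet, EOM on the packet that reaches the end of
 * the message, and the bus's rolling tx_seq is written into the header.
 */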

/* Called when a packet has successfully been sent */
static void mctp_tx_complete(struct mctp_bus *bus)
{
        if (!bus->tx_msg) {
                mctp_prdebug("tx complete no message");
                return;
        }

        bus->tx_seq = (bus->tx_seq + 1) & MCTP_HDR_SEQ_MASK;
        bus->tx_msgpos += bus->tx_pktlen;

        if (bus->tx_msgpos >= bus->tx_msglen) {
                __mctp_msg_free(bus->tx_msg, bus->binding->mctp);
                bus->tx_msg = NULL;
        }
}

static void mctp_send_tx_queue(struct mctp_bus *bus)
{
        struct mctp_pktbuf *pkt;

        while (bus->tx_msg && bus->state == mctp_bus_state_tx_enabled) {
                int rc;

                pkt = mctp_next_tx_pkt(bus);

                rc = mctp_packet_tx(bus, pkt);
                switch (rc) {
                /* If transmission succeeded */
                case 0:
                        /* Drop the packet */
                        mctp_tx_complete(bus);
                        break;

                /* If the binding was busy */
                case -EBUSY:
                        /* Keep the packet for next try */
                        mctp_prdebug("tx EBUSY");
                        return;

                /* Some other unknown error occurred */
                default:
                        /* Drop the packet */
                        mctp_prdebug("tx drop %d", rc);
                        mctp_tx_complete(bus);
                        return;
                };
        }
}

void mctp_binding_set_tx_enabled(struct mctp_binding *binding, bool enable)
{
        struct mctp_bus *bus = binding->bus;

        switch (bus->state) {
        case mctp_bus_state_constructed:
                if (!enable)
                        return;

                if (binding->pkt_size < MCTP_PACKET_SIZE(MCTP_BTU)) {
                        mctp_prerr(
                                "Cannot start %s binding with invalid MTU: %zu",
                                binding->name,
                                MCTP_BODY_SIZE(binding->pkt_size));
                        return;
                }

                bus->state = mctp_bus_state_tx_enabled;
                mctp_prinfo("%s binding started", binding->name);
                return;
        case mctp_bus_state_tx_enabled:
                if (enable)
                        return;

                bus->state = mctp_bus_state_tx_disabled;
                mctp_prdebug("%s binding Tx disabled", binding->name);
                return;
        case mctp_bus_state_tx_disabled:
                if (!enable)
                        return;

                bus->state = mctp_bus_state_tx_enabled;
                mctp_prdebug("%s binding Tx enabled", binding->name);
                mctp_send_tx_queue(bus);
                return;
        }
}

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
                                  mctp_eid_t dest, bool tag_owner,
                                  uint8_t msg_tag, void *msg, size_t msg_len)
{
        size_t max_payload_len;
        int rc;

        if (bus->state == mctp_bus_state_constructed) {
                rc = -ENXIO;
                goto err;
        }

        if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag) {
                rc = -EINVAL;
                goto err;
        }

        max_payload_len = MCTP_BODY_SIZE(bus->binding->pkt_size);

        {
                const bool valid_mtu = max_payload_len >= MCTP_BTU;
                assert(valid_mtu);
                if (!valid_mtu) {
                        rc = -EINVAL;
                        goto err;
                }
        }

        mctp_prdebug(
                "%s: Generating packets for transmission of %zu byte message from %hhu to %hhu",
                __func__, msg_len, src, dest);

        if (bus->tx_msg) {
                mctp_prdebug("Bus busy");
                rc = -EBUSY;
                goto err;
        }

        /* Take the message to send */
        bus->tx_msg = msg;
        bus->tx_msglen = msg_len;
        bus->tx_msgpos = 0;
        /* bus->tx_seq is allowed to continue from previous message */
        bus->tx_src = src;
        bus->tx_dest = dest;
        bus->tx_to = tag_owner;
        bus->tx_tag = msg_tag;

        mctp_send_tx_queue(bus);
        return 0;

err:
        __mctp_msg_free(msg, bus->binding->mctp);
        return rc;
}

int mctp_message_tx_alloced(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
                            uint8_t msg_tag, void *msg, size_t msg_len)
{
        struct mctp_bus *bus;

        /* TODO: Protect against same tag being used across
         * different callers */
        if ((msg_tag & MCTP_HDR_TAG_MASK) != msg_tag) {
                mctp_prerr("Incorrect message tag %u passed.", msg_tag);
                __mctp_msg_free(msg, mctp);
                return -EINVAL;
        }

        bus = find_bus_for_eid(mctp, eid);
        if (!bus) {
                __mctp_msg_free(msg, mctp);
                return 0;
        }

        return mctp_message_tx_on_bus(bus, bus->eid, eid, tag_owner, msg_tag,
                                      msg, msg_len);
}

int mctp_message_tx(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
                    uint8_t msg_tag, const void *msg, size_t msg_len)
{
        void *copy = mctp_msg_dup(msg, msg_len, mctp);
        if (!copy) {
                return -ENOMEM;
        }

        return mctp_message_tx_alloced(mctp, eid, tag_owner, msg_tag, copy,
                                       msg_len);
}
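
/*
 * Ownership note: mctp_message_tx() copies the caller's buffer, whereas
 * mctp_message_tx_alloced() takes ownership of a buffer obtained from
 * __mctp_msg_alloc() (or mctp_msg_dup()) and the core frees it with
 * __mctp_msg_free(), on error paths as well as after transmission completes.
 */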

static void mctp_dealloc_tag(struct mctp_bus *bus, mctp_eid_t local,
                             mctp_eid_t remote, uint8_t tag)
{
        struct mctp *mctp = bus->binding->mctp;
        if (local == 0 || remote == 0) {
                return;
        }

        for (size_t i = 0; i < ARRAY_SIZE(mctp->req_tags); i++) {
                struct mctp_req_tag *r = &mctp->req_tags[i];
                if (r->local == local && r->remote == remote && r->tag == tag) {
                        r->local = 0;
                        r->remote = 0;
                        r->tag = 0;
                        return;
                }
        }
}

static int mctp_alloc_tag(struct mctp *mctp, mctp_eid_t local,
                          mctp_eid_t remote, uint8_t *ret_tag)
{
        assert(local != 0);
        assert(remote != 0);

        uint8_t used = 0;
        struct mctp_req_tag *spare = NULL;
        /* Find which tags and slots are used/spare */
        for (size_t i = 0; i < ARRAY_SIZE(mctp->req_tags); i++) {
                struct mctp_req_tag *r = &mctp->req_tags[i];
                if (r->local == 0) {
                        spare = r;
                } else {
                        // TODO: check timeouts
                        if (r->local == local && r->remote == remote) {
                                used |= 1 << r->tag;
                        }
                }
        }

        if (spare == NULL) {
                // All req_tag slots are in-use
                return -EBUSY;
        }

        for (uint8_t t = 0; t < 8; t++) {
                uint8_t tag = (t + mctp->tag_round_robin) % 8;
                if ((used & 1 << tag) == 0) {
                        spare->local = local;
                        spare->remote = remote;
                        spare->tag = tag;
                        *ret_tag = tag;
                        mctp->tag_round_robin = (tag + 1) % 8;
                        return 0;
                }
        }

        // All 8 tags are used for this src/dest pair
        return -EBUSY;
}
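
/*
 * Worked example for the round-robin search above: with tag_round_robin == 6
 * and tags 6 and 7 already recorded for this (local, remote) pair, the loop
 * tests 6, 7, then 0, allocates tag 0 and advances tag_round_robin to 1.
 */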

int mctp_message_tx_request(struct mctp *mctp, mctp_eid_t eid, void *msg,
                            size_t msg_len, uint8_t *ret_alloc_msg_tag)
{
        int rc;
        struct mctp_bus *bus;

        bus = find_bus_for_eid(mctp, eid);
        if (!bus) {
                __mctp_msg_free(msg, mctp);
                return 0;
        }

        uint8_t alloc_tag;
        rc = mctp_alloc_tag(mctp, bus->eid, eid, &alloc_tag);
        if (rc) {
                mctp_prdebug("Failed allocating tag");
                __mctp_msg_free(msg, mctp);
                return rc;
        }

        if (ret_alloc_msg_tag) {
                *ret_alloc_msg_tag = alloc_tag;
        }

        return mctp_message_tx_alloced(mctp, eid, true, alloc_tag, msg,
                                       msg_len);
}
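
/*
 * Request/response sketch (hypothetical caller code): send a request with an
 * automatically allocated owned tag, then match the response in the receive
 * callback by the same tag arriving with tag_owner == false; mctp_rx()
 * releases the tag when that response arrives.
 *
 *      uint8_t tag;
 *      void *req = __mctp_msg_alloc(req_len, mctp);
 *      memcpy(req, req_data, req_len);
 *      mctp_message_tx_request(mctp, dest_eid, req, req_len, &tag);
 */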

bool mctp_is_tx_ready(struct mctp *mctp, mctp_eid_t eid)
{
        struct mctp_bus *bus;

        bus = find_bus_for_eid(mctp, eid);
        if (!bus) {
                return true;
        }
        return bus->tx_msg == NULL;
}

void *mctp_get_alloc_ctx(struct mctp *mctp)
{
        return mctp->alloc_ctx;
}

void mctp_set_alloc_ctx(struct mctp *mctp, void *ctx)
{
        mctp->alloc_ctx = ctx;
}