/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#include <assert.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#undef pr_fmt
#define pr_fmt(fmt) "core: " fmt

#include "libmctp.h"
#include "libmctp-alloc.h"
#include "libmctp-log.h"
#include "libmctp-cmds.h"
#include "range.h"

/* Internal data structures */

enum mctp_bus_state {
	mctp_bus_state_constructed = 0,
	mctp_bus_state_tx_enabled,
	mctp_bus_state_tx_disabled,
};

struct mctp_bus {
	mctp_eid_t eid;
	struct mctp_binding *binding;
	enum mctp_bus_state state;

	struct mctp_pktbuf *tx_queue_head;
	struct mctp_pktbuf *tx_queue_tail;

	/* todo: routing */
};

struct mctp_msg_ctx {
	uint8_t src;
	uint8_t dest;
	uint8_t tag;
	uint8_t last_seq;
	void *buf;
	size_t buf_size;
	size_t buf_alloc_size;
	size_t fragment_size;
};

struct mctp {
	int n_busses;
	struct mctp_bus *busses;

	/* Message RX callback */
	mctp_rx_fn message_rx;
	void *message_rx_data;

	/* Message reassembly.
	 * @todo: flexible context count
	 */
	struct mctp_msg_ctx msg_ctxs[16];

	enum {
		ROUTE_ENDPOINT,
		ROUTE_BRIDGE,
	} route_policy;
	size_t max_message_size;
};

#ifndef BUILD_ASSERT
#define BUILD_ASSERT(x) \
	do { (void)sizeof(char[0-(!(x))]); } while (0)
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif

/* 64kb should be sufficient for a single message. Applications
 * requiring higher sizes can override by setting max_message_size. */
#ifndef MCTP_MAX_MESSAGE_SIZE
#define MCTP_MAX_MESSAGE_SIZE 65536
#endif
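/*
 * Sketch of how an application might raise the limit; both hooks exist in
 * this file (the #ifndef guard above, and mctp_set_max_message_size()
 * below):
 *
 *	cc -DMCTP_MAX_MESSAGE_SIZE=262144 ...		- build-time override
 *	mctp_set_max_message_size(mctp, 256 * 1024);	- run-time, per instance
 */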

static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, void *msg, size_t msg_len);

struct mctp_pktbuf *mctp_pktbuf_alloc(struct mctp_binding *binding, size_t len)
{
	struct mctp_pktbuf *buf;
	size_t size;

	size = binding->pkt_size + binding->pkt_header + binding->pkt_trailer;

	/* todo: pools */
	buf = __mctp_alloc(sizeof(*buf) + size);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->start = binding->pkt_header;
	buf->end = buf->start + len;
	buf->mctp_hdr_off = buf->start;
	buf->next = NULL;

	return buf;
}
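/*
 * Packet buffer layout, as used by the allocator above: 'data' holds
 * binding headroom, then the MCTP header and payload, then binding
 * trailer space.
 *
 *	data[0]        data[start]                data[end]      data[size]
 *	  | pkt_header   | MCTP header + payload    | pkt_trailer   |
 *
 * 'start' begins at the binding's pkt_header so a binding can later
 * prepend its own framing with mctp_pktbuf_alloc_start().
 */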

void mctp_pktbuf_free(struct mctp_pktbuf *pkt)
{
	__mctp_free(pkt);
}

struct mctp_hdr *mctp_pktbuf_hdr(struct mctp_pktbuf *pkt)
{
	return (void *)pkt->data + pkt->mctp_hdr_off;
}

void *mctp_pktbuf_data(struct mctp_pktbuf *pkt)
{
	return (void *)pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
}

size_t mctp_pktbuf_size(struct mctp_pktbuf *pkt)
{
	return pkt->end - pkt->start;
}

void *mctp_pktbuf_alloc_start(struct mctp_pktbuf *pkt, size_t size)
{
	assert(size <= pkt->start);
	pkt->start -= size;
	return pkt->data + pkt->start;
}

void *mctp_pktbuf_alloc_end(struct mctp_pktbuf *pkt, size_t size)
{
	void *buf;

	assert(size <= (pkt->size - pkt->end));
	buf = pkt->data + pkt->end;
	pkt->end += size;
	return buf;
}

int mctp_pktbuf_push(struct mctp_pktbuf *pkt, void *data, size_t len)
{
	void *p;

	if (pkt->end + len > pkt->size)
		return -1;

	p = pkt->data + pkt->end;

	pkt->end += len;
	memcpy(p, data, len);

	return 0;
}

void *mctp_pktbuf_pop(struct mctp_pktbuf *pkt, size_t len)
{
	if (len > mctp_pktbuf_size(pkt))
		return NULL;

	pkt->end -= len;
	return pkt->data + pkt->end;
}
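/*
 * Minimal usage sketch for the accessors above, from a binding's tx path;
 * 'struct hypothetical_hdr' is illustrative only and not part of libmctp:
 *
 *	struct hypothetical_hdr *h;
 *
 *	h = mctp_pktbuf_alloc_start(pkt, sizeof(*h));	// use reserved headroom
 *	h->len = mctp_pktbuf_size(pkt);			// MCTP header + payload
 *	// trailing bytes (e.g. a CRC) would be reserved with
 *	// mctp_pktbuf_alloc_end(), or appended with mctp_pktbuf_push()
 */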

/* Message reassembly */
static struct mctp_msg_ctx *mctp_msg_ctx_lookup(struct mctp *mctp,
		uint8_t src, uint8_t dest, uint8_t tag)
{
	unsigned int i;

	/* @todo: better lookup, if we add support for more outstanding
	 * message contexts */
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *ctx = &mctp->msg_ctxs[i];
		if (ctx->src == src && ctx->dest == dest && ctx->tag == tag)
			return ctx;
	}

	return NULL;
}

static struct mctp_msg_ctx *mctp_msg_ctx_create(struct mctp *mctp,
		uint8_t src, uint8_t dest, uint8_t tag)
{
	struct mctp_msg_ctx *ctx = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (!tmp->src) {
			ctx = tmp;
			break;
		}
	}

	if (!ctx)
		return NULL;

	ctx->src = src;
	ctx->dest = dest;
	ctx->tag = tag;
	ctx->buf_size = 0;

	return ctx;
}

static void mctp_msg_ctx_drop(struct mctp_msg_ctx *ctx)
{
	ctx->src = 0;
}

static void mctp_msg_ctx_reset(struct mctp_msg_ctx *ctx)
{
	ctx->buf_size = 0;
	ctx->fragment_size = 0;
}

static int mctp_msg_ctx_add_pkt(struct mctp_msg_ctx *ctx,
		struct mctp_pktbuf *pkt, size_t max_size)
{
	size_t len;

	len = mctp_pktbuf_size(pkt) - sizeof(struct mctp_hdr);

	if (len + ctx->buf_size < ctx->buf_size) {
		return -1;
	}

	if (ctx->buf_size + len > ctx->buf_alloc_size) {
		size_t new_alloc_size;
		void *lbuf;

		/* @todo: finer-grained allocation */
		if (!ctx->buf_alloc_size) {
			new_alloc_size = MAX(len, 4096UL);
		} else {
			new_alloc_size = MAX(ctx->buf_alloc_size * 2,
					     len + ctx->buf_size);
		}

		/* Don't allow heap to grow beyond a limit */
		if (new_alloc_size > max_size)
			return -1;

		lbuf = __mctp_realloc(ctx->buf, new_alloc_size);
		if (lbuf) {
			ctx->buf = lbuf;
			ctx->buf_alloc_size = new_alloc_size;
		} else {
			__mctp_free(ctx->buf);
			/* clear the stale pointer so a later reuse or
			 * mctp_destroy() doesn't touch freed memory */
			ctx->buf = NULL;
			ctx->buf_alloc_size = 0;
			return -1;
		}
	}

	memcpy(ctx->buf + ctx->buf_size, mctp_pktbuf_data(pkt), len);
	ctx->buf_size += len;

	return 0;
}
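/*
 * Reassembly contexts are keyed on (src, dest, tag); a zero src marks a
 * free slot. The context buffer grows geometrically: the first allocation
 * is MAX(len, 4096), after which the size doubles (or jumps to exactly
 * what is needed, whichever is larger), and growth is refused once it
 * would exceed max_size. For example, assembling a 10000-byte message
 * from 64-byte payloads allocates 4096, then 8192, then 16384 bytes,
 * provided max_size allows it.
 */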

/* Core API functions */
struct mctp *mctp_init(void)
{
	struct mctp *mctp;

	mctp = __mctp_alloc(sizeof(*mctp));

	if (!mctp)
		return NULL;

	memset(mctp, 0, sizeof(*mctp));
	mctp->max_message_size = MCTP_MAX_MESSAGE_SIZE;

	return mctp;
}
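/*
 * Typical endpoint usage of the core API, as a sketch only: error handling
 * is elided and the binding constructor is hypothetical (real bindings,
 * e.g. serial or astlpc, provide their own setup functions).
 *
 *	struct mctp *mctp = mctp_init();
 *	struct mctp_binding *binding = hypothetical_binding_init();
 *
 *	mctp_set_rx_all(mctp, app_rx_handler, app_ctx);
 *	mctp_register_bus(mctp, binding, local_eid);
 *
 *	// the binding delivers inbound packets via mctp_bus_rx(); send with:
 *	mctp_message_tx(mctp, remote_eid, msg, msg_len);
 *
 *	mctp_destroy(mctp);
 */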

void mctp_set_max_message_size(struct mctp *mctp, size_t message_size)
{
	mctp->max_message_size = message_size;
}

static void mctp_bus_destroy(struct mctp_bus *bus)
{
	while (bus->tx_queue_head) {
		struct mctp_pktbuf *curr = bus->tx_queue_head;

		bus->tx_queue_head = curr->next;
		mctp_pktbuf_free(curr);
	}
}

void mctp_destroy(struct mctp *mctp)
{
	size_t i;

	/* Cleanup message assembly contexts */
	BUILD_ASSERT(ARRAY_SIZE(mctp->msg_ctxs) < SIZE_MAX);
	for (i = 0; i < ARRAY_SIZE(mctp->msg_ctxs); i++) {
		struct mctp_msg_ctx *tmp = &mctp->msg_ctxs[i];
		if (tmp->buf)
			__mctp_free(tmp->buf);
	}

	while (mctp->n_busses--)
		mctp_bus_destroy(&mctp->busses[mctp->n_busses]);

	__mctp_free(mctp->busses);
	__mctp_free(mctp);
}

int mctp_set_rx_all(struct mctp *mctp, mctp_rx_fn fn, void *data)
{
	mctp->message_rx = fn;
	mctp->message_rx_data = data;
	return 0;
}

static struct mctp_bus *find_bus_for_eid(struct mctp *mctp,
		mctp_eid_t dest __attribute__((unused)))
{
	/* for now, just use the first bus. For full routing support,
	 * we will need a table of neighbours */
	return &mctp->busses[0];
}

int mctp_register_bus(struct mctp *mctp,
		struct mctp_binding *binding,
		mctp_eid_t eid)
{
	int rc = 0;

	/* todo: multiple busses */
	assert(mctp->n_busses == 0);
	mctp->n_busses = 1;

	mctp->busses = __mctp_alloc(sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;

	memset(mctp->busses, 0, sizeof(struct mctp_bus));
	mctp->busses[0].binding = binding;
	mctp->busses[0].eid = eid;
	binding->bus = &mctp->busses[0];
	binding->mctp = mctp;
	mctp->route_policy = ROUTE_ENDPOINT;

	if (binding->start) {
		rc = binding->start(binding);
		if (rc < 0) {
			mctp_prerr("Failed to start binding: %d", rc);
			binding->bus = NULL;
			__mctp_free(mctp->busses);
			mctp->busses = NULL;
			mctp->n_busses = 0;
		}
	}

	return rc;
}

int mctp_bridge_busses(struct mctp *mctp,
		struct mctp_binding *b1, struct mctp_binding *b2)
{
	int rc = 0;

	assert(mctp->n_busses == 0);
	mctp->busses = __mctp_alloc(2 * sizeof(struct mctp_bus));
	if (!mctp->busses)
		return -ENOMEM;
	memset(mctp->busses, 0, 2 * sizeof(struct mctp_bus));
	mctp->n_busses = 2;
	mctp->busses[0].binding = b1;
	b1->bus = &mctp->busses[0];
	b1->mctp = mctp;
	mctp->busses[1].binding = b2;
	b2->bus = &mctp->busses[1];
	b2->mctp = mctp;

	mctp->route_policy = ROUTE_BRIDGE;

	if (b1->start) {
		rc = b1->start(b1);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b1->name, rc);
			goto done;
		}
	}

	if (b2->start) {
		rc = b2->start(b2);
		if (rc < 0) {
			mctp_prerr("Failed to start bridged bus %s: %d",
				   b2->name, rc);
			goto done;
		}
	}

done:
	return rc;
}

static inline bool mctp_ctrl_cmd_is_transport(struct mctp_ctrl_msg_hdr *hdr)
{
	return ((hdr->command_code >= MCTP_CTRL_CMD_FIRST_TRANSPORT) &&
		(hdr->command_code <= MCTP_CTRL_CMD_LAST_TRANSPORT));
}

static bool mctp_ctrl_handle_msg(struct mctp_bus *bus, mctp_eid_t src,
				 void *buffer, size_t length)
{
	struct mctp_ctrl_msg_hdr *msg_hdr = buffer;

	/*
	 * A control message has been received. If a transport control
	 * message handler is provided, it will be called. If there is no
	 * dedicated handler, this function returns false and the data can
	 * be handled by the generic message handler. The transport control
	 * message handler will be provided with messages in the command
	 * range 0xF0 - 0xFF.
	 */
	if (mctp_ctrl_cmd_is_transport(msg_hdr)) {
		if (bus->binding->control_rx != NULL) {
			/* MCTP bus binding handler */
			bus->binding->control_rx(src,
						 bus->binding->control_rx_data,
						 buffer, length);
			return true;
		}
	}

	/*
	 * Command was not handled, due to lack of a specific callback.
	 * It will be passed to the regular message_rx handler.
	 */
	return false;
}

static inline bool mctp_rx_dest_is_local(struct mctp_bus *bus, mctp_eid_t dest)
{
	return dest == bus->eid || dest == MCTP_EID_NULL ||
	       dest == MCTP_EID_BROADCAST;
}

static inline bool mctp_ctrl_cmd_is_request(struct mctp_ctrl_msg_hdr *hdr)
{
	return hdr->ic_msg_type == MCTP_CTRL_HDR_MSG_TYPE &&
	       hdr->rq_dgram_inst & MCTP_CTRL_HDR_FLAG_REQUEST;
}

/*
 * Receive the complete MCTP message and route it.
 * Asserts:
 *     'buf' is not NULL.
 */
static void mctp_rx(struct mctp *mctp, struct mctp_bus *bus, mctp_eid_t src,
		    mctp_eid_t dest, void *buf, size_t len)
{
	assert(buf != NULL);

	if (mctp->route_policy == ROUTE_ENDPOINT &&
	    mctp_rx_dest_is_local(bus, dest)) {
		/* Handle MCTP Control Messages: */
		if (len >= sizeof(struct mctp_ctrl_msg_hdr)) {
			struct mctp_ctrl_msg_hdr *msg_hdr = buf;

			/*
			 * Identify if this is a control request message.
			 * See DSP0236 v1.3.0 sec. 11.5.
			 */
			if (mctp_ctrl_cmd_is_request(msg_hdr)) {
				bool handled;
				handled = mctp_ctrl_handle_msg(bus, src, buf,
							       len);
				if (handled)
					return;
			}
		}
		if (mctp->message_rx)
			mctp->message_rx(src, mctp->message_rx_data, buf, len);
	}

	if (mctp->route_policy == ROUTE_BRIDGE) {
		int i;

		for (i = 0; i < mctp->n_busses; i++) {
			struct mctp_bus *dest_bus = &mctp->busses[i];
			if (dest_bus == bus)
				continue;

			mctp_message_tx_on_bus(dest_bus, src, dest, buf, len);
		}
	}
}

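/*
 * Packet ingress from a binding. Reassembly follows the SOM/EOM flags in
 * the MCTP header:
 *  - SOM | EOM: single-packet message, delivered to mctp_rx() directly;
 *  - SOM:       start of a message; a context keyed on (src, dest, tag) is
 *               created, or an existing one is reset;
 *  - neither:   middle fragment; the sequence number and fragment size
 *               must match the context, otherwise the context is dropped;
 *  - EOM:       final fragment; on success the assembled message is handed
 *               to mctp_rx().
 */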
void mctp_bus_rx(struct mctp_binding *binding, struct mctp_pktbuf *pkt)
{
	struct mctp_bus *bus = binding->bus;
	struct mctp *mctp = binding->mctp;
	uint8_t flags, exp_seq, seq, tag;
	struct mctp_msg_ctx *ctx;
	struct mctp_hdr *hdr;
	size_t len;
	void *p;
	int rc;

	assert(bus);

	/* Drop the packet unless it carries more than a bare MCTP header */
	if (mctp_pktbuf_size(pkt) <= sizeof(struct mctp_hdr))
		goto out;

	hdr = mctp_pktbuf_hdr(pkt);

	/* small optimisation: don't bother reassembly if we're going to
	 * drop the packet in mctp_rx anyway */
	if (mctp->route_policy == ROUTE_ENDPOINT && hdr->dest != bus->eid)
		goto out;

	flags = hdr->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
	tag = (hdr->flags_seq_tag >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
	seq = (hdr->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;

	switch (flags) {
	case MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM:
		/* single-packet message - send straight up to rx function,
		 * no need to create a message context */
		len = pkt->end - pkt->mctp_hdr_off - sizeof(struct mctp_hdr);
		p = pkt->data + pkt->mctp_hdr_off + sizeof(struct mctp_hdr);
		mctp_rx(mctp, bus, hdr->src, hdr->dest, p, len);
		break;

	case MCTP_HDR_FLAG_SOM:
		/* start of a new message - start the new context for
		 * future message reception. If an existing context is
		 * already present, drop it. */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (ctx) {
			mctp_msg_ctx_reset(ctx);
		} else {
			ctx = mctp_msg_ctx_create(mctp,
					hdr->src, hdr->dest, tag);
			/* If context creation fails due to exhaustion of the
			 * contexts we can support, drop the packet */
			if (!ctx) {
				mctp_prdebug("Context buffers exhausted.");
				goto out;
			}
		}

		/* Save the fragment size; subsequent middle fragments
		 * should be of the same size */
		ctx->fragment_size = mctp_pktbuf_size(pkt);

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
		} else {
			ctx->last_seq = seq;
		}

		break;

	case MCTP_HDR_FLAG_EOM:
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;

		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len > ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected"
				     " less than %zu, received = %zu",
				     ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (!rc)
			mctp_rx(mctp, bus, ctx->src, ctx->dest,
					ctx->buf, ctx->buf_size);

		mctp_msg_ctx_drop(ctx);
		break;

	case 0:
		/* Neither SOM nor EOM */
		ctx = mctp_msg_ctx_lookup(mctp, hdr->src, hdr->dest, tag);
		if (!ctx)
			goto out;

		exp_seq = (ctx->last_seq + 1) % 4;
		if (exp_seq != seq) {
			mctp_prdebug(
				"Sequence number %d does not match expected %d",
				seq, exp_seq);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		len = mctp_pktbuf_size(pkt);

		if (len != ctx->fragment_size) {
			mctp_prdebug("Unexpected fragment size. Expected = %zu "
				     "received = %zu", ctx->fragment_size, len);
			mctp_msg_ctx_drop(ctx);
			goto out;
		}

		rc = mctp_msg_ctx_add_pkt(ctx, pkt, mctp->max_message_size);
		if (rc) {
			mctp_msg_ctx_drop(ctx);
			goto out;
		}
		ctx->last_seq = seq;

		break;
	}
out:
	mctp_pktbuf_free(pkt);
}

static int mctp_packet_tx(struct mctp_bus *bus,
			  struct mctp_pktbuf *pkt)
{
	if (bus->state != mctp_bus_state_tx_enabled)
		return -1;

	return bus->binding->tx(bus->binding, pkt);
}

static void mctp_send_tx_queue(struct mctp_bus *bus)
{
	struct mctp_pktbuf *pkt;

	while ((pkt = bus->tx_queue_head)) {
		int rc;

		rc = mctp_packet_tx(bus, pkt);
		if (rc)
			break;

		bus->tx_queue_head = pkt->next;
		mctp_pktbuf_free(pkt);
	}

	if (!bus->tx_queue_head)
		bus->tx_queue_tail = NULL;
}

void mctp_binding_set_tx_enabled(struct mctp_binding *binding, bool enable)
{
	struct mctp_bus *bus = binding->bus;

	switch (bus->state) {
	case mctp_bus_state_constructed:
		if (!enable)
			return;

		if (binding->pkt_size < MCTP_PACKET_SIZE(MCTP_BTU)) {
			mctp_prerr("Cannot start %s binding with invalid MTU: %zu",
				   binding->name,
				   MCTP_BODY_SIZE(binding->pkt_size));
			return;
		}

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prinfo("%s binding started", binding->name);
		return;
	case mctp_bus_state_tx_enabled:
		if (enable)
			return;

		bus->state = mctp_bus_state_tx_disabled;
		mctp_prdebug("%s binding Tx disabled", binding->name);
		return;
	case mctp_bus_state_tx_disabled:
		if (!enable)
			return;

		bus->state = mctp_bus_state_tx_enabled;
		mctp_prdebug("%s binding Tx enabled", binding->name);
		mctp_send_tx_queue(bus);
		return;
	}
}

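/*
 * Fragment a message into packets no larger than the binding's MTU: the
 * first fragment carries SOM, the last carries EOM, and each gets a
 * two-bit sequence number. Packets are appended to the bus tx queue and
 * flushed with mctp_send_tx_queue(); a binding that has disabled tx will
 * stall the queue until it re-enables transmission.
 */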
static int mctp_message_tx_on_bus(struct mctp_bus *bus, mctp_eid_t src,
				  mctp_eid_t dest, void *msg, size_t msg_len)
{
	size_t max_payload_len, payload_len, p;
	struct mctp_pktbuf *pkt;
	struct mctp_hdr *hdr;
	int i;

	if (bus->state == mctp_bus_state_constructed)
		return -ENXIO;

	max_payload_len = MCTP_BODY_SIZE(bus->binding->pkt_size);

	{
		const bool valid_mtu = max_payload_len >= MCTP_BTU;
		assert(valid_mtu);
		if (!valid_mtu)
			return -EINVAL;
	}

	mctp_prdebug("%s: Generating packets for transmission of %zu byte message from %hhu to %hhu",
		     __func__, msg_len, src, dest);

	/* queue up packets, each of max MCTP_MTU size */
	for (p = 0, i = 0; p < msg_len; i++) {
		payload_len = msg_len - p;
		if (payload_len > max_payload_len)
			payload_len = max_payload_len;

		pkt = mctp_pktbuf_alloc(bus->binding,
				payload_len + sizeof(*hdr));
		hdr = mctp_pktbuf_hdr(pkt);

		/* todo: tags */
		hdr->ver = bus->binding->version & 0xf;
		hdr->dest = dest;
		hdr->src = src;
		hdr->flags_seq_tag = MCTP_HDR_FLAG_TO |
			(0 << MCTP_HDR_TAG_SHIFT);

		if (i == 0)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_SOM;
		if (p + payload_len >= msg_len)
			hdr->flags_seq_tag |= MCTP_HDR_FLAG_EOM;
		hdr->flags_seq_tag |=
			(i & MCTP_HDR_SEQ_MASK) << MCTP_HDR_SEQ_SHIFT;

		memcpy(mctp_pktbuf_data(pkt), msg + p, payload_len);

		/* add to tx queue */
		if (bus->tx_queue_tail)
			bus->tx_queue_tail->next = pkt;
		else
			bus->tx_queue_head = pkt;
		bus->tx_queue_tail = pkt;

		p += payload_len;
	}

	mctp_prdebug("%s: Enqueued %d packets", __func__, i);

	mctp_send_tx_queue(bus);

	return 0;
}

int mctp_message_tx(struct mctp *mctp, mctp_eid_t eid,
		    void *msg, size_t msg_len)
{
	struct mctp_bus *bus;

	bus = find_bus_for_eid(mctp, eid);
	return mctp_message_tx_on_bus(bus, bus->eid, eid, msg, msg_len);
}