/* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */

#ifndef _LIBMCTP_H
#define _LIBMCTP_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

typedef uint8_t mctp_eid_t;

/* Special Endpoint ID values */
#define MCTP_EID_NULL      0
#define MCTP_EID_BROADCAST 0xff

/* MCTP packet definitions */
struct mctp_hdr {
	uint8_t ver;
	uint8_t dest;
	uint8_t src;
	uint8_t flags_seq_tag;
};

/* Definitions for flags_seq_tag field */
#define MCTP_HDR_FLAG_SOM  (1 << 7)
#define MCTP_HDR_FLAG_EOM  (1 << 6)
#define MCTP_HDR_FLAG_TO   (1 << 3)
#define MCTP_HDR_TO_SHIFT  (3)
#define MCTP_HDR_TO_MASK   (1)
#define MCTP_HDR_SEQ_SHIFT (4)
#define MCTP_HDR_SEQ_MASK  (0x3)
#define MCTP_HDR_TAG_SHIFT (0)
#define MCTP_HDR_TAG_MASK  (0x7)
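
/* Example (sketch): decoding the fields packed into flags_seq_tag, using the
 * masks and shifts above. `hdr` is assumed to point at a received mctp_hdr.
 *
 *	uint8_t fst = hdr->flags_seq_tag;
 *	bool som = fst & MCTP_HDR_FLAG_SOM;
 *	bool eom = fst & MCTP_HDR_FLAG_EOM;
 *	bool to = (fst >> MCTP_HDR_TO_SHIFT) & MCTP_HDR_TO_MASK;
 *	uint8_t seq = (fst >> MCTP_HDR_SEQ_SHIFT) & MCTP_HDR_SEQ_MASK;
 *	uint8_t tag = (fst >> MCTP_HDR_TAG_SHIFT) & MCTP_HDR_TAG_MASK;
 */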

#define MCTP_MESSAGE_TO_SRC true
#define MCTP_MESSAGE_TO_DST false
#define MCTP_MESSAGE_CAPTURE_OUTGOING true
#define MCTP_MESSAGE_CAPTURE_INCOMING false

/* Baseline Transmission Unit and packet size */
#define MCTP_BTU 64
#define MCTP_PACKET_SIZE(unit) ((unit) + sizeof(struct mctp_hdr))
#define MCTP_BODY_SIZE(unit) ((unit) - sizeof(struct mctp_hdr))
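
/* With the default BTU, for example, MCTP_PACKET_SIZE(MCTP_BTU) works out to
 * 64 + 4 = 68 bytes, since struct mctp_hdr above occupies four bytes. */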

/* packet buffers */

struct mctp_pktbuf {
	size_t start, end, size;
	size_t mctp_hdr_off;
	bool alloc;
	unsigned char data[];
};

#define MCTP_PKTBUF_SIZE(payload) \
	(MCTP_PACKET_SIZE(payload) + sizeof(struct mctp_pktbuf))

struct mctp;
struct mctp_bus;
struct mctp_binding;

/* Initialise a mctp_pktbuf in static storage. Should not be freed.
 * Storage must be sized to fit the binding,
 * MCTP_PKTBUF_SIZE(binding->pkt_size + binding->pkt_header + binding->pkt_trailer) */
struct mctp_pktbuf *mctp_pktbuf_init(struct mctp_binding *binding,
				     void *storage);
/* Allocate and initialise a mctp_pktbuf. Should be freed with
 * mctp_pktbuf_free */
struct mctp_pktbuf *mctp_pktbuf_alloc(struct mctp_binding *binding, size_t len);
void mctp_pktbuf_free(struct mctp_pktbuf *pkt);
struct mctp_hdr *mctp_pktbuf_hdr(struct mctp_pktbuf *pkt);
void *mctp_pktbuf_data(struct mctp_pktbuf *pkt);
size_t mctp_pktbuf_size(const struct mctp_pktbuf *pkt);
void *mctp_pktbuf_alloc_start(struct mctp_pktbuf *pkt, size_t size);
void *mctp_pktbuf_alloc_end(struct mctp_pktbuf *pkt, size_t size);
int mctp_pktbuf_push(struct mctp_pktbuf *pkt, const void *data, size_t len);
void *mctp_pktbuf_pop(struct mctp_pktbuf *pkt, size_t len);
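
/* Example (sketch): building a packet in caller-provided static storage for a
 * hypothetical binding with a 4-byte transport header, no trailer and the
 * baseline MTU, following the sizing formula above.
 *
 *	static uint8_t storage[MCTP_PKTBUF_SIZE(MCTP_PACKET_SIZE(MCTP_BTU) + 4 + 0)];
 *
 *	struct mctp_pktbuf *pkt = mctp_pktbuf_init(binding, storage);
 *	if (mctp_pktbuf_push(pkt, payload, payload_len))
 *		... payload did not fit ...
 */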

/* MCTP core */

/* Allocate and set up an MCTP instance */
struct mctp *mctp_init(void);
/* Clean up and deallocate an MCTP instance from mctp_init() */
void mctp_destroy(struct mctp *mctp);

/* Set up an MCTP instance */
int mctp_setup(struct mctp *mctp, size_t struct_mctp_size);
/* Release the resources of an MCTP instance */
void mctp_cleanup(struct mctp *mctp);
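
/* Example (sketch): typical lifecycle with heap allocation.
 *
 *	struct mctp *mctp = mctp_init();
 *	if (!mctp)
 *		... allocation failed ...
 *	... register bindings, exchange messages ...
 *	mctp_destroy(mctp);
 */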

void mctp_set_max_message_size(struct mctp *mctp, size_t message_size);
typedef void (*mctp_capture_fn)(struct mctp_pktbuf *pkt, bool outgoing,
				void *user);
void mctp_set_capture_handler(struct mctp *mctp, mctp_capture_fn fn,
			      void *user);
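
/* Example (sketch): a capture handler that logs the direction and size of
 * every packet crossing the core (assumes <stdio.h>).
 *
 *	static void capture(struct mctp_pktbuf *pkt, bool outgoing, void *user)
 *	{
 *		printf("%s %zu bytes\n", outgoing ? "TX" : "RX",
 *		       mctp_pktbuf_size(pkt));
 *	}
 *
 *	mctp_set_capture_handler(mctp, capture, NULL);
 */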

/* Register a binding to the MCTP core, and create a bus (populating
 * binding->bus).
 *
 * If this function is called, the MCTP stack is initialised as an 'endpoint',
 * and will deliver local packets to an RX callback - see `mctp_set_rx_all()`
 * below.
 */
int mctp_register_bus(struct mctp *mctp, struct mctp_binding *binding,
		      mctp_eid_t eid);

void mctp_unregister_bus(struct mctp *mctp, struct mctp_binding *binding);

int mctp_bus_set_eid(struct mctp_binding *binding, mctp_eid_t eid);

/* Create a simple bidirectional bridge between busses.
 *
 * In this mode, the MCTP stack is initialised as a bridge. There is no EID
 * defined, so no packets are considered local. Instead, all messages from one
 * binding are forwarded to the other.
 */
int mctp_bridge_busses(struct mctp *mctp, struct mctp_binding *b1,
		       struct mctp_binding *b2);
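
/* Example (sketch): bridging two hypothetical bindings, b1 and b2, after
 * which packets received on one are forwarded to the other.
 *
 *	mctp_bridge_busses(mctp, b1, b2);
 */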

typedef void (*mctp_rx_fn)(uint8_t src_eid, bool tag_owner, uint8_t msg_tag,
			   void *data, void *msg, size_t len);

int mctp_set_rx_all(struct mctp *mctp, mctp_rx_fn fn, void *data);
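
/* Example (sketch): endpoint setup, assuming an already-initialised binding
 * `binding` and a local EID of 8.
 *
 *	static void rx(uint8_t src_eid, bool tag_owner, uint8_t msg_tag,
 *		       void *data, void *msg, size_t len)
 *	{
 *		... handle the reassembled message ...
 *	}
 *
 *	mctp_register_bus(mctp, binding, 8);
 *	mctp_set_rx_all(mctp, rx, NULL);
 */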

/* Transmit a message.
 * @msg: The message buffer to send. Must be suitable for
 * free(), or the custom mctp_set_alloc_ops() m_msg_free.
 * The MCTP stack will take ownership of the buffer
 * and release it when message transmission is complete or fails.
 *
 * If an asynchronous binding is being used, it will return -EBUSY if
 * a message is already pending for transmission (msg will be freed as usual).
 * Asynchronous users can test mctp_is_tx_ready() prior to sending.
 */
int mctp_message_tx_alloced(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
			    uint8_t msg_tag, void *msg, size_t msg_len);

/* Transmit a message.
 * @msg: The message buffer to send. Ownership of this buffer
 * remains with the caller (a copy is made internally with __mctp_msg_alloc).
 *
 * If an asynchronous binding is being used, it will return -EBUSY if
 * a message is already pending for transmission.
 * Asynchronous users can test mctp_is_tx_ready() prior to sending.
 *
 * This is equivalent to duplicating `msg` then calling mctp_message_tx_alloced().
 */
int mctp_message_tx(struct mctp *mctp, mctp_eid_t eid, bool tag_owner,
		    uint8_t msg_tag, const void *msg, size_t msg_len);
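
/* Example (sketch): sending a response to a request received via the RX
 * callback above, reusing the request's tag with the Tag Owner bit clear.
 *
 *	uint8_t reply[] = { 0x00, 0x01 };
 *	int rc = mctp_message_tx(mctp, src_eid, false, msg_tag, reply,
 *				 sizeof(reply));
 */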

/* Transmit a request message.
 * @msg: The message buffer to send. Must be suitable for
 * free(), or the custom mctp_set_alloc_ops() m_msg_free.
 *
 * A tag with the Tag Owner bit set will be allocated for the sent message,
 * and returned to the caller (the TO bit is unset in the returned @alloc_msg_tag).
 * alloc_msg_tag may be NULL to ignore the returned tag.
 * If no tags are spare, -EBUSY will be returned.
 *
 * If an asynchronous binding is being used, it will return -EBUSY if
 * a message is already pending for transmission (msg will be freed).
 * Asynchronous users can test mctp_is_tx_ready() prior to sending.
 */
int mctp_message_tx_request(struct mctp *mctp, mctp_eid_t eid, void *msg,
			    size_t msg_len, uint8_t *alloc_msg_tag);
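
/* Example (sketch): sending a two-byte request to a hypothetical destination
 * EID of 9 and capturing the allocated tag for matching the response. The
 * buffer comes from malloc() so the stack can free it with the default
 * allocators (assumes <stdlib.h>).
 *
 *	uint8_t tag;
 *	uint8_t *req = malloc(2);
 *	req[0] = 0x00;
 *	req[1] = 0x01;
 *	if (mctp_message_tx_request(mctp, 9, req, 2, &tag) == 0)
 *		... wait for a response carrying `tag` ...
 */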

bool mctp_is_tx_ready(struct mctp *mctp, mctp_eid_t eid);

/* hardware bindings */

/**
 * @tx: Binding function to transmit one packet on the interface
 * @tx_storage: A buffer for transmitting packets. Must be sized
 * as MCTP_PKTBUF_SIZE(mtu).
 * Return:
 * * 0 - Success, pktbuf can be released
 * * -EMSGSIZE - Packet exceeds binding MTU, pktbuf must be dropped
 * * -EBUSY - Packet unable to be transmitted, pktbuf must be retained
 */
struct mctp_binding {
	const char *name;
	uint8_t version;
	struct mctp_bus *bus;
	struct mctp *mctp;
	size_t pkt_size;
	size_t pkt_header;
	size_t pkt_trailer;
	void *tx_storage;
	int (*start)(struct mctp_binding *binding);
	int (*tx)(struct mctp_binding *binding, struct mctp_pktbuf *pkt);
	mctp_rx_fn control_rx;
	void *control_rx_data;
};

void mctp_binding_set_tx_enabled(struct mctp_binding *binding, bool enable);
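
/* Example (sketch): a minimal binding definition for a hypothetical transport
 * with a 4-byte transport header, no trailer and the baseline MTU. Real
 * bindings typically also implement start() and call
 * mctp_binding_set_tx_enabled() once the hardware is ready.
 *
 *	static int my_tx(struct mctp_binding *b, struct mctp_pktbuf *pkt)
 *	{
 *		... write mctp_pktbuf_size(pkt) bytes starting at
 *		    mctp_pktbuf_hdr(pkt) to the hardware ...
 *		return 0;
 *	}
 *
 *	static uint8_t my_tx_storage[MCTP_PKTBUF_SIZE(MCTP_PACKET_SIZE(MCTP_BTU) + 4)];
 *	static struct mctp_binding my_binding = {
 *		.name = "my",
 *		.version = 1,
 *		.pkt_size = MCTP_PACKET_SIZE(MCTP_BTU),
 *		.pkt_header = 4,
 *		.tx_storage = my_tx_storage,
 *		.tx = my_tx,
 *	};
 */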

/*
 * Receive a packet from binding to core. Takes ownership of pkt, free()-ing it
 * after use.
 */
void mctp_bus_rx(struct mctp_binding *binding, struct mctp_pktbuf *pkt);
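
/* Example (sketch): a binding's receive path, assuming `buf`/`len` holds one
 * raw MCTP packet read from the hardware (assumes <string.h>).
 *
 *	struct mctp_pktbuf *pkt = mctp_pktbuf_alloc(binding, len);
 *	if (!pkt)
 *		return;
 *	memcpy(mctp_pktbuf_hdr(pkt), buf, len);
 *	mctp_bus_rx(binding, pkt);
 */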
Jeremy Kerr | 4cdc200 | 2019-02-07 16:49:12 +0800 | [diff] [blame] | 206 | |
| 207 | /* environment-specific allocation */ |
Matt Johnston | 4a09e1d | 2024-09-13 14:55:58 +0800 | [diff] [blame] | 208 | void mctp_set_alloc_ops(void *(*m_alloc)(size_t), void (*m_free)(void *), |
| 209 | void *(*m_msg_alloc)(size_t, void *), |
| 210 | void (*m_msg_free)(void *, void *)); |
| 211 | /* Gets/sets context that will be passed to custom m_msg_ ops */ |
| 212 | void *mctp_get_alloc_ctx(struct mctp *mctp); |
| 213 | void mctp_set_alloc_ctx(struct mctp *mctp, void *ctx); |
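
/* Example (sketch): routing message buffers through hypothetical pool
 * allocators (pool_alloc/pool_free) while internal allocations keep using
 * malloc()/free(). The pool pointer is passed back via the alloc context.
 *
 *	static void *msg_alloc(size_t size, void *ctx)
 *	{
 *		return pool_alloc(ctx, size);
 *	}
 *
 *	static void msg_free(void *ptr, void *ctx)
 *	{
 *		pool_free(ctx, ptr);
 *	}
 *
 *	mctp_set_alloc_ops(malloc, free, msg_alloc, msg_free);
 *	mctp_set_alloc_ctx(mctp, pool);
 */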

/* environment-specific logging */

void mctp_set_log_stdio(int level);
void mctp_set_log_syslog(void);
void mctp_set_log_custom(void (*fn)(int, const char *, va_list));

/* these should match the syslog-standard LOG_* definitions, for
 * easier use with syslog */
#define MCTP_LOG_ERR 3
#define MCTP_LOG_WARNING 4
#define MCTP_LOG_NOTICE 5
#define MCTP_LOG_INFO 6
#define MCTP_LOG_DEBUG 7
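
/* Example (sketch): a custom log handler forwarding anything at notice
 * severity or above to stderr (assumes <stdio.h>).
 *
 *	static void log_fn(int level, const char *fmt, va_list args)
 *	{
 *		if (level <= MCTP_LOG_NOTICE) {
 *			vfprintf(stderr, fmt, args);
 *			fputc('\n', stderr);
 *		}
 *	}
 *
 *	mctp_set_log_custom(log_fn);
 */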

/* Environment-specific time functionality */
/* The `now` callback returns a timestamp in milliseconds.
 * Timestamps should be monotonically increasing, and can have an arbitrary
 * origin. (As long as returned timestamps aren't too close to UINT64_MAX, not
 * a problem for any reasonable implementation). */
void mctp_set_now_op(struct mctp *mctp, uint64_t (*now)(void *), void *ctx);
/* Returns a timestamp in milliseconds */
uint64_t mctp_now(struct mctp *mctp);
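
/* Example (sketch): a `now` callback backed by the POSIX monotonic clock
 * (assumes <time.h>).
 *
 *	static uint64_t now_ms(void *ctx)
 *	{
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &ts);
 *		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
 *	}
 *
 *	mctp_set_now_op(mctp, now_ms, NULL);
 */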

int mctp_control_handler_enable(struct mctp *mctp);
void mctp_control_handler_disable(struct mctp *mctp);

/* Add/remove message types to be reported by Get MCTP Version Support.
 * Control type is added automatically for the control handler */
int mctp_control_add_type(struct mctp *mctp, uint8_t msg_type);
void mctp_control_remove_type(struct mctp *mctp, uint8_t msg_type);
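
/* Example (sketch): enabling the built-in control-message handler and
 * advertising an additional supported message type (0x01, PLDM) in Get MCTP
 * Version Support responses.
 *
 *	mctp_control_handler_enable(mctp);
 *	mctp_control_add_type(mctp, 0x01);
 */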

#ifdef __cplusplus
}
#endif

#endif /* _LIBMCTP_H */