/**
 * Copyright © 2017 IBM Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "console-server.h"

static inline size_t min(size_t a, size_t b)
{
	return a < b ? a : b;
}

struct ringbuffer {
	uint8_t *buf;
	size_t size;
	/* Producer index: the next byte queued is written at buf[tail] */
	size_t tail;
	struct ringbuffer_consumer **consumers;
	int n_consumers;
};

struct ringbuffer_consumer {
	struct ringbuffer *rb;
	ringbuffer_poll_fn_t poll_fn;
	void *poll_data;
	/* Consumer index: the next byte dequeued is read from rb->buf[pos] */
	size_t pos;
};

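/*
 * A ringbuffer and its data area are allocated as a single block:
 *
 *   [ struct ringbuffer ][ size bytes of data ]
 *   ^ rb                 ^ rb->buf == (uint8_t *)(rb + 1)
 *
 * which is why ringbuffer_fini() can release everything with one free().
 */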
struct ringbuffer *ringbuffer_init(size_t size)
{
	struct ringbuffer *rb;

	rb = malloc(sizeof(*rb) + size);
	if (!rb) {
		return NULL;
	}

	memset(rb, 0, sizeof(*rb));
	rb->size = size;
	rb->buf = (void *)(rb + 1);

	return rb;
}

void ringbuffer_fini(struct ringbuffer *rb)
{
	while (rb->n_consumers) {
		ringbuffer_consumer_unregister(rb->consumers[0]);
	}
	free(rb);
}

struct ringbuffer_consumer *
ringbuffer_consumer_register(struct ringbuffer *rb, ringbuffer_poll_fn_t fn,
			     void *data)
{
	struct ringbuffer_consumer **consumers;
	struct ringbuffer_consumer *rbc;
	int n;

	rbc = malloc(sizeof(*rbc));
	if (!rbc) {
		return NULL;
	}

	rbc->rb = rb;
	rbc->poll_fn = fn;
	rbc->poll_data = data;
	/* Start the consumer at the current tail, i.e. with no backlog */
	rbc->pos = rb->tail;

	n = rb->n_consumers++;
	/*
	 * We're managing an array of pointers to aggregates, so don't warn
	 * about sizeof() on a pointer type.
	 */
	/* NOLINTBEGIN(bugprone-sizeof-expression) */
	consumers = reallocarray(rb->consumers, rb->n_consumers,
				 sizeof(*rb->consumers));
	/* NOLINTEND(bugprone-sizeof-expression) */
	if (!consumers) {
		rb->n_consumers--;
		free(rbc);
		return NULL;
	}
	rb->consumers = consumers;
	rb->consumers[n] = rbc;

	return rbc;
}

void ringbuffer_consumer_unregister(struct ringbuffer_consumer *rbc)
{
	struct ringbuffer *rb = rbc->rb;
	int i;

	for (i = 0; i < rb->n_consumers; i++) {
		if (rb->consumers[i] == rbc) {
			break;
		}
	}

	assert(i < rb->n_consumers);

	rb->n_consumers--;

	/*
	 * We're managing an array of pointers to aggregates, so don't warn
	 * about sizeof() on a pointer type.
	 */
	/* NOLINTBEGIN(bugprone-sizeof-expression) */
	memmove(&rb->consumers[i], &rb->consumers[i + 1],
		sizeof(*rb->consumers) * (rb->n_consumers - i));

	rb->consumers = reallocarray(rb->consumers, rb->n_consumers,
				     sizeof(*rb->consumers));
	/* NOLINTEND(bugprone-sizeof-expression) */

	free(rbc);
}
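
/*
 * Note on the poll callback contract, as exercised by the functions below:
 * ringbuffer_queue() calls ->poll_fn with a force_len of 0 as a non-blocking
 * "new data" notification, while ringbuffer_consumer_ensure_space() calls it
 * with a non-zero force_len when the consumer must drain at least that many
 * bytes before the producer can queue more. A consumer that fails a forced
 * poll, or that returns RINGBUFFER_POLL_REMOVE, is unregistered.
 */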

size_t ringbuffer_len(struct ringbuffer_consumer *rbc)
{
	if (rbc->pos <= rbc->rb->tail) {
		return rbc->rb->tail - rbc->pos;
	}
	return rbc->rb->tail + rbc->rb->size - rbc->pos;
}

static size_t ringbuffer_space(struct ringbuffer_consumer *rbc)
{
	return rbc->rb->size - ringbuffer_len(rbc) - 1;
}
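
/*
 * Worked example of the length/space arithmetic: with size == 8, tail == 2
 * and pos == 6, the consumer's backlog wraps, so ringbuffer_len() returns
 * 2 + 8 - 6 == 4 (the bytes at indices 6, 7, 0 and 1) and
 * ringbuffer_space() returns 8 - 4 - 1 == 3. One byte is always left unused
 * so that pos == tail unambiguously means "no backlog" rather than "full".
 */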

static int ringbuffer_consumer_ensure_space(struct ringbuffer_consumer *rbc,
					    size_t len)
{
	enum ringbuffer_poll_ret prc;
	size_t force_len;

	if (ringbuffer_space(rbc) >= len) {
		return 0;
	}

	force_len = len - ringbuffer_space(rbc);

	prc = rbc->poll_fn(rbc->poll_data, force_len);
	if (prc != RINGBUFFER_POLL_OK) {
		return -1;
	}

	return 0;
}

int ringbuffer_queue(struct ringbuffer *rb, uint8_t *data, size_t len)
{
	struct ringbuffer_consumer *rbc;
	size_t wlen;
	int i, rc;

	if (len >= rb->size) {
		return -1;
	}

	if (len == 0) {
		return 0;
	}

	/* Ensure there are at least len bytes of space available for every
	 * consumer.
	 *
	 * If a client doesn't have sufficient space, perform a blocking write
	 * (by calling ->poll_fn with force_len) to create it.
	 */
	for (i = 0; i < rb->n_consumers; i++) {
		rbc = rb->consumers[i];

		rc = ringbuffer_consumer_ensure_space(rbc, len);
		if (rc) {
			ringbuffer_consumer_unregister(rbc);
			i--;
			continue;
		}

		assert(ringbuffer_space(rbc) >= len);
	}

	/* Now that we know we have enough space, add the new data at the
	 * tail, wrapping around the end of the buffer if necessary */
	wlen = min(len, rb->size - rb->tail);
	memcpy(rb->buf + rb->tail, data, wlen);
	rb->tail = (rb->tail + wlen) % rb->size;
	len -= wlen;
	data += wlen;

	/* Copy any wrapped remainder to the start of the buffer */
	memcpy(rb->buf, data, len);
	rb->tail += len;

	/* Inform consumers of the new data in non-blocking mode, by calling
	 * ->poll_fn with a force_len of 0 */
	for (i = 0; i < rb->n_consumers; i++) {
		enum ringbuffer_poll_ret prc;

		rbc = rb->consumers[i];
		prc = rbc->poll_fn(rbc->poll_data, 0);
		if (prc == RINGBUFFER_POLL_REMOVE) {
			ringbuffer_consumer_unregister(rbc);
			i--;
		}
	}

	return 0;
}
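
/*
 * Worked example of the two-part copy in ringbuffer_queue(): with size == 8
 * and tail == 6, queueing 4 bytes copies wlen == min(4, 8 - 6) == 2 bytes at
 * indices 6..7, wraps tail to 0, then copies the remaining 2 bytes at
 * indices 0..1, leaving tail == 2.
 */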

size_t ringbuffer_dequeue_peek(struct ringbuffer_consumer *rbc, size_t offset,
			       uint8_t **data)
{
	struct ringbuffer *rb = rbc->rb;
	size_t pos;
	size_t len;

	if (offset >= ringbuffer_len(rbc)) {
		return 0;
	}

	/* Return only the contiguous chunk starting at pos; a backlog that
	 * wraps past the end of the buffer requires a second peek */
	pos = (rbc->pos + offset) % rb->size;
	if (pos <= rb->tail) {
		len = rb->tail - pos;
	} else {
		len = rb->size - pos;
	}

	*data = rb->buf + pos;
	return len;
}

int ringbuffer_dequeue_commit(struct ringbuffer_consumer *rbc, size_t len)
{
	assert(len <= ringbuffer_len(rbc));
	rbc->pos = (rbc->pos + len) % rbc->rb->size;
	return 0;
}
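
/*
 * Consumption sketch: a minimal, hypothetical helper (example_drain and its
 * sink callback are illustrative assumptions, not part of this API) showing
 * the two-step dequeue pattern: peek a contiguous chunk, process it, then
 * commit however much was handled. When the backlog wraps past the end of
 * the buffer, ringbuffer_dequeue_peek() returns it in two chunks, so the
 * loop simply runs twice.
 */
#ifdef RINGBUFFER_USAGE_EXAMPLE
static int example_drain(struct ringbuffer_consumer *rbc,
			 int (*sink)(void *ctx, const uint8_t *buf,
				     size_t len),
			 void *ctx)
{
	uint8_t *buf;
	size_t len;

	/* Each iteration peeks the chunk at the consumer's current position;
	 * committing advances it, exposing any wrapped remainder */
	while ((len = ringbuffer_dequeue_peek(rbc, 0, &buf)) > 0) {
		if (sink(ctx, buf, len)) {
			return -1;
		}
		ringbuffer_dequeue_commit(rbc, len);
	}

	return 0;
}
#endif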