Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 1 | /** |
| 2 | * Copyright © 2017 IBM Corporation |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 17 | #include <assert.h> |
| 18 | #include <stdint.h> |
| 19 | #include <stdlib.h> |
| 20 | #include <string.h> |
| 21 | |
| 22 | #include "console-server.h" |
| 23 | |
/* Return the smaller of two size_t values. */
static inline size_t min(size_t a, size_t b)
{
	if (b < a) {
		return b;
	}
	return a;
}
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 28 | |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 29 | struct ringbuffer *ringbuffer_init(size_t size) |
| 30 | { |
| 31 | struct ringbuffer *rb; |
| 32 | |
| 33 | rb = malloc(sizeof(*rb) + size); |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 34 | if (!rb) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 35 | return NULL; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 36 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 37 | |
| 38 | memset(rb, 0, sizeof(*rb)); |
| 39 | rb->size = size; |
| 40 | rb->buf = (void *)(rb + 1); |
| 41 | |
| 42 | return rb; |
| 43 | } |
| 44 | |
| 45 | void ringbuffer_fini(struct ringbuffer *rb) |
| 46 | { |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 47 | while (rb->n_consumers) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 48 | ringbuffer_consumer_unregister(rb->consumers[0]); |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 49 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 50 | free(rb); |
| 51 | } |
| 52 | |
Andrew Jeffery | a72711a | 2023-04-18 18:19:41 +0930 | [diff] [blame] | 53 | struct ringbuffer_consumer * |
| 54 | ringbuffer_consumer_register(struct ringbuffer *rb, ringbuffer_poll_fn_t fn, |
| 55 | void *data) |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 56 | { |
| 57 | struct ringbuffer_consumer *rbc; |
| 58 | int n; |
| 59 | |
| 60 | rbc = malloc(sizeof(*rbc)); |
| 61 | rbc->rb = rb; |
| 62 | rbc->poll_fn = fn; |
| 63 | rbc->poll_data = data; |
| 64 | rbc->pos = rb->tail; |
| 65 | |
| 66 | n = rb->n_consumers++; |
Andrew Jeffery | 91b5217 | 2023-04-19 12:42:14 +0930 | [diff] [blame] | 67 | /* |
| 68 | * We're managing an array of pointers to aggregates, so don't warn about sizeof() on a |
| 69 | * pointer type. |
| 70 | */ |
| 71 | /* NOLINTBEGIN(bugprone-sizeof-expression) */ |
| 72 | rb->consumers = reallocarray(rb->consumers, rb->n_consumers, |
| 73 | sizeof(*rb->consumers)); |
| 74 | /* NOLINTEND(bugprone-sizeof-expression) */ |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 75 | rb->consumers[n] = rbc; |
| 76 | |
| 77 | return rbc; |
| 78 | } |
| 79 | |
| 80 | void ringbuffer_consumer_unregister(struct ringbuffer_consumer *rbc) |
| 81 | { |
| 82 | struct ringbuffer *rb = rbc->rb; |
| 83 | int i; |
| 84 | |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 85 | for (i = 0; i < rb->n_consumers; i++) { |
| 86 | if (rb->consumers[i] == rbc) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 87 | break; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 88 | } |
| 89 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 90 | |
| 91 | assert(i < rb->n_consumers); |
| 92 | |
| 93 | rb->n_consumers--; |
| 94 | |
Andrew Jeffery | 91b5217 | 2023-04-19 12:42:14 +0930 | [diff] [blame] | 95 | /* |
| 96 | * We're managing an array of pointers to aggregates, so don't warn about sizeof() on a |
| 97 | * pointer type. |
| 98 | */ |
Andrew Jeffery | a72711a | 2023-04-18 18:19:41 +0930 | [diff] [blame] | 99 | memmove(&rb->consumers[i], &rb->consumers[i + 1], |
John Wang | 2f1abc3 | 2024-06-04 20:58:33 +0800 | [diff] [blame] | 100 | /* NOLINTNEXTLINE(bugprone-sizeof-expression) */ |
Andrew Jeffery | a72711a | 2023-04-18 18:19:41 +0930 | [diff] [blame] | 101 | sizeof(*rb->consumers) * (rb->n_consumers - i)); |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 102 | |
John Wang | 2f1abc3 | 2024-06-04 20:58:33 +0800 | [diff] [blame] | 103 | if (rb->n_consumers == 0) { |
| 104 | free(rb->consumers); |
| 105 | rb->consumers = NULL; |
| 106 | } else { |
| 107 | rb->consumers = reallocarray( |
| 108 | rb->consumers, rb->n_consumers, |
| 109 | /* NOLINTNEXTLINE(bugprone-sizeof-expression) */ |
| 110 | sizeof(*rb->consumers)); |
| 111 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 112 | |
| 113 | free(rbc); |
| 114 | } |
| 115 | |
Johnathan Mantey | 1cecc5d | 2019-02-28 15:01:46 -0800 | [diff] [blame] | 116 | size_t ringbuffer_len(struct ringbuffer_consumer *rbc) |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 117 | { |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 118 | if (rbc->pos <= rbc->rb->tail) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 119 | return rbc->rb->tail - rbc->pos; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 120 | } |
Andrew Jeffery | 0b7b047 | 2023-04-19 12:48:51 +0930 | [diff] [blame] | 121 | return rbc->rb->tail + rbc->rb->size - rbc->pos; |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 122 | } |
| 123 | |
| 124 | static size_t ringbuffer_space(struct ringbuffer_consumer *rbc) |
| 125 | { |
| 126 | return rbc->rb->size - ringbuffer_len(rbc) - 1; |
| 127 | } |
| 128 | |
Andrew Jeffery | a72711a | 2023-04-18 18:19:41 +0930 | [diff] [blame] | 129 | static int ringbuffer_consumer_ensure_space(struct ringbuffer_consumer *rbc, |
| 130 | size_t len) |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 131 | { |
| 132 | enum ringbuffer_poll_ret prc; |
Andrew Jeffery | 5c359cc | 2023-04-18 22:50:07 +0930 | [diff] [blame] | 133 | size_t force_len; |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 134 | |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 135 | if (ringbuffer_space(rbc) >= len) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 136 | return 0; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 137 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 138 | |
| 139 | force_len = len - ringbuffer_space(rbc); |
| 140 | |
| 141 | prc = rbc->poll_fn(rbc->poll_data, force_len); |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 142 | if (prc != RINGBUFFER_POLL_OK) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 143 | return -1; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 144 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 145 | |
| 146 | return 0; |
| 147 | } |
| 148 | |
/* Queue @len bytes from @data into the ringbuffer, making room by forcing
 * slow consumers to drain (or unregistering them if they fail), then
 * notifying all consumers of the new data.
 *
 * Returns -1 if @len cannot fit (it must be strictly less than the buffer
 * size, since one byte is reserved), 0 otherwise.
 */
int ringbuffer_queue(struct ringbuffer *rb, uint8_t *data, size_t len)
{
	struct ringbuffer_consumer *rbc;
	size_t wlen;
	int i;
	int rc;

	/* A full-size write could overrun the slowest consumer's reserved
	 * byte; reject it outright. */
	if (len >= rb->size) {
		return -1;
	}

	if (len == 0) {
		return 0;
	}

	/* Ensure there is at least len bytes of space available.
	 *
	 * If a client doesn't have sufficient space, perform a blocking write
	 * (by calling ->poll_fn with force_len) to create it.
	 */
	for (i = 0; i < rb->n_consumers; i++) {
		rbc = rb->consumers[i];

		rc = ringbuffer_consumer_ensure_space(rbc, len);
		if (rc) {
			/* Unregister compacts the array in place, so step the
			 * index back to revisit the slot just vacated. */
			ringbuffer_consumer_unregister(rbc);
			i--;
			continue;
		}

		assert(ringbuffer_space(rbc) >= len);
	}

	/* Now that we know we have enough space, add new data to tail */
	/* First chunk: from tail to the end of the underlying buffer */
	wlen = min(len, rb->size - rb->tail);
	memcpy(rb->buf + rb->tail, data, wlen);
	rb->tail = (rb->tail + wlen) % rb->size;
	len -= wlen;
	data += wlen;

	/* Second chunk: any remainder wraps to the start of the buffer
	 * (len is 0 here when no wrap occurred, making this a no-op). */
	memcpy(rb->buf, data, len);
	rb->tail += len;

	/* Inform consumers of new data in non-blocking mode, by calling
	 * ->poll_fn with 0 force_len */
	for (i = 0; i < rb->n_consumers; i++) {
		enum ringbuffer_poll_ret prc;

		rbc = rb->consumers[i];
		prc = rbc->poll_fn(rbc->poll_data, 0);
		if (prc == RINGBUFFER_POLL_REMOVE) {
			/* Same compaction-aware index adjustment as above */
			ringbuffer_consumer_unregister(rbc);
			i--;
		}
	}

	return 0;
}
| 207 | |
| 208 | size_t ringbuffer_dequeue_peek(struct ringbuffer_consumer *rbc, size_t offset, |
Andrew Jeffery | a72711a | 2023-04-18 18:19:41 +0930 | [diff] [blame] | 209 | uint8_t **data) |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 210 | { |
| 211 | struct ringbuffer *rb = rbc->rb; |
| 212 | size_t pos; |
| 213 | size_t len; |
| 214 | |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 215 | if (offset >= ringbuffer_len(rbc)) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 216 | return 0; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 217 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 218 | |
| 219 | pos = (rbc->pos + offset) % rb->size; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 220 | if (pos <= rb->tail) { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 221 | len = rb->tail - pos; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 222 | } else { |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 223 | len = rb->size - pos; |
Andrew Jeffery | 2834c5b | 2023-04-19 12:47:09 +0930 | [diff] [blame] | 224 | } |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 225 | |
| 226 | *data = rb->buf + pos; |
| 227 | return len; |
| 228 | } |
| 229 | |
| 230 | int ringbuffer_dequeue_commit(struct ringbuffer_consumer *rbc, size_t len) |
| 231 | { |
| 232 | assert(len <= ringbuffer_len(rbc)); |
| 233 | rbc->pos = (rbc->pos + len) % rbc->rb->size; |
| 234 | return 0; |
| 235 | } |