/**
 * Copyright © 2017 IBM Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
| 16 | |
| 17 | #define _GNU_SOURCE |
| 18 | |
| 19 | #include <assert.h> |
| 20 | #include <stdint.h> |
| 21 | #include <stdlib.h> |
| 22 | #include <string.h> |
| 23 | |
| 24 | #include "console-server.h" |
| 25 | |
/* Type-safe minimum. The GNU statement-expression form evaluates each
 * argument exactly once, avoiding the double-evaluation hazard of the
 * naive ternary macro. */
#define min(a,b) ({ \
	const typeof(a) _a = (a); \
	const typeof(b) _b = (b); \
	_a < _b ? _a : _b; \
})
| 31 | |
/* Single-producer ring buffer holding @size bytes at @buf (storage is
 * allocated immediately after this struct; see ringbuffer_init()).
 * @tail is the producer's write index; each registered consumer keeps
 * its own read position in its ringbuffer_consumer. */
struct ringbuffer {
	uint8_t *buf;			/* data area, @size bytes */
	size_t size;			/* capacity in bytes */
	size_t tail;			/* producer write index */
	struct ringbuffer_consumer **consumers;	/* heap-grown array */
	int n_consumers;		/* entries in @consumers */
};
| 39 | |
/* Per-consumer state: an independent read position @pos within @rb,
 * plus the poll callback (and its opaque context pointer) used to
 * notify the consumer of new data or to force a blocking drain. */
struct ringbuffer_consumer {
	struct ringbuffer *rb;		/* owning ring buffer */
	ringbuffer_poll_fn_t poll_fn;	/* notification/drain callback */
	void *poll_data;		/* opaque context for poll_fn */
	size_t pos;			/* consumer read index */
};
| 46 | |
| 47 | struct ringbuffer *ringbuffer_init(size_t size) |
| 48 | { |
| 49 | struct ringbuffer *rb; |
| 50 | |
| 51 | rb = malloc(sizeof(*rb) + size); |
| 52 | if (!rb) |
| 53 | return NULL; |
| 54 | |
| 55 | memset(rb, 0, sizeof(*rb)); |
| 56 | rb->size = size; |
| 57 | rb->buf = (void *)(rb + 1); |
| 58 | |
| 59 | return rb; |
| 60 | } |
| 61 | |
| 62 | void ringbuffer_fini(struct ringbuffer *rb) |
| 63 | { |
| 64 | while (rb->n_consumers) |
| 65 | ringbuffer_consumer_unregister(rb->consumers[0]); |
| 66 | free(rb); |
| 67 | } |
| 68 | |
| 69 | struct ringbuffer_consumer *ringbuffer_consumer_register(struct ringbuffer *rb, |
| 70 | ringbuffer_poll_fn_t fn, void *data) |
| 71 | { |
| 72 | struct ringbuffer_consumer *rbc; |
| 73 | int n; |
| 74 | |
| 75 | rbc = malloc(sizeof(*rbc)); |
| 76 | rbc->rb = rb; |
| 77 | rbc->poll_fn = fn; |
| 78 | rbc->poll_data = data; |
| 79 | rbc->pos = rb->tail; |
| 80 | |
| 81 | n = rb->n_consumers++; |
| 82 | rb->consumers = realloc(rb->consumers, |
| 83 | sizeof(*rb->consumers) * rb->n_consumers); |
| 84 | rb->consumers[n] = rbc; |
| 85 | |
| 86 | return rbc; |
| 87 | } |
| 88 | |
| 89 | void ringbuffer_consumer_unregister(struct ringbuffer_consumer *rbc) |
| 90 | { |
| 91 | struct ringbuffer *rb = rbc->rb; |
| 92 | int i; |
| 93 | |
| 94 | for (i = 0; i < rb->n_consumers; i++) |
| 95 | if (rb->consumers[i] == rbc) |
| 96 | break; |
| 97 | |
| 98 | assert(i < rb->n_consumers); |
| 99 | |
| 100 | rb->n_consumers--; |
| 101 | |
| 102 | memmove(&rb->consumers[i], &rb->consumers[i+1], |
| 103 | sizeof(*rb->consumers) * (rb->n_consumers - i)); |
| 104 | |
| 105 | rb->consumers = realloc(rb->consumers, |
| 106 | sizeof(*rb->consumers) * rb->n_consumers); |
| 107 | |
| 108 | free(rbc); |
| 109 | } |
| 110 | |
Johnathan Mantey | 1cecc5d | 2019-02-28 15:01:46 -0800 | [diff] [blame^] | 111 | size_t ringbuffer_len(struct ringbuffer_consumer *rbc) |
Jeremy Kerr | c9775ce | 2017-02-07 16:25:34 +0800 | [diff] [blame] | 112 | { |
| 113 | if (rbc->pos <= rbc->rb->tail) |
| 114 | return rbc->rb->tail - rbc->pos; |
| 115 | else |
| 116 | return rbc->rb->tail + rbc->rb->size - rbc->pos; |
| 117 | } |
| 118 | |
| 119 | static size_t ringbuffer_space(struct ringbuffer_consumer *rbc) |
| 120 | { |
| 121 | return rbc->rb->size - ringbuffer_len(rbc) - 1; |
| 122 | } |
| 123 | |
| 124 | static int ringbuffer_consumer_ensure_space( |
| 125 | struct ringbuffer_consumer *rbc, size_t len) |
| 126 | { |
| 127 | enum ringbuffer_poll_ret prc; |
| 128 | int force_len; |
| 129 | |
| 130 | if (ringbuffer_space(rbc) >= len) |
| 131 | return 0; |
| 132 | |
| 133 | force_len = len - ringbuffer_space(rbc); |
| 134 | |
| 135 | prc = rbc->poll_fn(rbc->poll_data, force_len); |
| 136 | if (prc != RINGBUFFER_POLL_OK) |
| 137 | return -1; |
| 138 | |
| 139 | return 0; |
| 140 | } |
| 141 | |
/* Queue @len bytes from @data into @rb for all registered consumers.
 *
 * Consumers that cannot make room (their blocking drain fails) are
 * unregistered. Returns -1 if @len can never fit (>= buffer size),
 * 0 otherwise.
 */
int ringbuffer_queue(struct ringbuffer *rb, uint8_t *data, size_t len)
{
	struct ringbuffer_consumer *rbc;
	size_t wlen;
	int i, rc;

	/* One slot is always left unused (see ringbuffer_space()), so a
	 * write of size bytes or more can never succeed. */
	if (len >= rb->size)
		return -1;

	if (len == 0)
		return 0;

	/* Ensure there is at least len bytes of space available.
	 *
	 * If a client doesn't have sufficient space, perform a blocking write
	 * (by calling ->poll_fn with force_len) to create it.
	 */
	for (i = 0; i < rb->n_consumers; i++) {
		rbc = rb->consumers[i];

		rc = ringbuffer_consumer_ensure_space(rbc, len);
		if (rc) {
			/* Drop the consumer; unregistering compacts the
			 * array, so step i back to re-examine this slot. */
			ringbuffer_consumer_unregister(rbc);
			i--;
			continue;
		}

		assert(ringbuffer_space(rbc) >= len);
	}

	/* Now that we know we have enough space, add new data to tail */
	wlen = min(len, rb->size - rb->tail);
	memcpy(rb->buf + rb->tail, data, wlen);
	rb->tail = (rb->tail + wlen) % rb->size;
	len -= wlen;
	data += wlen;

	/* Any remainder wraps to the start of the buffer; if the write
	 * didn't wrap, len is 0 here and this memcpy is a no-op. */
	memcpy(rb->buf, data, len);
	rb->tail += len;


	/* Inform consumers of new data in non-blocking mode, by calling
	 * ->poll_fn with 0 force_len */
	for (i = 0; i < rb->n_consumers; i++) {
		enum ringbuffer_poll_ret prc;

		rbc = rb->consumers[i];
		prc = rbc->poll_fn(rbc->poll_data, 0);
		if (prc == RINGBUFFER_POLL_REMOVE) {
			/* As above: array was compacted, revisit slot i. */
			ringbuffer_consumer_unregister(rbc);
			i--;
		}
	}

	return 0;
}
| 198 | |
| 199 | size_t ringbuffer_dequeue_peek(struct ringbuffer_consumer *rbc, size_t offset, |
| 200 | uint8_t **data) |
| 201 | { |
| 202 | struct ringbuffer *rb = rbc->rb; |
| 203 | size_t pos; |
| 204 | size_t len; |
| 205 | |
| 206 | if (offset >= ringbuffer_len(rbc)) |
| 207 | return 0; |
| 208 | |
| 209 | pos = (rbc->pos + offset) % rb->size; |
| 210 | if (pos <= rb->tail) |
| 211 | len = rb->tail - pos; |
| 212 | else |
| 213 | len = rb->size - pos; |
| 214 | |
| 215 | *data = rb->buf + pos; |
| 216 | return len; |
| 217 | } |
| 218 | |
| 219 | int ringbuffer_dequeue_commit(struct ringbuffer_consumer *rbc, size_t len) |
| 220 | { |
| 221 | assert(len <= ringbuffer_len(rbc)); |
| 222 | rbc->pos = (rbc->pos + len) % rbc->rb->size; |
| 223 | return 0; |
| 224 | } |