/**
 * Copyright © 2017 IBM Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "console-server.h"

static inline size_t min(size_t a, size_t b)
{
	return a < b ? a : b;
}

struct ringbuffer {
	uint8_t *buf;
	size_t size;
	size_t tail;
	struct ringbuffer_consumer **consumers;
	int n_consumers;
};

struct ringbuffer_consumer {
	struct ringbuffer *rb;
	ringbuffer_poll_fn_t poll_fn;
	void *poll_data;
	size_t pos;
};
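
/*
 * Layout note: the ring is single-producer, multi-consumer. The producer
 * owns rb->tail; each consumer tracks its own read position in rbc->pos.
 * The bytes pending for a consumer are those between pos and tail, taken
 * modulo the buffer size.
 *
 * A small worked example, with size = 8, tail = 2 and pos = 6, so the
 * pending region wraps around the end of the buffer:
 *
 *	len   = tail + size - pos = 2 + 8 - 6 = 4	(indices 6, 7, 0, 1)
 *	space = size - len - 1    = 8 - 4 - 1 = 3
 *
 * One slot is always left unused, so pos == tail unambiguously means
 * "empty" rather than "full"; this is also why ringbuffer_queue() rejects
 * writes of size or more bytes.
 */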

struct ringbuffer *ringbuffer_init(size_t size)
{
	struct ringbuffer *rb;

	/* Single allocation: the data buffer lives directly after the struct */
	rb = malloc(sizeof(*rb) + size);
	if (!rb)
		return NULL;

	memset(rb, 0, sizeof(*rb));
	rb->size = size;
	rb->buf = (void *)(rb + 1);

	return rb;
}

void ringbuffer_fini(struct ringbuffer *rb)
{
	while (rb->n_consumers)
		ringbuffer_consumer_unregister(rb->consumers[0]);
	free(rb);
}

struct ringbuffer_consumer *ringbuffer_consumer_register(struct ringbuffer *rb,
		ringbuffer_poll_fn_t fn, void *data)
{
	struct ringbuffer_consumer *rbc;
	int n;

	rbc = malloc(sizeof(*rbc));
	if (!rbc)
		return NULL;

	rbc->rb = rb;
	rbc->poll_fn = fn;
	rbc->poll_data = data;
	rbc->pos = rb->tail; /* start at the current tail: no data pending yet */

	n = rb->n_consumers++;
	rb->consumers = realloc(rb->consumers,
			sizeof(*rb->consumers) * rb->n_consumers);
	rb->consumers[n] = rbc;

	return rbc;
}

void ringbuffer_consumer_unregister(struct ringbuffer_consumer *rbc)
{
	struct ringbuffer *rb = rbc->rb;
	int i;

	for (i = 0; i < rb->n_consumers; i++)
		if (rb->consumers[i] == rbc)
			break;

	assert(i < rb->n_consumers);

	rb->n_consumers--;

	memmove(&rb->consumers[i], &rb->consumers[i+1],
			sizeof(*rb->consumers) * (rb->n_consumers - i));

	rb->consumers = realloc(rb->consumers,
			sizeof(*rb->consumers) * rb->n_consumers);

	free(rbc);
}

/* Number of bytes queued but not yet consumed by this consumer */
size_t ringbuffer_len(struct ringbuffer_consumer *rbc)
{
	if (rbc->pos <= rbc->rb->tail)
		return rbc->rb->tail - rbc->pos;
	else
		return rbc->rb->tail + rbc->rb->size - rbc->pos;
}

static size_t ringbuffer_space(struct ringbuffer_consumer *rbc)
{
	return rbc->rb->size - ringbuffer_len(rbc) - 1;
}

static int ringbuffer_consumer_ensure_space(
		struct ringbuffer_consumer *rbc, size_t len)
{
	enum ringbuffer_poll_ret prc;
	int force_len;

	if (ringbuffer_space(rbc) >= len)
		return 0;

	force_len = len - ringbuffer_space(rbc);

	prc = rbc->poll_fn(rbc->poll_data, force_len);
	if (prc != RINGBUFFER_POLL_OK)
		return -1;

	return 0;
}

int ringbuffer_queue(struct ringbuffer *rb, uint8_t *data, size_t len)
{
	struct ringbuffer_consumer *rbc;
	size_t wlen;
	int i, rc;

	if (len >= rb->size)
		return -1;

	if (len == 0)
		return 0;

	/* Ensure there are at least len bytes of space available for each
	 * consumer.
	 *
	 * If a consumer doesn't have sufficient space, perform a blocking
	 * write (by calling ->poll_fn with a non-zero force_len) to create
	 * it; consumers that can't make room are dropped.
	 */
	for (i = 0; i < rb->n_consumers; i++) {
		rbc = rb->consumers[i];

		rc = ringbuffer_consumer_ensure_space(rbc, len);
		if (rc) {
			ringbuffer_consumer_unregister(rbc);
			i--;
			continue;
		}

		assert(ringbuffer_space(rbc) >= len);
	}

	/* Now that we know we have enough space, add the new data at the
	 * tail, wrapping around the end of the buffer if necessary */
	wlen = min(len, rb->size - rb->tail);
	memcpy(rb->buf + rb->tail, data, wlen);
	rb->tail = (rb->tail + wlen) % rb->size;
	len -= wlen;
	data += wlen;

	memcpy(rb->buf, data, len);
	rb->tail += len;

	/* Inform consumers of the new data in non-blocking mode, by calling
	 * ->poll_fn with a force_len of 0 */
	for (i = 0; i < rb->n_consumers; i++) {
		enum ringbuffer_poll_ret prc;

		rbc = rb->consumers[i];
		prc = rbc->poll_fn(rbc->poll_data, 0);
		if (prc == RINGBUFFER_POLL_REMOVE) {
			ringbuffer_consumer_unregister(rbc);
			i--;
		}
	}

	return 0;
}
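
/*
 * For illustration only: a sketch of what a consumer's poll function might
 * look like, assuming ringbuffer_poll_fn_t in console-server.h matches the
 * calls above (a void *data argument plus a force_len byte count, returning
 * enum ringbuffer_poll_ret). The example_consumer type and its fd/rbc
 * members are hypothetical; real consumers live elsewhere in console-server.
 *
 *	static enum ringbuffer_poll_ret example_poll(void *data,
 *			size_t force_len)
 *	{
 *		struct example_consumer *c = data;
 *		uint8_t *buf;
 *		size_t len;
 *		ssize_t rc;
 *
 *		// force_len > 0 means the producer needs space: drain at
 *		// least that many bytes now (a blocking write). With
 *		// force_len == 0 we may flush as little as we like.
 *		do {
 *			len = ringbuffer_dequeue_peek(c->rbc, 0, &buf);
 *			if (!len)
 *				break;
 *
 *			rc = write(c->fd, buf, len);
 *			if (rc <= 0)
 *				return RINGBUFFER_POLL_REMOVE;
 *
 *			ringbuffer_dequeue_commit(c->rbc, rc);
 *			force_len -= min((size_t)rc, force_len);
 *		} while (force_len);
 *
 *		return RINGBUFFER_POLL_OK;
 *	}
 */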

/* Return a pointer (via *data) to the largest contiguous chunk of pending
 * data starting at offset bytes into the consumer's pending region, and
 * return its length. Data that wraps past the end of the buffer needs a
 * second call with an updated offset. */
size_t ringbuffer_dequeue_peek(struct ringbuffer_consumer *rbc, size_t offset,
		uint8_t **data)
{
	struct ringbuffer *rb = rbc->rb;
	size_t pos;
	size_t len;

	if (offset >= ringbuffer_len(rbc))
		return 0;

	pos = (rbc->pos + offset) % rb->size;
	if (pos <= rb->tail)
		len = rb->tail - pos;
	else
		len = rb->size - pos;

	*data = rb->buf + pos;
	return len;
}

int ringbuffer_dequeue_commit(struct ringbuffer_consumer *rbc, size_t len)
{
	assert(len <= ringbuffer_len(rbc));
	rbc->pos = (rbc->pos + len) % rbc->rb->size;
	return 0;
}
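
/*
 * For illustration only: a sketch of producer-side usage of this API,
 * assuming the declarations in console-server.h match the definitions
 * above. The example_poll function, example_setup wrapper and ctx pointer
 * are hypothetical.
 *
 *	static int example_setup(void *ctx)
 *	{
 *		struct ringbuffer *rb;
 *		struct ringbuffer_consumer *rbc;
 *
 *		rb = ringbuffer_init(4096);
 *		if (!rb)
 *			return -1;
 *
 *		rbc = ringbuffer_consumer_register(rb, example_poll, ctx);
 *		if (!rbc) {
 *			ringbuffer_fini(rb);
 *			return -1;
 *		}
 *
 *		// Push console data; consumers are notified via their poll_fn
 *		ringbuffer_queue(rb, (uint8_t *)"hello\n", 6);
 *
 *		// ringbuffer_fini unregisters any remaining consumers
 *		ringbuffer_fini(rb);
 *		return 0;
 *	}
 */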