/**
 * Copyright © 2016 IBM Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
#include <endian.h>

#include <sys/socket.h>
#include <sys/un.h>
#include <systemd/sd-daemon.h>

#include "console-server.h"

#define SOCKET_HANDLER_PKT_SIZE 512
/* Set the poll() timeout to 4000us (4ms) */
#define SOCKET_HANDLER_PKT_US_TIMEOUT 4000

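/*
 * Per-connection state for a client attached to the console socket: the
 * connection fd, the client's poller and ringbuffer-consumer registrations,
 * and whether writes to the client are currently blocked (socket buffer
 * full).
 */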
struct client {
	struct socket_handler *sh;
	struct poller *poller;
	struct ringbuffer_consumer *rbc;
	int fd;
	bool blocked;
};

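/*
 * Handler state: the listening socket, its poller registration, and the
 * dynamically-sized array of connected clients.
 */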
struct socket_handler {
	struct handler handler;
	struct console *console;
	struct poller *poller;
	int sd;

	struct client **clients;
	int n_clients;
};

static const struct timeval socket_handler_timeout = {
	.tv_sec = 0,
	.tv_usec = SOCKET_HANDLER_PKT_US_TIMEOUT,
};

static struct socket_handler *to_socket_handler(struct handler *handler)
{
	return container_of(handler, struct socket_handler, handler);
}

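/*
 * Close a client connection and release its resources: the fd, any poller
 * and ringbuffer-consumer registrations, and its slot in sh->clients.
 */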
static void client_close(struct client *client)
{
	struct socket_handler *sh = client->sh;
	int idx;

	close(client->fd);
	if (client->poller) {
		console_poller_unregister(sh->console, client->poller);
	}

	if (client->rbc) {
		ringbuffer_consumer_unregister(client->rbc);
	}

	for (idx = 0; idx < sh->n_clients; idx++) {
		if (sh->clients[idx] == client) {
			break;
		}
	}

	assert(idx < sh->n_clients);

	free(client);
	client = NULL;

	sh->n_clients--;
	/*
	 * We're managing an array of pointers to aggregates, so don't warn
	 * about sizeof() on a pointer type.
	 */
	/* NOLINTBEGIN(bugprone-sizeof-expression) */
	memmove(&sh->clients[idx], &sh->clients[idx + 1],
		sizeof(*sh->clients) * (sh->n_clients - idx));
	sh->clients =
		reallocarray(sh->clients, sh->n_clients, sizeof(*sh->clients));
	/* NOLINTEND(bugprone-sizeof-expression) */
}

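/*
 * Track whether the client's socket is write-blocked; while blocked, also
 * poll for POLLOUT so we notice when the socket becomes writable again.
 */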
static void client_set_blocked(struct client *client, bool blocked)
{
	int events;

	if (client->blocked == blocked) {
		return;
	}

	client->blocked = blocked;

	events = POLLIN;
	if (client->blocked) {
		events |= POLLOUT;
	}

	console_poller_set_events(client->sh->console, client->poller, events);
}

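/*
 * Write the buffer out to the client socket. In non-blocking mode, a short
 * write marks the client as blocked and returns the number of bytes sent so
 * far; a negative return indicates an error or a closed peer.
 */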
static ssize_t send_all(struct client *client, void *buf, size_t len,
			bool block)
{
	int fd, flags;
	ssize_t rc;
	size_t pos;

	if (len > SSIZE_MAX) {
		return -EINVAL;
	}

	fd = client->fd;

	flags = MSG_NOSIGNAL;
	if (!block) {
		flags |= MSG_DONTWAIT;
	}

	for (pos = 0; pos < len; pos += rc) {
		rc = send(fd, (char *)buf + pos, len - pos, flags);
		if (rc < 0) {
			if (!block &&
			    (errno == EAGAIN || errno == EWOULDBLOCK)) {
				client_set_blocked(client, true);
				break;
			}

			if (errno == EINTR) {
				/* retry, without letting the loop increment
				 * add the failed return value (-1) to pos */
				rc = 0;
				continue;
			}

			return -1;
		}
		if (rc == 0) {
			return -1;
		}
	}

	return (ssize_t)pos;
}

/* Drain the queue to the socket and update the queue buffer. If force_len is
 * set, send at least that many bytes from the queue, possibly while blocking.
 */
static int client_drain_queue(struct client *client, size_t force_len)
{
	uint8_t *buf;
	ssize_t wlen;
	size_t len, total_len;
	bool block;

	total_len = 0;
	wlen = 0;
	block = !!force_len;

	/* if we're already blocked, no need for the write */
	if (!block && client->blocked) {
		return 0;
	}

	for (;;) {
		len = ringbuffer_dequeue_peek(client->rbc, total_len, &buf);
		if (!len) {
			break;
		}

		wlen = send_all(client, buf, len, block);
		if (wlen <= 0) {
			break;
		}

		total_len += wlen;

		if (force_len && total_len >= force_len) {
			break;
		}
	}

	if (wlen < 0) {
		return -1;
	}

	if (force_len && total_len < force_len) {
		return -1;
	}

	ringbuffer_dequeue_commit(client->rbc, total_len);
	return 0;
}

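/*
 * Ringbuffer callback: batch small amounts of console data by arming the
 * poll timeout, and drain to the client once enough data has accumulated or
 * a flush of force_len bytes is required. Closes the client on failure.
 */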
static enum ringbuffer_poll_ret client_ringbuffer_poll(void *arg,
						       size_t force_len)
{
	struct client *client = arg;
	size_t len;
	int rc;

	len = ringbuffer_len(client->rbc);
	if (!force_len && (len < SOCKET_HANDLER_PKT_SIZE)) {
		/* Do nothing until many small requests have accumulated, or
		 * the UART has been idle for a while (as determined by the
		 * timeout value supplied to the poll function call in
		 * console-server.c).
		 */
		console_poller_set_timeout(client->sh->console, client->poller,
					   &socket_handler_timeout);
		return RINGBUFFER_POLL_OK;
	}

	rc = client_drain_queue(client, force_len);
	if (rc) {
		client->rbc = NULL;
		client_close(client);
		return RINGBUFFER_POLL_REMOVE;
	}

	return RINGBUFFER_POLL_OK;
}

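/*
 * Poll timeout callback: flush whatever is queued for the client, unless its
 * socket is still write-blocked.
 */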
static enum poller_ret
client_timeout(struct handler *handler __attribute__((unused)), void *data)
{
	struct client *client = data;
	int rc = 0;

	if (client->blocked) {
		/* Nothing to do here; we'll call client_drain_queue when
		 * we become unblocked.
		 */
		return POLLER_OK;
	}

	rc = client_drain_queue(client, 0);
	if (rc) {
		client_close(client);
		return POLLER_REMOVE;
	}

	return POLLER_OK;
}

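/*
 * Per-client fd callback: forward data received from the client to the
 * console, and drain queued console output once the socket becomes writable
 * again.
 */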
static enum poller_ret client_poll(struct handler *handler, int events,
				   void *data)
{
	struct socket_handler *sh = to_socket_handler(handler);
	struct client *client = data;
	uint8_t buf[4096];
	ssize_t rc;

	if (events & POLLIN) {
		rc = recv(client->fd, buf, sizeof(buf), MSG_DONTWAIT);
		if (rc < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK) {
				return POLLER_OK;
			}
			goto err_close;
		}
		if (rc == 0) {
			goto err_close;
		}

		console_data_out(sh->console, buf, rc);
	}

	if (events & POLLOUT) {
		client_set_blocked(client, false);
		rc = client_drain_queue(client, 0);
		if (rc) {
			goto err_close;
		}
	}

	return POLLER_OK;

err_close:
	client->poller = NULL;
	client_close(client);
	return POLLER_REMOVE;
}

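/*
 * Listening-socket callback: accept a new connection, register its poller
 * and ringbuffer consumer, and add it to the client array.
 */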
static enum poller_ret socket_poll(struct handler *handler, int events,
				   void __attribute__((unused)) *data)
{
	struct socket_handler *sh = to_socket_handler(handler);
	struct client *client;
	int fd, n;

	if (!(events & POLLIN)) {
		return POLLER_OK;
	}

	fd = accept(sh->sd, NULL, NULL);
	if (fd < 0) {
		return POLLER_OK;
	}

	client = malloc(sizeof(*client));
	if (!client) {
		/* we can't service this connection, so drop it */
		close(fd);
		return POLLER_OK;
	}
	memset(client, 0, sizeof(*client));

	client->sh = sh;
	client->fd = fd;
	client->poller = console_poller_register(sh->console, handler,
						 client_poll, client_timeout,
						 client->fd, POLLIN, client);
	client->rbc = console_ringbuffer_consumer_register(
		sh->console, client_ringbuffer_poll, client);

	n = sh->n_clients++;
	/*
	 * We're managing an array of pointers to aggregates, so don't warn
	 * about sizeof() on a pointer type.
	 */
	/* NOLINTBEGIN(bugprone-sizeof-expression) */
	sh->clients =
		reallocarray(sh->clients, sh->n_clients, sizeof(*sh->clients));
	/* NOLINTEND(bugprone-sizeof-expression) */
	sh->clients[n] = client;

	return POLLER_OK;
}

static int socket_init(struct handler *handler, struct console *console,
		       struct config *config)
{
	struct socket_handler *sh = to_socket_handler(handler);
	struct sockaddr_un addr;
	size_t addrlen;
	ssize_t len;
	int rc;

	sh->console = console;
	sh->clients = NULL;
	sh->n_clients = 0;

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	len = console_socket_path(&addr, config_get_value(config, "socket-id"));
	if (len < 0) {
		if (errno) {
			warn("Failed to configure socket: %s", strerror(errno));
		} else {
			warn("Socket name length exceeds buffer limits");
		}
		return -1;
	}

	/* Try to take a socket from systemd first */
	if (sd_listen_fds(0) == 1 &&
	    sd_is_socket_unix(SD_LISTEN_FDS_START, SOCK_STREAM, 1,
			      addr.sun_path, len) > 0) {
		sh->sd = SD_LISTEN_FDS_START;
	} else {
		sh->sd = socket(AF_UNIX, SOCK_STREAM, 0);
		if (sh->sd < 0) {
			warn("Can't create socket");
			return -1;
		}

		addrlen = sizeof(addr) - sizeof(addr.sun_path) + len;

		rc = bind(sh->sd, (struct sockaddr *)&addr, addrlen);
		if (rc) {
			socket_path_t name;
			console_socket_path_readable(&addr, addrlen, name);
			warn("Can't bind to socket path %s (terminated at first null)",
			     name);
			goto cleanup;
		}

		rc = listen(sh->sd, 1);
		if (rc) {
			warn("Can't listen for incoming connections");
			goto cleanup;
		}
	}

	sh->poller = console_poller_register(console, handler, socket_poll,
					     NULL, sh->sd, POLLIN, NULL);

	return 0;

cleanup:
	close(sh->sd);
	return -1;
}

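/*
 * Tear down the handler: close all remaining clients, unregister the
 * listening socket's poller, and close the socket itself.
 */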
static void socket_fini(struct handler *handler)
{
	struct socket_handler *sh = to_socket_handler(handler);

	while (sh->n_clients) {
		client_close(sh->clients[0]);
	}

	if (sh->poller) {
		console_poller_unregister(sh->console, sh->poller);
	}

	close(sh->sd);
}

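/* The socket handler instance registered with the console server core. */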
static struct socket_handler socket_handler = {
	.handler = {
		.name = "socket",
		.init = socket_init,
		.fini = socket_fini,
	},
};

console_handler_register(&socket_handler.handler);