Eliminate excessive CPU consumption when redirecting UART over SSH
Redirecting the external UART via SSH caused the console-server,
console-client, and dropbear to consume ~30% of the available CPU each
when a large amount of data was being written to the UART output.
Buffering the many small 16550 FIFO reads into a larger packet and then
handing that packet to the SSH software allows more efficient
transmission over the Ethernet connection.
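The general idea is the usual accumulate-or-flush pattern: hold small
reads until either a size threshold is reached or a short idle timeout
fires, then write the whole batch at once. The sketch below is a
minimal, self-contained illustration of that pattern only; the names
batch_append, batch_flush and BATCH_SIZE are hypothetical and are not
part of obmc-console. The actual change in socket-handler.c below
implements the same idea using the console's ringbuffer and poller.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define BATCH_SIZE 512  /* flush once this many bytes have accumulated */

    struct batch {
        char buf[BATCH_SIZE];
        size_t len;
    };

    /* Hypothetical sink: in obmc-console this would be the client
     * socket write. */
    static void batch_flush(struct batch *b)
    {
        if (!b->len)
            return;
        fwrite(b->buf, 1, b->len, stdout);
        b->len = 0;
    }

    /* Append a small UART read to the batch. Only flush when the
     * threshold is reached; a separate idle timeout (not shown here)
     * flushes whatever is pending when the UART goes quiet, so data is
     * never held indefinitely. */
    static void batch_append(struct batch *b, const char *data, size_t len)
    {
        while (len) {
            size_t space = BATCH_SIZE - b->len;
            size_t n = len < space ? len : space;

            memcpy(b->buf + b->len, data, n);
            b->len += n;
            data += n;
            len -= n;

            if (b->len == BATCH_SIZE)
                batch_flush(b);
        }
    }

    int main(void)
    {
        struct batch b = { .len = 0 };

        /* Simulate many tiny 16550 FIFO reads. */
        for (int i = 0; i < 1000; i++)
            batch_append(&b, "x", 1);

        batch_flush(&b);  /* final flush, stands in for the idle timeout */
        return 0;
    }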
Tested by running "ssh root@<bmc.ip.addr> -p 2200" from a system running
a CentOS distribution. From a Bash console, run a large binary file
through "od -t x1 <fname>" to generate a large amount of traffic. At the
BMC console, run "top" to review the CPU usage. With this change
applied, the observed usage is approximately:
console-server: ~25% CPU
dropbear: ~3% CPU
console-client: ~1% CPU
Change-Id: Ibabfd285e97a487e7ff040e1cb3159fbff360328
Signed-off-by: Johnathan Mantey <johnathanx.mantey@intel.com>
diff --git a/socket-handler.c b/socket-handler.c
index 1b2cb4f..be7daa4 100644
--- a/socket-handler.c
+++ b/socket-handler.c
@@ -32,6 +32,10 @@
#include "console-server.h"
+#define SOCKET_HANDLER_PKT_SIZE 512
+/* Set poll() timeout to 4000 us (4 ms) */
+#define SOCKET_HANDLER_PKT_US_TIMEOUT 4000
+
struct client {
struct socket_handler *sh;
struct poller *poller;
@@ -50,6 +54,11 @@
int n_clients;
};
+static struct timeval const socket_handler_timeout = {
+ .tv_sec = 0,
+ .tv_usec = SOCKET_HANDLER_PKT_US_TIMEOUT
+};
+
static struct socket_handler *to_socket_handler(struct handler *handler)
{
return container_of(handler, struct socket_handler, handler);
@@ -179,7 +188,17 @@
size_t force_len)
{
struct client *client = arg;
- int rc;
+ int rc, len;
+
+ len = ringbuffer_len(client->rbc);
+ if (!force_len && (len < SOCKET_HANDLER_PKT_SIZE)) {
+ /* Do nothing until enough small writes have accumulated, or the
+ * UART has been idle for a while (as determined by the timeout
+ * value supplied to the poll() call in console-server.c). */
+ console_poller_set_timeout(client->sh->console, client->poller,
+ &socket_handler_timeout);
+ return RINGBUFFER_POLL_OK;
+ }
rc = client_drain_queue(client, force_len);
if (rc) {
@@ -191,6 +210,26 @@
return RINGBUFFER_POLL_OK;
}
+static enum poller_ret client_timeout(struct handler *handler, void *data)
+{
+ struct client *client = data;
+ int rc = 0;
+
+ if (client->blocked) {
+ /* nothing to do here, we'll call client_drain_queue when
+ * we become unblocked */
+ return POLLER_OK;
+ }
+
+ rc = client_drain_queue(client, 0);
+ if (rc) {
+ client_close(client);
+ return POLLER_REMOVE;
+ }
+
+ return POLLER_OK;
+}
+
static enum poller_ret client_poll(struct handler *handler,
int events, void *data)
{
@@ -248,7 +287,8 @@
client->sh = sh;
client->fd = fd;
client->poller = console_poller_register(sh->console, handler,
- client_poll, client->fd, POLLIN, client);
+ client_poll, client_timeout, client->fd, POLLIN,
+ client);
client->rbc = console_ringbuffer_consumer_register(sh->console,
client_ringbuffer_poll, client);
@@ -297,7 +337,7 @@
}
sh->poller = console_poller_register(console, handler, socket_poll,
- sh->sd, POLLIN, NULL);
+ NULL, sh->sd, POLLIN, NULL);
return 0;
}
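For context on the timeout value above: console_poller_set_timeout()
arms a per-poller expiry, and the main loop in console-server.c derives
the millisecond timeout passed to poll() from the nearest pending
expiry. The fragment below is only a sketch of that conversion under
those assumptions, not the actual console-server.c code; the helper
name timeout_to_poll_ms is hypothetical.

    #include <stdio.h>
    #include <sys/time.h>

    /* Sketch: convert an absolute expiry (struct timeval) into the
     * millisecond timeout poll() expects, measured from "now". Rounds
     * up so a 4000 us timeout becomes 4 ms rather than expiring early. */
    static int timeout_to_poll_ms(const struct timeval *now,
                                  const struct timeval *expiry)
    {
        long sec = expiry->tv_sec - now->tv_sec;
        long usec = expiry->tv_usec - now->tv_usec;

        if (usec < 0) {
            usec += 1000000;
            sec -= 1;
        }

        if (sec < 0)
            return 0;  /* already expired: let poll() return at once */

        return (int)(sec * 1000 + (usec + 999) / 1000);
    }

    int main(void)
    {
        struct timeval now = { .tv_sec = 0, .tv_usec = 0 };
        struct timeval expiry = { .tv_sec = 0, .tv_usec = 4000 };

        printf("poll timeout: %d ms\n", timeout_to_poll_ms(&now, &expiry));
        return 0;
    }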