Backport patch to fix CVE-2018-5743.
Ref:
https://security-tracker.debian.org/tracker/CVE-2018-5743
CVE: CVE-2018-5743
Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08]
Signed-off-by: Kai Kang <kai.kang@windriver.com>
From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001
From: Evan Hunt <each@isc.org>
Date: Fri, 5 Apr 2019 16:12:18 -0700
Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks
- ensure that tcpactive is cleaned up correctly when accept() fails.
- set 'client->tcpattached' when the client is attached to the tcpquota.
carry this value on to new clients sharing the same pipeline group.
don't call isc_quota_detach() on the tcpquota unless tcpattached is
set. this way clients that were allowed to accept TCP connections
despite being over quota (and therefore, were never attached to the
quota) will not inadvertently detach from it and mess up the
accounting.
- simplify the code for tcpquota disconnection by using a new function
tcpquota_disconnect().
- before deciding whether to reject a new connection due to quota
exhaustion, check to see whether there are at least two active
clients. previously, this was "at least one", but that could be
insufficient if there was one other client in READING state (waiting
for messages on an open connection) but none in READY (listening
for new connections).
- before deciding whether a TCP client object can go inactive, we
must ensure there are enough other clients to maintain service
afterward -- both accepting new connections and reading/processing new
queries. A TCP client can't shut down unless at least one
client is accepting new connections and (in the case of pipelined
clients) at least one additional client is waiting to read (see the
simplified sketch below).
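A stand-alone C sketch of the accounting described above (this is an
illustration, not the BIND code itself: quota_t, client_t,
quota_attach() and quota_disconnect() are simplified stand-ins for
isc_quota_t, ns_client_t, isc_quota_attach() and the new
tcpquota_disconnect(), and ntcpactive stands in for the per-interface
counter):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for isc_quota_t and ns_client_t. */
typedef struct {
	int used;
	int max;
} quota_t;

typedef struct {
	quota_t *tcpquota;	/* quota this client references, if any */
	bool tcpattached;	/* true only if a slot was really taken */
} client_t;

/* Roughly the isc_quota_attach() idea: reserve a slot if one is free. */
static bool
quota_attach(quota_t *q, client_t *c) {
	if (q->used >= q->max)
		return false;
	q->used++;
	c->tcpquota = q;
	c->tcpattached = true;
	return true;
}

/*
 * Roughly the new tcpquota_disconnect(): give the slot back only if
 * this client actually took one; otherwise just clear the pointer.
 */
static void
quota_disconnect(client_t *c) {
	if (c->tcpquota == NULL)
		return;
	if (c->tcpattached) {
		c->tcpquota->used--;
		c->tcpattached = false;
	}
	c->tcpquota = NULL;
}

int
main(void) {
	quota_t q = { 0, 1 };		/* tcp-clients 1 */
	client_t a = { NULL, false };
	client_t b = { NULL, false };
	int ntcpactive = 0;		/* clients servicing this interface */

	quota_attach(&q, &a);		/* first client takes the only slot */
	ntcpactive++;

	if (!quota_attach(&q, &b)) {
		/*
		 * Over quota.  Refuse the new client only if at least
		 * two others are already active (one accepting, one
		 * reading); otherwise admit it without attaching so
		 * the interface keeps listening.
		 */
		if (ntcpactive > 1) {
			printf("refused: enough active clients\n");
		} else {
			b.tcpquota = &q;	/* allowed over quota */
			b.tcpattached = false;
			ntcpactive++;
			printf("admitted over quota, not attached\n");
		}
	}

	quota_disconnect(&b);	/* never attached, so q.used is untouched */
	quota_disconnect(&a);	/* returns the slot */
	printf("quota slots in use after cleanup: %d\n", q.used);
	return 0;
}

The point mirrored here is that a client admitted over quota never
decrements the quota on shutdown, because it never incremented it;
that is exactly the bookkeeping the tcpattached flag makes explicit.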
(cherry picked from commit c7394738b2445c16f728a88394864dd61baad900)
(cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856)
(cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3)
---
bin/named/client.c | 244 +++++++++++++++++++------------
bin/named/include/named/client.h | 3 +-
2 files changed, 152 insertions(+), 95 deletions(-)
diff --git a/bin/named/client.c b/bin/named/client.c
index 277656cef0..61e96dd28c 100644
--- a/bin/named/client.c
+++ b/bin/named/client.c
@@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event);
static void client_request(isc_task_t *task, isc_event_t *event);
static void ns_client_dumpmessage(ns_client_t *client, const char *reason);
static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- dns_dispatch_t *disp, bool tcp);
+ dns_dispatch_t *disp, ns_client_t *oldclient,
+ bool tcp);
static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp,
isc_socket_t *sock, ns_client_t *oldclient);
static inline bool
allowed(isc_netaddr_t *addr, dns_name_t *signer,
isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen,
- uint8_t *ecs_scope, dns_acl_t *acl)
+ uint8_t *ecs_scope, dns_acl_t *acl);
static void compute_cookie(ns_client_t *client, uint32_t when,
uint32_t nonce, const unsigned char *secret,
isc_buffer_t *buf);
@@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) {
* contention here is expected to be negligible, given that this code
* is only executed for TCP connections.
*/
- refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs));
+ refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs));
isc_refcount_init(refs, 1);
client->pipeline_refs = refs;
}
@@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) {
*/
static void
pipeline_attach(ns_client_t *source, ns_client_t *target) {
- int old_refs;
+ int refs;
REQUIRE(source->pipeline_refs != NULL);
REQUIRE(target->pipeline_refs == NULL);
- old_refs = isc_refcount_increment(source->pipeline_refs);
- INSIST(old_refs > 0);
+ isc_refcount_increment(source->pipeline_refs, &refs);
+ INSIST(refs > 1);
target->pipeline_refs = source->pipeline_refs;
}
@@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) {
*/
static bool
pipeline_detach(ns_client_t *client) {
- isc_refcount_t *refs;
- int old_refs;
+ isc_refcount_t *refcount;
+ int refs;
REQUIRE(client->pipeline_refs != NULL);
- refs = client->pipeline_refs;
+ refcount = client->pipeline_refs;
client->pipeline_refs = NULL;
- old_refs = isc_refcount_decrement(refs);
- INSIST(old_refs > 0);
+ isc_refcount_decrement(refcount, &refs);
- if (old_refs == 1) {
- isc_mem_free(client->sctx->mctx, refs);
+ if (refs == 0) {
+ isc_mem_free(ns_g_mctx, refcount);
return (true);
}
return (false);
}
+/*
+ * Detach a client from the TCP client quota if appropriate, and set
+ * the quota pointer to NULL.
+ *
+ * Sometimes when the TCP client quota is exhausted but there are no other
+ * clients servicing the interface, a client will be allowed to continue
+ * running despite not having been attached to the quota. In this event,
+ * the TCP quota was never attached to the client, so when the client (or
+ * associated pipeline group) shuts down, the quota must NOT be detached.
+ *
+ * Otherwise, if the quota pointer is set, it should be detached. If not
+ * set at all, we just return without doing anything.
+ */
+static void
+tcpquota_disconnect(ns_client_t *client) {
+ if (client->tcpquota == NULL) {
+ return;
+ }
+
+ if (client->tcpattached) {
+ isc_quota_detach(&client->tcpquota);
+ client->tcpattached = false;
+ } else {
+ client->tcpquota = NULL;
+ }
+}
+
/*%
* Check for a deactivation or shutdown request and take appropriate
* action. Returns true if either is in progress; in this case
@@ -490,38 +517,31 @@ exit_check(ns_client_t *client) {
client->tcpmsg_valid = false;
}
- if (client->tcpquota != NULL) {
- if (client->pipeline_refs == NULL ||
- pipeline_detach(client))
- {
- /*
- * Only detach from the TCP client quota if
- * there are no more client structures using
- * this TCP connection.
- *
- * Note that we check 'pipeline_refs' and not
- * 'pipelined' because in some cases (e.g.
- * after receiving a request with an opcode
- * different than QUERY) 'pipelined' is set to
- * false after the reference counter gets
- * allocated in pipeline_init() and we must
- * still drop our reference as failing to do so
- * would prevent the reference counter itself
- * from being freed.
- */
- isc_quota_detach(&client->tcpquota);
- } else {
- /*
- * There are other client structures using this
- * TCP connection, so we cannot detach from the
- * TCP client quota to prevent excess TCP
- * connections from being accepted. However,
- * this client structure might later be reused
- * for accepting new connections and thus must
- * have its 'tcpquota' field set to NULL.
- */
- client->tcpquota = NULL;
- }
+ /*
+ * Detach from pipeline group and from TCP client quota,
+ * if appropriate.
+ *
+ * - If no pipeline group is active, attempt to
+ * detach from the TCP client quota.
+ *
+ * - If a pipeline group is active, detach from it;
+ * if the return code indicates that there are no more
+ * clients left in this pipeline group, we also detach
+ * from the TCP client quota.
+ *
+ * - Otherwise we don't try to detach, we just set the
+ * TCP quota pointer to NULL if it wasn't NULL already.
+ *
+ * tcpquota_disconnect() will set tcpquota to NULL, either
+ * by detaching it or by assignment, depending on the
+ * needs of the client. See the comments on that function
+ * for further information.
+ */
+ if (client->pipeline_refs == NULL || pipeline_detach(client)) {
+ tcpquota_disconnect(client);
+ } else {
+ client->tcpquota = NULL;
+ client->tcpattached = false;
}
if (client->tcpsocket != NULL) {
@@ -544,8 +564,6 @@ exit_check(ns_client_t *client) {
client->timerset = false;
}
- client->pipelined = false;
-
client->peeraddr_valid = false;
client->state = NS_CLIENTSTATE_READY;
@@ -558,18 +576,27 @@ exit_check(ns_client_t *client) {
* active and force it to go inactive if not.
*
* UDP clients go inactive at this point, but a TCP client
- * will needs to remain active if no other clients are
- * listening for TCP requests on this interface, to
- * prevent this interface from going nonresponsive.
+ * may need to remain active and go into ready state if
+ * no other clients are available to listen for TCP
+ * requests on this interface or (in the case of pipelined
+ * clients) to read for additional messages on the current
+ * connection.
*/
if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) {
LOCK(&client->interface->lock);
- if (client->interface->ntcpaccepting == 0) {
+ if ((client->interface->ntcpaccepting == 0 ||
+ (client->pipelined &&
+ client->interface->ntcpactive < 2)) &&
+ client->newstate != NS_CLIENTSTATE_FREED)
+ {
client->mortal = false;
+ client->newstate = NS_CLIENTSTATE_READY;
}
UNLOCK(&client->interface->lock);
}
+ client->pipelined = false;
+
/*
* We don't need the client; send it to the inactive
* queue for recycling.
@@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) {
client->pipelined = false;
}
if (TCP_CLIENT(client) && client->pipelined) {
+ /*
+ * We're pipelining. Replace the client; the
+ * replacement can read the TCP socket looking
+ * for new messages and this client can process the
+ * current message asynchronously.
+ *
+ * There are now at least three clients using this
+ * TCP socket - one accepting new connections,
+ * one reading an existing connection to get new
+ * messages, and one answering the message already
+ * received.
+ */
result = ns_client_replace(client);
if (result != ISC_R_SUCCESS) {
client->pipelined = false;
@@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
client->pipelined = false;
client->pipeline_refs = NULL;
client->tcpquota = NULL;
+ client->tcpattached = false;
client->recursionquota = NULL;
client->interface = NULL;
client->peeraddr_valid = false;
@@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3),
"accept failed: %s",
isc_result_totext(nevent->result));
- if (client->tcpquota != NULL) {
- isc_quota_detach(&client->tcpquota);
- }
+ tcpquota_disconnect(client);
}
if (exit_check(client))
@@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) {
client->pipelined = false;
result = ns_client_replace(client);
if (result == ISC_R_SUCCESS &&
- (client->sctx->keepresporder == NULL ||
+ (ns_g_server->keepresporder == NULL ||
!allowed(&netaddr, NULL, NULL, 0, NULL,
ns_g_server->keepresporder)))
{
@@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) {
* in named.conf. If we can't attach to it here, that means the TCP
* client quota has been exceeded.
*/
- result = isc_quota_attach(&client->sctx->tcpquota,
+ result = isc_quota_attach(&ns_g_server->tcpquota,
&client->tcpquota);
if (result != ISC_R_SUCCESS) {
bool exit;
@@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) {
* interface to be starved, with no clients able
* to accept new connections.
*
- * So, we check here to see if any other client
- * is already servicing TCP queries on this
+ * So, we check here to see if any other clients
+ * are already servicing TCP queries on this
* interface (whether accepting, reading, or
- * processing).
- *
- * If so, then it's okay *not* to call
- * accept - we can let this client to go inactive
- * and the other one handle the next connection
- * when it's ready.
+ * processing). If there are at least two
+ * (one reading and one processing a request)
+ * then it's okay *not* to call accept - we
+ * can let this client go inactive and another
+ * one will resume accepting when it's done.
*
- * But if not, then we need to be a little bit
- * flexible about the quota. We allow *one* extra
- * TCP client through, to ensure we're listening on
- * every interface.
+ * If there aren't enough active clients on the
+ * interface, then we can be a little bit
+ * flexible about the quota. We'll allow *one*
+ * extra client through to ensure we're listening
+ * on every interface.
*
- * (Note: In practice this means that the *real*
- * TCP client quota is tcp-clients plus the number
- * of interfaces.)
+ * (Note: In practice this means that the real
+ * TCP client quota is tcp-clients plus the
+ * number of listening interfaces plus 2.)
*/
LOCK(&client->interface->lock);
- exit = (client->interface->ntcpactive > 0);
+ exit = (client->interface->ntcpactive > 1);
UNLOCK(&client->interface->lock);
if (exit) {
@@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) {
(void)exit_check(client);
return;
}
+
+ } else {
+ client->tcpattached = true;
}
/*
@@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"isc_socket_accept() failed: %s",
isc_result_totext(result));
- if (client->tcpquota != NULL) {
- isc_quota_detach(&client->tcpquota);
+
+ tcpquota_disconnect(client);
+
+ if (client->tcpactive) {
+ LOCK(&client->interface->lock);
+ client->interface->ntcpactive--;
+ UNLOCK(&client->interface->lock);
+ client->tcpactive = false;
}
+
return;
}
@@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) {
* once the connection is established.
*
* When the client object is shutting down after handling a TCP
- * request (see exit_check()), it looks to see whether this value is
- * non-zero. If so, that means another client has already called
- * accept() and is waiting to establish the next connection, which
- * means the first client is free to go inactive. Otherwise,
- * the first client must come back and call accept() again; this
- * guarantees there will always be at least one client listening
- * for new TCP connections on each interface.
+ * request (see exit_check()), if this value is at least one, that
+ * means another client has called accept() and is waiting to
+ * establish the next connection. That means the client may be
+ * free to become inactive; otherwise it may need to start
+ * listening for connections itself to prevent the interface
+ * going dead.
*/
LOCK(&client->interface->lock);
client->interface->ntcpaccepting++;
@@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) {
client->tcpsocket, client);
} else {
result = get_client(client->manager, client->interface,
- client->dispatch, tcp);
+ client->dispatch, client, tcp);
+
+ /*
+ * The responsibility for listening for new requests is hereby
+ * transferred to the new client. Therefore, the old client
+ * should refrain from listening for any more requests.
+ */
+ client->mortal = true;
}
if (result != ISC_R_SUCCESS) {
return (result);
}
- /*
- * The responsibility for listening for new requests is hereby
- * transferred to the new client. Therefore, the old client
- * should refrain from listening for any more requests.
- */
- client->mortal = true;
-
return (ISC_R_SUCCESS);
}
@@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) {
static isc_result_t
get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
- dns_dispatch_t *disp, bool tcp)
+ dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp)
{
isc_result_t result = ISC_R_SUCCESS;
isc_event_t *ev;
@@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp,
client->dscp = ifp->dscp;
if (tcp) {
+ client->tcpattached = false;
+ if (oldclient != NULL) {
+ client->tcpattached = oldclient->tcpattached;
+ }
+
+ LOCK(&client->interface->lock);
+ client->interface->ntcpactive++;
+ UNLOCK(&client->interface->lock);
+ client->tcpactive = true;
+
client->attributes |= NS_CLIENTATTR_TCP;
isc_socket_attach(ifp->tcpsocket,
&client->tcplistener);
@@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
ns_interface_attach(ifp, &client->interface);
client->newstate = client->state = NS_CLIENTSTATE_WORKING;
INSIST(client->recursionquota == NULL);
- client->tcpquota = &client->sctx->tcpquota;
+ client->tcpquota = &ns_g_server->tcpquota;
+ client->tcpattached = oldclient->tcpattached;
client->dscp = ifp->dscp;
@@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock,
LOCK(&client->interface->lock);
client->interface->ntcpactive++;
UNLOCK(&client->interface->lock);
-
client->tcpactive = true;
INSIST(client->tcpmsg_valid == false);
@@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n,
MTRACE("createclients");
for (disp = 0; disp < n; disp++) {
- result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp);
+ result = get_client(manager, ifp, ifp->udpdispatch[disp],
+ NULL, tcp);
if (result != ISC_R_SUCCESS)
break;
}
diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h
index aeed9ccdda..e2c40acd28 100644
--- a/bin/named/include/named/client.h
+++ b/bin/named/include/named/client.h
@@ -9,8 +9,6 @@
* information regarding copyright ownership.
*/
-/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */
-
#ifndef NAMED_CLIENT_H
#define NAMED_CLIENT_H 1
@@ -136,6 +134,7 @@ struct ns_client {
bool pipelined; /*%< TCP queries not in sequence */
isc_refcount_t *pipeline_refs;
isc_quota_t *tcpquota;
+ bool tcpattached;
isc_quota_t *recursionquota;
ns_interface_t *interface;
--
2.20.1