Brad Bishop | 64c979e | 2019-11-04 13:55:29 -0500 | [diff] [blame^] | 1 | Backport patch to fix CVE-2018-5743. |
| 2 | |
| 3 | Ref: |
| 4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 |
| 5 | |
| 6 | CVE: CVE-2018-5743 |
| 7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/2ab8a08] |
| 8 | |
| 9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> |
| 10 | |
| 11 | From 2ab8a085b3c666f28f1f9229bd6ecb59915b26c3 Mon Sep 17 00:00:00 2001 |
| 12 | From: Evan Hunt <each@isc.org> |
| 13 | Date: Fri, 5 Apr 2019 16:12:18 -0700 |
| 14 | Subject: [PATCH 4/6] better tcpquota accounting and client mortality checks |
| 15 | |
| 16 | - ensure that tcpactive is cleaned up correctly when accept() fails. |
| 17 | - set 'client->tcpattached' when the client is attached to the tcpquota. |
| 18 | carry this value on to new clients sharing the same pipeline group. |
| 19 | don't call isc_quota_detach() on the tcpquota unless tcpattached is |
| 20 | set. this way clients that were allowed to accept TCP connections |
| 21 | despite being over quota (and therefore, were never attached to the |
| 22 | quota) will not inadvertently detach from it and mess up the |
| 23 | accounting. |
| 24 | - simplify the code for tcpquota disconnection by using a new function |
| 25 | tcpquota_disconnect(). |
| 26 | - before deciding whether to reject a new connection due to quota |
| 27 | exhaustion, check to see whether there are at least two active |
| 28 | clients. previously, this was "at least one", but that could be |
| 29 | insufficient if there was one other client in READING state (waiting |
| 30 | for messages on an open connection) but none in READY (listening |
| 31 | for new connections). |
| 32 | - before deciding whether a TCP client object can go inactive, we
| 33 | must ensure there are enough other clients to maintain service |
| 34 | afterward -- both accepting new connections and reading/processing new |
| 35 | queries. A TCP client can't shut down unless at least one |
| 36 | client is accepting new connections and (in the case of pipelined |
| 37 | clients) at least one additional client is waiting to read. |
| 38 | |
| 39 | (cherry picked from commit c7394738b2445c16f728a88394864dd61baad900) |
| 40 | (cherry picked from commit e965d5f11d3d0f6d59704e614fceca2093cb1856) |
| 41 | (cherry picked from commit 87d431161450777ea093821212abfb52d51b36e3) |
| 42 | --- |
| 43 | bin/named/client.c | 244 +++++++++++++++++++------------ |
| 44 | bin/named/include/named/client.h | 3 +- |
| 45 | 2 files changed, 152 insertions(+), 95 deletions(-) |
| 46 | |
| 47 | diff --git a/bin/named/client.c b/bin/named/client.c |
| 48 | index 277656cef0..61e96dd28c 100644 |
| 49 | --- a/bin/named/client.c |
| 50 | +++ b/bin/named/client.c |
| 51 | @@ -244,13 +244,14 @@ static void client_start(isc_task_t *task, isc_event_t *event); |
| 52 | static void client_request(isc_task_t *task, isc_event_t *event); |
| 53 | static void ns_client_dumpmessage(ns_client_t *client, const char *reason); |
| 54 | static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 55 | - dns_dispatch_t *disp, bool tcp); |
| 56 | + dns_dispatch_t *disp, ns_client_t *oldclient, |
| 57 | + bool tcp); |
| 58 | static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 59 | isc_socket_t *sock, ns_client_t *oldclient); |
| 60 | static inline bool |
| 61 | allowed(isc_netaddr_t *addr, dns_name_t *signer, |
| 62 | isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen, |
| 63 | - uint8_t *ecs_scope, dns_acl_t *acl) |
| 64 | + uint8_t *ecs_scope, dns_acl_t *acl); |
| 65 | static void compute_cookie(ns_client_t *client, uint32_t when, |
| 66 | uint32_t nonce, const unsigned char *secret, |
| 67 | isc_buffer_t *buf); |
| 68 | @@ -319,7 +320,7 @@ pipeline_init(ns_client_t *client) { |
| 69 | * contention here is expected to be negligible, given that this code |
| 70 | * is only executed for TCP connections. |
| 71 | */ |
| 72 | - refs = isc_mem_allocate(client->sctx->mctx, sizeof(*refs)); |
| 73 | + refs = isc_mem_allocate(ns_g_mctx, sizeof(*refs)); |
| 74 | isc_refcount_init(refs, 1); |
| 75 | client->pipeline_refs = refs; |
| 76 | } |
| 77 | @@ -331,13 +332,13 @@ pipeline_init(ns_client_t *client) { |
| 78 | */ |
| 79 | static void |
| 80 | pipeline_attach(ns_client_t *source, ns_client_t *target) { |
| 81 | - int old_refs; |
| 82 | + int refs; |
| 83 | |
| 84 | REQUIRE(source->pipeline_refs != NULL); |
| 85 | REQUIRE(target->pipeline_refs == NULL); |
| 86 | |
| 87 | - old_refs = isc_refcount_increment(source->pipeline_refs); |
| 88 | - INSIST(old_refs > 0); |
| 89 | + isc_refcount_increment(source->pipeline_refs, &refs); |
| 90 | + INSIST(refs > 1); |
| 91 | target->pipeline_refs = source->pipeline_refs; |
| 92 | } |
| 93 | |
| 94 | @@ -349,25 +350,51 @@ pipeline_attach(ns_client_t *source, ns_client_t *target) { |
| 95 | */ |
| 96 | static bool |
| 97 | pipeline_detach(ns_client_t *client) { |
| 98 | - isc_refcount_t *refs; |
| 99 | - int old_refs; |
| 100 | + isc_refcount_t *refcount; |
| 101 | + int refs; |
| 102 | |
| 103 | REQUIRE(client->pipeline_refs != NULL); |
| 104 | |
| 105 | - refs = client->pipeline_refs; |
| 106 | + refcount = client->pipeline_refs; |
| 107 | client->pipeline_refs = NULL; |
| 108 | |
| 109 | - old_refs = isc_refcount_decrement(refs); |
| 110 | - INSIST(old_refs > 0); |
| 111 | + isc_refcount_decrement(refcount, refs); |
| 112 | |
| 113 | - if (old_refs == 1) { |
| 114 | - isc_mem_free(client->sctx->mctx, refs); |
| 115 | + if (refs == 0) { |
| 116 | + isc_mem_free(ns_g_mctx, refs); |
| 117 | return (true); |
| 118 | } |
| 119 | |
| 120 | return (false); |
| 121 | } |
| 122 | |
| 123 | +/* |
| 124 | + * Detach a client from the TCP client quota if appropriate, and set |
| 125 | + * the quota pointer to NULL. |
| 126 | + * |
| 127 | + * Sometimes when the TCP client quota is exhausted but there are no other |
| 128 | + * clients servicing the interface, a client will be allowed to continue |
| 129 | + * running despite not having been attached to the quota. In this event, |
| 130 | + * the TCP quota was never attached to the client, so when the client (or |
| 131 | + * associated pipeline group) shuts down, the quota must NOT be detached. |
| 132 | + * |
| 133 | + * Otherwise, if the quota pointer is set, it should be detached. If not |
| 134 | + * set at all, we just return without doing anything. |
| 135 | + */ |
| 136 | +static void |
| 137 | +tcpquota_disconnect(ns_client_t *client) { |
| 138 | + if (client->tcpquota == NULL) { |
| 139 | + return; |
| 140 | + } |
| 141 | + |
| 142 | + if (client->tcpattached) { |
| 143 | + isc_quota_detach(&client->tcpquota); |
| 144 | + client->tcpattached = false; |
| 145 | + } else { |
| 146 | + client->tcpquota = NULL; |
| 147 | + } |
| 148 | +} |
| 149 | + |
| 150 | /*% |
| 151 | * Check for a deactivation or shutdown request and take appropriate |
| 152 | * action. Returns true if either is in progress; in this case |
| 153 | @@ -490,38 +517,31 @@ exit_check(ns_client_t *client) { |
| 154 | client->tcpmsg_valid = false; |
| 155 | } |
| 156 | |
| 157 | - if (client->tcpquota != NULL) { |
| 158 | - if (client->pipeline_refs == NULL || |
| 159 | - pipeline_detach(client)) |
| 160 | - { |
| 161 | - /* |
| 162 | - * Only detach from the TCP client quota if |
| 163 | - * there are no more client structures using |
| 164 | - * this TCP connection. |
| 165 | - * |
| 166 | - * Note that we check 'pipeline_refs' and not |
| 167 | - * 'pipelined' because in some cases (e.g. |
| 168 | - * after receiving a request with an opcode |
| 169 | - * different than QUERY) 'pipelined' is set to |
| 170 | - * false after the reference counter gets |
| 171 | - * allocated in pipeline_init() and we must |
| 172 | - * still drop our reference as failing to do so |
| 173 | - * would prevent the reference counter itself |
| 174 | - * from being freed. |
| 175 | - */ |
| 176 | - isc_quota_detach(&client->tcpquota); |
| 177 | - } else { |
| 178 | - /* |
| 179 | - * There are other client structures using this |
| 180 | - * TCP connection, so we cannot detach from the |
| 181 | - * TCP client quota to prevent excess TCP |
| 182 | - * connections from being accepted. However, |
| 183 | - * this client structure might later be reused |
| 184 | - * for accepting new connections and thus must |
| 185 | - * have its 'tcpquota' field set to NULL. |
| 186 | - */ |
| 187 | - client->tcpquota = NULL; |
| 188 | - } |
| 189 | + /* |
| 190 | + * Detach from pipeline group and from TCP client quota, |
| 191 | + * if appropriate. |
| 192 | + * |
| 193 | + * - If no pipeline group is active, attempt to |
| 194 | + * detach from the TCP client quota. |
| 195 | + * |
| 196 | + * - If a pipeline group is active, detach from it; |
| 197 | + * if the return code indicates that there no more |
| 198 | + * clients left if this pipeline group, we also detach |
| 199 | + * from the TCP client quota. |
| 200 | + * |
| 201 | + * - Otherwise we don't try to detach, we just set the |
| 202 | + * TCP quota pointer to NULL if it wasn't NULL already. |
| 203 | + * |
| 204 | + * tcpquota_disconnect() will set tcpquota to NULL, either |
| 205 | + * by detaching it or by assignment, depending on the |
| 206 | + * needs of the client. See the comments on that function |
| 207 | + * for further information. |
| 208 | + */ |
| 209 | + if (client->pipeline_refs == NULL || pipeline_detach(client)) { |
| 210 | + tcpquota_disconnect(client); |
| 211 | + } else { |
| 212 | + client->tcpquota = NULL; |
| 213 | + client->tcpattached = false; |
| 214 | } |
| 215 | |
| 216 | if (client->tcpsocket != NULL) { |
| 217 | @@ -544,8 +564,6 @@ exit_check(ns_client_t *client) { |
| 218 | client->timerset = false; |
| 219 | } |
| 220 | |
| 221 | - client->pipelined = false; |
| 222 | - |
| 223 | client->peeraddr_valid = false; |
| 224 | |
| 225 | client->state = NS_CLIENTSTATE_READY; |
| 226 | @@ -558,18 +576,27 @@ exit_check(ns_client_t *client) { |
| 227 | * active and force it to go inactive if not. |
| 228 | * |
| 229 | * UDP clients go inactive at this point, but a TCP client |
| 230 | - * will needs to remain active if no other clients are |
| 231 | - * listening for TCP requests on this interface, to |
| 232 | - * prevent this interface from going nonresponsive. |
| 233 | + * may need to remain active and go into ready state if |
| 234 | + * no other clients are available to listen for TCP |
| 235 | + * requests on this interface or (in the case of pipelined |
| 236 | + * clients) to read for additional messages on the current |
| 237 | + * connection. |
| 238 | */ |
| 239 | if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) { |
| 240 | LOCK(&client->interface->lock); |
| 241 | - if (client->interface->ntcpaccepting == 0) { |
| 242 | + if ((client->interface->ntcpaccepting == 0 || |
| 243 | + (client->pipelined && |
| 244 | + client->interface->ntcpactive < 2)) && |
| 245 | + client->newstate != NS_CLIENTSTATE_FREED) |
| 246 | + { |
| 247 | client->mortal = false; |
| 248 | + client->newstate = NS_CLIENTSTATE_READY; |
| 249 | } |
| 250 | UNLOCK(&client->interface->lock); |
| 251 | } |
| 252 | |
| 253 | + client->pipelined = false; |
| 254 | + |
| 255 | /* |
| 256 | * We don't need the client; send it to the inactive |
| 257 | * queue for recycling. |
| 258 | @@ -2634,6 +2661,18 @@ client_request(isc_task_t *task, isc_event_t *event) { |
| 259 | client->pipelined = false; |
| 260 | } |
| 261 | if (TCP_CLIENT(client) && client->pipelined) { |
| 262 | + /* |
| 263 | + * We're pipelining. Replace the client; the |
| 264 | + * the replacement can read the TCP socket looking |
| 265 | + * for new messages and this client can process the |
| 266 | + * current message asynchronously. |
| 267 | + * |
| 268 | + * There are now at least three clients using this |
| 269 | + * TCP socket - one accepting new connections, |
| 270 | + * one reading an existing connection to get new |
| 271 | + * messages, and one answering the message already |
| 272 | + * received. |
| 273 | + */ |
| 274 | result = ns_client_replace(client); |
| 275 | if (result != ISC_R_SUCCESS) { |
| 276 | client->pipelined = false; |
| 277 | @@ -3197,6 +3236,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { |
| 278 | client->pipelined = false; |
| 279 | client->pipeline_refs = NULL; |
| 280 | client->tcpquota = NULL; |
| 281 | + client->tcpattached = false; |
| 282 | client->recursionquota = NULL; |
| 283 | client->interface = NULL; |
| 284 | client->peeraddr_valid = false; |
| 285 | @@ -3359,9 +3399,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { |
| 286 | NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3), |
| 287 | "accept failed: %s", |
| 288 | isc_result_totext(nevent->result)); |
| 289 | - if (client->tcpquota != NULL) { |
| 290 | - isc_quota_detach(&client->tcpquota); |
| 291 | - } |
| 292 | + tcpquota_disconnect(client); |
| 293 | } |
| 294 | |
| 295 | if (exit_check(client)) |
| 296 | @@ -3402,7 +3440,7 @@ client_newconn(isc_task_t *task, isc_event_t *event) { |
| 297 | client->pipelined = false; |
| 298 | result = ns_client_replace(client); |
| 299 | if (result == ISC_R_SUCCESS && |
| 300 | - (client->sctx->keepresporder == NULL || |
| 301 | + (ns_g_server->keepresporder == NULL || |
| 302 | !allowed(&netaddr, NULL, NULL, 0, NULL, |
| 303 | ns_g_server->keepresporder))) |
| 304 | { |
| 305 | @@ -3429,7 +3467,7 @@ client_accept(ns_client_t *client) { |
| 306 | * in named.conf. If we can't attach to it here, that means the TCP |
| 307 | * client quota has been exceeded. |
| 308 | */ |
| 309 | - result = isc_quota_attach(&client->sctx->tcpquota, |
| 310 | + result = isc_quota_attach(&ns_g_server->tcpquota, |
| 311 | &client->tcpquota); |
| 312 | if (result != ISC_R_SUCCESS) { |
| 313 | bool exit; |
| 314 | @@ -3447,27 +3485,27 @@ client_accept(ns_client_t *client) { |
| 315 | * interface to be starved, with no clients able |
| 316 | * to accept new connections. |
| 317 | * |
| 318 | - * So, we check here to see if any other client |
| 319 | - * is already servicing TCP queries on this |
| 320 | + * So, we check here to see if any other clients |
| 321 | + * are already servicing TCP queries on this |
| 322 | * interface (whether accepting, reading, or |
| 323 | - * processing). |
| 324 | - * |
| 325 | - * If so, then it's okay *not* to call |
| 326 | - * accept - we can let this client to go inactive |
| 327 | - * and the other one handle the next connection |
| 328 | - * when it's ready. |
| 329 | + * processing). If there are at least two |
| 330 | + * (one reading and one processing a request) |
| 331 | + * then it's okay *not* to call accept - we |
| 332 | + * can let this client go inactive and another |
| 333 | + * one will resume accepting when it's done. |
| 334 | * |
| 335 | - * But if not, then we need to be a little bit |
| 336 | - * flexible about the quota. We allow *one* extra |
| 337 | - * TCP client through, to ensure we're listening on |
| 338 | - * every interface. |
| 339 | + * If there aren't enough active clients on the |
| 340 | + * interface, then we can be a little bit |
| 341 | + * flexible about the quota. We'll allow *one* |
| 342 | + * extra client through to ensure we're listening |
| 343 | + * on every interface. |
| 344 | * |
| 345 | - * (Note: In practice this means that the *real* |
| 346 | - * TCP client quota is tcp-clients plus the number |
| 347 | - * of interfaces.) |
| 348 | + * (Note: In practice this means that the real |
| 349 | + * TCP client quota is tcp-clients plus the |
| 350 | + * number of listening interfaces plus 2.) |
| 351 | */ |
| 352 | LOCK(&client->interface->lock); |
| 353 | - exit = (client->interface->ntcpactive > 0); |
| 354 | + exit = (client->interface->ntcpactive > 1); |
| 355 | UNLOCK(&client->interface->lock); |
| 356 | |
| 357 | if (exit) { |
| 358 | @@ -3475,6 +3513,9 @@ client_accept(ns_client_t *client) { |
| 359 | (void)exit_check(client); |
| 360 | return; |
| 361 | } |
| 362 | + |
| 363 | + } else { |
| 364 | + client->tcpattached = true; |
| 365 | } |
| 366 | |
| 367 | /* |
| 368 | @@ -3507,9 +3548,16 @@ client_accept(ns_client_t *client) { |
| 369 | UNEXPECTED_ERROR(__FILE__, __LINE__, |
| 370 | "isc_socket_accept() failed: %s", |
| 371 | isc_result_totext(result)); |
| 372 | - if (client->tcpquota != NULL) { |
| 373 | - isc_quota_detach(&client->tcpquota); |
| 374 | + |
| 375 | + tcpquota_disconnect(client); |
| 376 | + |
| 377 | + if (client->tcpactive) { |
| 378 | + LOCK(&client->interface->lock); |
| 379 | + client->interface->ntcpactive--; |
| 380 | + UNLOCK(&client->interface->lock); |
| 381 | + client->tcpactive = false; |
| 382 | } |
| 383 | + |
| 384 | return; |
| 385 | } |
| 386 | |
| 387 | @@ -3527,13 +3575,12 @@ client_accept(ns_client_t *client) { |
| 388 | * once the connection is established. |
| 389 | * |
| 390 | * When the client object is shutting down after handling a TCP |
| 391 | - * request (see exit_check()), it looks to see whether this value is |
| 392 | - * non-zero. If so, that means another client has already called |
| 393 | - * accept() and is waiting to establish the next connection, which |
| 394 | - * means the first client is free to go inactive. Otherwise, |
| 395 | - * the first client must come back and call accept() again; this |
| 396 | - * guarantees there will always be at least one client listening |
| 397 | - * for new TCP connections on each interface. |
| 398 | + * request (see exit_check()), if this value is at least one, that |
| 399 | + * means another client has called accept() and is waiting to |
| 400 | + * establish the next connection. That means the client may be |
| 401 | + * be free to become inactive; otherwise it may need to start |
| 402 | + * listening for connections itself to prevent the interface |
| 403 | + * going dead. |
| 404 | */ |
| 405 | LOCK(&client->interface->lock); |
| 406 | client->interface->ntcpaccepting++; |
| 407 | @@ -3613,19 +3660,19 @@ ns_client_replace(ns_client_t *client) { |
| 408 | client->tcpsocket, client); |
| 409 | } else { |
| 410 | result = get_client(client->manager, client->interface, |
| 411 | - client->dispatch, tcp); |
| 412 | + client->dispatch, client, tcp); |
| 413 | + |
| 414 | + /* |
| 415 | + * The responsibility for listening for new requests is hereby |
| 416 | + * transferred to the new client. Therefore, the old client |
| 417 | + * should refrain from listening for any more requests. |
| 418 | + */ |
| 419 | + client->mortal = true; |
| 420 | } |
| 421 | if (result != ISC_R_SUCCESS) { |
| 422 | return (result); |
| 423 | } |
| 424 | |
| 425 | - /* |
| 426 | - * The responsibility for listening for new requests is hereby |
| 427 | - * transferred to the new client. Therefore, the old client |
| 428 | - * should refrain from listening for any more requests. |
| 429 | - */ |
| 430 | - client->mortal = true; |
| 431 | - |
| 432 | return (ISC_R_SUCCESS); |
| 433 | } |
| 434 | |
| 435 | @@ -3759,7 +3806,7 @@ ns_clientmgr_destroy(ns_clientmgr_t **managerp) { |
| 436 | |
| 437 | static isc_result_t |
| 438 | get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 439 | - dns_dispatch_t *disp, bool tcp) |
| 440 | + dns_dispatch_t *disp, ns_client_t *oldclient, bool tcp) |
| 441 | { |
| 442 | isc_result_t result = ISC_R_SUCCESS; |
| 443 | isc_event_t *ev; |
| 444 | @@ -3803,6 +3850,16 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 445 | client->dscp = ifp->dscp; |
| 446 | |
| 447 | if (tcp) { |
| 448 | + client->tcpattached = false; |
| 449 | + if (oldclient != NULL) { |
| 450 | + client->tcpattached = oldclient->tcpattached; |
| 451 | + } |
| 452 | + |
| 453 | + LOCK(&client->interface->lock); |
| 454 | + client->interface->ntcpactive++; |
| 455 | + UNLOCK(&client->interface->lock); |
| 456 | + client->tcpactive = true; |
| 457 | + |
| 458 | client->attributes |= NS_CLIENTATTR_TCP; |
| 459 | isc_socket_attach(ifp->tcpsocket, |
| 460 | &client->tcplistener); |
| 461 | @@ -3866,7 +3923,8 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, |
| 462 | ns_interface_attach(ifp, &client->interface); |
| 463 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; |
| 464 | INSIST(client->recursionquota == NULL); |
| 465 | - client->tcpquota = &client->sctx->tcpquota; |
| 466 | + client->tcpquota = &ns_g_server->tcpquota; |
| 467 | + client->tcpattached = oldclient->tcpattached; |
| 468 | |
| 469 | client->dscp = ifp->dscp; |
| 470 | |
| 471 | @@ -3885,7 +3943,6 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, |
| 472 | LOCK(&client->interface->lock); |
| 473 | client->interface->ntcpactive++; |
| 474 | UNLOCK(&client->interface->lock); |
| 475 | - |
| 476 | client->tcpactive = true; |
| 477 | |
| 478 | INSIST(client->tcpmsg_valid == false); |
| 479 | @@ -3913,7 +3970,8 @@ ns_clientmgr_createclients(ns_clientmgr_t *manager, unsigned int n, |
| 480 | MTRACE("createclients"); |
| 481 | |
| 482 | for (disp = 0; disp < n; disp++) { |
| 483 | - result = get_client(manager, ifp, ifp->udpdispatch[disp], tcp); |
| 484 | + result = get_client(manager, ifp, ifp->udpdispatch[disp], |
| 485 | + NULL, tcp); |
| 486 | if (result != ISC_R_SUCCESS) |
| 487 | break; |
| 488 | } |
| 489 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h |
| 490 | index aeed9ccdda..e2c40acd28 100644 |
| 491 | --- a/bin/named/include/named/client.h |
| 492 | +++ b/bin/named/include/named/client.h |
| 493 | @@ -9,8 +9,6 @@ |
| 494 | * information regarding copyright ownership. |
| 495 | */ |
| 496 | |
| 497 | -/* $Id: client.h,v 1.96 2012/01/31 23:47:31 tbox Exp $ */ |
| 498 | - |
| 499 | #ifndef NAMED_CLIENT_H |
| 500 | #define NAMED_CLIENT_H 1 |
| 501 | |
| 502 | @@ -136,6 +134,7 @@ struct ns_client { |
| 503 | bool pipelined; /*%< TCP queries not in sequence */ |
| 504 | isc_refcount_t *pipeline_refs; |
| 505 | isc_quota_t *tcpquota; |
| 506 | + bool tcpattached; |
| 507 | isc_quota_t *recursionquota; |
| 508 | ns_interface_t *interface; |
| 509 | |
| 510 | -- |
| 511 | 2.20.1 |
| 512 | |