Brad Bishop | 64c979e | 2019-11-04 13:55:29 -0500 | [diff] [blame^] | 1 | Backport patch to fix CVE-2018-5743. |
| 2 | |
| 3 | Ref: |
| 4 | https://security-tracker.debian.org/tracker/CVE-2018-5743 |
| 5 | |
| 6 | CVE: CVE-2018-5743 |
| 7 | Upstream-Status: Backport [https://gitlab.isc.org/isc-projects/bind9/commit/719f604] |
| 8 | |
| 9 | Signed-off-by: Kai Kang <kai.kang@windriver.com> |
| 10 | |
| 11 | From 719f604e3fad5b7479bd14e2fa0ef4413f0a8fdc Mon Sep 17 00:00:00 2001 |
| 12 | From: =?UTF-8?q?Witold=20Kr=C4=99cicki?= <wpk@isc.org> |
| 13 | Date: Fri, 4 Jan 2019 12:50:51 +0100 |
| 14 | Subject: [PATCH 2/6] tcp-clients could still be exceeded (v2) |
| 15 | |
| 16 | the TCP client quota could still be ineffective under some |
| 17 | circumstances. this change: |
| 18 | |
| 19 | - improves quota accounting to ensure that TCP clients are |
| 20 | properly limited, while still guaranteeing that at least one client |
| 21 | is always available to serve TCP connections on each interface. |
| 22 | - uses more descriptive names and removes one (ntcptarget) that |
| 23 | was no longer needed |
| 24 | - adds comments |
| 25 | |
| 26 | (cherry picked from commit 924651f1d5e605cd186d03f4f7340bcc54d77cc2) |
| 27 | (cherry picked from commit 55a7a458e30e47874d34bdf1079eb863a0512396) |
| 28 | --- |
| 29 | bin/named/client.c | 311 ++++++++++++++++++++----- |
| 30 | bin/named/include/named/client.h | 14 +- |
| 31 | bin/named/include/named/interfacemgr.h | 11 +- |
| 32 | bin/named/interfacemgr.c | 8 +- |
| 33 | 4 files changed, 267 insertions(+), 77 deletions(-) |
| 34 | |
| 35 | diff --git a/bin/named/client.c b/bin/named/client.c |
| 36 | index 0739dd48af..a7b49a0f71 100644 |
| 37 | --- a/bin/named/client.c |
| 38 | +++ b/bin/named/client.c |
| 39 | @@ -246,10 +246,11 @@ static void ns_client_dumpmessage(ns_client_t *client, const char *reason); |
| 40 | static isc_result_t get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 41 | dns_dispatch_t *disp, bool tcp); |
| 42 | static isc_result_t get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 43 | - isc_socket_t *sock); |
| 44 | + isc_socket_t *sock, ns_client_t *oldclient); |
| 45 | static inline bool |
| 46 | -allowed(isc_netaddr_t *addr, dns_name_t *signer, isc_netaddr_t *ecs_addr, |
| 47 | - uint8_t ecs_addrlen, uint8_t *ecs_scope, dns_acl_t *acl); |
| 48 | +allowed(isc_netaddr_t *addr, dns_name_t *signer, |
| 49 | + isc_netaddr_t *ecs_addr, uint8_t ecs_addrlen, |
| 50 | +	uint8_t *ecs_scope, dns_acl_t *acl);
| 51 | static void compute_cookie(ns_client_t *client, uint32_t when, |
| 52 | uint32_t nonce, const unsigned char *secret, |
| 53 | isc_buffer_t *buf); |
| 54 | @@ -405,8 +406,11 @@ exit_check(ns_client_t *client) { |
| 55 | */ |
| 56 | INSIST(client->recursionquota == NULL); |
| 57 | INSIST(client->newstate <= NS_CLIENTSTATE_READY); |
| 58 | - if (client->nreads > 0) |
| 59 | + |
| 60 | + if (client->nreads > 0) { |
| 61 | dns_tcpmsg_cancelread(&client->tcpmsg); |
| 62 | + } |
| 63 | + |
| 64 | if (client->nreads != 0) { |
| 65 | /* Still waiting for read cancel completion. */ |
| 66 | return (true); |
| 67 | @@ -416,25 +420,58 @@ exit_check(ns_client_t *client) { |
| 68 | dns_tcpmsg_invalidate(&client->tcpmsg); |
| 69 | client->tcpmsg_valid = false; |
| 70 | } |
| 71 | + |
| 72 | if (client->tcpsocket != NULL) { |
| 73 | CTRACE("closetcp"); |
| 74 | isc_socket_detach(&client->tcpsocket); |
| 75 | + |
| 76 | + if (client->tcpactive) { |
| 77 | + LOCK(&client->interface->lock); |
| 78 | + INSIST(client->interface->ntcpactive > 0); |
| 79 | + client->interface->ntcpactive--; |
| 80 | + UNLOCK(&client->interface->lock); |
| 81 | + client->tcpactive = false; |
| 82 | + } |
| 83 | } |
| 84 | |
| 85 | if (client->tcpquota != NULL) { |
| 86 | - isc_quota_detach(&client->tcpquota); |
| 87 | - } else { |
| 88 | /* |
| 89 | - * We went over quota with this client, we don't |
| 90 | - * want to restart listening unless this is the |
| 91 | - * last client on this interface, which is |
| 92 | - * checked later. |
| 93 | + * If we are not in a pipeline group, or |
| 94 | + * we are the last client in the group, detach from |
| 95 | + * tcpquota; otherwise, transfer the quota to |
| 96 | + * another client in the same group. |
| 97 | */ |
| 98 | - if (TCP_CLIENT(client)) { |
| 99 | - client->mortal = true; |
| 100 | + if (!ISC_LINK_LINKED(client, glink) || |
| 101 | + (client->glink.next == NULL && |
| 102 | + client->glink.prev == NULL)) |
| 103 | + { |
| 104 | + isc_quota_detach(&client->tcpquota); |
| 105 | + } else if (client->glink.next != NULL) { |
| 106 | + INSIST(client->glink.next->tcpquota == NULL); |
| 107 | + client->glink.next->tcpquota = client->tcpquota; |
| 108 | + client->tcpquota = NULL; |
| 109 | + } else { |
| 110 | + INSIST(client->glink.prev->tcpquota == NULL); |
| 111 | + client->glink.prev->tcpquota = client->tcpquota; |
| 112 | + client->tcpquota = NULL; |
| 113 | } |
| 114 | } |
| 115 | |
| 116 | + /* |
| 117 | + * Unlink from pipeline group. |
| 118 | + */ |
| 119 | + if (ISC_LINK_LINKED(client, glink)) { |
| 120 | + if (client->glink.next != NULL) { |
| 121 | + client->glink.next->glink.prev = |
| 122 | + client->glink.prev; |
| 123 | + } |
| 124 | + if (client->glink.prev != NULL) { |
| 125 | + client->glink.prev->glink.next = |
| 126 | + client->glink.next; |
| 127 | + } |
| 128 | + ISC_LINK_INIT(client, glink); |
| 129 | + } |
| 130 | + |
| 131 | if (client->timerset) { |
| 132 | (void)isc_timer_reset(client->timer, |
| 133 | isc_timertype_inactive, |
| 134 | @@ -455,15 +492,16 @@ exit_check(ns_client_t *client) { |
| 135 | * that already. Check whether this client needs to remain |
| 136 | * active and force it to go inactive if not. |
| 137 | * |
| 138 | - * UDP clients go inactive at this point, but TCP clients |
| 139 | - * may remain active if we have fewer active TCP client |
| 140 | - * objects than desired due to an earlier quota exhaustion. |
| 141 | + * UDP clients go inactive at this point, but a TCP client |
| 142 | + * will needs to remain active if no other clients are |
| 143 | + * listening for TCP requests on this interface, to |
| 144 | + * prevent this interface from going nonresponsive. |
| 145 | */ |
| 146 | if (client->mortal && TCP_CLIENT(client) && !ns_g_clienttest) { |
| 147 | LOCK(&client->interface->lock); |
| 148 | - if (client->interface->ntcpcurrent < |
| 149 | - client->interface->ntcptarget) |
| 150 | + if (client->interface->ntcpaccepting == 0) { |
| 151 | client->mortal = false; |
| 152 | + } |
| 153 | UNLOCK(&client->interface->lock); |
| 154 | } |
| 155 | |
| 156 | @@ -472,15 +510,17 @@ exit_check(ns_client_t *client) { |
| 157 | * queue for recycling. |
| 158 | */ |
| 159 | if (client->mortal) { |
| 160 | - if (client->newstate > NS_CLIENTSTATE_INACTIVE) |
| 161 | + if (client->newstate > NS_CLIENTSTATE_INACTIVE) { |
| 162 | client->newstate = NS_CLIENTSTATE_INACTIVE; |
| 163 | + } |
| 164 | } |
| 165 | |
| 166 | if (NS_CLIENTSTATE_READY == client->newstate) { |
| 167 | if (TCP_CLIENT(client)) { |
| 168 | client_accept(client); |
| 169 | - } else |
| 170 | + } else { |
| 171 | client_udprecv(client); |
| 172 | + } |
| 173 | client->newstate = NS_CLIENTSTATE_MAX; |
| 174 | return (true); |
| 175 | } |
| 176 | @@ -492,41 +532,57 @@ exit_check(ns_client_t *client) { |
| 177 | /* |
| 178 | * We are trying to enter the inactive state. |
| 179 | */ |
| 180 | - if (client->naccepts > 0) |
| 181 | + if (client->naccepts > 0) { |
| 182 | isc_socket_cancel(client->tcplistener, client->task, |
| 183 | ISC_SOCKCANCEL_ACCEPT); |
| 184 | + } |
| 185 | |
| 186 | /* Still waiting for accept cancel completion. */ |
| 187 | - if (! (client->naccepts == 0)) |
| 188 | + if (! (client->naccepts == 0)) { |
| 189 | return (true); |
| 190 | + } |
| 191 | |
| 192 | /* Accept cancel is complete. */ |
| 193 | - if (client->nrecvs > 0) |
| 194 | + if (client->nrecvs > 0) { |
| 195 | isc_socket_cancel(client->udpsocket, client->task, |
| 196 | ISC_SOCKCANCEL_RECV); |
| 197 | + } |
| 198 | |
| 199 | /* Still waiting for recv cancel completion. */ |
| 200 | - if (! (client->nrecvs == 0)) |
| 201 | + if (! (client->nrecvs == 0)) { |
| 202 | return (true); |
| 203 | + } |
| 204 | |
| 205 | /* Still waiting for control event to be delivered */ |
| 206 | - if (client->nctls > 0) |
| 207 | + if (client->nctls > 0) { |
| 208 | return (true); |
| 209 | - |
| 210 | - /* Deactivate the client. */ |
| 211 | - if (client->interface) |
| 212 | - ns_interface_detach(&client->interface); |
| 213 | + } |
| 214 | |
| 215 | INSIST(client->naccepts == 0); |
| 216 | INSIST(client->recursionquota == NULL); |
| 217 | - if (client->tcplistener != NULL) |
| 218 | + if (client->tcplistener != NULL) { |
| 219 | isc_socket_detach(&client->tcplistener); |
| 220 | |
| 221 | - if (client->udpsocket != NULL) |
| 222 | + if (client->tcpactive) { |
| 223 | + LOCK(&client->interface->lock); |
| 224 | + INSIST(client->interface->ntcpactive > 0); |
| 225 | + client->interface->ntcpactive--; |
| 226 | + UNLOCK(&client->interface->lock); |
| 227 | + client->tcpactive = false; |
| 228 | + } |
| 229 | + } |
| 230 | + if (client->udpsocket != NULL) { |
| 231 | isc_socket_detach(&client->udpsocket); |
| 232 | + } |
| 233 | |
| 234 | - if (client->dispatch != NULL) |
| 235 | + /* Deactivate the client. */ |
| 236 | + if (client->interface != NULL) { |
| 237 | + ns_interface_detach(&client->interface); |
| 238 | + } |
| 239 | + |
| 240 | + if (client->dispatch != NULL) { |
| 241 | dns_dispatch_detach(&client->dispatch); |
| 242 | + } |
| 243 | |
| 244 | client->attributes = 0; |
| 245 | client->mortal = false; |
| 246 | @@ -551,10 +607,13 @@ exit_check(ns_client_t *client) { |
| 247 | client->newstate = NS_CLIENTSTATE_MAX; |
| 248 | if (!ns_g_clienttest && manager != NULL && |
| 249 | !manager->exiting) |
| 250 | + { |
| 251 | ISC_QUEUE_PUSH(manager->inactive, client, |
| 252 | ilink); |
| 253 | - if (client->needshutdown) |
| 254 | + } |
| 255 | + if (client->needshutdown) { |
| 256 | isc_task_shutdown(client->task); |
| 257 | + } |
| 258 | return (true); |
| 259 | } |
| 260 | } |
| 261 | @@ -675,7 +734,6 @@ client_start(isc_task_t *task, isc_event_t *event) { |
| 262 | } |
| 263 | } |
| 264 | |
| 265 | - |
| 266 | /*% |
| 267 | * The client's task has received a shutdown event. |
| 268 | */ |
| 269 | @@ -2507,17 +2565,12 @@ client_request(isc_task_t *task, isc_event_t *event) { |
| 270 | /* |
| 271 | * Pipeline TCP query processing. |
| 272 | */ |
| 273 | - if (client->message->opcode != dns_opcode_query) |
| 274 | + if (client->message->opcode != dns_opcode_query) { |
| 275 | client->pipelined = false; |
| 276 | + } |
| 277 | if (TCP_CLIENT(client) && client->pipelined) { |
| 278 | - result = isc_quota_reserve(&ns_g_server->tcpquota); |
| 279 | - if (result == ISC_R_SUCCESS) |
| 280 | - result = ns_client_replace(client); |
| 281 | + result = ns_client_replace(client); |
| 282 | if (result != ISC_R_SUCCESS) { |
| 283 | - ns_client_log(client, NS_LOGCATEGORY_CLIENT, |
| 284 | - NS_LOGMODULE_CLIENT, ISC_LOG_WARNING, |
| 285 | - "no more TCP clients(read): %s", |
| 286 | - isc_result_totext(result)); |
| 287 | client->pipelined = false; |
| 288 | } |
| 289 | } |
| 290 | @@ -3087,6 +3140,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { |
| 291 | client->filter_aaaa = dns_aaaa_ok; |
| 292 | #endif |
| 293 | client->needshutdown = ns_g_clienttest; |
| 294 | + client->tcpactive = false; |
| 295 | |
| 296 | ISC_EVENT_INIT(&client->ctlevent, sizeof(client->ctlevent), 0, NULL, |
| 297 | NS_EVENT_CLIENTCONTROL, client_start, client, client, |
| 298 | @@ -3100,6 +3154,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) { |
| 299 | client->formerrcache.id = 0; |
| 300 | ISC_LINK_INIT(client, link); |
| 301 | ISC_LINK_INIT(client, rlink); |
| 302 | + ISC_LINK_INIT(client, glink); |
| 303 | ISC_QLINK_INIT(client, ilink); |
| 304 | client->keytag = NULL; |
| 305 | client->keytag_len = 0; |
| 306 | @@ -3193,12 +3248,19 @@ client_newconn(isc_task_t *task, isc_event_t *event) { |
| 307 | |
| 308 | INSIST(client->state == NS_CLIENTSTATE_READY); |
| 309 | |
| 310 | + /* |
| 311 | + * The accept() was successful and we're now establishing a new |
| 312 | + * connection. We need to make note of it in the client and |
| 313 | + * interface objects so client objects can do the right thing |
| 314 | + * when going inactive in exit_check() (see comments in |
| 315 | + * client_accept() for details). |
| 316 | + */ |
| 317 | INSIST(client->naccepts == 1); |
| 318 | client->naccepts--; |
| 319 | |
| 320 | LOCK(&client->interface->lock); |
| 321 | - INSIST(client->interface->ntcpcurrent > 0); |
| 322 | - client->interface->ntcpcurrent--; |
| 323 | + INSIST(client->interface->ntcpaccepting > 0); |
| 324 | + client->interface->ntcpaccepting--; |
| 325 | UNLOCK(&client->interface->lock); |
| 326 | |
| 327 | /* |
| 328 | @@ -3232,6 +3294,9 @@ client_newconn(isc_task_t *task, isc_event_t *event) { |
| 329 | NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(3), |
| 330 | "accept failed: %s", |
| 331 | isc_result_totext(nevent->result)); |
| 332 | + if (client->tcpquota != NULL) { |
| 333 | + isc_quota_detach(&client->tcpquota); |
| 334 | + } |
| 335 | } |
| 336 | |
| 337 | if (exit_check(client)) |
| 338 | @@ -3270,18 +3335,12 @@ client_newconn(isc_task_t *task, isc_event_t *event) { |
| 339 | * deny service to legitimate TCP clients. |
| 340 | */ |
| 341 | client->pipelined = false; |
| 342 | - result = isc_quota_attach(&ns_g_server->tcpquota, |
| 343 | - &client->tcpquota); |
| 344 | - if (result == ISC_R_SUCCESS) |
| 345 | - result = ns_client_replace(client); |
| 346 | - if (result != ISC_R_SUCCESS) { |
| 347 | - ns_client_log(client, NS_LOGCATEGORY_CLIENT, |
| 348 | - NS_LOGMODULE_CLIENT, ISC_LOG_WARNING, |
| 349 | - "no more TCP clients(accept): %s", |
| 350 | - isc_result_totext(result)); |
| 351 | - } else if (ns_g_server->keepresporder == NULL || |
| 352 | - !allowed(&netaddr, NULL, NULL, 0, NULL, |
| 353 | - ns_g_server->keepresporder)) { |
| 354 | + result = ns_client_replace(client); |
| 355 | + if (result == ISC_R_SUCCESS && |
| 356 | +	    (ns_g_server->keepresporder == NULL ||
| 357 | + !allowed(&netaddr, NULL, NULL, 0, NULL, |
| 358 | + ns_g_server->keepresporder))) |
| 359 | + { |
| 360 | client->pipelined = true; |
| 361 | } |
| 362 | |
| 363 | @@ -3298,12 +3357,80 @@ client_accept(ns_client_t *client) { |
| 364 | |
| 365 | CTRACE("accept"); |
| 366 | |
| 367 | + /* |
| 368 | + * The tcpquota object can only be simultaneously referenced a |
| 369 | + * pre-defined number of times; this is configured by 'tcp-clients' |
| 370 | + * in named.conf. If we can't attach to it here, that means the TCP |
| 371 | + * client quota has been exceeded. |
| 372 | + */ |
| 373 | +	result = isc_quota_attach(&ns_g_server->tcpquota,
| 374 | + &client->tcpquota); |
| 375 | + if (result != ISC_R_SUCCESS) { |
| 376 | + bool exit; |
| 377 | + |
| 378 | + ns_client_log(client, NS_LOGCATEGORY_CLIENT, |
| 379 | + NS_LOGMODULE_CLIENT, ISC_LOG_DEBUG(1), |
| 380 | + "no more TCP clients: %s", |
| 381 | + isc_result_totext(result)); |
| 382 | + |
| 383 | + /* |
| 384 | + * We have exceeded the system-wide TCP client |
| 385 | + * quota. But, we can't just block this accept |
| 386 | + * in all cases, because if we did, a heavy TCP |
| 387 | + * load on other interfaces might cause this |
| 388 | + * interface to be starved, with no clients able |
| 389 | + * to accept new connections. |
| 390 | + * |
| 391 | + * So, we check here to see if any other client |
| 392 | + * is already servicing TCP queries on this |
| 393 | + * interface (whether accepting, reading, or |
| 394 | + * processing). |
| 395 | + * |
| 396 | + * If so, then it's okay *not* to call |
| 397 | + * accept - we can let this client to go inactive |
| 398 | + * and the other one handle the next connection |
| 399 | + * when it's ready. |
| 400 | + * |
| 401 | + * But if not, then we need to be a little bit |
| 402 | + * flexible about the quota. We allow *one* extra |
| 403 | + * TCP client through, to ensure we're listening on |
| 404 | + * every interface. |
| 405 | + * |
| 406 | + * (Note: In practice this means that the *real* |
| 407 | + * TCP client quota is tcp-clients plus the number |
| 408 | + * of interfaces.) |
| 409 | + */ |
| 410 | + LOCK(&client->interface->lock); |
| 411 | + exit = (client->interface->ntcpactive > 0); |
| 412 | + UNLOCK(&client->interface->lock); |
| 413 | + |
| 414 | + if (exit) { |
| 415 | + client->newstate = NS_CLIENTSTATE_INACTIVE; |
| 416 | + (void)exit_check(client); |
| 417 | + return; |
| 418 | + } |
| 419 | + } |
| 420 | + |
| 421 | + /* |
| 422 | + * By incrementing the interface's ntcpactive counter we signal |
| 423 | + * that there is at least one client servicing TCP queries for the |
| 424 | + * interface. |
| 425 | + * |
| 426 | + * We also make note of the fact in the client itself with the |
| 427 | + * tcpactive flag. This ensures proper accounting by preventing |
| 428 | + * us from accidentally incrementing or decrementing ntcpactive |
| 429 | + * more than once per client object. |
| 430 | + */ |
| 431 | + if (!client->tcpactive) { |
| 432 | + LOCK(&client->interface->lock); |
| 433 | + client->interface->ntcpactive++; |
| 434 | + UNLOCK(&client->interface->lock); |
| 435 | + client->tcpactive = true; |
| 436 | + } |
| 437 | + |
| 438 | result = isc_socket_accept(client->tcplistener, client->task, |
| 439 | client_newconn, client); |
| 440 | if (result != ISC_R_SUCCESS) { |
| 441 | - UNEXPECTED_ERROR(__FILE__, __LINE__, |
| 442 | - "isc_socket_accept() failed: %s", |
| 443 | - isc_result_totext(result)); |
| 444 | /* |
| 445 | * XXXRTH What should we do? We're trying to accept but |
| 446 | * it didn't work. If we just give up, then TCP |
| 447 | @@ -3311,12 +3438,39 @@ client_accept(ns_client_t *client) { |
| 448 | * |
| 449 | * For now, we just go idle. |
| 450 | */ |
| 451 | + UNEXPECTED_ERROR(__FILE__, __LINE__, |
| 452 | + "isc_socket_accept() failed: %s", |
| 453 | + isc_result_totext(result)); |
| 454 | + if (client->tcpquota != NULL) { |
| 455 | + isc_quota_detach(&client->tcpquota); |
| 456 | + } |
| 457 | return; |
| 458 | } |
| 459 | + |
| 460 | + /* |
| 461 | + * The client's 'naccepts' counter indicates that this client has |
| 462 | + * called accept() and is waiting for a new connection. It should |
| 463 | + * never exceed 1. |
| 464 | + */ |
| 465 | INSIST(client->naccepts == 0); |
| 466 | client->naccepts++; |
| 467 | + |
| 468 | + /* |
| 469 | + * The interface's 'ntcpaccepting' counter is incremented when |
| 470 | + * any client calls accept(), and decremented in client_newconn() |
| 471 | + * once the connection is established. |
| 472 | + * |
| 473 | + * When the client object is shutting down after handling a TCP |
| 474 | + * request (see exit_check()), it looks to see whether this value is |
| 475 | + * non-zero. If so, that means another client has already called |
| 476 | + * accept() and is waiting to establish the next connection, which |
| 477 | + * means the first client is free to go inactive. Otherwise, |
| 478 | + * the first client must come back and call accept() again; this |
| 479 | + * guarantees there will always be at least one client listening |
| 480 | + * for new TCP connections on each interface. |
| 481 | + */ |
| 482 | LOCK(&client->interface->lock); |
| 483 | - client->interface->ntcpcurrent++; |
| 484 | + client->interface->ntcpaccepting++; |
| 485 | UNLOCK(&client->interface->lock); |
| 486 | } |
| 487 | |
| 488 | @@ -3390,13 +3544,14 @@ ns_client_replace(ns_client_t *client) { |
| 489 | tcp = TCP_CLIENT(client); |
| 490 | if (tcp && client->pipelined) { |
| 491 | result = get_worker(client->manager, client->interface, |
| 492 | - client->tcpsocket); |
| 493 | + client->tcpsocket, client); |
| 494 | } else { |
| 495 | result = get_client(client->manager, client->interface, |
| 496 | client->dispatch, tcp); |
| 497 | } |
| 498 | - if (result != ISC_R_SUCCESS) |
| 499 | + if (result != ISC_R_SUCCESS) { |
| 500 | return (result); |
| 501 | + } |
| 502 | |
| 503 | /* |
| 504 | * The responsibility for listening for new requests is hereby |
| 505 | @@ -3585,6 +3740,7 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 506 | client->attributes |= NS_CLIENTATTR_TCP; |
| 507 | isc_socket_attach(ifp->tcpsocket, |
| 508 | &client->tcplistener); |
| 509 | + |
| 510 | } else { |
| 511 | isc_socket_t *sock; |
| 512 | |
| 513 | @@ -3602,7 +3758,8 @@ get_client(ns_clientmgr_t *manager, ns_interface_t *ifp, |
| 514 | } |
| 515 | |
| 516 | static isc_result_t |
| 517 | -get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) |
| 518 | +get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock, |
| 519 | + ns_client_t *oldclient) |
| 520 | { |
| 521 | isc_result_t result = ISC_R_SUCCESS; |
| 522 | isc_event_t *ev; |
| 523 | @@ -3610,6 +3767,7 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) |
| 524 | MTRACE("get worker"); |
| 525 | |
| 526 | REQUIRE(manager != NULL); |
| 527 | + REQUIRE(oldclient != NULL); |
| 528 | |
| 529 | if (manager->exiting) |
| 530 | return (ISC_R_SHUTTINGDOWN); |
| 531 | @@ -3642,7 +3800,28 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) |
| 532 | ns_interface_attach(ifp, &client->interface); |
| 533 | client->newstate = client->state = NS_CLIENTSTATE_WORKING; |
| 534 | INSIST(client->recursionquota == NULL); |
| 535 | - client->tcpquota = &ns_g_server->tcpquota; |
| 536 | + |
| 537 | + /* |
| 538 | + * Transfer TCP quota to the new client. |
| 539 | + */ |
| 540 | + INSIST(client->tcpquota == NULL); |
| 541 | + INSIST(oldclient->tcpquota != NULL); |
| 542 | + client->tcpquota = oldclient->tcpquota; |
| 543 | + oldclient->tcpquota = NULL; |
| 544 | + |
| 545 | + /* |
| 546 | + * Link to a pipeline group, creating it if needed. |
| 547 | + */ |
| 548 | + if (!ISC_LINK_LINKED(oldclient, glink)) { |
| 549 | + oldclient->glink.next = NULL; |
| 550 | + oldclient->glink.prev = NULL; |
| 551 | + } |
| 552 | + client->glink.next = oldclient->glink.next; |
| 553 | + client->glink.prev = oldclient; |
| 554 | + if (oldclient->glink.next != NULL) { |
| 555 | + oldclient->glink.next->glink.prev = client; |
| 556 | + } |
| 557 | + oldclient->glink.next = client; |
| 558 | |
| 559 | client->dscp = ifp->dscp; |
| 560 | |
| 561 | @@ -3656,6 +3835,12 @@ get_worker(ns_clientmgr_t *manager, ns_interface_t *ifp, isc_socket_t *sock) |
| 562 | (void)isc_socket_getpeername(client->tcpsocket, &client->peeraddr); |
| 563 | client->peeraddr_valid = true; |
| 564 | |
| 565 | + LOCK(&client->interface->lock); |
| 566 | + client->interface->ntcpactive++; |
| 567 | + UNLOCK(&client->interface->lock); |
| 568 | + |
| 569 | + client->tcpactive = true; |
| 570 | + |
| 571 | INSIST(client->tcpmsg_valid == false); |
| 572 | dns_tcpmsg_init(client->mctx, client->tcpsocket, &client->tcpmsg); |
| 573 | client->tcpmsg_valid = true; |
| 574 | diff --git a/bin/named/include/named/client.h b/bin/named/include/named/client.h |
| 575 | index b23a7b191d..1f7973f9c5 100644 |
| 576 | --- a/bin/named/include/named/client.h |
| 577 | +++ b/bin/named/include/named/client.h |
| 578 | @@ -94,7 +94,8 @@ struct ns_client { |
| 579 | int nupdates; |
| 580 | int nctls; |
| 581 | int references; |
| 582 | - bool needshutdown; /* |
| 583 | + bool tcpactive; |
| 584 | + bool needshutdown; /* |
| 585 | * Used by clienttest to get |
| 586 | * the client to go from |
| 587 | * inactive to free state |
| 588 | @@ -130,9 +131,9 @@ struct ns_client { |
| 589 | isc_stdtime_t now; |
| 590 | isc_time_t tnow; |
| 591 | dns_name_t signername; /*%< [T]SIG key name */ |
| 592 | - dns_name_t * signer; /*%< NULL if not valid sig */ |
| 593 | - bool mortal; /*%< Die after handling request */ |
| 594 | - bool pipelined; /*%< TCP queries not in sequence */ |
| 595 | + dns_name_t *signer; /*%< NULL if not valid sig */ |
| 596 | + bool mortal; /*%< Die after handling request */ |
| 597 | + bool pipelined; /*%< TCP queries not in sequence */ |
| 598 | isc_quota_t *tcpquota; |
| 599 | isc_quota_t *recursionquota; |
| 600 | ns_interface_t *interface; |
| 601 | @@ -143,8 +144,8 @@ struct ns_client { |
| 602 | isc_sockaddr_t destsockaddr; |
| 603 | |
| 604 | isc_netaddr_t ecs_addr; /*%< EDNS client subnet */ |
| 605 | - uint8_t ecs_addrlen; |
| 606 | - uint8_t ecs_scope; |
| 607 | + uint8_t ecs_addrlen; |
| 608 | + uint8_t ecs_scope; |
| 609 | |
| 610 | struct in6_pktinfo pktinfo; |
| 611 | isc_dscp_t dscp; |
| 612 | @@ -166,6 +167,7 @@ struct ns_client { |
| 613 | |
| 614 | ISC_LINK(ns_client_t) link; |
| 615 | ISC_LINK(ns_client_t) rlink; |
| 616 | + ISC_LINK(ns_client_t) glink; |
| 617 | ISC_QLINK(ns_client_t) ilink; |
| 618 | unsigned char cookie[8]; |
| 619 | uint32_t expire; |
| 620 | diff --git a/bin/named/include/named/interfacemgr.h b/bin/named/include/named/interfacemgr.h |
| 621 | index 7d1883e1e8..61b08826a6 100644 |
| 622 | --- a/bin/named/include/named/interfacemgr.h |
| 623 | +++ b/bin/named/include/named/interfacemgr.h |
| 624 | @@ -77,9 +77,14 @@ struct ns_interface { |
| 625 | /*%< UDP dispatchers. */ |
| 626 | isc_socket_t * tcpsocket; /*%< TCP socket. */ |
| 627 | isc_dscp_t dscp; /*%< "listen-on" DSCP value */ |
| 628 | - int ntcptarget; /*%< Desired number of concurrent |
| 629 | - TCP accepts */ |
| 630 | - int ntcpcurrent; /*%< Current ditto, locked */ |
| 631 | + int ntcpaccepting; /*%< Number of clients |
| 632 | + ready to accept new |
| 633 | + TCP connections on this |
| 634 | + interface */ |
| 635 | + int ntcpactive; /*%< Number of clients |
| 636 | + servicing TCP queries |
| 637 | + (whether accepting or |
| 638 | + connected) */ |
| 639 | int nudpdispatch; /*%< Number of UDP dispatches */ |
| 640 | ns_clientmgr_t * clientmgr; /*%< Client manager. */ |
| 641 | ISC_LINK(ns_interface_t) link; |
| 642 | diff --git a/bin/named/interfacemgr.c b/bin/named/interfacemgr.c |
| 643 | index 419927bf54..955096ef47 100644 |
| 644 | --- a/bin/named/interfacemgr.c |
| 645 | +++ b/bin/named/interfacemgr.c |
| 646 | @@ -386,8 +386,8 @@ ns_interface_create(ns_interfacemgr_t *mgr, isc_sockaddr_t *addr, |
| 647 | * connections will be handled in parallel even though there is |
| 648 | * only one client initially. |
| 649 | */ |
| 650 | - ifp->ntcptarget = 1; |
| 651 | - ifp->ntcpcurrent = 0; |
| 652 | + ifp->ntcpaccepting = 0; |
| 653 | + ifp->ntcpactive = 0; |
| 654 | ifp->nudpdispatch = 0; |
| 655 | |
| 656 | ifp->dscp = -1; |
| 657 | @@ -522,9 +522,7 @@ ns_interface_accepttcp(ns_interface_t *ifp) { |
| 658 | */ |
| 659 | (void)isc_socket_filter(ifp->tcpsocket, "dataready"); |
| 660 | |
| 661 | - result = ns_clientmgr_createclients(ifp->clientmgr, |
| 662 | - ifp->ntcptarget, ifp, |
| 663 | - true); |
| 664 | + result = ns_clientmgr_createclients(ifp->clientmgr, 1, ifp, true); |
| 665 | if (result != ISC_R_SUCCESS) { |
| 666 | UNEXPECTED_ERROR(__FILE__, __LINE__, |
| 667 | "TCP ns_clientmgr_createclients(): %s", |
| 668 | -- |
| 669 | 2.20.1 |
| 670 | |