/** @file

  Shared pooling of keep-alive origin server sessions for reuse by HTTP state machines.

  @section license License

  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements. See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership. The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
 */

/****************************************************************************

   HttpSessionManager.cc

   Description:
      Manages shared pools of keep-alive origin server sessions, indexed by
      remote IP address and by host name hash, so that HTTP state machines
      can reuse existing origin connections.

 ****************************************************************************/

#include "HttpSessionManager.h"
#include "../ProxySession.h"
#include "HttpSM.h"
#include "HttpDebugNames.h"

// Initialize a thread to handle HTTP session management
void
initialize_thread_for_http_sessions(EThread *thread)
{
  thread->server_session_pool = new ServerSessionPool;
}

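// The single global session manager instance.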
HttpSessionManager httpSessionManager;

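// Pooled sessions are indexed twice, by remote IP address and by a hash of the
// FQDN, so that a lookup can be done with either sharing match style.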
ServerSessionPool::ServerSessionPool() : Continuation(new_ProxyMutex()), m_ip_pool(1023), m_fqdn_pool(1023)
{
  SET_HANDLER(&ServerSessionPool::eventHandler);
  m_ip_pool.set_expansion_policy(IPTable::MANUAL);
  m_fqdn_pool.set_expansion_policy(FQDNTable::MANUAL);
}

void
ServerSessionPool::purge()
{
  // Close every pooled session, then clear both indexes. @c do_io_close can free the
  // session, which would invalidate direct iteration, so the close is done via @c apply
  // and the tables are cleared afterwards.
  m_ip_pool.apply([](PoolableSession *ssn) -> void { ssn->do_io_close(); });
  m_ip_pool.clear();
  m_fqdn_pool.clear();
}

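// Check whether the pooled session @a ss can be used for a request to @a addr with hostname hash
// @a hostname_hash under @a match_style. An IP match requires address and port equality; a
// host-only match requires the same port and the same hostname hash.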
bool
ServerSessionPool::match(PoolableSession *ss, sockaddr const *addr, CryptoHash const &hostname_hash,
                         TSServerSessionSharingMatchMask match_style)
{
  bool retval = match_style != 0;
  if (retval && (TS_SERVER_SESSION_SHARING_MATCH_MASK_IP & match_style)) {
    retval = ats_ip_addr_port_eq(ss->get_remote_addr(), addr);
  }
  if (retval && (TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTONLY & match_style)) {
    retval = (ats_ip_port_cast(addr) == ats_ip_port_cast(ss->get_remote_addr()) && ss->hostname_hash == hostname_hash);
  }
  return retval;
}

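// For HTTPS reuse, require that the SNI name on the pooled connection (if any) matches the
// host in the new request.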
bool
ServerSessionPool::validate_host_sni(HttpSM *sm, NetVConnection *netvc)
{
  bool retval = true;
  if (sm->t_state.scheme == URL_WKSIDX_HTTPS) {
    // The sni_servername of the connection was set in HttpSM::do_http_server_open
    // by fetching the hostname from the server request, so the connection should only
    // be reused if the hostname in the new request is the same as the hostname in the
    // original request.
    const char *session_sni = netvc->get_sni_servername();
    if (session_sni) {
      // TS-4468: If the connection matches, make sure the SNI server
      // name (if present) matches the request hostname
      int len = 0;
      const char *req_host = sm->t_state.hdr_info.server_request.host_get(&len);
      retval = strncasecmp(session_sni, req_host, len) == 0;
      Debug("http_ss", "validate_host_sni host=%.*s, sni=%s", len, req_host, session_sni);
    }
  }
  return retval;
}

bool
ServerSessionPool::validate_sni(HttpSM *sm, NetVConnection *netvc)
{
  bool retval = true;
  // Verify that the SNI name on this connection matches the SNI we would have used to create
  // a new connection.
  //
  if (sm->t_state.scheme == URL_WKSIDX_HTTPS) {
    const char *session_sni = netvc->get_sni_servername();
    std::string_view proposed_sni = sm->get_outbound_sni();
    Debug("http_ss", "validate_sni proposed_sni=%.*s, sni=%s", static_cast<int>(proposed_sni.length()), proposed_sni.data(),
          session_sni);
    if (!session_sni || proposed_sni.length() == 0) {
      retval = session_sni == nullptr && proposed_sni.length() == 0;
    } else {
      retval = proposed_sni.compare(session_sni) == 0;
    }
  }
  return retval;
}

bool
ServerSessionPool::validate_cert(HttpSM *sm, NetVConnection *netvc)
{
  bool retval = true;
  // Verify that the client cert associated with this connection matches the cert we would
  // have used to create a new connection.
  //
  if (sm->t_state.scheme == URL_WKSIDX_HTTPS) {
    const char *session_cert = netvc->options.ssl_client_cert_name.get();
    std::string_view proposed_cert = sm->get_outbound_cert();
    Debug("http_ss", "validate_cert proposed_cert=%.*s, cert=%s", static_cast<int>(proposed_cert.size()), proposed_cert.data(),
          session_cert);
    if (!session_cert || proposed_cert.length() == 0) {
      retval = session_cert == nullptr && proposed_cert.length() == 0;
    } else {
      retval = proposed_cert.compare(session_cert) == 0;
    }
  }
  return retval;
}

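// Find a pooled session that satisfies @a match_style for @a addr and @a hostname_hash. On
// success the session is removed from both indexes and returned through @a to_return.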
HSMresult_t
ServerSessionPool::acquireSession(sockaddr const *addr, CryptoHash const &hostname_hash,
                                  TSServerSessionSharingMatchMask match_style, HttpSM *sm, PoolableSession *&to_return)
{
  HSMresult_t zret = HSM_NOT_FOUND;
  to_return = nullptr;

  if ((TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTONLY & match_style) && !(TS_SERVER_SESSION_SHARING_MATCH_MASK_IP & match_style)) {
    Debug("http_ss", "Search for host name only not IP. Pool size %zu", m_fqdn_pool.count());
    // This case is broken out because only here is the host hash checked first. The matching
    // range must still be scanned so that an upstream with the right port and the required
    // SNI/cert properties is selected.
    in_port_t port = ats_ip_port_cast(addr);
    auto first = m_fqdn_pool.find(hostname_hash);
    while (first != m_fqdn_pool.end() && first->hostname_hash == hostname_hash) {
      Debug("http_ss", "Compare port 0x%x against 0x%x", port, ats_ip_port_cast(first->get_remote_addr()));
      if (port == ats_ip_port_cast(first->get_remote_addr()) &&
          (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_SNI) || validate_sni(sm, first->get_netvc())) &&
          (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTSNISYNC) || validate_host_sni(sm, first->get_netvc())) &&
          (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_CERT) || validate_cert(sm, first->get_netvc()))) {
        zret = HSM_DONE;
        break;
      }
      ++first;
    }
    if (zret == HSM_DONE) {
      to_return = first;
      HTTP_DECREMENT_DYN_STAT(http_pooled_server_connections_stat);
      m_fqdn_pool.erase(first);
      m_ip_pool.erase(to_return);
    }
  } else if (TS_SERVER_SESSION_SHARING_MATCH_MASK_IP & match_style) { // matching is not disabled.
    auto first = m_ip_pool.find(addr);
    // In the IP-only case any session in the matching range will do. Otherwise the range must
    // be scanned for a session that also satisfies the FQDN and SNI/cert constraints.
    // Note the port is matched as part of the address key, so it does not need to be checked again.
    if (match_style & (~TS_SERVER_SESSION_SHARING_MATCH_MASK_IP)) {
      while (first != m_ip_pool.end() && ats_ip_addr_port_eq(first->get_remote_addr(), addr)) {
        if ((!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTONLY) || first->hostname_hash == hostname_hash) &&
            (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_SNI) || validate_sni(sm, first->get_netvc())) &&
            (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTSNISYNC) || validate_host_sni(sm, first->get_netvc())) &&
            (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_CERT) || validate_cert(sm, first->get_netvc()))) {
          zret = HSM_DONE;
          break;
        }
        ++first;
      }
    } else if (first != m_ip_pool.end()) {
      zret = HSM_DONE;
    }
    if (zret == HSM_DONE) {
      to_return = first;
      HTTP_DECREMENT_DYN_STAT(http_pooled_server_connections_stat);
      m_ip_pool.erase(first);
      m_fqdn_pool.erase(to_return);
    }
  }
  return zret;
}

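// Return @a ss to the pool: hand the read and write sides over to the pool continuation so the
// pool is notified if the origin closes the connection, then index the session by IP and FQDN.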
void
ServerSessionPool::releaseSession(PoolableSession *ss)
{
  ss->state = PoolableSession::KA_POOLED;
  // Now we need to issue a read on the connection to detect if it closes on us.
  // We will get called back in the continuation for this bucket, ensuring we have
  // the lock to remove the connection from our lists.
  // A buffer is actually needed here; otherwise the VC is disabled.
  ss->do_io_read(this, INT64_MAX, ss->get_reader()->mbuf);

  // Transfer control of the write side as well.
  ss->do_io_write(this, 0, nullptr);

  // We probably don't need the active timeout set, but will leave it for now.
  ss->set_inactivity_timeout(ss->get_netvc()->get_inactivity_timeout());
  ss->set_active_timeout(ss->get_netvc()->get_active_timeout());
  // Put it in both pools.
  m_ip_pool.insert(ss);
  m_fqdn_pool.insert(ss);

  HTTP_INCREMENT_DYN_STAT(http_pooled_server_connections_stat);

  Debug("http_ss",
        "[%" PRId64 "] [release session] "
        "session placed into shared pool",
        ss->connection_id());
}

// Called from the NetProcessor to let us know that a
// connection has closed down.
//
int
ServerSessionPool::eventHandler(int event, void *data)
{
  NetVConnection *net_vc = nullptr;
  PoolableSession *s = nullptr;

  switch (event) {
  case VC_EVENT_READ_READY:
    // The server sent us data. This is unexpected, so close the connection.
    /* Fall through */
  case VC_EVENT_EOS:
  case VC_EVENT_ERROR:
  case VC_EVENT_INACTIVITY_TIMEOUT:
  case VC_EVENT_ACTIVE_TIMEOUT:
    net_vc = static_cast<NetVConnection *>((static_cast<VIO *>(data))->vc_server);
    break;

  default:
    ink_release_assert(0);
    return 0;
  }

  sockaddr const *addr = net_vc->get_remote_addr();
  HttpConfigParams *http_config_params = HttpConfig::acquire();
  bool found = false;

  for (auto spot = m_ip_pool.find(addr); spot != m_ip_pool.end() && spot->_ip_link.equal(addr, spot); ++spot) {
    if ((s = spot)->get_netvc() == net_vc) {
      // If there was a timeout of some kind on a keep-alive connection, and keeping the
      // connection alive will not push us above the maximum number of connections to the
      // origin, and we are below the minimum number of keep-alive connections to this
      // origin, then reset the timeouts on our end and do not close the connection.
      if ((event == VC_EVENT_INACTIVITY_TIMEOUT || event == VC_EVENT_ACTIVE_TIMEOUT) && s->state == PoolableSession::KA_POOLED &&
          s->conn_track_group) {
        Debug("http_ss", "s->conn_track_group->min_keep_alive_conns : %d", s->conn_track_group->min_keep_alive_conns);
        bool connection_count_below_min = s->conn_track_group->_count <= s->conn_track_group->min_keep_alive_conns;

        if (connection_count_below_min) {
          Debug("http_ss",
                "[%" PRId64 "] [session_bucket] session received io notice [%s], "
                "resetting timeout to maintain minimum number of connections",
                s->connection_id(), HttpDebugNames::get_event_name(event));
          s->get_netvc()->set_inactivity_timeout(s->get_netvc()->get_inactivity_timeout());
          s->get_netvc()->set_active_timeout(s->get_netvc()->get_active_timeout());
          found = true;
          break;
        }
      }

      // We've found our server session. Remove it from
      // our lists and close it down.
      Debug("http_ss", "[%" PRId64 "] [session_pool] session %p received io notice [%s]", s->connection_id(), s,
            HttpDebugNames::get_event_name(event));
      ink_assert(s->state == PoolableSession::KA_POOLED);
      // Out of the pool! Now!
      m_ip_pool.erase(spot);
      m_fqdn_pool.erase(s);
      // Drop connection on this end.
      s->do_io_close();
      found = true;
      HTTP_DECREMENT_DYN_STAT(http_pooled_server_connections_stat);
      break;
    }
  }

  HttpConfig::release(http_config_params);
  if (!found) {
    // We failed to find our session. This can only be the result of a programming flaw. Since we only ever keep
    // UnixNetVConnections and SSLNetVConnections in the session pool, the dynamic cast won't fail.
    UnixNetVConnection *unix_net_vc = dynamic_cast<UnixNetVConnection *>(net_vc);
    if (unix_net_vc) {
      char peer_ip[INET6_ADDRPORTSTRLEN];
      ats_ip_nptop(unix_net_vc->get_remote_addr(), peer_ip, sizeof(peer_ip));

      Warning("Connection leak from http keep-alive system fd=%d closed=%d peer_ip_port=%s", unix_net_vc->con.fd,
              unix_net_vc->closed, peer_ip);
    }
    ink_assert(0);
  }
  return 0;
}

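// Create the global session pool and have each ET_NET thread create its own
// per-thread pool when it is spawned.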
void
HttpSessionManager::init()
{
  m_g_pool = new ServerSessionPool;
  eventProcessor.schedule_spawn(&initialize_thread_for_http_sessions, ET_NET);
}

// TODO: Should this really purge all keep-alive sessions?
// Does this make sense, given that we only ever operate on the global pool here and never the per-thread pools?
void
HttpSessionManager::purge_keepalives()
{
  EThread *ethread = this_ethread();

  MUTEX_TRY_LOCK(lock, m_g_pool->mutex, ethread);
  if (lock.is_locked()) {
    m_g_pool->purge();
  } // should we do something clever if we don't get the lock?
}

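// Acquire a server session for @a sm. A session already bound to the user agent transaction is
// preferred; otherwise the thread-local and/or global pools are searched according to the
// configured sharing pool type.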
HSMresult_t
HttpSessionManager::acquire_session(Continuation * /* cont ATS_UNUSED */, sockaddr const *ip, const char *hostname,
                                    ProxyTransaction *ua_txn, HttpSM *sm)
{
  PoolableSession *to_return = nullptr;
  TSServerSessionSharingMatchMask match_style =
    static_cast<TSServerSessionSharingMatchMask>(sm->t_state.txn_conf->server_session_sharing_match);
  CryptoHash hostname_hash;
  HSMresult_t retval = HSM_NOT_FOUND;

  CryptoContext().hash_immediate(hostname_hash, (unsigned char *)hostname, strlen(hostname));

  // First check to see if there is a server session bound
  // to the user agent session.
  to_return = ua_txn->get_server_session();
  if (to_return != nullptr) {
    ua_txn->attach_server_session(nullptr);

    // Since the client session is reusing the same server session, it seems that the SNI should match.
    // Will the client make requests to different hosts over the same SSL session? Checking
    // the IP/hostname here seems a bit redundant too.
    //
    if (ServerSessionPool::match(to_return, ip, hostname_hash, match_style) &&
        (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_SNI) ||
         ServerSessionPool::validate_sni(sm, to_return->get_netvc())) &&
        (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_HOSTSNISYNC) ||
         ServerSessionPool::validate_host_sni(sm, to_return->get_netvc())) &&
        (!(match_style & TS_SERVER_SESSION_SHARING_MATCH_MASK_CERT) ||
         ServerSessionPool::validate_cert(sm, to_return->get_netvc()))) {
      Debug("http_ss", "[%" PRId64 "] [acquire session] returning attached session ", to_return->connection_id());
      to_return->state = PoolableSession::SSN_IN_USE;
      sm->attach_server_session(to_return);
      return HSM_DONE;
    }
    // Release this session back to the main session pool and
    // then continue looking for one from the shared pool.
    Debug("http_ss",
          "[%" PRId64 "] [acquire session] "
          "session not a match, returning to shared pool",
          to_return->connection_id());
    to_return->release(nullptr);
    to_return = nullptr;
  }

  // Otherwise, check the thread pool first.
  if (this->get_pool_type() == TS_SERVER_SESSION_SHARING_POOL_THREAD ||
      this->get_pool_type() == TS_SERVER_SESSION_SHARING_POOL_HYBRID) {
    retval = _acquire_session(ip, hostname_hash, sm, match_style, TS_SERVER_SESSION_SHARING_POOL_THREAD);
  }

  // If we didn't get a match and the global pool is an option, look there.
  if (retval != HSM_DONE && (TS_SERVER_SESSION_SHARING_POOL_GLOBAL == this->get_pool_type() ||
                             TS_SERVER_SESSION_SHARING_POOL_HYBRID == this->get_pool_type())) {
    retval = _acquire_session(ip, hostname_hash, sm, match_style, TS_SERVER_SESSION_SHARING_POOL_GLOBAL);
  }
  return retval;
}

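// Search a single pool (thread-local or global) for a matching session. A session taken from
// the global pool may be owned by another thread, so its VC is migrated to the current thread
// before the session is attached to the state machine.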
HSMresult_t
HttpSessionManager::_acquire_session(sockaddr const *ip, CryptoHash const &hostname_hash, HttpSM *sm,
                                     TSServerSessionSharingMatchMask match_style, TSServerSessionSharingPoolType pool_type)
{
  PoolableSession *to_return = nullptr;
  HSMresult_t retval = HSM_NOT_FOUND;

  // Extend the mutex window until the acquired server session is attached
  // to the SM. Releasing the mutex before that results in race conditions
  // due to a potential parallel network read on the VC with no mutex guarding.
  {
    // Now check to see if we have a connection in our shared connection pool.
    EThread *ethread = this_ethread();
    Ptr<ProxyMutex> pool_mutex =
      (TS_SERVER_SESSION_SHARING_POOL_THREAD == pool_type) ? ethread->server_session_pool->mutex : m_g_pool->mutex;
    MUTEX_TRY_LOCK(lock, pool_mutex, ethread);
    if (lock.is_locked()) {
      if (TS_SERVER_SESSION_SHARING_POOL_THREAD == pool_type) {
        retval = ethread->server_session_pool->acquireSession(ip, hostname_hash, match_style, sm, to_return);
        Debug("http_ss", "[acquire session] thread pool search %s", to_return ? "successful" : "failed");
      } else {
        retval = m_g_pool->acquireSession(ip, hostname_hash, match_style, sm, to_return);
        Debug("http_ss", "[acquire session] global pool search %s", to_return ? "successful" : "failed");
        // At this point to_return has been removed from the pool. Do we need to move it
        // to the same thread?
        if (to_return) {
          UnixNetVConnection *server_vc = dynamic_cast<UnixNetVConnection *>(to_return->get_netvc());
          if (server_vc) {
            // Disable I/O on this VC now, but hold onto the g_pool continuation
            // and the mutex to stop any stray events from getting in.
            server_vc->do_io_read(m_g_pool, 0, nullptr);
            server_vc->do_io_write(m_g_pool, 0, nullptr);
            UnixNetVConnection *new_vc = server_vc->migrateToCurrentThread(sm, ethread);
            // The VC moved, free up the original one.
            if (new_vc != server_vc) {
              ink_assert(new_vc == nullptr || new_vc->nh != nullptr);
              if (!new_vc) {
                // Close out to_return; we weren't able to get a connection.
                HTTP_INCREMENT_DYN_STAT(http_origin_shutdown_migration_failure);
                to_return->do_io_close();
                to_return = nullptr;
                retval = HSM_NOT_FOUND;
              } else {
                // Keep things from timing out on us.
                new_vc->set_inactivity_timeout(new_vc->get_inactivity_timeout());
                to_return->set_netvc(new_vc);
              }
            } else {
              // Keep things from timing out on us.
              server_vc->set_inactivity_timeout(server_vc->get_inactivity_timeout());
            }
          }
        }
      }
    } else { // Didn't get the lock. to_return is still NULL.
      retval = HSM_RETRY;
    }

    if (to_return) {
      Debug("http_ss", "[%" PRId64 "] [acquire session] return session from shared pool", to_return->connection_id());
      to_return->state = PoolableSession::SSN_IN_USE;
      // attach_server_session will issue the do_io_read under the SM lock.
      sm->attach_server_session(to_return);
      retval = HSM_DONE;
    }
  }
  return retval;
}

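// Return a server session to its sharing pool. In hybrid mode, failure to lock the global pool
// falls back to the thread-local pool; otherwise lock contention is reported to the caller as
// HSM_RETRY.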
HSMresult_t
HttpSessionManager::release_session(PoolableSession *to_release)
{
  EThread *ethread = this_ethread();
  ServerSessionPool *pool =
    TS_SERVER_SESSION_SHARING_POOL_THREAD == to_release->sharing_pool ? ethread->server_session_pool : m_g_pool;
  bool released_p = true;

  // The per-thread lock looks like it should not be needed, but if it is not held the close-checking I/O op will crash.
  MUTEX_TRY_LOCK(lock, pool->mutex, ethread);
  if (lock.is_locked()) {
    pool->releaseSession(to_release);
  } else if (this->get_pool_type() == TS_SERVER_SESSION_SHARING_POOL_HYBRID) {
    // Couldn't get the global pool lock; try again with the thread-local pool.
    to_release->sharing_pool = TS_SERVER_SESSION_SHARING_POOL_THREAD;
    return release_session(to_release);
  } else {
    Debug("http_ss", "[%" PRId64 "] [release session] could not release session due to lock contention",
          to_release->connection_id());
    released_p = false;
  }

  return released_p ? HSM_DONE : HSM_RETRY;
}