/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Thread management for memcached.
 */
#include "memcached.h"
#ifdef EXTSTORE
#include "storage.h"
#endif
#ifdef HAVE_EVENTFD
#include <sys/eventfd.h>
#endif
#include <assert.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "queue.h"

#ifdef __sun
#include <atomic.h>
#endif

#ifdef TLS
#include <openssl/ssl.h>
#endif

#define ITEMS_PER_ALLOC 64

/* An item in the connection queue. */
enum conn_queue_item_modes {
    queue_new_conn,    /* brand new connection. */
    queue_pause,       /* pause thread */
    queue_timeout,     /* socket sfd timed out */
    queue_redispatch,  /* return conn from side thread */
    queue_stop,        /* exit thread */
    queue_return_io,   /* returning a pending IO object immediately */
};
typedef struct conn_queue_item CQ_ITEM;
struct conn_queue_item {
    int               sfd;
    enum conn_states  init_state;
    int               event_flags;
    int               read_buffer_size;
    enum network_transport     transport;
    enum conn_queue_item_modes mode;
    conn *c;
    void *ssl;
    io_pending_t *io; // IO when used for deferred IO handling.
    STAILQ_ENTRY(conn_queue_item) i_next;
};

/* A connection queue. */
typedef struct conn_queue CQ;
struct conn_queue {
    STAILQ_HEAD(conn_ev_head, conn_queue_item) head;
    pthread_mutex_t lock;
    cache_t *cache; /* freelisted objects */
};

/* Locks for cache LRU operations */
pthread_mutex_t lru_locks[POWER_LARGEST];

/* Connection lock around accepting new connections */
pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

#if !defined(HAVE_GCC_ATOMICS) && !defined(__sun)
pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

/* Lock for global stats */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock to cause worker threads to hang up after being woken */
static pthread_mutex_t worker_hang_lock;

static pthread_mutex_t *item_locks;
/* size of the item lock hash table */
static uint32_t item_lock_count;
unsigned int item_lock_hashpower;
#define hashsize(n) ((unsigned long int)1<<(n))
#define hashmask(n) (hashsize(n)-1)
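/* For example, with item_lock_hashpower == 13, hashsize(13) == 8192 locks
 * and hashmask(13) == 0x1fff, so a hash value hv selects the lock at index
 * hv & 0x1fff. */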

/*
 * Each libevent instance has a wakeup pipe (or eventfd), which other threads
 * can use to signal that they've put a new connection on its queue.
 */
static LIBEVENT_THREAD *threads;

/*
 * Number of worker threads that have finished setting themselves up.
 */
static int init_count = 0;
static pthread_mutex_t init_lock;
static pthread_cond_t init_cond;

static void notify_worker(LIBEVENT_THREAD *t, CQ_ITEM *item);
static void notify_worker_fd(LIBEVENT_THREAD *t, int sfd, enum conn_queue_item_modes mode);
static CQ_ITEM *cqi_new(CQ *cq);
static void cq_push(CQ *cq, CQ_ITEM *item);

static void thread_libevent_process(evutil_socket_t fd, short which, void *arg);

/* item_lock() must be held for an item before any modifications to either its
 * associated hash bucket or the structure itself.
 * LRU modifications must hold both the item lock and the LRU lock.
 * LRU code accessing items must item_trylock() before modifying an item.
 * Items accessible from an LRU must not be freed or modified
 * without first locking and removing them from the LRU.
 */
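/* Illustrative usage sketch (this mirrors item_get() below):
 *   uint32_t hv = hash(key, nkey);
 *   item_lock(hv);
 *   ... modify the item or its hash bucket ...
 *   item_unlock(hv);
 */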

void item_lock(uint32_t hv) {
    mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}

void *item_trylock(uint32_t hv) {
    pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)];
    if (pthread_mutex_trylock(lock) == 0) {
        return lock;
    }
    return NULL;
}

void item_trylock_unlock(void *lock) {
    mutex_unlock((pthread_mutex_t *) lock);
}

void item_unlock(uint32_t hv) {
    mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]);
}

static void wait_for_thread_registration(int nthreads) {
    while (init_count < nthreads) {
        pthread_cond_wait(&init_cond, &init_lock);
    }
}

static void register_thread_initialized(void) {
    pthread_mutex_lock(&init_lock);
    init_count++;
    pthread_cond_signal(&init_cond);
    pthread_mutex_unlock(&init_lock);
    /* Force worker threads to pile up if someone wants us to */
    pthread_mutex_lock(&worker_hang_lock);
    pthread_mutex_unlock(&worker_hang_lock);
}

/* Must not be called with any deeper locks held */
void pause_threads(enum pause_thread_types type) {
    int i;
    bool pause_workers = false;

    switch (type) {
        case PAUSE_ALL_THREADS:
            slabs_rebalancer_pause();
            lru_maintainer_pause();
            lru_crawler_pause();
#ifdef EXTSTORE
            storage_compact_pause();
            storage_write_pause();
#endif
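            /* fall through: pausing all threads also pauses the workers */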
        case PAUSE_WORKER_THREADS:
            pause_workers = true;
            pthread_mutex_lock(&worker_hang_lock);
            break;
        case RESUME_ALL_THREADS:
            slabs_rebalancer_resume();
            lru_maintainer_resume();
            lru_crawler_resume();
#ifdef EXTSTORE
            storage_compact_resume();
            storage_write_resume();
#endif
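            /* fall through: resuming all threads also resumes the workers */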
        case RESUME_WORKER_THREADS:
            pthread_mutex_unlock(&worker_hang_lock);
            break;
        default:
            fprintf(stderr, "Unknown pause type: %d\n", type);
            assert(1 == 0);
            break;
    }

    /* Only notify workers if this was a pause request. */
    if (!pause_workers) {
        return;
    }

    pthread_mutex_lock(&init_lock);
    init_count = 0;
    for (i = 0; i < settings.num_threads; i++) {
        notify_worker_fd(&threads[i], 0, queue_pause);
    }
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);
}

// MUST not be called with any deeper locks held
// MUST be called only by parent thread
// Note: listener thread is the "main" event base, which has exited its
// loop in order to call this function.
void stop_threads(void) {
    int i;

    // assoc can call pause_threads(), so we have to stop it first.
    stop_assoc_maintenance_thread();
    if (settings.verbose > 0)
        fprintf(stderr, "stopped assoc\n");

    if (settings.verbose > 0)
        fprintf(stderr, "asking workers to stop\n");

    pthread_mutex_lock(&worker_hang_lock);
    pthread_mutex_lock(&init_lock);
    init_count = 0;
    for (i = 0; i < settings.num_threads; i++) {
        notify_worker_fd(&threads[i], 0, queue_stop);
    }
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);

    // All of the workers are hung but haven't done cleanup yet.

    if (settings.verbose > 0)
        fprintf(stderr, "asking background threads to stop\n");

    // stop each side thread.
    // TODO: Verify these all work if the threads are already stopped
    stop_item_crawler_thread(CRAWLER_WAIT);
    if (settings.verbose > 0)
        fprintf(stderr, "stopped lru crawler\n");
    if (settings.lru_maintainer_thread) {
        stop_lru_maintainer_thread();
        if (settings.verbose > 0)
            fprintf(stderr, "stopped maintainer\n");
    }
    if (settings.slab_reassign) {
        stop_slab_maintenance_thread();
        if (settings.verbose > 0)
            fprintf(stderr, "stopped slab mover\n");
    }
    logger_stop();
    if (settings.verbose > 0)
        fprintf(stderr, "stopped logger thread\n");
    stop_conn_timeout_thread();
    if (settings.verbose > 0)
        fprintf(stderr, "stopped idle timeout thread\n");

    // Close all connections, then let the workers finally exit.
    if (settings.verbose > 0)
        fprintf(stderr, "closing connections\n");
    conn_close_all();
    pthread_mutex_unlock(&worker_hang_lock);
    if (settings.verbose > 0)
        fprintf(stderr, "reaping worker threads\n");
    for (i = 0; i < settings.num_threads; i++) {
        pthread_join(threads[i].thread_id, NULL);
    }

    if (settings.verbose > 0)
        fprintf(stderr, "all background threads stopped\n");

    // At this point, every background thread must be stopped.
}

/*
 * Initializes a connection queue.
 */
static void cq_init(CQ *cq) {
    pthread_mutex_init(&cq->lock, NULL);
    STAILQ_INIT(&cq->head);
    cq->cache = cache_create("cq", sizeof(CQ_ITEM), sizeof(char *));
    if (cq->cache == NULL) {
        fprintf(stderr, "Failed to create connection queue cache\n");
        exit(EXIT_FAILURE);
    }
}

/*
 * Looks for an item on a connection queue, but doesn't block if there isn't
 * one.
 * Returns the item, or NULL if no item is available.
 */
static CQ_ITEM *cq_pop(CQ *cq) {
    CQ_ITEM *item;

    pthread_mutex_lock(&cq->lock);
    item = STAILQ_FIRST(&cq->head);
    if (item != NULL) {
        STAILQ_REMOVE_HEAD(&cq->head, i_next);
    }
    pthread_mutex_unlock(&cq->lock);

    return item;
}

/*
 * Adds an item to a connection queue.
 */
static void cq_push(CQ *cq, CQ_ITEM *item) {
    pthread_mutex_lock(&cq->lock);
    STAILQ_INSERT_TAIL(&cq->head, item, i_next);
    pthread_mutex_unlock(&cq->lock);
}

/*
 * Returns a fresh connection queue item.
 */
static CQ_ITEM *cqi_new(CQ *cq) {
    CQ_ITEM *item = cache_alloc(cq->cache);
    if (item == NULL) {
        STATS_LOCK();
        stats.malloc_fails++;
        STATS_UNLOCK();
    }
    return item;
}

/*
 * Frees a connection queue item (returns it to the freelist).
 */
static void cqi_free(CQ *cq, CQ_ITEM *item) {
    cache_free(cq->cache, item);
}

// TODO: Skip the notify if the queue wasn't empty?
// - Requires cq_push() returning a "was empty" flag
// - Requires the event handling loop to pop the entire queue and work from
//   that instead of the ev_count work there now.
// In testing this does result in a large performance uptick, but it's unclear
// how much of that will transfer from a synthetic benchmark.
static void notify_worker(LIBEVENT_THREAD *t, CQ_ITEM *item) {
    cq_push(t->ev_queue, item);
#ifdef HAVE_EVENTFD
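    // eventfd acts as a counter: each 8-byte write adds to it, and the
    // worker's read() returns the accumulated count, so multiple
    // notifications batch into a single wakeup.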
    uint64_t u = 1;
    if (write(t->notify_event_fd, &u, sizeof(uint64_t)) != sizeof(uint64_t)) {
        perror("failed writing to worker eventfd");
        /* TODO: This is a fatal problem. Can it ever happen temporarily? */
    }
#else
    char buf[1] = "c";
    if (write(t->notify_send_fd, buf, 1) != 1) {
        perror("Failed writing to notify pipe");
        /* TODO: This is a fatal problem. Can it ever happen temporarily? */
    }
#endif
}

// NOTE: An external func that takes a conn *c might be cleaner overall.
static void notify_worker_fd(LIBEVENT_THREAD *t, int sfd, enum conn_queue_item_modes mode) {
    CQ_ITEM *item;
    while ( (item = cqi_new(t->ev_queue)) == NULL ) {
        // NOTE: most callers of this function cannot tolerate failure, but
        // mallocs can in theory fail. Small mallocs essentially never do
        // without also killing the process. Syscalls can also fail, but the
        // original code never handled that either.
        // As a compromise, I'm leaving this note and this loop: this alloc
        // cannot fail, but pre-allocating the data is too much code in an
        // area I want to keep more lean. If this CQ business becomes a more
        // generic queue I'll reconsider.
    }

    item->mode = mode;
    item->sfd = sfd;
    notify_worker(t, item);
}

/*
 * Creates a worker thread.
 */
static void create_worker(void *(*func)(void *), void *arg) {
    pthread_attr_t attr;
    int ret;

    pthread_attr_init(&attr);

    if ((ret = pthread_create(&((LIBEVENT_THREAD*)arg)->thread_id, &attr, func, arg)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n",
                strerror(ret));
        exit(1);
    }
}

/*
 * Sets whether or not we accept new connections.
 */
void accept_new_conns(const bool do_accept) {
    pthread_mutex_lock(&conn_lock);
    do_accept_new_conns(do_accept);
    pthread_mutex_unlock(&conn_lock);
}
/****************************** LIBEVENT THREADS *****************************/

/*
 * Set up a thread's information.
 */
static void setup_thread(LIBEVENT_THREAD *me) {
#if defined(LIBEVENT_VERSION_NUMBER) && LIBEVENT_VERSION_NUMBER >= 0x02000101
    struct event_config *ev_config;
    ev_config = event_config_new();
    event_config_set_flag(ev_config, EVENT_BASE_FLAG_NOLOCK);
    me->base = event_base_new_with_config(ev_config);
    event_config_free(ev_config);
#else
    me->base = event_init();
#endif

    if (! me->base) {
        fprintf(stderr, "Can't allocate event base\n");
        exit(1);
    }

    /* Listen for notifications from other threads */
#ifdef HAVE_EVENTFD
    event_set(&me->notify_event, me->notify_event_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
#else
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
#endif
    event_base_set(me->base, &me->notify_event);

    if (event_add(&me->notify_event, 0) == -1) {
        fprintf(stderr, "Can't monitor libevent notify pipe\n");
        exit(1);
    }

    me->ev_queue = malloc(sizeof(struct conn_queue));
    if (me->ev_queue == NULL) {
        perror("Failed to allocate memory for connection queue");
        exit(EXIT_FAILURE);
    }
    cq_init(me->ev_queue);

    if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) {
        perror("Failed to initialize mutex");
        exit(EXIT_FAILURE);
    }

    me->rbuf_cache = cache_create("rbuf", READ_BUFFER_SIZE, sizeof(char *));
    if (me->rbuf_cache == NULL) {
        fprintf(stderr, "Failed to create read buffer cache\n");
        exit(EXIT_FAILURE);
    }
    // Note: we were cleanly passing in num_threads before, but this now
    // relies on settings globals too much.
    if (settings.read_buf_mem_limit) {
        int limit = settings.read_buf_mem_limit / settings.num_threads;
        if (limit < READ_BUFFER_SIZE) {
            limit = 1;
        } else {
            limit = limit / READ_BUFFER_SIZE;
        }
        cache_set_limit(me->rbuf_cache, limit);
    }
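    // e.g. a 64MB read_buf_mem_limit split across 4 worker threads gives each
    // thread 16MB worth of read buffers; a per-thread share smaller than one
    // READ_BUFFER_SIZE still permits caching a single buffer.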

    me->io_cache = cache_create("io", sizeof(io_pending_t), sizeof(char*));
    if (me->io_cache == NULL) {
        fprintf(stderr, "Failed to create IO object cache\n");
        exit(EXIT_FAILURE);
    }
#ifdef TLS
    if (settings.ssl_enabled) {
        me->ssl_wbuf = (char *)malloc((size_t)settings.ssl_wbuf_size);
        if (me->ssl_wbuf == NULL) {
            fprintf(stderr, "Failed to allocate the SSL write buffer\n");
            exit(EXIT_FAILURE);
        }
    }
#endif
#ifdef EXTSTORE
    // me->storage is set just before this function is called.
    if (me->storage) {
        thread_io_queue_add(me, IO_QUEUE_EXTSTORE, me->storage,
            storage_submit_cb, storage_complete_cb, NULL, storage_finalize_cb);
    }
#endif
    thread_io_queue_add(me, IO_QUEUE_NONE, NULL, NULL, NULL, NULL, NULL);
}

/*
 * Worker thread: main event loop
 */
static void *worker_libevent(void *arg) {
    LIBEVENT_THREAD *me = arg;

    /* Any per-thread setup can happen here; memcached_thread_init() will block until
     * all threads have finished initializing.
     */
    me->l = logger_create();
    me->lru_bump_buf = item_lru_bump_buf_create();
    if (me->l == NULL || me->lru_bump_buf == NULL) {
        abort();
    }

    if (settings.drop_privileges) {
        drop_worker_privileges();
    }

    register_thread_initialized();

    event_base_loop(me->base, 0);

    // same mechanism used to watch for all threads exiting.
    register_thread_initialized();

    event_base_free(me->base);
    return NULL;
}


/*
 * Processes an incoming "connection event" item. This is called when
 * input arrives on the libevent wakeup pipe.
 */
// Syscalls are expensive enough that handling several queued events per
// wakeup here improves both throughput and overall latency.
#define MAX_PIPE_EVENTS 32
static void thread_libevent_process(evutil_socket_t fd, short which, void *arg) {
    LIBEVENT_THREAD *me = arg;
    CQ_ITEM *item;
    conn *c;
    uint64_t ev_count = 0; // max number of events to loop through this run.
#ifdef HAVE_EVENTFD
    // NOTE: unlike pipe we aren't limiting the number of events per read.
    // However we do limit the number of queue pulls to what the count was at
    // the time of this function firing.
    if (read(fd, &ev_count, sizeof(uint64_t)) != sizeof(uint64_t)) {
        if (settings.verbose > 0)
            fprintf(stderr, "Can't read from libevent pipe\n");
        return;
    }
#else
    char buf[MAX_PIPE_EVENTS];

    ev_count = read(fd, buf, MAX_PIPE_EVENTS);
    if (ev_count == 0) {
        if (settings.verbose > 0)
            fprintf(stderr, "Can't read from libevent pipe\n");
        return;
    }
#endif

    for (int x = 0; x < ev_count; x++) {
        item = cq_pop(me->ev_queue);
        if (item == NULL) {
            return;
        }

        switch (item->mode) {
            case queue_new_conn:
                c = conn_new(item->sfd, item->init_state, item->event_flags,
                                   item->read_buffer_size, item->transport,
                                   me->base, item->ssl);
                if (c == NULL) {
                    if (IS_UDP(item->transport)) {
                        fprintf(stderr, "Can't listen for events on UDP socket\n");
                        exit(1);
                    } else {
                        if (settings.verbose > 0) {
                            fprintf(stderr, "Can't listen for events on fd %d\n",
                                item->sfd);
                        }
#ifdef TLS
                        if (item->ssl) {
                            SSL_shutdown(item->ssl);
                            SSL_free(item->ssl);
                        }
#endif
                        close(item->sfd);
                    }
                } else {
                    c->thread = me;
                    conn_io_queue_setup(c);
#ifdef TLS
                    if (settings.ssl_enabled && c->ssl != NULL) {
                        assert(c->thread && c->thread->ssl_wbuf);
                        c->ssl_wbuf = c->thread->ssl_wbuf;
                    }
#endif
                }
                break;
            case queue_pause:
                /* we were told to pause and report in */
                register_thread_initialized();
                break;
            case queue_timeout:
                /* a client socket timed out */
                conn_close_idle(conns[item->sfd]);
                break;
            case queue_redispatch:
                /* a side thread redispatched a client connection */
                conn_worker_readd(conns[item->sfd]);
                break;
            case queue_stop:
                /* asked to stop */
                event_base_loopexit(me->base, NULL);
                break;
            case queue_return_io:
                /* getting an individual IO object back */
                conn_io_queue_return(item->io);
                break;
        }

        cqi_free(me->ev_queue, item);
    }
}

/* Which thread we assigned a connection to most recently. */
static int last_thread = -1;

/* Last thread we assigned to a connection based on napi_id */
static int last_thread_by_napi_id = -1;

static LIBEVENT_THREAD *select_thread_round_robin(void)
{
    int tid = (last_thread + 1) % settings.num_threads;

    last_thread = tid;

    return threads + tid;
}

static void reset_threads_napi_id(void)
{
    LIBEVENT_THREAD *thread;
    int i;

    for (i = 0; i < settings.num_threads; i++) {
        thread = threads + i;
        thread->napi_id = 0;
    }

    last_thread_by_napi_id = -1;
}

/* Select a worker thread based on the NAPI ID of an incoming connection
 * request. NAPI ID is a globally unique ID that identifies a NIC RX queue
 * on which a flow is received.
 */
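/* (SO_INCOMING_NAPI_ID is Linux-specific; if the getsockopt() below fails we
 * fall back to round-robin selection.) */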
static LIBEVENT_THREAD *select_thread_by_napi_id(int sfd)
{
    LIBEVENT_THREAD *thread;
    int napi_id, err, i;
    socklen_t len;
    int tid = -1;

    len = sizeof(napi_id);
    err = getsockopt(sfd, SOL_SOCKET, SO_INCOMING_NAPI_ID, &napi_id, &len);
    if ((err == -1) || (napi_id == 0)) {
        STATS_LOCK();
        stats.round_robin_fallback++;
        STATS_UNLOCK();
        return select_thread_round_robin();
    }

select:
    for (i = 0; i < settings.num_threads; i++) {
        thread = threads + i;
        if (last_thread_by_napi_id < i) {
            thread->napi_id = napi_id;
            last_thread_by_napi_id = i;
            tid = i;
            break;
        }
        if (thread->napi_id == napi_id) {
            tid = i;
            break;
        }
    }

    if (tid == -1) {
        STATS_LOCK();
        stats.unexpected_napi_ids++;
        STATS_UNLOCK();
        reset_threads_napi_id();
        goto select;
    }

    return threads + tid;
}

/*
 * Dispatches a new connection to another thread. This is only ever called
 * from the main thread, either during initialization (for UDP) or because
 * of an incoming connection.
 */
void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags,
                       int read_buffer_size, enum network_transport transport, void *ssl) {
    CQ_ITEM *item = NULL;
    LIBEVENT_THREAD *thread;

    if (!settings.num_napi_ids)
        thread = select_thread_round_robin();
    else
        thread = select_thread_by_napi_id(sfd);

    item = cqi_new(thread->ev_queue);
    if (item == NULL) {
        close(sfd);
        /* given that malloc failed this may also fail, but let's try */
        fprintf(stderr, "Failed to allocate memory for connection object\n");
        return;
    }

    item->sfd = sfd;
    item->init_state = init_state;
    item->event_flags = event_flags;
    item->read_buffer_size = read_buffer_size;
    item->transport = transport;
    item->mode = queue_new_conn;
    item->ssl = ssl;

    MEMCACHED_CONN_DISPATCH(sfd, (int64_t)thread->thread_id);
    notify_worker(thread, item);
}

/*
 * Re-dispatches a connection back to the original thread. Can be called from
 * any side thread borrowing a connection.
 */
void redispatch_conn(conn *c) {
    notify_worker_fd(c->thread, c->sfd, queue_redispatch);
}

void timeout_conn(conn *c) {
    notify_worker_fd(c->thread, c->sfd, queue_timeout);
}

void return_io_pending(io_pending_t *io) {
    CQ_ITEM *item = cqi_new(io->thread->ev_queue);
    if (item == NULL) {
        // TODO: how can we avoid this?
        // In the main case I just loop, since a malloc failure here for a
        // tiny object that's generally in a fixed size queue is going to
        // implode shortly.
        return;
    }

    item->mode = queue_return_io;
    item->io = io;

    notify_worker(io->thread, item);
}

/* This misses the allow_new_conns flag :( */
void sidethread_conn_close(conn *c) {
    if (settings.verbose > 1)
        fprintf(stderr, "<%d connection closing from side thread.\n", c->sfd);

    c->state = conn_closing;
    // redispatch will see closing flag and properly close connection.
    redispatch_conn(c);
    return;
}

/********************************* ITEM ACCESS *******************************/

/*
 * Allocates a new item.
 */
item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbytes) {
    item *it;
    /* do_item_alloc handles its own locks */
    it = do_item_alloc(key, nkey, flags, exptime, nbytes);
    return it;
}

/*
 * Returns an item if it hasn't been marked as expired,
 * lazy-expiring as needed.
 */
item *item_get(const char *key, const size_t nkey, conn *c, const bool do_update) {
    item *it;
    uint32_t hv;
    hv = hash(key, nkey);
    item_lock(hv);
    it = do_item_get(key, nkey, hv, c, do_update);
    item_unlock(hv);
    return it;
}

// returns an item with the item lock held.
// lock will still be held even if return is NULL, allowing caller to replace
// an item atomically if desired.
item *item_get_locked(const char *key, const size_t nkey, conn *c, const bool do_update, uint32_t *hv) {
    item *it;
    *hv = hash(key, nkey);
    item_lock(*hv);
    it = do_item_get(key, nkey, *hv, c, do_update);
    return it;
}
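/* Illustrative sketch of the atomic-replace pattern this enables (the caller
 * owns the lock and must release it; reference counting elided):
 *   uint32_t hv;
 *   item *old_it = item_get_locked(key, nkey, c, false, &hv);
 *   ... inspect old_it, optionally item_replace(old_it, new_it, hv) ...
 *   item_unlock(hv);
 */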

item *item_touch(const char *key, size_t nkey, uint32_t exptime, conn *c) {
    item *it;
    uint32_t hv;
    hv = hash(key, nkey);
    item_lock(hv);
    it = do_item_touch(key, nkey, exptime, hv, c);
    item_unlock(hv);
    return it;
}

/*
 * Links an item into the LRU and hashtable.
 */
int item_link(item *item) {
    int ret;
    uint32_t hv;

    hv = hash(ITEM_key(item), item->nkey);
    item_lock(hv);
    ret = do_item_link(item, hv);
    item_unlock(hv);
    return ret;
}

/*
 * Decrements the reference count on an item and adds it to the freelist if
 * needed.
 */
void item_remove(item *item) {
    uint32_t hv;
    hv = hash(ITEM_key(item), item->nkey);

    item_lock(hv);
    do_item_remove(item);
    item_unlock(hv);
}

/*
 * Replaces one item with another in the hashtable.
 * Unprotected by a mutex lock since the core server does not require
 * it to be thread-safe.
 */
int item_replace(item *old_it, item *new_it, const uint32_t hv) {
    return do_item_replace(old_it, new_it, hv);
}

/*
 * Unlinks an item from the LRU and hashtable.
 */
void item_unlink(item *item) {
    uint32_t hv;
    hv = hash(ITEM_key(item), item->nkey);
    item_lock(hv);
    do_item_unlink(item, hv);
    item_unlock(hv);
}

/*
 * Does arithmetic on a numeric item value.
 */
enum delta_result_type add_delta(conn *c, const char *key,
                                 const size_t nkey, bool incr,
                                 const int64_t delta, char *buf,
                                 uint64_t *cas) {
    enum delta_result_type ret;
    uint32_t hv;

    hv = hash(key, nkey);
    item_lock(hv);
    ret = do_add_delta(c, key, nkey, incr, delta, buf, cas, hv, NULL);
    item_unlock(hv);
    return ret;
}

/*
 * Stores an item in the cache (high level, obeys set/add/replace semantics)
 */
enum store_item_type store_item(item *item, int comm, conn* c) {
    enum store_item_type ret;
    uint32_t hv;

    hv = hash(ITEM_key(item), item->nkey);
    item_lock(hv);
    ret = do_store_item(item, comm, c, hv);
    item_unlock(hv);
    return ret;
}

/******************************* GLOBAL STATS ******************************/

void STATS_LOCK() {
    pthread_mutex_lock(&stats_lock);
}

void STATS_UNLOCK() {
    pthread_mutex_unlock(&stats_lock);
}

void threadlocal_stats_reset(void) {
    int ii;
    for (ii = 0; ii < settings.num_threads; ++ii) {
        pthread_mutex_lock(&threads[ii].stats.mutex);
#define X(name) threads[ii].stats.name = 0;
        THREAD_STATS_FIELDS
#ifdef EXTSTORE
        EXTSTORE_THREAD_STATS_FIELDS
#endif
#undef X

        memset(&threads[ii].stats.slab_stats, 0,
                sizeof(threads[ii].stats.slab_stats));
        memset(&threads[ii].stats.lru_hits, 0,
                sizeof(uint64_t) * POWER_LARGEST);

        pthread_mutex_unlock(&threads[ii].stats.mutex);
    }
}

void threadlocal_stats_aggregate(struct thread_stats *stats) {
    int ii, sid;

    /* The struct has a mutex, but we can safely set the whole thing
     * to zero since it is unused when aggregating. */
    memset(stats, 0, sizeof(*stats));

    for (ii = 0; ii < settings.num_threads; ++ii) {
        pthread_mutex_lock(&threads[ii].stats.mutex);
#define X(name) stats->name += threads[ii].stats.name;
        THREAD_STATS_FIELDS
#ifdef EXTSTORE
        EXTSTORE_THREAD_STATS_FIELDS
#endif
#undef X

        for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) {
#define X(name) stats->slab_stats[sid].name += \
            threads[ii].stats.slab_stats[sid].name;
            SLAB_STATS_FIELDS
#undef X
        }

        for (sid = 0; sid < POWER_LARGEST; sid++) {
            stats->lru_hits[sid] +=
                threads[ii].stats.lru_hits[sid];
            stats->slab_stats[CLEAR_LRU(sid)].get_hits +=
                threads[ii].stats.lru_hits[sid];
        }

        stats->read_buf_count += threads[ii].rbuf_cache->total;
        stats->read_buf_bytes += threads[ii].rbuf_cache->total * READ_BUFFER_SIZE;
        stats->read_buf_bytes_free += threads[ii].rbuf_cache->freecurr * READ_BUFFER_SIZE;
        pthread_mutex_unlock(&threads[ii].stats.mutex);
    }
}

void slab_stats_aggregate(struct thread_stats *stats, struct slab_stats *out) {
    int sid;

    memset(out, 0, sizeof(*out));

    for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) {
#define X(name) out->name += stats->slab_stats[sid].name;
        SLAB_STATS_FIELDS
#undef X
    }
}

/*
 * Initializes the thread subsystem, creating various worker threads.
 *
 * nthreads  Number of worker event handler threads to spawn
 * arg       Opaque pointer handed to each worker; with EXTSTORE enabled this
 *           is the storage engine handle.
 */
void memcached_thread_init(int nthreads, void *arg) {
    int i;
    int power;

    for (i = 0; i < POWER_LARGEST; i++) {
        pthread_mutex_init(&lru_locks[i], NULL);
    }
    pthread_mutex_init(&worker_hang_lock, NULL);

    pthread_mutex_init(&init_lock, NULL);
    pthread_cond_init(&init_cond, NULL);

    /* Want a wide lock table, but don't waste memory */
    if (nthreads < 3) {
        power = 10;
    } else if (nthreads < 4) {
        power = 11;
    } else if (nthreads < 5) {
        power = 12;
    } else if (nthreads <= 10) {
        power = 13;
    } else if (nthreads <= 20) {
        power = 14;
    } else {
        /* 32k buckets. just under the hashpower default. */
        power = 15;
    }

    if (power >= hashpower) {
        fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power);
        fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n");
        fprintf(stderr, "Hash table grows with `-o hashpower=N` \n");
        exit(1);
    }

    item_lock_count = hashsize(power);
    item_lock_hashpower = power;

    item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
    if (! item_locks) {
        perror("Can't allocate item locks");
        exit(1);
    }
    for (i = 0; i < item_lock_count; i++) {
        pthread_mutex_init(&item_locks[i], NULL);
    }

    threads = calloc(nthreads, sizeof(LIBEVENT_THREAD));
    if (! threads) {
        perror("Can't allocate thread descriptors");
        exit(1);
    }

    for (i = 0; i < nthreads; i++) {
#ifdef HAVE_EVENTFD
        threads[i].notify_event_fd = eventfd(0, EFD_NONBLOCK);
        if (threads[i].notify_event_fd == -1) {
            perror("failed creating eventfd for worker thread");
            exit(1);
        }
#else
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];
#endif
#ifdef EXTSTORE
        threads[i].storage = arg;
#endif
        setup_thread(&threads[i]);
        /* Reserve three fds for the libevent base, and two for the pipe */
        stats_state.reserved_fds += 5;
    }

    /* Create threads after we've done all the libevent setup. */
    for (i = 0; i < nthreads; i++) {
        create_worker(worker_libevent, &threads[i]);
    }

    /* Wait for all the threads to set themselves up before returning. */
    pthread_mutex_lock(&init_lock);
    wait_for_thread_registration(nthreads);
    pthread_mutex_unlock(&init_lock);
}