
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


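/*
 * Overview of the query scheme implemented below.
 *
 * A lookup first tries to insert an empty "sentinel" node into the level
 * hash under the requested key.  If the insertion succeeds, there was no
 * cached entry and the caller becomes the updating process.  If a node
 * already exists, it is tested for freshness; a query that cannot be served
 * immediately is linked into the node's waiter list together with a wake-up
 * handler and an optional timeout (the wake-up itself is posted by code
 * outside this file).
 *
 * Hash and node manipulations are done under the cache lock; the lock is
 * dropped around memory allocation and around user handler invocation.
 * The lock is a spinlock and is taken only if the cache is marked as shared.
 */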
static nxt_int_t nxt_cache_lvlhsh_test(nxt_lvlhsh_key_t *hkey, void *data);
static nxt_work_handler_t nxt_cache_query_locked(nxt_cache_t *cache,
    nxt_cache_query_t *q, nxt_lvlhsh_key_t *hkey);
static nxt_work_handler_t nxt_cache_node_hold(nxt_cache_t *cache,
    nxt_cache_query_t *q, nxt_lvlhsh_key_t *hkey);
static nxt_work_handler_t nxt_cache_node_test(nxt_cache_t *cache,
    nxt_cache_query_t *q);

static void nxt_cache_wait_handler(void *data);
static void nxt_cache_timeout_handler(nxt_event_timer_t *ev);
static void nxt_cache_wake_handler(void *data);

static nxt_cache_node_t *nxt_cache_node_alloc(nxt_cache_t *cache);
static void nxt_cache_node_free(nxt_cache_t *cache,
    nxt_cache_node_t *node, nxt_bool_t fast);
static nxt_cache_query_wait_t *nxt_cache_query_wait_alloc(
    nxt_cache_t *cache, nxt_bool_t *fast);
static void nxt_cache_query_wait_free(nxt_cache_t *cache,
    nxt_cache_query_wait_t *qw);
static void nxt_cache_lock(nxt_cache_t *cache);
static void nxt_cache_unlock(nxt_cache_t *cache);


void
nxt_cache_init(nxt_cache_t *cache)
{
    static const nxt_lvlhsh_ctx_t  ctx = {
        nxt_cache_lvlhsh_test,
        nxt_lvlhsh_alloc,
        nxt_lvlhsh_free,
        0,
    };

    /* An lvlhsh with a large first level. */
    cache->lvlhsh.shift[1] = 10;

    cache->lvlhsh.ctx = &ctx;

    cache->start_time = nxt_thread_time();
}


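/*
 * The lvlhsh collision test callback: a hash entry matches a query if the
 * stored node's key bytes are equal to the looked-up key.
 */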
static nxt_int_t
nxt_cache_lvlhsh_test(nxt_lvlhsh_key_t *hkey, void *data)
{
    nxt_cache_node_t  *node;

    node = data;

    if (nxt_strmem_eq(&hkey->key, node->key_data, node->key_len)) {
        return NXT_OK;
    }

    return NXT_DECLINED;
}


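/*
 * nxt_cache_query() is the public entry point.  It hashes the key, runs the
 * lookup under the cache lock, and then invokes the resulting state handler
 * with the lock released.  A NULL cache falls straight through to the
 * query's nocache_handler.
 *
 * An illustrative usage sketch follows; it is not part of the build.  The
 * exact layouts of nxt_cache_query_t and nxt_cache_query_state_t live in
 * headers not shown here, the my_* handler names are hypothetical, and only
 * fields referenced in this file are used:
 *
 *     static nxt_cache_query_state_t  state = {
 *         .nocache_handler = my_nocache,
 *         .error_handler = my_error,
 *         .ready_handler = my_ready,
 *         .stale_handler = my_stale,
 *         .update_handler = my_update,
 *         .update_stale_handler = my_update_stale,
 *         .timeout_handler = my_timeout,
 *     };
 *
 *     q->key_data = key;
 *     q->key_len = key_len;
 *     q->state = &state;
 *     q->use_stale = 1;
 *     q->update_stale = 0;
 *     q->timeout = timeout;
 *
 *     nxt_cache_query(cache, q);
 *
 * Each handler receives the query pointer, since handlers are invoked as
 * handler(q) throughout this file; a zero q->timeout makes a waiting query
 * wait without a timer.
 */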
void
nxt_cache_query(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_lvlhsh_key_t    hkey;
    nxt_work_handler_t  handler;

    if (cache != NULL) {
        hkey.key.len = q->key_len;
        hkey.key.data = q->key_data;
        hkey.key_hash = nxt_murmur_hash2(q->key_data, q->key_len);
        hkey.replace = 0;

        nxt_cache_lock(cache);

        handler = nxt_cache_query_locked(cache, q, &hkey);

        nxt_cache_unlock(cache);

    } else {
        handler = q->state->nocache_handler;
    }

    handler(q);
}


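/*
 * The core lookup, called with the cache lock held.  It returns the state
 * handler the caller should invoke after releasing the lock, or queues the
 * query on the node's waiter list and returns the wait handler.
 */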
static nxt_work_handler_t
nxt_cache_query_locked(nxt_cache_t *cache, nxt_cache_query_t *q,
    nxt_lvlhsh_key_t *hkey)
{
    nxt_int_t                ret;
    nxt_bool_t               fast;
    nxt_work_handler_t       handler;
    nxt_cache_node_t         *node, *sentinel;
    nxt_cache_query_wait_t   *qw;
    nxt_cache_query_state_t  *state;

    state = q->state;
    sentinel = nxt_cache_node_alloc(cache);

    if (nxt_slow_path(sentinel == NULL)) {
        return state->error_handler;
    }

    sentinel->key_data = q->key_data;
    sentinel->key_len = q->key_len;
    hkey->value = sentinel;

    /*
     * Try to insert an empty sentinel node to hold the updating
     * process if there is no existing node in the cache.
     */

    ret = nxt_lvlhsh_insert(&cache->lvlhsh, hkey);

    if (ret == NXT_OK) {
        /* The sentinel node was successfully added. */

        q->node = sentinel;
        sentinel->updating = 1;
        return state->update_handler;
    }

    nxt_cache_node_free(cache, sentinel, 1);

    if (ret == NXT_ERROR) {
        return state->error_handler;
    }

    /* NXT_DECLINED: a cache node already exists. */

    node = hkey->value;
    node->count++;
    q->node = node;

    handler = nxt_cache_node_test(cache, q);

    if (handler == NULL) {
        /* Add the query to the node's wait queue. */

        qw = nxt_cache_query_wait_alloc(cache, &fast);
        if (nxt_slow_path(qw == NULL)) {
            return state->error_handler;
        }

        if (!fast) {
            /* The node state may have changed during the slow allocation. */
            handler = nxt_cache_node_test(cache, q);

            if (handler != NULL) {
                nxt_cache_query_wait_free(cache, qw);
                return handler;
            }
        }

        qw->query = q;
        qw->next = node->waiting;
        qw->busy = 0;
        qw->deleted = 0;
        qw->pid = nxt_pid;
        qw->engine = nxt_thread_event_engine();
        qw->handler = nxt_cache_wake_handler;
        qw->cache = cache;

        node->waiting = qw;

        return nxt_cache_wait_handler;
    }

    return handler;
}


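/*
 * Tests whether the cached node can satisfy the query.  Returns the state
 * handler to run, or NULL if the node is being updated and the query has to
 * wait.  An empty sentinel node (expiry == 0) is always in the updating
 * state.
 */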
static nxt_work_handler_t
nxt_cache_node_test(nxt_cache_t *cache, nxt_cache_query_t *q)
{
    nxt_time_t               expiry;
    nxt_cache_node_t         *node;
    nxt_cache_query_state_t  *state;

    q->stale = 0;
    state = q->state;
    node = q->node;

    expiry = cache->start_time + node->expiry;

    if (nxt_thread_time() < expiry) {
        return state->ready_handler;
    }

    /*
     * The node is either a valid stale node or an empty sentinel node.
     * The sentinel node can only be in the updating state.
     */

    if (node->updating) {

        if (node->expiry != 0) {
            /* A valid stale cache node. */

            q->stale = 1;

            if (q->use_stale) {
                return state->stale_handler;
            }
        }

        return NULL;
    }

    /* A valid stale cache node which is not currently being updated. */

    q->stale = 1;

    if (q->use_stale) {

        if (q->update_stale) {
            node->updating = 1;
            return state->update_stale_handler;
        }

        return state->stale_handler;
    }

    node->updating = 1;
    return state->update_handler;
}


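/*
 * Arms the query timer before the query starts waiting for the updating
 * process.  A zero timeout means the query waits without a time limit.
 */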
static void
nxt_cache_wait_handler(void *data)
{
    nxt_thread_t       *thr;
    nxt_event_timer_t  *ev;
    nxt_cache_query_t  *q;

    q = data;

    if (q->timeout == 0) {
        return;
    }

    ev = &q->timer;

    if (!nxt_event_timer_is_set(ev)) {
        thr = nxt_thread();
        ev->log = thr->log;
        ev->handler = nxt_cache_timeout_handler;
        ev->data = q;
        nxt_event_timer_ident(ev, -1);

        nxt_event_timer_add(thr->engine, ev, q->timeout);
    }
}


static void
nxt_cache_timeout_handler(nxt_event_timer_t *ev)
{
    nxt_cache_query_t  *q;

    q = ev->data;

    q->state->timeout_handler(q);
}


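/*
 * Called when a waiting query is woken up.  The node is re-tested under the
 * lock: if it still cannot be served, the waiter is put back on the node's
 * wait list; otherwise the waiter is freed and the resulting handler runs
 * with the lock released.
 */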
static void
nxt_cache_wake_handler(void *data)
{
    nxt_cache_t             *cache;
    nxt_work_handler_t      handler;
    nxt_cache_query_t       *q;
    nxt_cache_query_wait_t  *qw;

    qw = data;
    q = qw->query;
    cache = qw->cache;

    nxt_cache_lock(cache);

    handler = nxt_cache_node_test(cache, q);

    if (handler == NULL) {
        /* Wait again. */
        qw->next = q->node->waiting;
        q->node->waiting = qw;

    } else {
        nxt_cache_query_wait_free(cache, qw);
    }

    nxt_cache_unlock(cache);

    if (handler != NULL) {
        handler(q);
    }
}


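/*
 * Node allocation is served from the cache's free list when possible; the
 * slow path releases the cache lock around the allocator call and takes it
 * again before returning.
 */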
static nxt_cache_node_t *
nxt_cache_node_alloc(nxt_cache_t *cache)
{
    nxt_queue_node_t  *qn;
    nxt_cache_node_t  *node;

    qn = nxt_queue_first(&cache->free_nodes);

    if (nxt_fast_path(qn != nxt_queue_tail(&cache->free_nodes))) {
        cache->nfree_nodes--;
        nxt_queue_remove(qn);

        node = nxt_queue_node_data(qn, nxt_cache_node_t, queue);
        nxt_memzero(node, sizeof(nxt_cache_node_t));

        return node;
    }

    nxt_cache_unlock(cache);

    node = cache->alloc(cache->data, sizeof(nxt_cache_node_t));

    if (nxt_fast_path(node != NULL)) {
        nxt_memzero(node, sizeof(nxt_cache_node_t));
    }

    nxt_cache_lock(cache);

    return node;
}


static void
nxt_cache_node_free(nxt_cache_t *cache, nxt_cache_node_t *node,
    nxt_bool_t fast)
{
    if (fast || cache->nfree_nodes < 32) {
        nxt_queue_insert_head(&cache->free_nodes, &node->queue);
        cache->nfree_nodes++;
        return;
    }

    nxt_cache_unlock(cache);

    cache->free(cache->data, node);

    nxt_cache_lock(cache);
}


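/*
 * Waiter structures are also kept on a small free list.  The "fast" out
 * parameter tells the caller whether the lock was held continuously; after
 * a slow allocation the node state must be re-tested.
 */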
static nxt_cache_query_wait_t *
nxt_cache_query_wait_alloc(nxt_cache_t *cache, nxt_bool_t *fast)
{
    nxt_cache_query_wait_t  *qw;

    qw = cache->free_query_wait;

    if (nxt_fast_path(qw != NULL)) {
        cache->free_query_wait = qw->next;
        cache->nfree_query_wait--;

        *fast = 1;
        return qw;
    }

    nxt_cache_unlock(cache);

    qw = cache->alloc(cache->data, sizeof(nxt_cache_query_wait_t));
    *fast = 0;

    nxt_cache_lock(cache);

    return qw;
}


static void
nxt_cache_query_wait_free(nxt_cache_t *cache, nxt_cache_query_wait_t *qw)
{
    if (cache->nfree_query_wait < 32) {
        qw->next = cache->free_query_wait;
        cache->free_query_wait = qw;
        cache->nfree_query_wait++;
        return;
    }

    nxt_cache_unlock(cache);

    cache->free(cache->data, qw);

    nxt_cache_lock(cache);
}


#if 0

nxt_int_t
nxt_cache_update(nxt_cache_t *cache, nxt_cache_node_t *node)
{
    nxt_lvlhsh_key_t  hkey;

    if (node->expiry == 0) {
        /* An empty sentinel node. */
        nxt_cache_node_release(cache, node);
        return NXT_OK;
    }

    hkey.key.len = node->key_len;
    hkey.key.data = node->key_data;
    hkey.key_hash = nxt_murmur_hash2(node->key_data, node->key_len);
    hkey.replace = 1;
    hkey.value = node;

    node->count = 1;

    if (nxt_lvlhsh_insert(&cache->lvlhsh, &hkey) != NXT_OK) {
        return NXT_ERROR;
    }

    node = hkey.value;

    if (node != NULL) {
        if (node->count != 0) {
            node->deleted = 1;

        } else {
            /* TODO: delete the replaced cache node. */
        }
    }

    return NXT_OK;
}

#endif


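/*
 * Drops a reference to a cache node.  The last reference either returns the
 * node to the expiry queue or, if the node has been marked as deleted,
 * schedules its deletion via the cache's delete handler.
 */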
void
nxt_cache_node_release(nxt_cache_t *cache, nxt_cache_node_t *node)
{
    nxt_bool_t  delete;

    nxt_cache_lock(cache);

    delete = nxt_cache_node_release_locked(cache, node);

    nxt_cache_unlock(cache);

    if (delete) {
        nxt_thread_work_queue_add(cache->delete_handler, node);
    }
}


nxt_bool_t
nxt_cache_node_release_locked(nxt_cache_t *cache, nxt_cache_node_t *node)
{
#if 0
    nxt_lvlhsh_key_t  hkey;
#endif

    node->count--;

    if (node->count != 0) {
        return 0;
    }

    if (!node->deleted) {
        /*
         * A cache node is locked whilst its count is non-zero.
         * To minimize the number of operations, the node's place in the
         * expiry queue is updated only when the node is not currently used.
         */
        node->accessed = nxt_thread_time() - cache->start_time;

        nxt_queue_remove(&node->queue);
        nxt_queue_insert_head(&cache->expiry_queue, &node->queue);

        return 0;
    }

#if 0
    hkey.key.len = node->key_len;
    hkey.key.data = node->key_data;
    hkey.key_hash = nxt_murmur_hash2(node->key_data, node->key_len);

    nxt_lvlhsh_delete(&cache->lvlhsh, &hkey);
#endif

    return 1;
}


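/*
 * The cache spinlock is taken only if the cache is marked as shared;
 * otherwise locking is a no-op.
 */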
static void
nxt_cache_lock(nxt_cache_t *cache)
{
    if (cache->shared) {
        nxt_thread_spin_lock(&cache->lock);
    }
}


static void
nxt_cache_unlock(nxt_cache_t *cache)
{
    if (cache->shared) {
        nxt_thread_spin_unlock(&cache->lock);
    }
}