1 /*-
2 * Copyright (c) 2006 Verdens Gang AS
3 * Copyright (c) 2006-2015 Varnish Software AS
4 * All rights reserved.
5 *
6 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7 * Author: Martin Blix Grydeland <martin@varnish-software.com>
8 *
9 * SPDX-License-Identifier: BSD-2-Clause
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 */
33
34 #include "config.h"
35
36 #include <sys/mman.h>
37 #include <sys/stat.h>
38 #include <sys/types.h>
39
40 #include <fcntl.h>
41 #include <stdint.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <unistd.h>
46
47 #include "vdef.h"
48 #include "vas.h"
49 #include "miniobj.h"
50 #include "vmb.h"
51
52 #include "vqueue.h"
53 #include "vre.h"
54 #include "vsl_priv.h"
55
56 #include "vapi/vsl.h"
57 #include "vapi/vsm.h"
58
59 #include "vsl_api.h"
60
/* Cursor over the live VSL log residing in shared memory (VSM). */
struct vslc_vsm {
	unsigned		magic;
#define VSLC_VSM_MAGIC		0x4D3903A6

	struct VSL_cursor	cursor;		/* Public cursor handle */

	unsigned		options;	/* VSL_COPT_* flags */

	struct vsm		*vsm;		/* VSM connection (not owned) */
	struct vsm_fantom	vf;		/* Mapped VSL chunk */

	const struct VSL_head	*head;		/* Shared-memory log header */
	const uint32_t		*end;		/* One past last word of the log */
	struct VSLC_ptr		next;		/* Next record to be returned */
};
76
77 static void
vslc_vsm_delete(const struct VSL_cursor * cursor)78 vslc_vsm_delete(const struct VSL_cursor *cursor)
79 {
80 struct vslc_vsm *c;
81
82 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
83 AZ(VSM_Unmap(c->vsm, &c->vf));
84 assert(&c->cursor == cursor);
85 FREE_OBJ(c);
86 }
87
88 /*
89 * We tolerate the fact that segment_n wraps around eventually: for the default
90 * vsl_space of 80MB and 8 segments, each segment is 10MB long, so we wrap
91 * roughly after 40 pebibytes (32bit) or 160 yobibytes (64bit) worth of vsl
92 * written.
93 *
 * The vsm_check would fail if a vslc paused while this amount of data was
 * written in the meantime.
96 */
97
v_matchproto_(vslc_check_f)98 static enum vsl_check v_matchproto_(vslc_check_f)
99 vslc_vsm_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
100 {
101 const struct vslc_vsm *c;
102 unsigned dist;
103
104 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
105 assert(&c->cursor == cursor);
106
107 if (ptr->ptr == NULL)
108 return (vsl_check_e_inval);
109
110 dist = c->head->segment_n - ptr->priv;
111
112 if (dist >= VSL_SEGMENTS - 2)
113 /* Too close to continue */
114 return (vsl_check_e_inval);
115 if (dist >= VSL_SEGMENTS - 4)
116 /* Warning level */
117 return (vsl_check_warn);
118 /* Safe */
119 return (vsl_check_valid);
120 }
121
/*
 * Advance the cursor to the next record in the shared-memory log.
 *
 * Returns vsl_more when a record is available in cursor->rec, vsl_end
 * when there are no new records yet, vsl_e_eof at end with
 * VSL_COPT_TAILSTOP, vsl_e_abandon when the VSM segment went away, and
 * vsl_e_overrun when the writer got too far ahead of us.
 */
static enum vsl_status v_matchproto_(vslc_next_f)
vslc_vsm_next(const struct VSL_cursor *cursor)
{
	struct vslc_vsm *c;
	enum vsl_check i;
	uint32_t t;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
	assert(&c->cursor == cursor);

	while (1) {
		/* Make sure the writer has not already overtaken the
		 * position we are about to read */
		i = vslc_vsm_check(&c->cursor, &c->next);
		if (i < vsl_check_warn) {
			if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
				return (vsl_e_abandon);
			else
				return (vsl_e_overrun);
		}

		/* Volatile read: the log writer updates this word
		 * concurrently */
		t = *(volatile const uint32_t *)c->next.ptr;
		AN(t);

		if (t == VSL_ENDMARKER) {
			if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
				return (vsl_e_abandon);
			if (c->options & VSL_COPT_TAILSTOP)
				return (vsl_e_eof);
			/* No new records available */
			return (vsl_end);
		}

		/* New data observed. Ensure load ordering with the log
		 * writer. */
		VRMB();

		if (t == VSL_WRAPMARKER) {
			/* Wrap around not possible at front */
			assert(c->next.ptr != c->head->log);
			c->next.ptr = c->head->log;
			/* Round priv up to the next multiple of
			 * VSL_SEGMENTS so it stays in step with the
			 * writer's segment_n */
			while (c->next.priv % VSL_SEGMENTS)
				c->next.priv++;
			continue;
		}

		c->cursor.rec = c->next;
		c->next.ptr = VSL_NEXT(c->next.ptr);

		if (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch) {
			if (!(c->options & VSL_COPT_BATCH))
				/* Skip the batch record */
				continue;
			/* Next call will point to the first record past
			   the batch */
			c->next.ptr +=
			    VSL_WORDS(VSL_BATCHLEN(c->cursor.rec.ptr));
		}

		/* Keep priv in sync with the segment next.ptr now is in */
		while ((c->next.ptr - c->head->log) / c->head->segsize >
		    c->next.priv % VSL_SEGMENTS)
			c->next.priv++;

		assert(c->next.ptr >= c->head->log);
		assert(c->next.ptr < c->end);

		return (vsl_more);
	}
}
189
/*
 * Position the cursor at a starting point in the live log.
 *
 * With VSL_COPT_TAIL we start in the writer's current segment and fast
 * forward to the end marker; otherwise we start a few segments behind
 * the writer to get a safety margin of existing log data.
 *
 * Returns vsl_end on success, or an error status from vslc_vsm_next().
 */
static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_vsm_reset(const struct VSL_cursor *cursor)
{
	struct vslc_vsm *c;
	unsigned u, segment_n;
	enum vsl_status r;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
	assert(&c->cursor == cursor);
	c->cursor.rec.ptr = NULL;

	segment_n = c->head->segment_n;
	/* Make sure offset table is not stale compared to segment_n */
	VRMB();

	if (c->options & VSL_COPT_TAIL) {
		/* Start in the same segment varnishd currently is in and
		   run forward until we see the end */
		u = c->next.priv = segment_n;
		assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
		c->next.ptr = c->head->log +
		    c->head->offset[c->next.priv % VSL_SEGMENTS];
		do {
			if (c->head->segment_n - u > 1) {
				/* Give up if varnishd is moving faster
				   than us */
				return (vsl_e_overrun);
			}
			r = vslc_vsm_next(&c->cursor);
		} while (r == vsl_more);
		if (r != vsl_end)
			return (r);
	} else {
		/* Starting (VSL_SEGMENTS - 3) behind varnishd. This way
		 * even if varnishd advances segment_n immediately, we'll
		 * still have a full segment worth of log before the
		 * general constraint of at least 2 segments apart will be
		 * broken.
		 */
		c->next.priv = segment_n - (VSL_SEGMENTS - 3);
		/* Skip segments that have never been written (offset < 0) */
		while (c->head->offset[c->next.priv % VSL_SEGMENTS] < 0) {
			/* seg 0 must be initialized */
			assert(c->next.priv % VSL_SEGMENTS != 0);
			c->next.priv++;
		}
		assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
		c->next.ptr = c->head->log +
		    c->head->offset[c->next.priv % VSL_SEGMENTS];
	}
	assert(c->next.ptr >= c->head->log);
	assert(c->next.ptr < c->end);
	return (vsl_end);
}
243
/* Operations table for live shared-memory (VSM) cursors */
static const struct vslc_tbl vslc_vsm_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_vsm_delete,
	.next		= vslc_vsm_next,
	.reset		= vslc_vsm_reset,
	.check		= vslc_vsm_check,
};
251
252 struct VSL_cursor *
VSL_CursorVSM(struct VSL_data * vsl,struct vsm * vsm,unsigned options)253 VSL_CursorVSM(struct VSL_data *vsl, struct vsm *vsm, unsigned options)
254 {
255 struct vslc_vsm *c;
256 struct vsm_fantom vf;
257 struct VSL_head *head;
258 enum vsl_status r;
259
260 CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
261
262 if (!VSM_Get(vsm, &vf, VSL_CLASS, NULL)) {
263 (void)vsl_diag(vsl,
264 "No VSL chunk found (child not started ?)");
265 return (NULL);
266 }
267 if (VSM_Map(vsm, &vf)) {
268 (void)vsl_diag(vsl,
269 "VSM_Map(): %s", VSM_Error(vsm));
270 return (NULL);
271 }
272 AN(vf.b);
273
274 head = vf.b;
275 if (memcmp(head->marker, VSL_HEAD_MARKER, sizeof head->marker)) {
276 AZ(VSM_Unmap(vsm, &vf));
277 (void)vsl_diag(vsl, "Not a VSL chunk");
278 return (NULL);
279 }
280 ALLOC_OBJ(c, VSLC_VSM_MAGIC);
281 if (c == NULL) {
282 AZ(VSM_Unmap(vsm, &vf));
283 (void)vsl_diag(vsl, "Out of memory");
284 return (NULL);
285 }
286 c->cursor.priv_tbl = &vslc_vsm_tbl;
287 c->cursor.priv_data = c;
288
289 c->options = options;
290 c->vsm = vsm;
291 c->vf = vf;
292 c->head = head;
293 c->end = c->head->log + c->head->segsize * VSL_SEGMENTS;
294 assert(c->end <= (const uint32_t *)vf.e);
295
296 r = vslc_vsm_reset(&c->cursor);
297 if (r != vsl_end) {
298 AZ(VSM_Unmap(vsm, &vf));
299 (void)vsl_diag(vsl, "Cursor initialization failure (%d)", r);
300 FREE_OBJ(c);
301 return (NULL);
302 }
303
304 return (&c->cursor);
305 }
306
/* Cursor reading VSL records sequentially from a file descriptor */
struct vslc_file {
	unsigned		magic;
#define VSLC_FILE_MAGIC		0x1D65FFEF

	int			fd;		/* Source file descriptor */
	int			close_fd;	/* Close fd on delete? */
	ssize_t			buflen;		/* Buffer capacity in words */
	uint32_t		*buf;		/* Heap record buffer */

	struct VSL_cursor	cursor;		/* Public cursor handle */

};
319
320 static void
vslc_file_delete(const struct VSL_cursor * cursor)321 vslc_file_delete(const struct VSL_cursor *cursor)
322 {
323 struct vslc_file *c;
324
325 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
326 assert(&c->cursor == cursor);
327 if (c->close_fd)
328 (void)close(c->fd);
329 if (c->buf != NULL)
330 free(c->buf);
331 FREE_OBJ(c);
332 }
333
/*
 * Read exactly n bytes from fd into buf, retrying on short reads.
 * Returns n on success, 0 on EOF before any progress within a chunk,
 * or the (non-positive) result of the failing read().
 */
static ssize_t
vslc_file_readn(int fd, void *buf, ssize_t n)
{
	ssize_t total, nread;

	for (total = 0; total < n; total += nread) {
		nread = read(fd, (char *)buf + total, n - total);
		if (nread <= 0)
			return (nread);
	}
	return (total);
}
349
/*
 * Read the next record from the file into the cursor's buffer, growing
 * the buffer as needed.  Batch container records are skipped; their
 * member records follow in the stream.  Returns vsl_more on success,
 * vsl_e_eof at end of file and vsl_e_io on read errors.
 */
static enum vsl_status v_matchproto_(vslc_next_f)
vslc_file_next(const struct VSL_cursor *cursor)
{
	struct vslc_file *c;
	ssize_t i;
	ssize_t l;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
	assert(&c->cursor == cursor);

	do {
		c->cursor.rec.ptr = NULL;
		assert(c->buflen >= 2);
		/* Read the two header words to learn the record length */
		i = vslc_file_readn(c->fd, c->buf, VSL_BYTES(2));
		if (i < 0)
			return (vsl_e_io);
		if (i == 0)
			return (vsl_e_eof);
		assert(i == VSL_BYTES(2));
		l = 2 + VSL_WORDS(VSL_LEN(c->buf));
		if (c->buflen < l) {
			/* Grow to 2*l words; the loop body runs at most
			 * once since 2*l >= l always holds afterwards */
			while (c->buflen < l)
				c->buflen = 2 * l;
			/* AN() aborts on OOM, so overwriting c->buf
			 * directly cannot leak here */
			c->buf = realloc(c->buf, VSL_BYTES(c->buflen));
			AN(c->buf);
		}
		if (l > 2) {
			/* Read the record payload past the header */
			i = vslc_file_readn(c->fd, c->buf + 2,
			    VSL_BYTES(l - 2));
			if (i < 0)
				return (vsl_e_io);
			if (i == 0)
				return (vsl_e_eof);
			assert(i == VSL_BYTES(l - 2));
		}
		c->cursor.rec.ptr = c->buf;
	} while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
	return (vsl_more);
}
389
/* Reset is not implemented for streaming file cursors; report EOF. */
static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_file_reset(const struct VSL_cursor *cursor)
{
	(void)cursor;
	/* XXX: Implement me */
	return (vsl_e_eof);
}
397
/* Operations table for read()-based file cursors (no check support) */
static const struct vslc_tbl vslc_file_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_file_delete,
	.next		= vslc_file_next,
	.reset		= vslc_file_reset,
	.check		= NULL,
};
405
/* Cursor over a whole VSL file mapped into memory */
struct vslc_mmap {
	unsigned		magic;
#define VSLC_MMAP_MAGIC		0x7de15f61
	int			fd;	/* Mapped file's fd (not closed on delete) */
	char			*b;	/* Start of mapping */
	char			*e;	/* End of mapping */
	struct VSL_cursor	cursor;	/* Public cursor handle */
	struct VSLC_ptr		next;	/* Next record to return */
};
415
/* Tear down an mmap cursor: unmap the file and free the object. */
static void
vslc_mmap_delete(const struct VSL_cursor *cursor)
{
	struct vslc_mmap *c;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
	assert(&c->cursor == cursor);
	AZ(munmap(c->b, c->e - c->b));
	/* NOTE(review): c->fd is left open here even though it is stored
	 * in the cursor -- confirm whether this is a descriptor leak for
	 * cursors over regular files (close_fd is not propagated from
	 * VSL_CursorFile). */
	FREE_OBJ(c);
}
426
v_matchproto_(vslc_next_f)427 static enum vsl_status v_matchproto_(vslc_next_f)
428 vslc_mmap_next(const struct VSL_cursor *cursor)
429 {
430 struct vslc_mmap *c;
431 const char *t;
432
433 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
434 assert(&c->cursor == cursor);
435 c->cursor.rec = c->next;
436 t = TRUST_ME(c->cursor.rec.ptr);
437 if (t == c->e)
438 return (vsl_e_eof);
439 c->next.ptr = VSL_NEXT(c->next.ptr);
440 t = TRUST_ME(c->next.ptr);
441 if (t > c->e)
442 return (vsl_e_io);
443 return (vsl_more);
444 }
445
v_matchproto_(vslc_reset_f)446 static enum vsl_status v_matchproto_(vslc_reset_f)
447 vslc_mmap_reset(const struct VSL_cursor *cursor)
448 {
449 struct vslc_mmap *c;
450
451 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
452 assert(&c->cursor == cursor);
453 return (vsl_e_eof);
454 }
455
v_matchproto_(vslc_check_f)456 static enum vsl_check v_matchproto_(vslc_check_f)
457 vslc_mmap_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
458 {
459 struct vslc_mmap *c;
460 const char *t;
461
462 CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
463 assert(&c->cursor == cursor);
464 AN(ptr->ptr);
465 t = TRUST_ME(ptr->ptr);
466 assert(t > c->b);
467 assert(t <= c->e);
468 return (vsl_check_valid);
469 }
470
/* Operations table for mmap-backed file cursors */
static const struct vslc_tbl vslc_mmap_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_mmap_delete,
	.next		= vslc_mmap_next,
	.reset		= vslc_mmap_reset,
	.check		= vslc_mmap_check,
};
478
/*
 * Try to create an mmap-based cursor for fd.
 *
 * Returns:
 *   - MAP_FAILED when fd is not a regular file or mmap fails; the
 *     caller should fall back to a read-based cursor (fd stays open).
 *   - NULL on allocation failure (fd is closed, diagnostic recorded).
 *   - A usable cursor on success.
 */
static struct VSL_cursor *
vsl_cursor_mmap(struct VSL_data *vsl, int fd)
{
	struct vslc_mmap *c;
	struct stat st[1];
	void *p;

	AZ(fstat(fd, st));
	if ((st->st_mode & S_IFMT) != S_IFREG)
		return (MAP_FAILED);

	/* VSL_CursorFile has already read the file ID, so the file must
	   be at least that big */
	assert(st->st_size >= (off_t)(sizeof VSL_FILE_ID));
	p = mmap(NULL, st->st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		vsl_diag(vsl, "Cannot mmap: %s", strerror(errno));
		return (MAP_FAILED);
	}

	ALLOC_OBJ(c, VSLC_MMAP_MAGIC);
	if (c == NULL) {
		(void)munmap(p, st->st_size);
		(void)close(fd);
		vsl_diag(vsl, "Out of memory");
		return (NULL);
	}
	c->cursor.priv_tbl = &vslc_mmap_tbl;
	c->cursor.priv_data = c;

	c->fd = fd;
	c->b = p;
	c->e = c->b + st->st_size;
	/* First record starts just past the file ID header */
	c->next.ptr = TRUST_ME(c->b + sizeof VSL_FILE_ID);

	return (&c->cursor);
}
514
/*
 * Open a cursor reading VSL records from a file, or from stdin when
 * name is "-".  The file must begin with VSL_FILE_ID.  Regular files
 * are served through an mmap cursor; anything else (pipes, stdin)
 * through a read()-based cursor.  Returns NULL on error with a
 * diagnostic recorded on vsl.
 */
struct VSL_cursor *
VSL_CursorFile(struct VSL_data *vsl, const char *name, unsigned options)
{
	struct VSL_cursor *mc;
	struct vslc_file *c;
	int fd;
	int close_fd = 0;
	char buf[] = VSL_FILE_ID;
	ssize_t i;

	CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
	AN(name);
	(void)options;

	if (!strcmp(name, "-"))
		fd = STDIN_FILENO;
	else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			vsl_diag(vsl, "Cannot open %s: %s", name,
			    strerror(errno));
			return (NULL);
		}
		close_fd = 1;
	}

	/* Verify the file signature before committing to a cursor type */
	i = vslc_file_readn(fd, buf, sizeof buf);
	if (i <= 0) {
		if (close_fd)
			(void)close(fd);
		vsl_diag(vsl, "VSL file read error: %s",
		    i < 0 ? strerror(errno) : "EOF");
		return (NULL);
	}
	assert(i == sizeof buf);
	if (memcmp(buf, VSL_FILE_ID, sizeof buf)) {
		if (close_fd)
			(void)close(fd);
		vsl_diag(vsl, "Not a VSL file: %s", name);
		return (NULL);
	}

	/* MAP_FAILED means "fall back to a read-based cursor"; NULL
	 * means a fatal error was already reported */
	mc = vsl_cursor_mmap(vsl, fd);
	if (mc == NULL)
		return (NULL);
	if (mc != MAP_FAILED)
		return (mc);

	ALLOC_OBJ(c, VSLC_FILE_MAGIC);
	if (c == NULL) {
		if (close_fd)
			(void)close(fd);
		vsl_diag(vsl, "Out of memory");
		return (NULL);
	}
	c->cursor.priv_tbl = &vslc_file_tbl;
	c->cursor.priv_data = c;

	c->fd = fd;
	c->close_fd = close_fd;
	/* Initial record buffer; grown on demand in vslc_file_next() */
	c->buflen = VSL_WORDS(BUFSIZ);
	c->buf = malloc(VSL_BYTES(c->buflen));
	AN(c->buf);

	return (&c->cursor);
}
581
582 void
VSL_DeleteCursor(const struct VSL_cursor * cursor)583 VSL_DeleteCursor(const struct VSL_cursor *cursor)
584 {
585 const struct vslc_tbl *tbl;
586
587 CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
588 if (tbl->delete == NULL)
589 return;
590 (tbl->delete)(cursor);
591 }
592
593 enum vsl_status
VSL_ResetCursor(const struct VSL_cursor * cursor)594 VSL_ResetCursor(const struct VSL_cursor *cursor)
595 {
596 const struct vslc_tbl *tbl;
597
598 CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
599 if (tbl->reset == NULL)
600 return (vsl_e_eof);
601 return ((tbl->reset)(cursor));
602 }
603
604 enum vsl_status
VSL_Next(const struct VSL_cursor * cursor)605 VSL_Next(const struct VSL_cursor *cursor)
606 {
607 const struct vslc_tbl *tbl;
608
609 CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
610 AN(tbl->next);
611 return ((tbl->next)(cursor));
612 }
613
614 enum vsl_check
VSL_Check(const struct VSL_cursor * cursor,const struct VSLC_ptr * ptr)615 VSL_Check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
616 {
617 const struct vslc_tbl *tbl;
618
619 CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
620 if (tbl->check == NULL)
621 return (vsl_check_e_notsupp);
622 return ((tbl->check)(cursor, ptr));
623 }
624