/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* ------------------------------------------------------------
 * strands helpers - move elsewhere?
 */

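/* Sum of the lengths of all non-NULL, non-empty strands (no NUL included) */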
static unsigned
strands_len(const struct strands *s)
{
	unsigned r = 0;
	int i;

	for (i = 0; i < s->n; i++) {
		if (s->p[i] == NULL || *s->p[i] == '\0')
			continue;
		r += strlen(s->p[i]);
	}

	return (r);
}

/*
 * Like VRT_Strands(), but truncates instead of failing when the buffer
 * runs out of space.
 *
 * Returns the number of bytes written, including the terminating NUL.
 */
static unsigned
strands_cat(char *buf, unsigned bufl, const struct strands *s)
{
	unsigned l = 0, ll;
	int i;

	/* reserve room for the terminating NUL */
	assert(bufl > 0);
	bufl--;

	for (i = 0; i < s->n && bufl > 0; i++) {
		if (s->p[i] == NULL || *s->p[i] == '\0')
			continue;
		ll = strlen(s->p[i]);
		if (ll > bufl)
			ll = bufl;
		memcpy(buf, s->p[i], ll);
		l += ll;
		buf += ll;
		bufl -= ll;
	}
	*buf = '\0';	/* NUL-terminated */
	return (l + 1);
}

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

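/*
 * State of the shared VSL ring: the head structure, the end of the log
 * area, the current write position, the current segment number and the
 * per-segment size in 32-bit words.
 */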
static struct VSL_head		*vsl_head;
static const uint32_t		*vsl_end;
static uint32_t			*vsl_ptr;
static unsigned			vsl_segment_n;
static ssize_t			vsl_segsize;

struct VSC_main *VSC_C_main;

static void
vsl_sanity(const struct vsl_log *vsl)
{
	AN(vsl);
	AN(vsl->wlp);
	AN(vsl->wlb);
	AN(vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
	volatile uint8_t *bm = &cache_param->vsl_mask[0];
	uint8_t b;

	assert(tag > SLT__Bogus);
	assert(tag < SLT__Reserved);
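	/* One mask bit per tag: byte index is tag / 8, MSB first in each byte */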
	bm += ((unsigned)tag >> 3);
	b = (0x80 >> ((unsigned)tag & 7));
	return (*bm & b);
}

/*--------------------------------------------------------------------
 * Lay down a record header, and return a pointer to the next record
 */

static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, uint32_t vxid)
{

	AZ((uintptr_t)p & 0x3);
	assert(tag > SLT__Bogus);
	assert(tag < SLT__Reserved);
	AZ(len & ~VSL_LENMASK);

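	/* Two 32-bit header words: tag and length first, then the vxid */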
	p[1] = vxid;
	p[0] = ((((unsigned)tag & 0xff) << 24) | len);
	return (VSL_END(p, len));
}

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

	assert(vsl_ptr >= vsl_head->log);
	assert(vsl_ptr < vsl_end);
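	/* Round the segment counter up to the next multiple of VSL_SEGMENTS */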
	vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
	assert(vsl_segment_n % VSL_SEGMENTS == 0);
	vsl_head->offset[0] = 0;
	vsl_head->log[0] = VSL_ENDMARKER;
	VWMB();
	if (vsl_ptr != vsl_head->log) {
		*vsl_ptr = VSL_WRAPMARKER;
		vsl_ptr = vsl_head->log;
	}
	vsl_head->segment_n = vsl_segment_n;
	VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
	uint32_t *p;
	int err;

	err = pthread_mutex_trylock(&vsl_mtx);
	if (err == EBUSY) {
		AZ(pthread_mutex_lock(&vsl_mtx));
		VSC_C_main->shm_cont++;
	} else {
		AZ(err);
	}
	assert(vsl_ptr < vsl_end);
	AZ((uintptr_t)vsl_ptr & 0x3);

	VSC_C_main->shm_writes++;
	VSC_C_main->shm_flushes += flushes;
	VSC_C_main->shm_records += records;

	/* Wrap if necessary */
	if (VSL_END(vsl_ptr, len) >= vsl_end)
		vsl_wrap();

	p = vsl_ptr;
	vsl_ptr = VSL_END(vsl_ptr, len);
	assert(vsl_ptr < vsl_end);
	AZ((uintptr_t)vsl_ptr & 0x3);

	*vsl_ptr = VSL_ENDMARKER;

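	/* Record the start offset of each segment boundary this
	   reservation crossed */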
	while ((vsl_ptr - vsl_head->log) / vsl_segsize >
	    vsl_segment_n % VSL_SEGMENTS) {
		vsl_segment_n++;
		vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
		    vsl_ptr - vsl_head->log;
	}

	AZ(pthread_mutex_unlock(&vsl_mtx));
	/* Implicit VWMB() in mutex op ensures ENDMARKER and new table
	   values are seen before new segment number */
	vsl_head->segment_n = vsl_segment_n;

	return (p);
}

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, uint32_t vxid, const char *b, unsigned len)
{
	uint32_t *p;
	unsigned mlen;

	mlen = cache_param->vsl_reclen;

	/* Truncate */
	if (len > mlen)
		len = mlen;

	p = vsl_get(len, 1, 0);

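	/* The payload starts after the two header words */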
	memcpy(p + 2, b, len);

	/*
	 * vsl_hdr() writes p[1] again, but we want to make sure it
	 * has hit memory because we work on the live buffer here.
	 */
	p[1] = vxid;
	VWMB();
	(void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume,
 * NB: since it adds significantly to the mutex load on the VSL.
 */

void
VSLv(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, va_list ap)
{
	unsigned n, mlen = cache_param->vsl_reclen;
	char buf[mlen];

	AN(fmt);
	if (vsl_tag_is_masked(tag))
		return;

	if (strchr(fmt, '%') == NULL) {
		vslr(tag, vxid, fmt, strlen(fmt) + 1);
	} else {
		n = vsnprintf(buf, mlen, fmt, ap);
		if (n > mlen - 1)
			n = mlen - 1;
		buf[n++] = '\0'; /* NUL-terminated */
		vslr(tag, vxid, buf, n);
	}

}

void
VSLs(enum VSL_tag_e tag, uint32_t vxid, const struct strands *s)
{
	unsigned n, mlen = cache_param->vsl_reclen;
	char buf[mlen];

	if (vsl_tag_is_masked(tag))
		return;

	n = strands_cat(buf, mlen, s);

	vslr(tag, vxid, buf, n);
}

void
VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	VSLv(tag, vxid, fmt, ap);
	va_end(ap);
}

/*--------------------------------------------------------------------*/

void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
	uint32_t *p;
	unsigned l;

	vsl_sanity(vsl);
	l = pdiff(vsl->wlb, vsl->wlp);
	if (l == 0)
		return;

	assert(l >= 8);

	p = vsl_get(l, vsl->wlr, overflow);

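	/* The buffered records become the payload of a single SLT__Batch
	   record; p[1] carries the payload length rather than a vxid */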
	memcpy(p + 2, vsl->wlb, l);
	p[1] = l;
	VWMB();
	p[0] = ((((unsigned)SLT__Batch & 0xff) << 24));
	vsl->wlp = vsl->wlb;
	vsl->wlr = 0;
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
	unsigned l, mlen;
	char *p;

	vsl_sanity(vsl);
	Tcheck(t);
	if (vsl_tag_is_masked(tag))
		return;
	mlen = cache_param->vsl_reclen;

	/* Truncate */
	l = Tlen(t);
	if (l > mlen - 1)
		l = mlen - 1;

	assert(vsl->wlp < vsl->wle);

	/* Flush if necessary */
	if (VSL_END(vsl->wlp, l + 1) >= vsl->wle)
		VSL_Flush(vsl, 1);
	assert(VSL_END(vsl->wlp, l + 1) < vsl->wle);
	p = VSL_DATA(vsl->wlp);
	memcpy(p, t.b, l);
	p[l++] = '\0';		/* NUL-terminated */
	vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
	assert(vsl->wlp < vsl->wle);
	vsl->wlr++;

	if (DO_DEBUG(DBG_SYNCVSL))
		VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered-strands
 */
void
VSLbs(struct vsl_log *vsl, enum VSL_tag_e tag, const struct strands *s)
{
	unsigned l, mlen;

	vsl_sanity(vsl);
	if (vsl_tag_is_masked(tag))
		return;
	mlen = cache_param->vsl_reclen;

	/* including NUL */
	l = strands_len(s) + 1;
	if (l > mlen)
		l = mlen;

	assert(vsl->wlp < vsl->wle);

	/* Flush if necessary */
	if (VSL_END(vsl->wlp, l) >= vsl->wle)
		VSL_Flush(vsl, 1);
	assert(VSL_END(vsl->wlp, l) < vsl->wle);

	mlen = strands_cat(VSL_DATA(vsl->wlp), l, s);
	assert(l == mlen);

	vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
	assert(vsl->wlp < vsl->wle);
	vsl->wlr++;

	if (DO_DEBUG(DBG_SYNCVSL))
		VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
	char *p;
	const char *u, *f;
	unsigned n, mlen;
	txt t;

	vsl_sanity(vsl);
	AN(fmt);
	if (vsl_tag_is_masked(tag))
		return;

	/*
	 * If there are no printf-expansions, don't waste time expanding them
	 */
	f = NULL;
	for (u = fmt; *u != '\0'; u++)
		if (*u == '%')
			f = u;
	if (f == NULL) {
		t.b = TRUST_ME(fmt);
		t.e = TRUST_ME(u);
		VSLbt(vsl, tag, t);
		return;
	}

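	/* Fast path: a bare "%s" format is logged straight from its argument */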
	if (!strcmp(fmt, "%s")) {
		p = va_arg(ap, char *);
		t.b = p;
		t.e = strchr(p, '\0');
		VSLbt(vsl, tag, t);
		return;
	}

	mlen = cache_param->vsl_reclen;

	/* Flush if we cannot fit a full size record */
	if (VSL_END(vsl->wlp, mlen + 1) >= vsl->wle)
		VSL_Flush(vsl, 1);

	p = VSL_DATA(vsl->wlp);
	n = vsnprintf(p, mlen, fmt, ap);
	if (n > mlen - 1)
		n = mlen - 1;	/* we truncate long fields */
	p[n++] = '\0';		/* NUL-terminated */
	vsl->wlp = vsl_hdr(tag, vsl->wlp, n, vsl->wid);
	assert(vsl->wlp < vsl->wle);
	vsl->wlr++;

	if (DO_DEBUG(DBG_SYNCVSL))
		VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
	va_list ap;

	vsl_sanity(vsl);
	va_start(ap, fmt);
	VSLbv(vsl, tag, fmt, ap);
	va_end(ap);
}
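
/*
 * Illustrative use only, not part of this file: callers own a struct
 * vsl_log (a request's vsl member is assumed here for the example) and
 * log into it with the buffered variants:
 *
 *	VSLb(req->vsl, SLT_Debug, "fetch took %.6f s", delta);
 */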

#define Tf6 "%ju.%06ju"
#define Ta6(t) (uintmax_t)floor((t)), (uintmax_t)floor((t) * 1e6) % 1000000U
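/* Tf6/Ta6 print a vtim_real as integral seconds and microseconds */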

void
VSLb_ts(struct vsl_log *vsl, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now)
{

	/*
	 * XXX: Make an option to turn off some unnecessary timestamp
	 * logging. This must be done carefully, because some functions
	 * (e.g. V1L_Open) take the last timestamp as their initial
	 * value for timeout calculation.
	 */
	vsl_sanity(vsl);
	AN(event);
	AN(pprev);
	assert(!isnan(now) && now != 0.);
	VSLb(vsl, SLT_Timestamp, "%s: " Tf6 " " Tf6 " " Tf6,
	    event, Ta6(now), Ta6(now - first), Ta6(now - *pprev));
	*pprev = now;
}

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
	unsigned mlen;
	char *p;

	vsl_sanity(vsl);
	AN(ptr);
	if (vsl_tag_is_masked(tag))
		return;
	mlen = cache_param->vsl_reclen;

	/* Truncate */
	if (len > mlen)
		len = mlen;

	assert(vsl->wlp < vsl->wle);

	/* Flush if necessary */
	if (VSL_END(vsl->wlp, len) >= vsl->wle)
		VSL_Flush(vsl, 1);
	assert(VSL_END(vsl->wlp, len) < vsl->wle);
	p = VSL_DATA(vsl->wlp);
	memcpy(p, ptr, len);
	vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
	assert(vsl->wlp < vsl->wle);
	vsl->wlr++;

	if (DO_DEBUG(DBG_SYNCVSL))
		VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * Set up a VSL buffer, allocating space if none is provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

	if (ptr == NULL) {
		len = cache_param->vsl_buffer;
		ptr = malloc(len);
		AN(ptr);
	}
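	/* wlb..wle delimit the buffer; wle is counted in 32-bit words */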
	vsl->wlp = ptr;
	vsl->wlb = ptr;
	vsl->wle = ptr;
	vsl->wle += len / sizeof(*vsl->wle);
	vsl->wlr = 0;
	vsl->wid = 0;
	vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, uint32_t vxid)
{
	uint32_t ovxid;

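	/* Link the old transaction to the new vxid, end it, then begin
	   the new transaction with a reference back to the old vxid */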
	vsl_sanity(vsl);
	ovxid = vsl->wid;
	VSLb(vsl, SLT_Link, "%s %u %s", typ, VXID(vxid), why);
	VSL_End(vsl);
	vsl->wid = vxid;
	VSLb(vsl, SLT_Begin, "%s %u %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

void
VSL_End(struct vsl_log *vsl)
{
	txt t;
	char p[] = "";

	vsl_sanity(vsl);
	AN(vsl->wid);
	t.b = p;
	t.e = p;
	VSLbt(vsl, SLT_End, t);
	VSL_Flush(vsl, 0);
	vsl->wid = 0;
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
	AZ(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
	AZ(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
	AZ(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
	AZ(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

void
VSM_Init(void)
{
	unsigned u;

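	/* The segment counter must wrap around to a multiple of VSL_SEGMENTS */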
	assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

	AZ(pthread_mutex_init(&vsl_mtx, NULL));
	AZ(pthread_mutex_init(&vsc_mtx, NULL));
	AZ(pthread_mutex_init(&vsm_mtx, NULL));

	vsc_lock = vsm_vsc_lock;
	vsc_unlock = vsm_vsc_unlock;
	vsmw_lock = vsm_vsmw_lock;
	vsmw_unlock = vsm_vsmw_unlock;

	VSC_C_main = VSC_main_New(NULL, NULL, "");
	AN(VSC_C_main);

	AN(heritage.proc_vsmw);
	vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
	    cache_param->vsl_space, VSL_CLASS);
	AN(vsl_head);
	vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
	    sizeof *vsl_end) / VSL_SEGMENTS;
	vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
	/* Make segment_n always overflow on the first log wrap, so that
	   any problems readers have with that event become visible */
	vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
	AZ(vsl_segment_n % VSL_SEGMENTS);
	vsl_ptr = vsl_head->log;
	*vsl_ptr = VSL_ENDMARKER;

	memset(vsl_head, 0, sizeof *vsl_head);
	vsl_head->segsize = vsl_segsize;
	vsl_head->offset[0] = 0;
	vsl_head->segment_n = vsl_segment_n;
	for (u = 1; u < VSL_SEGMENTS; u++)
		vsl_head->offset[u] = -1;
	VWMB();
	memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}