/*
 * Empire - A multi-player, client/server Internet based war game.
 * Copyright (C) 1986-2021, Dave Pare, Jeff Bailey, Thomas Ruschak,
 *   Ken Stevens, Steve McClure, Markus Armbruster
 *
 * Empire is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * ---
 *
 * See files README, COPYING and CREDITS in the root of the source
 * tree for related information and legal notices.  It is expected
 * that future projects/authors will amend these files as needed.
 *
 * ---
 *
 * file.c: Operations on Empire tables (`files' for historical reasons)
 *
 * Known contributors to this file:
 *     Dave Pare, 1989
 *     Steve McClure, 2000
 *     Markus Armbruster, 2005-2014
 */

#include <config.h>

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#ifdef _WIN32
#include <io.h>
#include <share.h>
#endif
#include "file.h"
#include "match.h"
#include "misc.h"
#include "prototypes.h"

static int open_locked(char *, int, mode_t);
static int ef_realloc_cache(struct empfile *, int);
static int fillcache(struct empfile *, int);
static int do_read(struct empfile *, void *, int, int);
static int do_write(struct empfile *, void *, int, int);
static unsigned get_seqno(struct empfile *, int);
static void new_seqno(struct empfile *, void *);
static void must_be_fresh(struct empfile *, void *);
static int do_extend(struct empfile *, int);
static void do_blank(struct empfile *, void *, int, int);
static int ef_check(int);

static unsigned ef_generation;
/*
 * Open the file-backed table @type (EF_SECTOR, ...).
 * @how are flags to control operation.  Naturally, immutable flags are
 * not permitted.
 * The table must not be already open.
 * Return non-zero on success, zero on failure.
 */
int
ef_open(int type, int how)
{
    struct empfile *ep;
    int oflags, fd, fsiz, fids, nslots, fail;

    if (ef_check(type) < 0)
        return 0;
    if (CANT_HAPPEN(how & EFF_IMMUTABLE))
        how &= ~EFF_IMMUTABLE;

    /* open file */
    ep = &empfile[type];
    if (CANT_HAPPEN(!ep->file || ep->base != EF_BAD || ep->fd >= 0))
        return 0;
    if (CANT_HAPPEN(ep->prewrite && !(how & EFF_MEM)))
        return 0;               /* not implemented */
    oflags = O_RDWR;
    if (how & EFF_PRIVATE)
        oflags = O_RDONLY;
    if (how & EFF_CREATE)
        oflags |= O_CREAT | O_TRUNC;
    fd = open_locked(ep->file, oflags,
                     S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
    if (fd < 0) {
        logerror("Can't open %s (%s)", ep->file, strerror(errno));
        return 0;
    }

    /* get file size */
    if (how & EFF_CREATE) {
        fids = ep->nent >= 0 ? ep->nent : 0;
    } else {
        fsiz = fsize(fd);
        if (fsiz % ep->size) {
            logerror("Can't open %s (file size not a multiple of record size %d)",
                     ep->file, ep->size);
            close(fd);
            return 0;
        }
        fids = fsiz / ep->size;
        if (ep->nent >= 0 && ep->nent != fids) {
            logerror("Can't open %s (got %d records instead of %d)",
                     ep->file, fids, ep->nent);
            close(fd);
            return 0;
        }
    }

    /* allocate cache */
    if (ep->flags & EFF_STATIC) {
        /* ep->cache already points to space for ep->csize elements */
        if (how & EFF_MEM) {
            if (fids > ep->csize) {
                CANT_HAPPEN(ep->nent >= 0); /* insufficient static cache */
                logerror("Can't open %s (file larger than %d records)",
                         ep->file, ep->csize);
                close(fd);
                return 0;
            }
        }
    } else {
        if (CANT_HAPPEN(ep->cache))
            free(ep->cache);
        if (how & EFF_MEM)
            nslots = fids;
        else
            nslots = blksize(fd) / ep->size;
        if (!ef_realloc_cache(ep, nslots)) {
            logerror("Can't map %s (%s)", ep->file, strerror(errno));
            close(fd);
            return 0;
        }
    }
    ep->baseid = 0;
    ep->cids = 0;
    ep->flags = (ep->flags & EFF_IMMUTABLE) | (how & ~EFF_CREATE);
    ep->fd = fd;

    if (how & EFF_CREATE) {
        /* populate new file */
        ep->fids = 0;
        fail = !do_extend(ep, fids);
    } else {
        ep->fids = fids;
        if ((how & EFF_MEM) && fids)
            fail = fillcache(ep, 0) != fids;
        else
            fail = 0;
    }
    if (fail) {
        ep->cids = 0;           /* prevent cache flush */
        ef_close(type);
        return 0;
    }

    if (ep->onresize)
        ep->onresize(type);
    return 1;
}
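
/*
 * Example (illustrative sketch, not called anywhere in this file):
 * opening a table fully cached and closing it again.  EF_SECTOR and
 * the flag names come from the comments above; the error handling is
 * only a guess at what a caller might do.
 *
 *     if (!ef_open(EF_SECTOR, EFF_MEM))
 *         exit(EXIT_FAILURE);
 *     ...use the table...
 *     if (!ef_close(EF_SECTOR))
 *         logerror("close failed");
 */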

/*
 * Open file @name with @oflags and @mode, and try to lock it.
 * Return the file descriptor on success, -1 on failure.
 */
static int
open_locked(char *name, int oflags, mode_t mode)
{
    int rdlonly = (oflags & O_ACCMODE) == O_RDONLY;
    int fd;

#ifdef _WIN32
    fd = _sopen(name, oflags | O_BINARY, rdlonly ? SH_DENYNO : SH_DENYWR,
                mode);
    if (fd < 0)
        return -1;
#else  /* !_WIN32 */
    struct flock lock;

    fd = open(name, oflags, mode);
    if (fd < 0)
        return -1;

    lock.l_type = rdlonly ? F_RDLCK : F_WRLCK;
    lock.l_whence = SEEK_SET;
    lock.l_start = lock.l_len = 0;
    if (fcntl(fd, F_SETLK, &lock) == -1) {
        close(fd);
        return -1;
    }
#endif /* !_WIN32 */
    return fd;
}

/*
 * Reallocate cache for table @ep to hold @count slots.
 * The table must not be allocated statically.
 * The cache may still be unmapped.
 * If reallocation succeeds, any pointers obtained from ef_ptr()
 * become invalid.
 * If it fails, the cache is unchanged, and errno is set.
 * Return non-zero on success, zero on failure.
 */
static int
ef_realloc_cache(struct empfile *ep, int count)
{
    void *cache;

    if (CANT_HAPPEN(ep->flags & EFF_STATIC))
        return 0;
    if (CANT_HAPPEN(count < 0))
        count = 0;

    /*
     * Avoid zero slots, because that can lead to null cache, which
     * would be interpreted as unmapped cache.
     */
    if (count == 0)
        count++;
    cache = realloc(ep->cache, count * ep->size);
    if (!cache)
        return 0;

    ep->cache = cache;
    ep->csize = count;
    return 1;
}

/*
 * Open the table @type, which is a view of a base table.
 * The table must not be already open.
 * Return 0 on success, -1 on failure.
 * Beware: views work only as long as the base table doesn't change size!
 * You must close the view before closing its base table.
 */
int
ef_open_view(int type)
{
    struct empfile *ep;
    int base;

    if (ef_check(type) < 0)
        return -1;
    ep = &empfile[type];
    base = ep->base;
    if (ef_check(base) < 0)
        return -1;
    if (CANT_HAPPEN(!(ef_flags(base) & EFF_MEM)
                    || ep->file
                    || ep->size != empfile[base].size
                    || ep->nent != empfile[base].nent
                    || ep->cache || ep->oninit || ep->postread
                    || ep->prewrite || ep->onresize))
        return -1;

    ep->cache = empfile[base].cache;
    ep->csize = empfile[base].csize;
    ep->flags |= EFF_MEM;
    ep->baseid = empfile[base].baseid;
    ep->cids = empfile[base].cids;
    ep->fids = empfile[base].fids;
    return 0;
}
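
/*
 * Example (sketch only): a view must be opened after its base table
 * and closed before it.  EF_SECTOR is from the comments above; the
 * view type name EF_SECTOR_VIEW is a made-up placeholder.
 *
 *     if (!ef_open(EF_SECTOR, EFF_MEM))
 *         return 0;
 *     if (ef_open_view(EF_SECTOR_VIEW) < 0)
 *         return 0;
 *     ...
 *     ef_close(EF_SECTOR_VIEW);
 *     ef_close(EF_SECTOR);
 */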

/*
 * Close the open table @type (EF_SECTOR, ...).
 * Return non-zero on success, zero on failure.
 */
int
ef_close(int type)
{
    struct empfile *ep;
    int retval = 1;

    if (ef_check(type) < 0)
        return 0;
    ep = &empfile[type];

    if (EF_IS_VIEW(type)) {
        ep->cache = NULL;
        ep->csize = 0;
    } else {
        if (!ef_flush(type))
            retval = 0;
        if (!(ep->flags & EFF_STATIC)) {
            free(ep->cache);
            ep->cache = NULL;
            ep->csize = 0;
        }
        if (close(ep->fd) < 0) {
            logerror("Error closing %s (%s)", ep->file, strerror(errno));
            retval = 0;
        }
        ep->fd = -1;
    }
    ep->flags &= EFF_IMMUTABLE;
    ep->baseid = ep->cids = ep->fids = 0;
    if (ep->onresize)
        ep->onresize(type);
    return retval;
}

/*
 * Flush file-backed table @type (EF_SECTOR, ...) to its backing file.
 * Do nothing if the table is privately mapped.
 * Update timestamps of written elements if table is EFF_TYPED.
 * Return non-zero on success, zero on failure.
 */
int
ef_flush(int type)
{
    struct empfile *ep;

    if (ef_check(type) < 0)
        return 0;
    ep = &empfile[type];
    if (ep->flags & EFF_PRIVATE)
        return 1;               /* nothing to do */
    if (CANT_HAPPEN(ep->fd < 0))
        return 0;
    /*
     * We don't know which cache entries are dirty.  ef_write() writes
     * through, but direct updates through ef_ptr() don't.  They are
     * allowed only with EFF_MEM.  Assume the whole cache is dirty
     * then.
     */
    if (ep->flags & EFF_MEM) {
        if (do_write(ep, ep->cache, ep->baseid, ep->cids) < 0)
            return 0;
    }

    return 1;
}

/*
 * Return pointer to element @id in table @type if it exists, else NULL.
 * The table must be fully cached, i.e. flags & EFF_MEM.
 * The caller is responsible for flushing changes he makes.
 */
void *
ef_ptr(int type, int id)
{
    struct empfile *ep;

    if (ef_check(type) < 0)
        return NULL;
    ep = &empfile[type];
    if (CANT_HAPPEN(!(ep->flags & EFF_MEM) || !ep->cache))
        return NULL;
    if (id < 0 || id >= ep->fids)
        return NULL;
    return ep->cache + ep->size * id;
}
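
/*
 * Example (sketch): direct access through ef_ptr() works only for
 * fully cached tables, and updates made this way reach the backing
 * file only at the next ef_flush() or ef_close().  Using struct
 * sctstr as the sector element type is an assumption from elsewhere
 * in the source tree.
 *
 *     struct sctstr *sp = ef_ptr(EF_SECTOR, id);
 *     if (sp)
 *         sp->sct_work = 100;
 */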

/*
 * Read element @id from table @type into buffer @into.
 * FIXME pass buffer size!
 * @into is marked fresh with ef_mark_fresh().
 * Return non-zero on success, zero on failure.
 */
int
ef_read(int type, int id, void *into)
{
    struct empfile *ep;
    void *cachep;

    if (ef_check(type) < 0)
        return 0;
    ep = &empfile[type];
    if (CANT_HAPPEN(!ep->cache))
        return 0;
    if (id < 0 || id >= ep->fids)
        return 0;

    if (ep->flags & EFF_MEM) {
        cachep = ep->cache + id * ep->size;
    } else {
        if (ep->baseid + ep->cids <= id || ep->baseid > id) {
            if (fillcache(ep, id) < 1)
                return 0;
        }
        cachep = ep->cache + (id - ep->baseid) * ep->size;
    }
    memcpy(into, cachep, ep->size);
    ef_mark_fresh(type, into);

    if (ep->postread)
        ep->postread(id, into);
    return 1;
}
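
/*
 * Example (sketch): reading an element into a private copy.  The
 * buffer must have the table's element type; struct sctstr for
 * EF_SECTOR is an assumption from elsewhere in the source tree.
 *
 *     struct sctstr sect;
 *     if (!ef_read(EF_SECTOR, id, &sect))
 *         return 0;
 */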

/*
 * Fill cache of file-backed @ep with elements starting at @id.
 * If any were read, return their number.
 * Else return -1 and leave the cache unchanged.
 */
static int
fillcache(struct empfile *ep, int id)
{
    int ret;

    if (CANT_HAPPEN(!ep->cache))
        return -1;

    ret = do_read(ep, ep->cache, id, MIN(ep->csize, ep->fids - id));
    if (ret >= 0) {
        /* cache changed */
        ep->baseid = id;
        ep->cids = ret;
    }
    return ret;
}

static int
do_read(struct empfile *ep, void *buf, int id, int count)
{
    int n, ret;
    char *p;

    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
        return -1;

    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
        logerror("Error seeking %s to elt %d (%s)",
                 ep->file, id, strerror(errno));
        return -1;
    }

    p = buf;
    n = count * ep->size;
    while (n > 0) {
        ret = read(ep->fd, p, n);
        if (ret < 0) {
            if (errno != EINTR) {
                logerror("Error reading %s elt %d (%s)",
                         ep->file,
                         id + (int)((p - (char *)buf) / ep->size),
                         strerror(errno));
                break;
            }
        } else if (ret == 0) {
            logerror("Unexpected EOF reading %s elt %d",
                     ep->file, id + (int)((p - (char *)buf) / ep->size));
            break;
        } else {
            p += ret;
            n -= ret;
        }
    }

    return (p - (char *)buf) / ep->size;
}

/*
 * Write @count elements starting at @id from @buf to file-backed @ep.
 * Update the timestamp if the table is EFF_TYPED.
 * Don't actually write if table is privately mapped.
 * Return 0 on success, -1 on error (file may be corrupt then).
 */
static int
do_write(struct empfile *ep, void *buf, int id, int count)
{
    int i, n, ret;
    char *p;
    struct ef_typedstr *elt;
    time_t now;

    if (CANT_HAPPEN(ep->fd < 0 || id < 0 || count < 0))
        return -1;

    if (ep->flags & EFF_TYPED) {
        now = ep->flags & EFF_NOTIME ? (time_t)-1 : time(NULL);
        for (i = 0; i < count; i++) {
            /*
             * TODO Oopses here could be due to bad data corruption.
             * Fail instead of attempting to recover?
             */
            elt = (struct ef_typedstr *)((char *)buf + i * ep->size);
            if (CANT_HAPPEN(elt->ef_type != ep->uid))
                elt->ef_type = ep->uid;
            if (CANT_HAPPEN(elt->uid != id + i))
                elt->uid = id + i;
            if (now != (time_t)-1)
                elt->timestamp = now;
        }
    }

    if (ep->flags & EFF_PRIVATE)
        return 0;

    if (lseek(ep->fd, id * ep->size, SEEK_SET) == (off_t)-1) {
        logerror("Error seeking %s to elt %d (%s)",
                 ep->file, id, strerror(errno));
        return -1;
    }

    p = buf;
    n = count * ep->size;
    while (n > 0) {
        ret = write(ep->fd, p, n);
        if (ret < 0) {
            if (errno != EINTR) {
                logerror("Error writing %s elt %d (%s)",
                         ep->file,
                         id + (int)((p - (char *)buf) / ep->size),
                         strerror(errno));
                return -1;
            }
        } else {
            p += ret;
            n -= ret;
        }
    }

    return 0;
}

/*
 * Write element @id into table @type from buffer @from.
 * FIXME pass buffer size!
 * Update timestamp in @from if table is EFF_TYPED.
 * If table is file-backed and not privately mapped, write through
 * cache straight to disk.
 * Cannot write beyond the end of fully cached table (flags & EFF_MEM).
 * Can write at the end of partially cached table.
 * @from must be fresh; see ef_make_stale().
 * Return non-zero on success, zero on failure.
 */
int
ef_write(int type, int id, void *from)
{
    struct empfile *ep;
    char *cachep;

    if (ef_check(type) < 0)
        return 0;
    ep = &empfile[type];
    if (CANT_HAPPEN((ep->flags & (EFF_MEM | EFF_PRIVATE)) == EFF_PRIVATE))
        return 0;
    if (CANT_HAPPEN(id < 0))
        return 0;
    if (CANT_HAPPEN(ep->nent >= 0 && id >= ep->nent))
        return 0;               /* beyond fixed size */
    new_seqno(ep, from);
    if (id >= ep->fids) {
        /* beyond end of file */
        if (CANT_HAPPEN((ep->flags & EFF_MEM) || id > ep->fids))
            return 0;           /* not implemented */
        /* write at end of file extends it */
        ep->fids = id + 1;
        if (ep->onresize)
            ep->onresize(type);
    }
    if (id >= ep->baseid && id < ep->baseid + ep->cids) {
        cachep = ep->cache + (id - ep->baseid) * ep->size;
        if (cachep != from)
            must_be_fresh(ep, from);
    } else
        cachep = NULL;
    if (ep->prewrite)
        ep->prewrite(id, cachep, from);
    if (ep->fd >= 0) {
        if (do_write(ep, from, id, 1) < 0)
            return 0;
    }
    if (cachep && cachep != from)       /* update the cache if necessary */
        memcpy(cachep, from, ep->size);
    return 1;
}
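
/*
 * Example (sketch): the usual read-modify-write cycle.  The copy must
 * still be fresh when it is written back; if the code in between
 * yields the processor, ef_make_stale() will have been called and the
 * write is caught with an oops.  struct sctstr is an assumption, as
 * above.
 *
 *     struct sctstr sect;
 *     if (!ef_read(EF_SECTOR, id, &sect))
 *         return 0;
 *     sect.sct_work = 100;
 *     if (!ef_write(EF_SECTOR, id, &sect))
 *         return 0;
 */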

/*
 * Change element ID.
 * @buf is an element of table @type.
 * @uid is its new element ID.
 * If table is EFF_TYPED, change ID and sequence number stored in @buf.
 * Else do nothing.
 */
void
ef_set_uid(int type, void *buf, int uid)
{
    struct ef_typedstr *elt;
    struct empfile *ep;

    if (ef_check(type) < 0)
        return;
    ep = &empfile[type];
    if (!(ep->flags & EFF_TYPED))
        return;
    elt = buf;
    if (elt->uid == uid)
        return;
    elt->uid = uid;
    elt->seqno = get_seqno(ep, uid);
}

/*
 * Are *@a and *@b equal, except for timestamps and such?
 */
int
ef_typedstr_eq(struct ef_typedstr *a, struct ef_typedstr *b)
{
    return a->ef_type == b->ef_type
        && a->seqno == b->seqno
        && a->uid == b->uid
        && !memcmp((char *)a + sizeof(*a), (char *)b + sizeof(*a),
                   empfile[a->ef_type].size - sizeof(*a));
}

/*
 * Return sequence number of element @id in table @ep.
 * Return zero if table is not EFF_TYPED (it has no sequence number
 * then).
 */
static unsigned
get_seqno(struct empfile *ep, int id)
{
    struct ef_typedstr *elt;

    if (!(ep->flags & EFF_TYPED))
        return 0;
    if (id < 0 || id >= ep->fids)
        return 0;
    if (id >= ep->baseid && id < ep->baseid + ep->cids)
        elt = (void *)(ep->cache + (id - ep->baseid) * ep->size);
    else {
        /* need a buffer, steal last cache slot */
        if (ep->cids == ep->csize)
            ep->cids--;
        elt = (void *)(ep->cache + ep->cids * ep->size);
        if (do_read(ep, elt, id, 1) < 0)
            return 0;           /* deep trouble */
    }
    return elt->seqno;
}

/*
 * Increment sequence number in @buf, which is about to be written to @ep.
 * Do nothing if table is not EFF_TYPED (it has no sequence number
 * then).
 * Else, @buf's sequence number must match the one in @ep's cache.  If
 * it doesn't, we're about to clobber a previous write.
 */
static void
new_seqno(struct empfile *ep, void *buf)
{
    struct ef_typedstr *elt = buf;
    unsigned old_seqno;

    if (!(ep->flags & EFF_TYPED))
        return;
    old_seqno = get_seqno(ep, elt->uid);
    CANT_HAPPEN(old_seqno != elt->seqno);
    elt->seqno = old_seqno + 1;
}

/*
 * Make all copies stale.
 * Only fresh copies may be written back to the cache.
 * To be called by functions that may yield the processor.
 * Writing a copy when there has been a yield since it was read is
 * unsafe, because we could clobber another thread's write then.
 * Robust code must assume that any function that may yield does
 * yield.  Marking copies stale there lets us catch unsafe writes.
 */
void
ef_make_stale(void)
{
    ef_generation++;
}
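
/*
 * Example (sketch) of the staleness protocol: any function that may
 * block, such as a hypothetical sleep_on_input() here, should call
 * ef_make_stale() so that copies read before the yield can no longer
 * be written back silently.
 *
 *     struct sctstr sect;
 *     ef_read(EF_SECTOR, id, &sect);     marked fresh by ef_read()
 *     sleep_on_input();                  calls ef_make_stale()
 *     ef_write(EF_SECTOR, id, &sect);    stale copy, caught with an oops
 */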

/* Mark copy of an element of table @type in @buf fresh. */
void
ef_mark_fresh(int type, void *buf)
{
    struct empfile *ep;

    if (ef_check(type) < 0)
        return;
    ep = &empfile[type];
    if (!(ep->flags & EFF_TYPED))
        return;
    ((struct ef_typedstr *)buf)->generation = ef_generation;
}

static void
must_be_fresh(struct empfile *ep, void *buf)
{
    struct ef_typedstr *elt = buf;

    if (!(ep->flags & EFF_TYPED))
        return;
    CANT_HAPPEN(elt->generation != (ef_generation & 0xfff));
}

/*
 * Extend table @type by @count elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
 */
int
ef_extend(int type, int count)
{
    struct empfile *ep;

    if (ef_check(type) < 0)
        return 0;
    ep = &empfile[type];
    if (ep->nent >= 0) {
        logerror("Can't extend %s, its size is fixed", ep->name);
        return 0;
    }
    if (!do_extend(ep, count))
        return 0;
    if (ep->onresize)
        ep->onresize(type);
    return 1;
}

static int
do_extend(struct empfile *ep, int count)
{
    char *p;
    int need_sentinel, i, id;

    if (CANT_HAPPEN(EF_IS_VIEW(ep->uid)) || count < 0)
        return 0;

    id = ep->fids;
    if (ep->flags & EFF_MEM) {
        need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
        if (id + count + need_sentinel > ep->csize) {
            if (ep->flags & EFF_STATIC) {
                logerror("Can't extend %s beyond %d elements",
                         ep->name, ep->csize - need_sentinel);
                return 0;
            }
            if (!ef_realloc_cache(ep, id + count + need_sentinel)) {
                logerror("Can't extend %s to %d elements (%s)",
                         ep->name, id + count, strerror(errno));
                return 0;
            }
        }
        p = ep->cache + id * ep->size;
        do_blank(ep, p, id, count);
        if (ep->fd >= 0) {
            if (do_write(ep, p, id, count) < 0)
                return 0;
        }
        if (need_sentinel)
            memset(ep->cache + (id + count) * ep->size, 0, ep->size);
        ep->cids = id + count;
    } else {
        /* need a buffer, steal last cache slot */
        if (ep->cids == ep->csize)
            ep->cids--;
        p = ep->cache + ep->cids * ep->size;
        for (i = 0; i < count; i++) {
            do_blank(ep, p, id + i, 1);
            if (do_write(ep, p, id + i, 1) < 0)
                return 0;
        }
    }
    ep->fids = id + count;
    return 1;
}

/*
 * Initialize element @id for table @type in @buf.
 * FIXME pass buffer size!
 * @buf is marked fresh with ef_mark_fresh().
 */
void
ef_blank(int type, int id, void *buf)
{
    struct empfile *ep;
    struct ef_typedstr *elt;

    if (ef_check(type) < 0)
        return;
    ep = &empfile[type];
    do_blank(ep, buf, id, 1);
    if (ep->flags & EFF_TYPED) {
        elt = buf;
        elt->seqno = get_seqno(ep, elt->uid);
    }
    ef_mark_fresh(type, buf);
}
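
/*
 * Example (sketch): initializing and writing a brand-new element, as
 * code that builds new units might do.  EF_SHIP and struct shpstr are
 * assumptions from elsewhere in the source tree; the call sequence is
 * illustrative, not copied from such code.
 *
 *     struct shpstr ship;
 *     ef_blank(EF_SHIP, id, &ship);
 *     ...fill in the new ship...
 *     if (!ef_write(EF_SHIP, id, &ship))
 *         return 0;
 */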

/*
 * Initialize @count elements of @ep in @buf, starting with element @id.
 */
static void
do_blank(struct empfile *ep, void *buf, int id, int count)
{
    int i;
    struct ef_typedstr *elt;

    memset(buf, 0, count * ep->size);
    for (i = 0; i < count; i++) {
        elt = (struct ef_typedstr *)((char *)buf + i * ep->size);
        if (ep->flags & EFF_TYPED) {
            elt->ef_type = ep->uid;
            elt->uid = id + i;
        }
        if (ep->oninit)
            ep->oninit(elt);
    }
}

/*
 * Truncate table @type to @count elements.
 * Any pointers obtained from ef_ptr() become invalid.
 * Return non-zero on success, zero on failure.
 */
int
ef_truncate(int type, int count)
{
    struct empfile *ep;
    int need_sentinel;

    if (ef_check(type) < 0 || CANT_HAPPEN(EF_IS_VIEW(type)))
        return 0;
    ep = &empfile[type];
    if (ep->nent >= 0) {
        logerror("Can't truncate %s, its size is fixed", ep->name);
        return 0;
    }
    if (CANT_HAPPEN(count < 0 || count > ep->fids))
        return 0;

    if (ep->fd >= 0 && !(ep->flags & EFF_PRIVATE)) {
        if (ftruncate(ep->fd, count * ep->size) < 0) {
            logerror("Can't truncate %s to %d elements (%s)",
                     ep->file, count, strerror(errno));
            return 0;
        }
    }
    ep->fids = count;

    if (ep->flags & EFF_MEM) {
        need_sentinel = (ep->flags & EFF_SENTINEL) != 0;
        if (!(ep->flags & EFF_STATIC)) {
            if (!ef_realloc_cache(ep, count + need_sentinel)) {
                logerror("Can't shrink %s cache after truncate (%s)",
                         ep->name, strerror(errno));
                /* continue with unshrunk cache */
            }
        }
        if (need_sentinel)
            memset(ep->cache + count * ep->size, 0, ep->size);
        ep->cids = count;
    } else {
        if (ep->baseid >= count)
            ep->cids = 0;
        else if (ep->cids > count - ep->baseid)
            ep->cids = count - ep->baseid;
    }

    if (ep->onresize)
        ep->onresize(type);
    return 1;
}

struct castr *
ef_cadef(int type)
{
    if (ef_check(type) < 0)
        return NULL;
    return empfile[type].cadef;
}

int
ef_nelem(int type)
{
    if (ef_check(type) < 0)
        return 0;
    return empfile[type].fids;
}

int
ef_flags(int type)
{
    if (ef_check(type) < 0)
        return 0;
    return empfile[type].flags;
}

time_t
ef_mtime(int type)
{
    if (ef_check(type) < 0)
        return 0;
    if (empfile[type].fd <= 0)
        return 0;
    return fdate(empfile[type].fd);
}

/*
 * Search for a table matching @name, return its table type.
 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * several.
 */
int
ef_byname(char *name)
{
    return stmtch(name, empfile, offsetof(struct empfile, name),
                  sizeof(empfile[0]));
}

/*
 * Search @choices[] for a table type matching @name, return it.
 * Return M_NOTFOUND if there are no matches, M_NOTUNIQUE if there are
 * several.
 * @choices[] must be terminated with a negative value.
 */
int
ef_byname_from(char *name, int choices[])
{
    int res;
    int *p;

    res = M_NOTFOUND;
    for (p = choices; *p >= 0; p++) {
        if (ef_check(*p) < 0)
            continue;
        switch (mineq(name, empfile[*p].name)) {
        case ME_MISMATCH:
            break;
        case ME_PARTIAL:
            if (res >= 0)
                return M_NOTUNIQUE;
            res = *p;
            break;
        case ME_EXACT:
            return *p;
        }
    }
    return res;
}
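
/*
 * Example (sketch): restricting a name lookup to a few tables.  The
 * choices array must end with a negative value; EF_BAD is assumed to
 * be negative, as its use as an "unused" marker above suggests, and
 * EF_SHIP, EF_PLANE, EF_LAND are assumed table types.
 *
 *     static int unit_types[] = { EF_SHIP, EF_PLANE, EF_LAND, EF_BAD };
 *     int type = ef_byname_from(name, unit_types);
 *     if (type < 0)
 *         return 0;
 */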

/*
 * Return name of table @type.  Always a single, short word.
 */
char *
ef_nameof(int type)
{
    if (ef_check(type) < 0)
        return "bad";
    return empfile[type].name;
}

/*
 * Return "pretty" name of table @type.
 */
char *
ef_nameof_pretty(int type)
{
    if (ef_check(type) < 0)
        return "bad";
    return empfile[type].pretty_name;
}

static int
ef_check(int type)
{
    if (CANT_HAPPEN((unsigned)type >= EF_MAX))
        return -1;
    return 0;
}

/*
 * Ensure table @type contains element @id.
 * If necessary, extend it in steps of @count elements.
 * Return non-zero on success, zero on failure.
 */
int
ef_ensure_space(int type, int id, int count)
{
    if (ef_check(type) < 0 || CANT_HAPPEN(id < 0))
        return 0;

    while (id >= empfile[type].fids) {
        if (!ef_extend(type, count))
            return 0;
    }
    return 1;
}
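
/*
 * Example (sketch): growing a table to hold a new element, guarding
 * against impossible IDs first.  The step size of 50 is arbitrary.
 *
 *     if (id > ef_id_limit(EF_SHIP))
 *         return 0;
 *     if (!ef_ensure_space(EF_SHIP, id, 50))
 *         return 0;
 */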

/*
 * Return maximum ID acceptable for table @type.
 * Assuming infinite memory and disk space.
 */
int
ef_id_limit(int type)
{
    struct empfile *ep;

    if (ef_check(type) < 0)
        return -1;
    ep = &empfile[type];
    if (ep->nent >= 0)
        return ep->nent - 1;
    if (ep->flags & EFF_MEM) {
        if (ep->flags & EFF_STATIC)
            return ep->csize - 1 - ((ep->flags & EFF_SENTINEL) != 0);
    }
    return INT_MAX;
}