1 /*===========================================================================
2  *
3  *                            PUBLIC DOMAIN NOTICE
4  *               National Center for Biotechnology Information
5  *
6  *  This software/database is a "United States Government Work" under the
7  *  terms of the United States Copyright Act.  It was written as part of
8  *  the author's official duties as a United States Government employee and
9  *  thus cannot be copyrighted.  This software/database is freely available
10  *  to the public for use. The National Library of Medicine and the U.S.
11  *  Government have not placed any restriction on its use or reproduction.
12  *
13  *  Although all reasonable efforts have been taken to ensure the accuracy
14  *  and reliability of the software and data, the NLM and the U.S.
15  *  Government do not and cannot warrant the performance or results that
16  *  may be obtained by using this software or data. The NLM and the U.S.
17  *  Government disclaim all warranties, express or implied, including
18  *  warranties of performance, merchantability or fitness for any particular
19  *  purpose.
20  *
21  *  Please cite the author in any work or product based on this material.
22  *
23  * ===========================================================================
24  *
25  */
26 
27 #include <klib/rc.h>
28 #include <klib/log.h>
29 #include <vdb/database.h>
30 #include <vdb/table.h>
31 #include <vdb/cursor.h>
32 #include <kproc/lock.h>
33 #include <kproc/thread.h>
34 #include <klib/refcount.h>
35 #include "refseq.h"
36 
37 #include <stdlib.h>
38 #include <assert.h>
39 #include <limits.h>
40 
41 typedef RefSeqList List;
42 typedef RefSeqListEntry Entry;
43 typedef RefSeq Object;
44 
45 #include "list.c"
46 #include "util.h"
47 
/** State shared between foreground readers and the background loader
 *  thread; reference counted because both sides hold it. */
struct RefSeqAsyncLoadInfo {
    KRefcount refcount;
    KThread *th;                /**< the background loader thread */
    KLock *mutex;               /**< mostly guards the cursor against concurrent use */
    VCursor const *curs;        /**< can be used by either thread after acquiring the mutex */
    RowRange rr;                /**< of the table */
    CursorAddResult car[2];     /**< column name and id; [0] = SEQ_LEN, [1] = READ */
    int64_t volatile loaded;    /**< rows less than this have been loaded already */
    unsigned volatile count;    /**< number of rows left to load, will cause bg thread to exit if set = 0 */
    unsigned max_seq_len;       /**< max length of any READ in the table */
    unsigned volatile hits;     /**< statistics to give some idea of ... */
    unsigned volatile miss;     /**< ... how effective the bg thread was */
};
61 
/** Drop one reference; on the final drop, tear down all owned resources. */
static void RefSeqAsyncLoadInfo_Release(RefSeqAsyncLoadInfo *const self)
{
    int const outcome = KRefcountDrop(&self->refcount, "RefSeqAsyncLoadInfo");

    if (outcome == krefOkay)
        return;             /* other holders remain */
    if (outcome != krefWhack) {
        assert(!"valid refcount");
        abort();
    }
    /* last reference gone: release in reverse order of acquisition */
    VCursorRelease(self->curs);
    KLockRelease(self->mutex);
    KThreadRelease(self->th);
    free(self);
}
78 
/** Stop the background loader (if any) and release this side's reference
 *  to the shared async-load state.
 *  Returns the background thread's exit code (0 when self is NULL). */
static rc_t RefSeqAsyncLoadInfoFree(RefSeqAsyncLoadInfo *const self)
{
    rc_t rc = 0;
    if (self) {
        /* Synchronize with background thread in preparation for clean up */
        KRefcountAdd(&self->refcount, "RefSeqAsyncLoadInfo"); /* keep alive across the wait; dropped below */
        LOGMSG(klogDebug, "Foreground thread ending background thread");
        KLockAcquire(self->mutex);
        self->count = 0; /* signals the loader loop to exit */
        KLockUnlock(self->mutex);
        KThreadWait(self->th, &rc); /* rc becomes the loader's exit code */
        LOGERR(klogDebug, rc, "Background thread ended");
        RefSeqAsyncLoadInfo_Release(self);
        if (rc)
            LOGERR(klogErr, rc, "asynchronous loader thread failed");
    }
    return rc;
}
97 
// Expand one packed 2na byte (4 bases, high bits first) into four
// unpacked 4na codes (one-hot: A=1, C=2, G=4, T=8).
static void unpack_2na(uint8_t const *bases, uint8_t dst[4], unsigned const position)
{
    unsigned const packedByte = bases[position / 4];
    unsigned k;

    for (k = 0; k < 4; ++k) {
        unsigned const b2na = (packedByte >> (6 - 2 * k)) & 0x03;
        dst[k] = (uint8_t)(1u << b2na); /* 2na code -> 4na one-hot bit */
    }
}
112 
/* Unpack the quad containing `pos` and copy bases from `pos` onward into
 * dst[offset...], stopping at the quad boundary or at `limit`.
 * Returns the number of bases copied. */
static unsigned partial_unpack_2na(uint8_t const *bases, uint8_t *const dst, unsigned const offset, unsigned const limit, unsigned const pos)
{
    unsigned const within = pos % 4; /* index of pos inside its quad */
    uint8_t quad[4];
    unsigned copied = 0;

    unpack_2na(bases, quad, pos);
    while (offset + copied < limit && within + copied < 4) {
        dst[offset + copied] = quad[within + copied];
        ++copied;
    }
    return copied;
}
125 
/* context for the fillNs callback below */
struct FillNsData {
    uint8_t *dst;   /* output buffer covering [full.start, full.end) */
    Range full;     /* region of the reference the buffer represents */
};
/* RangeList visitor: paint 'N' (4na value 15) over the part of dst that
 * intersects a recorded run of Ns */
static void fillNs(void *vp, Range const *intersectingRange)
{
    struct FillNsData const *data = vp;
    unsigned const offset = intersectingRange->start - data->full.start;
    memset(data->dst + offset, 15, intersectingRange->end - intersectingRange->start);
}
136 
/* Decode `len` unpacked 4na bases starting at position `start` from the
 * packed 2na store, then overlay 'N's from the Ns list (2na cannot
 * represent N; those positions decode as 'A' and must be patched). */
static void getBases_2na(uint8_t *const dst, unsigned const start, unsigned const len, uint8_t const *bases, RangeList const *Ns)
{
    unsigned pos = start;
    unsigned i = 0;

    /* leading partial quad, when start is not 4-aligned */
    if (pos % 4 != 0) {
        unsigned const n = partial_unpack_2na(bases, dst, i, len, pos);
        i += n; pos += n;
    }
    /* whole quads */
    while ((i + 4) <= len) {
        unpack_2na(bases, dst + i, pos);
        i += 4;
        pos += 4;
    }
    /* trailing partial quad */
    if (i < len) {
        unsigned const n = partial_unpack_2na(bases, dst, i, len, pos);
        i += n; pos += n;
    }
    assert(i == len);
    assert(start + len == pos);

    // 2na will have 'A's in place of 'N's, put the 'N's back
    {
        struct FillNsData data;
        data.full.start = start;
        data.full.end = start + len;
        data.dst = dst;
        withIntersectRangeList(Ns, &data.full, fillNs, &data);
    }
}
167 
/* Decode `len` bases from the 4na store (two bases per byte, high nibble
 * first); positions wrap modulo the sequence length (circular reference).
 * Returns the number of bases written (always len). */
static unsigned getBases_4na(Object const *self, uint8_t *const dst, unsigned const start, unsigned const len)
{
    unsigned const refLen = self->length;
    uint8_t const *const packed = self->bases;
    unsigned out = 0;               /* bases written so far */
    unsigned src = start % refLen;  /* current (wrapped) source position */

    /* leading unaligned base lives in the low nibble of its byte */
    if ((src & 1) != 0 && out < len) {
        dst[out++] = packed[src >> 1] & 0x0F;
        src = (src + 1) % refLen;
    }
    /* aligned pairs: one packed byte yields two bases */
    while (out + 2 <= len) {
        int const two = packed[src >> 1];
        dst[out++] = (two >> 4) & 0x0F;
        dst[out++] = two & 0x0F;
        src = (src + 2) % refLen;
    }
    /* trailing single base, if any */
    if (out < len) {
        int const two = packed[src >> 1];
        dst[out++] = ((src & 1) == 0) ? ((two >> 4) & 0x0F) : (two & 0x0F);
    }
    assert(out == len);
    return out;
}
198 
/* reader for circular references: positions wrap modulo sequence length */
static unsigned readCircular(Object const *self, uint8_t *const dst, unsigned const start, unsigned const len)
{
    return getBases_4na(self, dst, start, len);
}
203 
/* reader for fully loaded, non-circular references; clamps the request to
 * the end of the sequence and returns the number of bases written */
static unsigned readNormal(Object const *self, uint8_t *const dst, unsigned const start, unsigned const len)
{
    unsigned const refLen = self->length;
    unsigned actual;

    if (start >= refLen)
        actual = 0;                 /* entirely past the end */
    else if (start + len < refLen)
        actual = len;               /* fully inside */
    else
        actual = refLen - start;    /* clipped at the end */

    if (actual > 0)
        getBases_2na(dst, start, actual, self->bases, &self->Ns);
    return actual;
}
212 
/* placeholder reader installed when background loading failed */
static unsigned readZero(Object const *self, uint8_t *const dst, unsigned const start, unsigned const len)
{
    /* this should not be reachable; an rc != 0 should have propagated up the
     * call stack and ended the program before we could get here */
    assert(!"reachable");
    abort();
}
220 
/* true when `row` has already been packed into the 2na store by the
 * loader; a stale read of `loaded` only causes an unnecessary cursor
 * fetch, never a wrong answer */
static bool rowIsLoaded(RefSeqAsyncLoadInfo const *async, int64_t row)
{
    /* the lock is NOT held during this function */
    return row < async->loaded;
}
226 
/* map a base position to the table row containing it; each row holds
 * max_seq_len bases */
static int64_t positionToRow(RefSeqAsyncLoadInfo const *async, unsigned const position)
{
    assert(async != NULL);
    return async->rr.first + (position / async->max_seq_len);
}
232 
/* map a table row to the base position of its first base (inverse of
 * positionToRow for row-aligned positions) */
static unsigned rowToPosition(RefSeqAsyncLoadInfo const *async, int64_t const row)
{
    assert(async != NULL);
    return (unsigned)((row - async->rr.first) * async->max_seq_len);
}
238 
239 /* this is called on the main thread */
readNormalIncomplete(Object const * self,uint8_t * const dst,unsigned const start,unsigned const len)240 static unsigned readNormalIncomplete(Object const *self, uint8_t *const dst, unsigned const start, unsigned const len)
241 {
242     unsigned const length = self->length;
243     unsigned const actlen = (start + len) < length ? len : start < length ? length - start : 0;
244     if (actlen > 0) {
245         RefSeqAsyncLoadInfo *async = self->async;
246         int64_t const first = positionToRow(async, start);
247         int64_t const last = positionToRow(async, start + actlen - 1);
248         size_t const max_bases = ((last + 1) - first) * async->max_seq_len;
249         uint8_t *const buffer = (max_bases <= len && start == rowToPosition(async, first)) ? dst : malloc(max_bases);
250         uint8_t *buf = buffer;
251         int64_t row;
252         rc_t rc = 0;
253 
254         if (buffer == NULL) {
255             LOGERR(klogFatal, RC(rcXF, rcFunction, rcReading, rcMemory, rcExhausted), "Error reading reference");
256             return 0;
257         }
258         for (row = first; row <= last && rc == 0; ++row) {
259             ++async->hits;
260             if (rowIsLoaded(async, row)) {
261                 getBases_2na(buf, rowToPosition(async, row), async->max_seq_len, self->bases, &self->Ns);
262             }
263             else {
264                 ReadStringResult read;
265 
266                 memset(buf, 15, async->max_seq_len);
267                 KLockAcquire(async->mutex);
268                 ++async->miss;
269                 if (readString(&read, &async->car[1], row, async->curs, &rc) != NULL) {
270                     memmove(buf, read.value, read.length);
271                 }
272                 KLockUnlock(async->mutex);
273                 {
274                     unsigned i;
275                     for (i = 0; i < read.length; ++i) {
276                         switch (buf[i]) {
277                         case 1:
278                         case 2:
279                         case 4:
280                         case 8:
281                             break;
282                         default:
283                             buf[i] = 15;
284                         }
285                     }
286                 }
287             }
288             buf += async->max_seq_len;
289         }
290         if (buffer != dst) {
291             unsigned const offset = start - rowToPosition(async, first);
292             memmove(dst, buffer + offset, actlen);
293             free(buffer);
294         }
295         if (rc) {
296             LOGERR(klogErr, rc, "Error reading reference");
297             return 0;
298         }
299     }
300     return actlen;
301 }
302 
/* this is called on the background thread */
/** Sequentially read every row of the reference table, pack the bases into
 *  self->bases (2na, 4 per byte) and record N positions in self->Ns.
 *  Progress is published through async->loaded; the loop exits early when
 *  the foreground zeroes async->count (see RefSeqAsyncLoadInfoFree).
 *  On exit the final reader is installed via the rwl protocol shared with
 *  RefSeq_getBases. */
static rc_t runLoadThread(Object *self)
{
    RefSeqAsyncLoadInfo *const async = self->async;
    uint8_t *const buffer = malloc(async->max_seq_len); /* one row of 4na */
    uint64_t const count = async->rr.count;
    int64_t const first = async->rr.first;
    VCursor const *const curs = async->curs;
    CursorAddResult *const seqLenInfo = &async->car[0];
    CursorAddResult *const readInfo = &async->car[1];
    ReadStringResult read;
    int accum = 0;          /* partially packed output byte (2 bits/base) */
    int n = 0;              /* number of bases currently in accum */
    uint64_t i;
    bool done = false;
    rc_t rc = 0;
    unsigned j = 0;         /* next output byte index in self->bases */
    unsigned position = 0;  /* current base position in the reference */

    if (buffer == NULL)
        rc = RC(rcXF, rcFunction, rcReading, rcMemory, rcExhausted);

    LOGMSG(klogDebug, "Starting background loading of reference");
    for (i = 0; i < count && !done && rc == 0; ++i) {
        int64_t const row = i + first;
        uint32_t seqLen = 0;

        KLockAcquire(async->mutex);
        {
            done = async->count == 0;   /* foreground requested shutdown */
            async->loaded = row - 1;    /* rows before this one are complete */
            seqLen = readU32(seqLenInfo, row, curs, &rc);
            /* NOTE(review): if seqLen == 0 while rc stays 0, `read` is left
             * unset yet read.length is examined below — confirm readU32 /
             * readString guarantees, or initialize `read` defensively */
            if (seqLen == 0 || NULL == readString(&read, readInfo, row, curs, &rc) || read.length > async->max_seq_len)
                ;
            else
                memmove(buffer, read.value, read.length);
            --async->count;
        }
        KLockUnlock(async->mutex);
        if (!done && rc == 0 && read.length <= async->max_seq_len && position + seqLen <= self->length) {
            uint32_t ri; ///< index within current row

            /* pack the bases actually present in the READ column */
            for (ri = 0; ri < read.length; ++ri) {
                int base = 0;
                int isN = 1;

                switch (buffer[ri]) {
                case 1: base = 0; isN = 0; break; /* A */
                case 2: base = 1; isN = 0; break; /* C */
                case 4: base = 2; isN = 0; break; /* G */
                case 8: base = 3; isN = 0; break; /* T */
                }
                accum = (accum << 2) | base;
                ++n;
                if (n == 4) { /* a full byte: 4 bases packed */
                    self->bases[j++] = accum;
                    accum = 0;
                    n = 0;
                }
                if (isN) { /* 2na stores N as A; remember where the Ns were */
                    if (NULL == extendRangeList(&self->Ns, position)) {
                        rc = RC(rcXF, rcFunction, rcReading, rcMemory, rcExhausted);
                        break;
                    }
                }
                ++position;
            }
            /* rows shorter than SEQ_LEN are padded out with Ns */
            for ( ; ri < seqLen; ++ri) {
                accum = accum << 2;
                ++n;
                if (n == 4) {
                    self->bases[j++] = accum;
                    accum = 0;
                    n = 0;
                }
                if (NULL == extendRangeList(&self->Ns, position)) {
                    rc = RC(rcXF, rcFunction, rcReading, rcMemory, rcExhausted);
                    break;
                }
                ++position;
            }
        }
        else if (!done && rc == 0)
            rc = RC(rcXF, rcFunction, rcReading, rcData, rcInvalid);
    }
    /* flush any partial final byte, left-justified */
    if (n != 0) {
        while (n < 4) {
            accum <<= 2;
            ++n;
        }
        self->bases[j++] = accum;
    }
    free(buffer);
    LOGMSG(klogDebug, "Done background loading of reference");
    if (rc == 0 && i == count) {
        KLockAcquire(async->mutex);
        async->loaded = i + first; /* last row was loaded */
        async->count = 0;
        KLockUnlock(async->mutex);
    }

    /* publish the final state: raise the low bit of rwl, wait for in-flight
     * readers (even increments) to drain, swap the reader, drop the bit */
    assert((atomic_read(&self->rwl) & 1) == 0); /* there is only one writer */
    atomic_inc(&self->rwl); /* tell readers we want to update the state */
    while (atomic_read(&self->rwl) != 1)
        ; /* spin until all readers have left RefSeq_getBases */
    /* readers are all waiting in the spin loop in RefSeq_getBases */
    self->reader = rc == 0 ? readNormal : readZero;
    self->async = NULL;
    atomic_dec(&self->rwl); /* state is updated; readers retry and see it */
    if (rc == 0 && i == count) {
        double const pct = 100.0 * (async->hits - async->miss) / async->hits;

        PLOGMSG(klogDebug, (klogDebug, "Done with background loading of reference; preload was $(pct)%", "pct=%5.1f", (float)pct));
    }
    RefSeqAsyncLoadInfo_Release(async);
    LOGERR(klogDebug, rc, "Background thread exiting");

    return rc;
}
422 
/** The VDB schema type name this reader handles. */
char const *RefSeq_Scheme(void) {
    static char const scheme[] = "NCBI:refseq:tbl:reference";
    return scheme;
}
426 
/** Public read entry point.
 *  Fast path: no background loader -> call the installed reader directly.
 *  Otherwise participate in a small reader/writer protocol on self->rwl:
 *  readers add 2 (keeping the value even), while the loader sets the low
 *  bit to announce it is swapping in the final state (see runLoadThread). */
unsigned RefSeq_getBases(Object const *const self, uint8_t *const dst, unsigned const start, unsigned const len)
{
    atomic_t *const rwl = &((Object *)self)->rwl;

    if (self->async == NULL) {
        /* this is the fast path and the most common for normal use */
        /* there is no background thread running */
        return self->reader(self, dst, start, len);
    }
    /* there is a background thread running */
    if ((atomic_read_and_add_even(rwl, 2) & 1) == 0) {
        /* but it is not trying to update the state */
        unsigned const actlen = self->reader(self, dst, start, len);
        atomic_add(rwl, -2); /* this reader is done */
        return actlen;
    }
    /* very unlikely, but likelihood increases with the number of readers */
    /* there is a background thread trying to update the state */
    while ((atomic_read(rwl) & 1) != 0)
        ; /* spin until the loader finishes publishing the final state */
    /* the state has been updated; use the new state */
    return RefSeq_getBases(self, dst, start, len);
}
450 
loadCircular_1(uint8_t * result,VCursor const * const curs,RowRange const * const rowRange,CursorAddResult const * const seqLenInfo,CursorAddResult const * const readInfo)451 static rc_t loadCircular_1(  uint8_t *result
452                            , VCursor const *const curs
453                            , RowRange const *const rowRange
454                            , CursorAddResult const *const seqLenInfo
455                            , CursorAddResult const *const readInfo)
456 {
457     int accum = 0;
458     int n = 0;
459     unsigned j = 0; ///< current index in bases
460     uint64_t i;
461     rc_t rc = 0;
462 
463     for (i = 0; i < rowRange->count; ++i) {
464         int64_t const row = rowRange->first + i;
465         uint32_t const seqLen = readU32(seqLenInfo, row, curs, &rc);
466         uint32_t ri; ///< index within current row
467         ReadStringResult read;
468 
469         if (seqLen == 0 || NULL == readString(&read, readInfo, row, curs, &rc))
470             return rc;
471         for (ri = 0; ri < seqLen; ++ri) {
472             int base = 15;
473             if (ri < read.length)
474                 base = read.value[ri];
475             assert(base >= 0 && base <= 15);
476 
477             accum = (accum << 4) | base;
478             ++n;
479             if (n == 2) {
480                 result[j++] = accum;
481                 accum = 0;
482                 n = 0;
483             }
484         }
485     }
486     if (n != 0) {
487         accum = accum << 4;
488         result[j++] = accum;
489     }
490     return 0;
491 }
492 
loadCircular(Object * result,VCursor const * const curs,RowRange const * const rowRange,CursorAddResult const * const info)493 static rc_t loadCircular(  Object *result
494                          , VCursor const *const curs
495                          , RowRange const *const rowRange
496                          , CursorAddResult const *const info
497                          )
498 {
499     rc_t rc = 0;
500     uint64_t const baseCount = readU64(&info[0], rowRange->first, curs, &rc);
501     assert(baseCount < UINT_MAX);
502     if (rc == 0) {
503         unsigned const allocated = (unsigned)((baseCount + 1) / 2);
504         uint8_t *bases = malloc(allocated);
505 
506         if (bases == NULL)
507             return RC(rcXF, rcFunction, rcConstructing, rcMemory, rcExhausted);
508 
509         rc = loadCircular_1(bases, curs, rowRange, &info[1], &info[2]);
510         if (rc == 0) {
511             result->bases = bases;
512             result->length = (unsigned)baseCount;
513             result->reader = readCircular;
514         }
515         else {
516             free(bases);
517         }
518     }
519     return rc;
520 }
521 
/** Build the shared async-load state: the mutex, a new reference on the
 *  cursor, the row range, and the two columns (SEQ_LEN, READ) the loader
 *  reads. Returns NULL with *prc set on failure.
 *  Note: aborts the process on out-of-memory. */
static RefSeqAsyncLoadInfo *RefSeqAsyncLoadInfoMake(  VCursor const *curs
                                                    , RowRange const *rr
                                                    , CursorAddResult const *car
                                                    , rc_t *prc)
{
    RefSeqAsyncLoadInfo *result = calloc(1, sizeof(*result));
    if (result == NULL) {
        LOGERR(klogFatal, RC(rcXF, rcFunction, rcConstructing, rcMemory, rcExhausted), "OUT OF MEMORY!!!");
        abort();
    }
    *prc = KLockMake(&result->mutex);
    if (*prc == 0) {
        /* car[2] is MAX_SEQ_LEN; the 2na packing relies on rows being a
         * multiple of 4 bases */
        result->max_seq_len = readU32(&car[2], rr->first, curs, prc);
        assert(result->max_seq_len % 4 == 0);
        if (*prc == 0) {
            KRefcountInit(&result->refcount, 1, "RefSeqAsyncLoadInfo", "init", "");
            result->curs = curs;
            VCursorAddRef(curs); /* the async state owns a cursor reference */
            result->rr = *rr;
            result->count = (unsigned)rr->count;
            result->car[0] = car[0]; /* SEQ_LEN */
            result->car[1] = car[1]; /* READ */
            return result;
        }
        KLockRelease(result->mutex);
    }
    free(result);
    return NULL;
}
551 
/* KThread entry-point adapter: unwraps the RefSeq object and runs the loader */
static rc_t run_load_thread(const KThread *self, void *data)
{
    return runLoadThread(data);
}
556 
/* Begin loading a non-circular reference: allocate the packed 2na store,
 * then start a background thread to fill it while reads are serviced via
 * readNormalIncomplete. */
static rc_t load(  Object *result
                 , VCursor const *const curs
                 , RowRange const *const rowRange
                 , CursorAddResult const *const info
                 )
{
    rc_t rc = 0;
    uint64_t const baseCount = readU64(&info[0], rowRange->first, curs, &rc);
    assert(baseCount < UINT_MAX);
    if (rc == 0) {
        unsigned const allocated = (unsigned)((baseCount + 3) / 4); /* 4 bases per byte */
        uint8_t *bases = malloc(allocated);

        if (bases == NULL)
            return RC(rcXF, rcFunction, rcConstructing, rcMemory, rcExhausted);

        result->bases = bases;
        result->length = (unsigned)baseCount;
        result->async = RefSeqAsyncLoadInfoMake(curs, rowRange, info + 1, &rc);
        if (rc == 0) {
            rc = KThreadMake(&result->async->th, run_load_thread, result);
            if (rc == 0) {
                result->reader = readNormalIncomplete;
                return 0;
            }
        }
        /* NOTE(review): RefSeqFree also frees `result` itself; callers must
         * not touch or re-free the object after this failure path — confirm
         * against RefSeqInsert/undo_insert */
        RefSeqFree(result);
    }
    return rc;
}
587 
init(Object * result,VTable const * const tbl)588 static rc_t init(Object *result, VTable const *const tbl)
589 {
590     CursorAddResult cols[5];
591     RowRange rowRange;
592     rc_t rc = 0;
593     VCursor const *const curs = createCursor(tbl, &rc);
594 
595     memset(result, 0, sizeof(*result));
596     if (curs == NULL) return rc;
597 
598     if (!addColumn(&cols[0], "CIRCULAR", curs, &rc)) return rc;
599     if (!addColumn(&cols[1], "TOTAL_SEQ_LEN", curs, &rc)) return rc;
600     if (!addColumn(&cols[2], "SEQ_LEN", curs, &rc)) return rc;
601     if (!addColumn(&cols[3], "(INSDC:4na:bin)READ", curs, &rc)) return rc;
602     if (!addColumn(&cols[4], "(U32)MAX_SEQ_LEN", curs, &rc)) return rc;
603 
604     rc = VCursorOpen(curs);
605     assert(rc == 0);
606     if (rc == 0) {
607         if (getRowRange(&rowRange, curs, &rc) != NULL) {
608             bool const circular = readBool(&cols[0], rowRange.first, curs, &rc);
609 
610             assert(rowRange.count < UINT_MAX);
611             rc = (circular ? loadCircular : load)(result, curs, &rowRange, &cols[1]);
612         }
613     }
614     VCursorRelease(curs);
615     return rc;
616 }
617 
/** Release everything a RefSeq owns (stopping the loader thread first via
 *  RefSeqAsyncLoadInfoFree), then free the object itself.
 *  Safe to call with NULL, like free(). */
void RefSeqFree(Object *self)
{
    if (self == NULL)
        return; /* robustness: previously crashed on self->async deref */
    RefSeqAsyncLoadInfoFree(self->async);
    RangeListFree(&self->Ns);
    free(self->bases);
    free(self);
}
625 
RefSeqFind(List * list,unsigned const qlen,char const * qry)626 Entry *RefSeqFind(List *list, unsigned const qlen, char const *qry)
627 {
628     unsigned at = 0;
629     return find(list, &at, qlen, qry) ? &list->entry[at] : NULL;
630 }
631 
RefSeqInsert(List * list,unsigned const qlen,char const * qry,VTable const * tbl,rc_t * prc)632 Entry *RefSeqInsert(List *list, unsigned const qlen, char const *qry, VTable const *tbl, rc_t *prc)
633 {
634     Entry *result = NULL;
635     unsigned at = 0;
636     if (find(list, &at, qlen, qry)) {
637         *prc = 0;
638         return &list->entry[at];
639     }
640 
641     result = insert(list, at, qlen, qry);
642     if (result == NULL) {
643         LOGERR(klogFatal, (*prc = RC(rcXF, rcFunction, rcConstructing, rcMemory, rcExhausted)), "");
644         return NULL;
645     }
646 
647     result->object = calloc(1, sizeof(*result->object));
648     if (result == NULL) {
649         LOGERR(klogFatal, (*prc = RC(rcXF, rcFunction, rcConstructing, rcMemory, rcExhausted)), "");
650         return NULL;
651     }
652 
653     *prc = init(result->object, tbl);
654     if (*prc == 0)
655         return result;
656 
657     undo_insert(list, at);
658     return NULL;
659 }
660 
/** Free every entry's object and name, then the entry array itself. */
void RefSeqListFree(List *list)
{
    unsigned idx = 0;

    while (idx < list->entries) {
        RefSeqFree(list->entry[idx].object);
        free(list->entry[idx].name);
        ++idx;
    }
    free(list->entry);
}
670 
RefSeqListInit(List * list)671 rc_t RefSeqListInit(List *list)
672 {
673     rc_t rc = 0;
674     return rc;
675 }
676