/***************************************************************************
 *                  Disk Based Hash library DBH Header file.
 * Copyright (C) 2002-2016 Edscott Wilson Garcia
 * EMail: edscott@imp.mx
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation.
 *
 * NOTICE: MODIFICATIONS TO FILE "dbh.h" WILL BE LOST. MODIFY FILE "dbh.h.in" INSTEAD.
 ***************************************************************************/
#ifndef DBH_H
#define DBH_H

#include <sys/types.h>
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif
#ifndef HAVE_WINDOWS_H
#include <semaphore.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#undef DBH_VERSION
/**
 * DBH_VERSION:
 *
 * Disk Based Hashtables library version.
 *
 **/
#define DBH_VERSION "@DBH_VERSION@"

#undef DBH_FILE_VERSION
/**
 * DBH_FILE_VERSION:
 *
 * Disk Based Hashtables library file version compatibility.
 *
 **/
#define DBH_FILE_VERSION "@DBH_FILE_VERSION@"

#undef FILE_POINTER
/**
 * FILE_POINTER:
 *
 * Architecture independent 64 bit integer type.
 *
 **/
#define FILE_POINTER \
 @FILE_POINTER@

/**
 * SECTION:dbh
 * @short_description: Library to create and manage hash tables residing on disk.
 *     Associations are made between keys and values so that for a given key the
 *     value can be found and loaded into memory quickly. Being disk based allows
 *     for large and persistent hashes. 64 bit support allows for hashtables with
 *     sizes over 4 Gigabytes on 32 bit systems. Quantified key generation allows
 *     for minimum access time on balanced multidimensional trees.
 * @title: Disk Based Hashtables (DBH) 64 bit
 * @section_id: DBH
 * @see_also: #GHashTable
 * @stability: Stable
 * @include: dbh.h
 * @Image: calc.png
 *
 * A #DBHashTable provides associations between keys and values, optimized so that
 * given a key, the associated value can be found very quickly.
 *
 * Note that only one hash record is loaded from disk into memory at any given
 * moment for a #DBHashTable. Both keys and values should be copied into the
 * #DBHashTable record, so they need not exist for the lifetime of the #DBHashTable.
 * This means that static strings and temporary strings (i.e. those created in
 * buffers and those returned by GTK+ widgets) should be copied with dbh_set_key()
 * and dbh_set_data() into the #DBHashTable record before being inserted.
 *
 * You must be careful to ensure that the copied key length matches the defined key
 * length of the #DBHashTable, and also that the copied data does not exceed the
 * maximum length of the #DBHashTable record (1024 bytes by default, expandable
 * with dbh_set_size()). If the #DBHashTable record length is to be variable, be
 * sure to set the appropriate length before each dbh_update() with
 * dbh_set_recordsize(); otherwise the record length need only be set before the
 * first dbh_update().
 *
 * To create a #DBHashTable, use dbh_new().
 *
 * A #DBHashTable may be opened (either new or existing) in read-only mode,
 * parallel-safe mode or thread-safe mode.
 *
 * To insert a key and value into a #DBHashTable, use dbh_update(). The
 * #DBHashTable will not be modified until this command is given: all changes to
 * the current #DBHashTable record only reside in memory, and dbh_update() is
 * necessary to commit them to the #DBHashTable.
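 *
 * The following is a minimal sketch of this insert/commit cycle (the file name,
 * key length and record contents are illustrative assumptions; error checking is
 * abbreviated):
 *
 * |[
 * unsigned char keylength = 8;
 * DBHashTable *dbh = dbh_new ("books.dbh", &keylength, DBH_CREATE);
 * if (dbh) {
 *     unsigned char key[] = "10000000";           // exactly keylength significant bytes
 *     char value[] = "Don Quijote";
 *     dbh_set_key (dbh, key);                     // copy the key into the current record
 *     dbh_set_data (dbh, value, sizeof (value));  // copy the data and set the record size
 *     dbh_update (dbh);                           // commit the current record to disk
 *     dbh_close (dbh);
 * }
 * ]|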
 *
 * To look up the value corresponding to a given key, use dbh_load().
 *
 * To erase and unerase a key and value, use dbh_erase() and dbh_unerase().
 *
 * To call a function for each key and value pair (using a sweep route), use
 * dbh_foreach_sweep() and dbh_sweep().
 *
 * To call a function for each key and value pair (using a fanout route), use
 * dbh_foreach_fanout() and dbh_fanout().
 *
 * To destroy a #DBHashTable, use dbh_destroy().
 *
 * This is dbh version 2, incompatible with dbh version 1 files. The main
 * difference between the two versions is the handling of file pointers. In
 * version 1, file pointers were 32 bits in length, while in version 2, file
 * pointers are 64 bits in length. This allows for DBHashTables with sizes
 * greater than 2 GBytes.
 *
 * Quantified numbers are an alternate way to view the set of natural numbers
 * {1, 2, 3, ...} where order is defined in two levels. In natural numbers there
 * is only one level of order (defined by the greater-than boolean operator). In
 * quantified numbers the first level of order is defined by the cuanta, or
 * quantity. The cuanta is obtained by adding all the digits of the quantified
 * number. Thus, for example, 10022, 5, 32, and 11111 are all equal at the first
 * level of order since their digits all add up to 5. The second level of order
 * may be obtained in different manners. In the functions dbh_genkey() and
 * dbh_genkey2() the corresponding order of the natural numbers from which the
 * keys are generated is not conserved.
 *
 * In dbh_orderkey() the corresponding order of the natural numbers from which
 * the keys are generated is conserved, but at a price: the base, or maximum
 * value each digit may reach, must be defined. This effectively puts a limit on
 * the number of keys which may be generated for a given number of digits.
 *
 * When a #DBHashTable is constructed with quantified keys, the maximum number of
 * disk access instructions generated to access any given record is equal to the
 * cuanta of the quantified number represented by the key. This allows a
 * #DBHashTable to be constructed with minimum access time across all records.
 *
 **/

/*
 * DBH_CREATE
 *      Create a new dbh file on disk, overwriting any file with the same name.
 * DBH_READ_ONLY
 *      Open an existing dbh file on disk in read only mode.
 * DBH_THREAD_SAFE
 *      Use this flag if more than one thread will be accessing the same
 *      #DBHashTable pointer in parallel.
 * DBH_PARALLEL_SAFE
 *      Use this flag if more than one thread or process will be accessing
 *      ---in parallel--- the same file which holds a DBH table.
 */

/**
 * DBH_CREATE:
 *
 * Bit flag for dbh_new() to create a new dbh file on disk, overwriting any file
 * with the same name and cleansing all locks.
 *
 **/
#define DBH_CREATE \
    0x01
/**
 * DBH_READ_ONLY:
 *
 * Bit flag for dbh_new() to open an existing dbh file on disk in read only mode.
 *
 **/
#define DBH_READ_ONLY \
    0x02
/**
 * DBH_THREAD_SAFE:
 *
 * Bit flag for dbh_new() to use if more than one thread will be accessing the
 * same #DBHashTable in write mode in parallel. DBH function calls which may be
 * racing each other in different threads should be enclosed within a
 * dbh_mutex_lock() and dbh_mutex_unlock() pair. Each DBH table opened with the
 * #DBH_THREAD_SAFE attribute has a specific mutex for this purpose.
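 *
 * A sketch of the locking pattern under #DBH_THREAD_SAFE (the key value is an
 * illustrative assumption):
 *
 * |[
 * // "dbh" was opened with dbh_new (path, &keylength, DBH_THREAD_SAFE) and is
 * // shared by several threads.
 * unsigned char key[] = "20210000";
 *
 * dbh_mutex_lock (dbh);                  // serialize racing DBH calls on this table
 * dbh_set_key (dbh, key);
 * if (dbh_load (dbh)) {
 *     // examine or modify DBH_DATA (dbh) here, then commit if needed
 *     dbh_update (dbh);
 * }
 * dbh_mutex_unlock (dbh);
 * ]|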
 *
 * If threads are to access the same #DBHashTable in read mode only, then
 * #DBH_READ_ONLY and a separate memory allocation for each thread's
 * #DBHashTable pointer is more than enough, and faster.
 *
 * When #DBH_THREAD_SAFE is specified, dbh_new() is automatically mutex locked
 * until the function completes. The function dbh_close() is also automatically
 * locked until completion on tables opened with the #DBH_THREAD_SAFE attribute.
 *
 **/
#define DBH_THREAD_SAFE \
    0x04
/**
 * DBH_PARALLEL_SAFE:
 *
 * Bit flag for dbh_new() to use if more than one heavy weight process will be
 * accessing the same #DBHashTable in write mode. If no process will be writing
 * to the #DBHashTable, then #DBH_READ_ONLY is enough and faster, since each
 * process will hold a separate memory allocation for the #DBHashTable pointer.
 *
 **/
#define DBH_PARALLEL_SAFE \
    0x08

/* NOTICE: dbh_header_t structure MUST be data aligned! */
/**
 * dbh_header_t:
 * @n_limit: Maximum toplevel branches
 * @user_chars: Five unsigned chars available to user
 * @bof: File pointer to root of tree
 * @erased_space: Amount of bytes marked as erased
 * @data_space: Amount of bytes occupied by data
 * @total_space: Amount of bytes occupied by data and format
 * @records: Number of records
 * @record_length: Maximum record length
 * @user_filepointer: Six 64-bit filepointers available to user
 * @version: DBHashTable version compatibility information
 * @copyright: DBH sourcecode distribution copyright and download information
 *
 * #dbh_header_t is the structural information written at the first 256 bytes of
 * a #DBHashTable file.
 *
 **/
typedef struct _dbh_header_t dbh_header_t;
struct _dbh_header_t {
    /*< private >*/
    /**** 16/256 bytes *****/
    /*< public >*/
    unsigned char n_limit;
    /*< private >*/
    unsigned char position;        // position of key in data
    unsigned char length;          // length of key in data
    unsigned char totalkeylength;  // total length of key in data
    unsigned char user_tmpdir;     // volatile (boolean)
    unsigned char DBH32;           // not used
    unsigned char reservedC;       // used by system (value goes to 0 on updates or inserts)
    unsigned char reservedD;       // same as above, but not used anymore
    unsigned char sweep_erased;    // 0
    unsigned char writeOK;         // reserved for inner use
    unsigned char dbh_exit;        // reserved for inner use
    /*< public >*/
    unsigned char user_chars[5];
    /*< private >*/
    /**** 12*8 = 96/256 bytes *****/
    /*< public >*/
    FILE_POINTER bof;
    FILE_POINTER erased_space;
    FILE_POINTER data_space;
    FILE_POINTER total_space;
    FILE_POINTER records;
    FILE_POINTER record_length;
    FILE_POINTER user_filepointer[6];
    /*< private >*/
    /**** 16+128 = 144/256 bytes *****/
    /*< public >*/
    char version[16];
    char copyright[128];
    /*< private >*/
};

/**
 * dbh_lock_t:
 * @write_lock: PID of the process holding the write lock, or zero.
 * @write_lock_count: Number of write locks the PID holds (write locks are recursive).
 * @read_lock_count: Number of read locks on the DBH table.
 *
 **/
typedef struct dbh_lock_t {
    pid_t write_lock;
    int write_lock_count;
    int read_lock_count;
} dbh_lock_t;

/**
 * DBHashTable:
 * @branches: Maximum toplevel branches
 * @bytes_userdata: Size of the data record
 * @key: Access key
 * @data: Record data pointer
 * @fd: File descriptor
 * @head_info: Nonvolatile header information
 * @path: File path
 *
 * #DBHashTable is a data structure containing the record information for an open
 * #DBHashTable file.
 *
 */
typedef struct _DBHashTable DBHashTable;
struct _DBHashTable {
    /*< public >*/
    unsigned char branches;
    /*< private >*/
    unsigned char newbranches;               // inner use (find)
    unsigned char flag;                      // reserved (erase)
    FILE_POINTER reservedB;                  // reserved
    /*< public >*/
    FILE_POINTER bytes_userdata;
    /*< private >*/
    FILE_POINTER newbytes_userdata;          // inner use (find)
    FILE_POINTER *branch;                    // inner use (format)
    FILE_POINTER *newbranch;                 // inner use (find)
    /*< public >*/
    unsigned char *key;
    /*< private >*/
    unsigned char *newkey;
    unsigned char *reservedC;                // inner use
    unsigned char *reservedD;                // inner use
    union {
        void *user_data;                     // correct name
        void *sweep_data;                    // deprecated name
    };
    /*< public >*/
    void *data;
    /*< private >*/
    void *newdata;                           // inner use (find)
    void (*operate) (struct _DBHashTable *); // function applied during a sweep
    void (*reservedE) (void *);              // reserved
    void *((*reservedF) (void *));           // reserved
    void *((*reservedG) (void *));           // reserved
    /*< public >*/
    int fd;
    dbh_header_t *head_info;
    char *path;
    /*< private >*/
    char *tmpdir;
    struct _DBHashTable *dbh_desdbh;
    int protection_flags;
    pthread_mutex_t *mutex;
    int lock_attempt_limit;
#ifdef PARALLEL_SAFE
    dbh_lock_t *lock_p;
    sem_t *sem;
#endif
};

///////// macros ///////////////////////////////////////

/**
 * DBH_KEYLENGTH:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the key length in bytes associated with the #DBHashTable.
 * The value is fixed when the #DBHashTable is created with dbh_new().
 *
 **/
#define DBH_KEYLENGTH(dbh) \
    (dbh->head_info->n_limit)
/**
 * DBH_RECORD_SIZE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the size of the current record loaded in memory. If no
 * record has been loaded, the return value is not defined.
 *
 **/
#define DBH_RECORD_SIZE(dbh) \
    (dbh->bytes_userdata)
/**
 * DBH_KEY:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns a pointer to the current #DBHashTable key area.
 *
 **/
#define DBH_KEY(dbh) \
    (dbh->key)
/**
 * DBH_DATA:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns a pointer to the current #DBHashTable data area.
 *
 **/
#define DBH_DATA(dbh) \
    (dbh->data)
/**
 * DBH_ERASED_SPACE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the number of bytes taken up by erased data in the #DBHashTable.
 *
 **/
#define DBH_ERASED_SPACE(dbh) \
    (dbh->head_info->erased_space)
/**
 * DBH_DATA_SPACE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the number of bytes taken up by valid data in the #DBHashTable.
 *
 **/
#define DBH_DATA_SPACE(dbh) \
    (dbh->head_info->data_space)
/**
 * DBH_TOTAL_SPACE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the total number of bytes taken up by the #DBHashTable.
 *
 **/
#define DBH_TOTAL_SPACE(dbh) \
    (dbh->head_info->total_space)
/**
 * DBH_FORMAT_SPACE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the total number of bytes taken up by the format of the
 * #DBHashTable.
 *
 **/
#define DBH_FORMAT_SPACE(dbh) \
    (dbh->head_info->total_space - dbh->head_info->data_space - dbh->head_info->erased_space)
/**
 * DBH_RECORDS:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the number of records in the #DBHashTable.
 *
 **/
#define DBH_RECORDS(dbh) \
    (dbh->head_info->records)
/**
 * DBH_MAXIMUM_RECORD_SIZE:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns the maximum allocated space for data in the current
 * #DBHashTable record.
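 *
 * A sketch of how this limit might be checked before copying data into the
 * record ("payload" and "payload_size" are illustrative assumptions):
 *
 * |[
 * // "payload" points to payload_size bytes about to become the current record.
 * if (payload_size > DBH_MAXIMUM_RECORD_SIZE (dbh))
 *     dbh_set_size (dbh, payload_size);       // grow the maximum record allocation
 * dbh_set_data (dbh, payload, payload_size);
 * dbh_update (dbh);
 * ]|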
 *
 **/
#define DBH_MAXIMUM_RECORD_SIZE(dbh) \
    (dbh->head_info->record_length)
/**
 * DBH_PATH:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * This macro returns a pointer to a string containing the path to the current
 * #DBHashTable.
 *
 **/
#define DBH_PATH(dbh) \
    (dbh->path)

////////// functions ///////////////////////////////////////

/**
 * DBHashFunc:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 *
 * Pointer to a function to apply during dbh_sweep(), dbh_fanout(),
 * dbh_foreach_sweep() and dbh_foreach_fanout(). This function will be applied
 * to all data records involved in the sweep or fanout process.
 *
 **/
typedef void (*DBHashFunc) (DBHashTable *dbh);
/**
 * DBHashFunc2:
 * @dbh: A #DBHashTable pointer (#DBHashTable *)
 * @data: Pointer to other data to be passed to the function
 *
 * Pointer to a function to apply during dbh_foreach(). This function will be
 * applied to all data records involved in the traversal, receiving @data on
 * each call.
 *
 **/
typedef void (*DBHashFunc2) (DBHashTable *dbh, void *data);

/* level 1 functions ***********************************/

/**
 * dbh_new:
 * @path: Path on disk where the DBHashTable resides.
 * @key_length: A pointer to store the length of the key to access the DBHashTable.
 * @flags: Bitwise or of DBH_CREATE, DBH_READ_ONLY, DBH_THREAD_SAFE, DBH_PARALLEL_SAFE
 * @Returns: A pointer to the newly opened DBHashTable, or NULL if it fails.
 *
 * Open an existing DBH table or create a new one. @flags is a bitwise or of the
 * following: #DBH_CREATE, #DBH_READ_ONLY, #DBH_THREAD_SAFE, #DBH_PARALLEL_SAFE.
 * (since 4.7.6)
 **/
DBHashTable *dbh_new (const char *path, unsigned char *key_length, int flags);

#ifndef DBH_DISABLE_DEPRECATED
/**
 * dbh_create:
 * @path: Path on disk where the DBHashTable will reside.
 * @key_length: The length of the key to access the DBHashTable.
 * @Returns: A pointer to the newly created and opened #DBHashTable, or NULL if it fails.
 *
 * Create a new hash file (overwriting any old version). Creates and opens for
 * writing a new #DBHashTable. This function will overwrite any file with the
 * specified path, including any previous DBH file. The @key_length is fixed. If
 * you want variable length keys, use a g_hash_table to associate quantified keys
 * generated by genkey(), and create an extra DBHashTable to save the g_hash.
 * Quantified keys assure that large DBHashes are spread out optimally.
 *
 * DEPRECATED: Use dbh_new() instead.
 **/
DBHashTable *dbh_create (const char *path, unsigned char key_length);
/**
 * dbh_open:
 * @path: Path on disk where the DBHashTable resides.
 * @Returns: A pointer to the newly opened #DBHashTable, or NULL if it fails.
 *
 * Open an existing hash in read-write mode.
 *
 * DEPRECATED: Use dbh_new() instead.
 **/
DBHashTable *dbh_open (const char *path);
/**
 * dbh_open_ro:
 * @path: Path on disk where the DBHashTable resides.
 * @Returns: A pointer to the newly opened read-only DBHashTable, or NULL if it fails.
 *
 * Open an existing hash in read-only mode.
 *
 * DEPRECATED: Use dbh_new() instead.
 **/
DBHashTable *dbh_open_ro (const char *path);
#endif

/**
 * dbh_close:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, 1 otherwise.
 *
 * Close the hash file (thus flushing the io buffer).
 *
 **/
int dbh_close (DBHashTable * dbh);
/**
 * dbh_destroy:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Close an open DBHashTable and erase the file from disk. Convenience function
 * that does a close and rm.
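 *
 * A short sketch of a throw-away table removed when no longer needed (the path
 * and key length are illustrative assumptions):
 *
 * |[
 * unsigned char keylength = 4;
 * DBHashTable *tmp = dbh_new ("/tmp/scratch.dbh", &keylength, DBH_CREATE);
 * if (tmp) {
 *     // ... insert and look up temporary records ...
 *     dbh_destroy (tmp);   // closes the table and removes "/tmp/scratch.dbh"
 * }
 * ]|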
 *
 **/
int dbh_destroy (DBHashTable * dbh);
/**
 * dbh_clear_locks:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Clear the dbh file locks associated with the #DBHashTable. Use this function
 * to clean up persistent file locks.
 *
 * (since 4.7.6)
 *
 **/
int dbh_clear_locks (DBHashTable * dbh);
/**
 * dbh_erase:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, 1 otherwise.
 *
 * Mark the record currently loaded into memory as erased. If no record is
 * currently loaded, behaviour is undefined.
 *
 **/
int dbh_erase (DBHashTable * dbh);
/**
 * dbh_unerase:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, 1 otherwise.
 *
 * This is the opposite of dbh_erase(). Mark the record currently loaded into
 * memory as unerased. If no record is currently loaded, behaviour is undefined.
 *
 **/
int dbh_unerase (DBHashTable * dbh);
/**
 * dbh_prune:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @key: Key of the top level record of the subtree to erase.
 * @subtree_length: Number of branches to erase.
 * @Returns: 0 on error, 1 otherwise.
 *
 * Erases a whole subtree from the record currently loaded into memory. Records
 * are not really removed physically, but rather marked erased so they may be
 * recovered (if not overwritten later on). Records are permanently removed after
 * the #DBHashTable is reconstructed with dbh_regen_sweep() or dbh_regen_fanout().
 *
 **/
int dbh_prune (DBHashTable * dbh, unsigned char *key, unsigned char subtree_length);
/**
 * dbh_unprune:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @key: Key of the top level record of the subtree to unerase.
 * @subtree_length: Number of branches to unerase.
 * @Returns: 0 on error, 1 otherwise.
 *
 * Does the opposite of dbh_prune(), marking an entire subtree as unerased. May
 * fail to work if records have been overwritten since the dbh_prune()
 * instruction was issued.
 *
 **/
int dbh_unprune (DBHashTable * dbh, unsigned char *key, unsigned char subtree_length);
/**
 * dbh_find_top:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @n: Number of branches to match on the top record.
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Find the top level subtree FILE_POINTER for the currently loaded record,
 * considering only the first @n branches. This function will find the top node
 * of the branch, based upon a partial key. The key length to be considered must
 * be specified.
 *
 **/
FILE_POINTER dbh_find_top (DBHashTable * dbh, int n);
/**
 * dbh_find:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @n: Number of branches to ignore on the top record.
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Find the top level subtree FILE_POINTER for the currently loaded record, but
 * ignoring the last @n branches.
 *
 **/
FILE_POINTER dbh_find (DBHashTable * dbh, int n);
/**
 * dbh_genkey0:
 * @key: The address where to put the generated key
 * @length: The key length
 * @n: The natural number from which to generate the key
 *
 * Obtain a key from a sequential series of natural numbers (positive integers
 * without zero). The generated keys do not conserve the order of the natural
 * numbers, but are optimized for the construction of a balanced hash tree.
 * These keys are expressed in quantified numbers. Digits are not offset.
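 *
 * A sketch of sequential inserts keyed by this generator (dbh_genkey() and
 * dbh_genkey2() below are used the same way, differing only in the digit
 * offset); the open table, key length and payload are illustrative assumptions:
 *
 * |[
 * // "dbh" is assumed to be a table created with a key length of 8.
 * unsigned char key[9];
 * unsigned int n;
 * for (n = 1; n <= 1000; n++) {
 *     dbh_genkey0 (key, 8, n);            // balanced, quantified key for record n
 *     dbh_set_key (dbh, key);
 *     dbh_set_data (dbh, &n, sizeof (n));
 *     dbh_update (dbh);
 * }
 * ]|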
 *
 **/
void dbh_genkey0 (unsigned char * key, unsigned char length, unsigned int n);
/**
 * dbh_genkey:
 * @key: The address where to put the generated key
 * @length: The key length
 * @n: The natural number from which to generate the key
 *
 * Obtain a key from a sequential series of natural numbers (positive integers
 * without zero). The generated keys do not conserve the order of the natural
 * numbers, but are optimized for the construction of a balanced hash tree.
 * These keys are expressed in quantified numbers. Digits are offset to the
 * "0" symbol (+48).
 *
 **/
void dbh_genkey (unsigned char *key, unsigned char length, unsigned int n);
/**
 * dbh_genkey2:
 * @key: The address where to put the generated key
 * @length: The key length
 * @n: The natural number from which to generate the key
 *
 * Obtain a key from a sequential series of natural numbers (positive integers
 * without zero). The generated keys do not conserve the order of the natural
 * numbers, but are optimized for the construction of a balanced hash tree.
 * These keys are expressed in quantified numbers. Digits are offset to the
 * "A" symbol (+65).
 *
 **/
void dbh_genkey2 (unsigned char *key, unsigned char length, unsigned int n);

/* This function generates a key that belongs to a finite subset of the quantified
 * numbers, but which preserves the order of the natural numbers (up to the
 * supremum, of course). */
/**
 * dbh_orderkey:
 * @key: The address where to put the generated key
 * @length: The key length
 * @n: The natural number for which to generate the key
 * @base: The number system base to use. This will equal the maximum number of
 *        nodes per branch. This ---along with the keylength--- will also define
 *        a maximum number of records for the DBHashTable
 *
 * Obtain a key from a sequential series of natural numbers (positive integers
 * without zero) which conserves the order of the natural numbers. This function
 * generates a key that belongs to a finite subset of the quantified numbers, but
 * which preserves the order of the natural numbers (up to the supremum, of
 * course).
 *
 **/
void dbh_orderkey (unsigned char *key, unsigned char length, unsigned int n, unsigned char base);
/**
 * dbh_load:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Load a record using the currently set key. This function will also load
 * erased values, except that it will return 0.
 *
 **/
FILE_POINTER dbh_load (DBHashTable * dbh);
/**
 * dbh_load_address:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @currentseek: A byte offset.
 * @Returns: 0 on error, number of branches otherwise.
 *
 * Load a record from the hash table directly from byte offset @currentseek.
 *
 **/
unsigned char dbh_load_address (DBHashTable * dbh, FILE_POINTER currentseek);
/**
 * dbh_load_parent:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Load the parent of the currently loaded record.
 *
 **/
FILE_POINTER dbh_load_parent (DBHashTable * dbh);
/**
 * dbh_load_child:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @key_index: Branch number on which to return the child.
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Load the first child of the currently loaded record, on the branch identified
 * by @key_index. Since the number of children (or branches) of each record is
 * variable, this may be tricky. Top level records have #DBH_KEYLENGTH branches;
 * lower level records have fewer. Each byte of a key represents a branch on top
 * level records.
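 *
 * A sketch of probing every branch of one record (the key is an illustrative
 * assumption; note that loading a child replaces the current record, so the
 * parent is reloaded by address before each probe):
 *
 * |[
 * unsigned char key[] = "11000000";        // identifies an existing record
 * FILE_POINTER parent;
 * unsigned char i;
 *
 * dbh_set_key (dbh, key);
 * parent = dbh_load (dbh);
 * for (i = 0; parent && i < DBH_KEYLENGTH (dbh); i++) {
 *     dbh_load_address (dbh, parent);      // make the parent the current record again
 *     if (dbh_load_child (dbh, i)) {
 *         // the child on branch i is now the current record
 *     }
 * }
 * ]|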
 *
 **/
FILE_POINTER dbh_load_child (DBHashTable * dbh, unsigned char key_index);
/**
 * dbh_regen_sweep:
 * @dbh: A pointer to a #DBHashTable pointer (#DBHashTable **).
 * @Returns: void.
 *
 * Regenerate the #DBHashTable, eliminating erased records and optimizing disk
 * access and speed for sweep access. This is done by creating a new #DBHashTable
 * where the physical structure matches the logical sweep structure. The
 * temporary directory where the new #DBHashTable is created may be set with
 * dbh_settempdir(). The current #DBHashTable is closed before being removed, and
 * the new #DBHashTable is opened after being renamed.
 *
 **/
void dbh_regen_sweep (DBHashTable ** dbh);
/**
 * dbh_regen_fanout:
 * @dbh: A pointer to a #DBHashTable pointer (#DBHashTable **).
 * @Returns: void.
 *
 * Regenerate the #DBHashTable, eliminating erased records and optimizing disk
 * access and speed for fanout access. This is done by creating a new
 * #DBHashTable where the physical structure matches the logical fanout
 * structure. The temporary directory where the new #DBHashTable is created may
 * be set with dbh_settempdir(). The current #DBHashTable is closed before being
 * removed, and the new #DBHashTable is opened after being renamed.
 *
 **/
void dbh_regen_fanout (DBHashTable ** dbh);
/**
 * dbh_settempdir:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @temp_dir: Path to the temporary directory to use.
 * @Returns: 0 if error, 1 otherwise
 *
 * Sets the temporary directory to be used by dbh_regen_sweep() or
 * dbh_regen_fanout(). It is usually best to set the temporary directory on the
 * same filesystem device. The default value for the temporary directory is the
 * directory where @dbh is located. To reset to the default value, send NULL as
 * @temp_dir.
 *
 **/
int dbh_settempdir (DBHashTable * dbh, char *temp_dir);
/**
 * dbh_set_data:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @data: Pointer to the data to copy to the current #DBHashTable record
 * @size: The number of bytes to copy to the current #DBHashTable record
 *
 * This function copies the user data into the current #DBHashTable record and,
 * along with the function dbh_set_key(), makes the current #DBHashTable record
 * ready for the dbh_update() function to commit to the actual #DBHashTable on
 * disk.
 *
 **/
void dbh_set_data (DBHashTable * dbh, void *data, FILE_POINTER size);
/**
 * dbh_set_key:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @key: The key to set as the current DBHashTable record key.
 *
 * This function sets the key of the current DBHashTable record.
 *
 **/
void dbh_set_key (DBHashTable * dbh, unsigned char *key);
/**
 * dbh_set_size:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @size: Size in bytes.
 * @Returns: 0 on error, 1 otherwise.
 *
 * Defines the maximum amount of memory to be allocated to the #DBHashTable
 * records. This is nonvolatile information which needs to be set only once.
 * The default is 1 Kbyte.
 *
 **/
int dbh_set_size (DBHashTable * dbh, FILE_POINTER size);
/**
 * dbh_set_recordsize:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @record_size: The number of bytes in the current #DBHashTable record.
 *
 * This sets the record size of the data in the current #DBHashTable record. It
 * is called implicitly by dbh_set_data(), so it is not needed when dbh_set_data()
 * is used to set the record data. If the record data is written directly into
 * the data area instead, it is very important to call this function:
 * unpredictable results will follow if @record_size is not set. #DBHashTable
 * records are variable in length, so call this function at least once even if
 * you are planning to use fixed length records.
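 *
 * A sketch of the direct-write alternative to dbh_set_data() (the key and the
 * record contents are illustrative assumptions):
 *
 * |[
 * unsigned char key[] = "30000000";
 * char record[] = "direct write";
 *
 * dbh_set_key (dbh, key);
 * memcpy (DBH_DATA (dbh), record, sizeof (record));
 * dbh_set_recordsize (dbh, sizeof (record));   // tell DBH how many bytes of data are valid
 * dbh_update (dbh);
 * ]|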
 *
 **/
void dbh_set_recordsize (DBHashTable * dbh, int record_size);
/**
 * dbh_sweep:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @operate: The function to apply to each selected member of the #DBHashTable
 * @key1: The key from which to start the sweep, or NULL if you don't care.
 *        Make sure it is a top level node of a subtree with dbh_find() first.
 * @key2: The key which will trigger an exit condition from the sweep, or NULL
 *        if you don't care.
 * @ignore_portion: The ignored trailing bytes of @key1 which define the
 *        magnitude of the subtree to be swept, or zero if you don't care.
 * @Returns: 0 on error, 1 otherwise.
 *
 * Apply a function to the subtree members of the hash, following a sweep
 * trajectory (vertically through branches).
 *
 * In order for dbh_sweep() to be extremely fast, you should prepare the
 * #DBHashTable for the trajectory with dbh_regen_sweep() first. This allows for
 * extremely efficient use of hardware and operating system caches.
 *
 **/
int dbh_sweep (DBHashTable * dbh, DBHashFunc operate, unsigned char *key1, unsigned char *key2, unsigned char ignore_portion);
/**
 * dbh_fanout:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @operate: The function to apply to each selected member of the #DBHashTable
 * @key1: The key from which to start the fanout, or NULL if you don't care.
 *        Make sure it is a top level node of a subtree with dbh_find() first.
 * @key2: The key which will trigger an exit condition from the fanout, or NULL
 *        if you don't care.
 * @ignore_portion: The ignored trailing bytes of @key1 which define the
 *        magnitude of the subtree to be swept, or zero if you don't care.
 * @Returns: 0 on error, 1 otherwise.
 *
 * Apply a function to the subtree members of the hash, following a fanout
 * trajectory (horizontally through records).
 *
 * In order for dbh_fanout() to be extremely fast, you should prepare the
 * #DBHashTable for the trajectory with dbh_regen_fanout() first. This allows for
 * extremely efficient use of hardware and operating system caches.
 *
 **/
int dbh_fanout (DBHashTable * dbh, DBHashFunc operate, unsigned char *key1, unsigned char *key2, unsigned char ignore_portion);
/**
 * dbh_foreach:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @operate: A #DBHashFunc2 to execute on all records
 * @data: Pointer to data passed to the #DBHashFunc2
 * @Returns: 0 on error, 1 otherwise.
 *
 * Apply a function to each member of the hash, following a sweep trajectory.
 * The sweep is done by traversing the #DBHashTable in a vertical direction
 * through all branches.
 *
 * In order for dbh_foreach() to be extremely fast, you should prepare the
 * #DBHashTable for the trajectory with dbh_regen_sweep() first. This allows for
 * extremely efficient use of hardware and operating system caches.
 *
 **/
int dbh_foreach (DBHashTable * dbh, DBHashFunc2 operate, void *data);
/**
 * dbh_foreach_sweep:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @operate: A #DBHashFunc to execute on all records
 * @Returns: 0 on error, 1 otherwise.
 *
 * Apply a function to each member of the hash, following a sweep trajectory.
 * The sweep is done by traversing the #DBHashTable in a vertical direction
 * through all branches.
 *
 * In order for dbh_foreach_sweep() to be extremely fast, you should prepare the
 * #DBHashTable for the trajectory with dbh_regen_sweep() first. This allows for
 * extremely efficient use of hardware and operating system caches.
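 *
 * A sketch of a callback visiting every record ("count_record" and the counter
 * are illustrative assumptions):
 *
 * |[
 * static unsigned long visited = 0;
 *
 * static void count_record (DBHashTable *dbh) {
 *     // DBH_KEY (dbh) and DBH_DATA (dbh) describe the record being visited
 *     visited++;
 * }
 *
 * // later, with an open table:
 * dbh_regen_sweep (&dbh);                 // optional: match physical order to the sweep
 * dbh_foreach_sweep (dbh, count_record);
 * ]|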
 *
 **/
int dbh_foreach_sweep (DBHashTable * dbh, DBHashFunc operate);
/**
 * dbh_foreach_fanout:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @operate: A #DBHashFunc to execute on all records
 * @Returns: 0 on error, 1 otherwise.
 *
 * Apply a function to each member of the hash, following a fanout trajectory
 * (horizontally through records). dbh_foreach_fanout() is done by traversing
 * the #DBHashTable in a horizontal direction through all records.
 *
 * In order for dbh_foreach_fanout() to be extremely fast, you should prepare the
 * #DBHashTable for the trajectory with dbh_regen_fanout() first. This allows for
 * extremely efficient use of hardware and operating system caches.
 *
 **/
int dbh_foreach_fanout (DBHashTable * dbh, DBHashFunc operate);
/**
 * dbh_exit_sweep:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 *
 * Calling this function from within a #DBHashFunc will cause an exit of the
 * currently running sweep.
 *
 **/
void dbh_exit_sweep (DBHashTable * dbh);
/**
 * dbh_exit_fanout:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 *
 * Calling this function from within a #DBHashFunc will cause an exit of the
 * currently running fanout.
 *
 **/
void dbh_exit_fanout (DBHashTable * dbh);
/**
 * dbh_update:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 on error, byte offset of loaded record otherwise.
 *
 * Update the current record in memory to the disk based hash. The update
 * function will update erased records as well as unerased records, but if an
 * erased record is updated, it is automatically unerased.
 *
 **/
FILE_POINTER dbh_update (DBHashTable * dbh);
/**
 * dbh_writeheader:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Write out the DBHashTable header information. It is advisable to call this
 * function immediately after creation of a new DBHashTable to force a buffer
 * flush.
 *
 **/
int dbh_writeheader (DBHashTable * dbh);
/**
 * dbh_mutex_lock:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Lock the DBHashTable mutex. This is only valid if the table was opened with
 * the DBH_THREAD_SAFE flag; otherwise the function does nothing.
 *
 **/
int dbh_mutex_lock (DBHashTable * dbh);
/**
 * dbh_mutex_unlock:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Unlock the DBHashTable mutex. This is only valid if the table was opened with
 * the DBH_THREAD_SAFE flag; otherwise the function does nothing.
 *
 **/
int dbh_mutex_unlock (DBHashTable * dbh);

#ifndef DBH_DISABLE_DEPRECATED
/**
 * dbh_set_parallel_lock_attempt_limit:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @limit: Number of attempts to lock a parallel protected file lock before removing the lock.
 * @Returns: 0 if error, 1 otherwise
 *
 * Sets the limit on the number of attempts to lock a parallel protected dbh file
 * lock before considering the lock to be stale. Stale locks may occur when the
 * calling program crashes while the lock is set in either read or write mode;
 * the lock will persist in shared memory beyond the program crash. The lock may
 * be removed manually, or a lock attempt limit may be specified so that the lock
 * is removed automatically. Each lock attempt takes 1/10th of a second
 * (1E+08 nanoseconds). If @limit is set to zero, lock attempts will continue
 * indefinitely.
 *
 * DEPRECATED: Use dbh_set_parallel_lock_timeout() instead. As of 5.0.10, this
 * function is inoperative.
 *
 **/
int dbh_set_parallel_lock_attempt_limit (DBHashTable * dbh, int limit);
#endif

/**
 * dbh_set_parallel_lock_timeout:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @seconds: Number of seconds to try to lock a parallel protected file before
 *           failing. A value of zero means the function will block until the
 *           lock is obtained.
 * @Returns: 0 if error, 1 otherwise
 *
 **/
int dbh_set_parallel_lock_timeout (DBHashTable * dbh, int seconds);
/**
 * dbh_lock_write:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Attempts to get a write lock on the dbh file. A file can only have one write
 * lock, and when a write lock is set, no read locks may be secured. If
 * dbh_set_parallel_lock_timeout() is set to zero (the default) this function
 * will block until the lock is secured.
 *
 **/
int dbh_lock_write (DBHashTable * dbh);
/**
 * dbh_lock_read:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Attempts to get a read lock on the dbh file. A file may have any number of
 * read locks as long as no write lock is set. If dbh_set_parallel_lock_timeout()
 * is set to zero (the default) this function will block until the lock is
 * secured.
 *
 **/
int dbh_lock_read (DBHashTable * dbh);
/**
 * dbh_unlock_read:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Releases a read lock on the dbh file.
 *
 **/
int dbh_unlock_read (DBHashTable * dbh);
/**
 * dbh_unlock_write:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Releases a write lock on the dbh file.
 *
 **/
int dbh_unlock_write (DBHashTable * dbh);
/**
 * dbh_info:
 * @dbh: A #DBHashTable pointer (#DBHashTable *).
 * @Returns: 0 if error, 1 otherwise
 *
 * Prints header information to stdout.
 *
 **/
int dbh_info (DBHashTable * dbh);
/**
 * dbh_set_lock_timeout:
 * @seconds: Timeout default for obtaining a read/write lock in parallel safe mode.
 * @Returns: 0 if error, 1 otherwise
 *
 * Sets the default time for obtaining a read/write lock in parallel safe mode.
 * The default value is zero, which means there is no timeout. If there is no
 * timeout, file locking will block until the lock is secured. Locks may persist
 * beyond program life and may be stale if the program crashed before unlocking
 * was performed. This does not affect currently open dbh files; if the value for
 * a currently open dbh file is to be modified, use dbh_set_parallel_lock_timeout()
 * as well.
 *
 **/
int dbh_set_lock_timeout (int seconds);
/**
 * dbh_get_lock_timeout:
 * @Returns: The default timeout in seconds to secure a read/write lock in
 *           parallel safe mode.
 *
 * Gets the default time for obtaining a read/write lock in parallel safe mode.
 * The default value is zero, which means there is no timeout. If there is no
 * timeout, file locking will block until the lock is secured. Locks may persist
 * beyond program life and may be stale if the program crashed before unlocking
 * was performed.
 *
 **/
int dbh_get_lock_timeout (void);

#ifdef __cplusplus
}
#endif

#endif