1 /** @file mdb.c
2  *	@brief Lightning memory-mapped database library
3  *
4  *	A Btree-based database management library modeled loosely on the
5  *	BerkeleyDB API, but much simplified.
6  */
7 /*
8  * Copyright 2011-2021 Howard Chu, Symas Corp.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted only as authorized by the OpenLDAP
13  * Public License.
14  *
15  * A copy of this license is available in the file LICENSE in the
16  * top-level directory of the distribution or, alternatively, at
17  * <http://www.OpenLDAP.org/license.html>.
18  *
19  * This code is derived from btree.c written by Martin Hedenfalk.
20  *
21  * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se>
22  *
23  * Permission to use, copy, modify, and distribute this software for any
24  * purpose with or without fee is hereby granted, provided that the above
25  * copyright notice and this permission notice appear in all copies.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
28  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
29  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
30  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
31  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
32  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
33  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
34  */
35 #ifndef _GNU_SOURCE
36 #define _GNU_SOURCE 1
37 #endif
38 #if defined(MDB_VL32) || defined(__WIN64__)
39 #define _FILE_OFFSET_BITS	64
40 #endif
41 #ifdef _WIN32
42 #include <malloc.h>
43 #include <windows.h>
44 #include <wchar.h>				/* get wcscpy() */
45 
46 /* We use native NT APIs to setup the memory map, so that we can
47  * let the DB file grow incrementally instead of always preallocating
48  * the full size. These APIs are defined in <wdm.h> and <ntifs.h>
49  * but those headers are meant for driver-level development and
50  * conflict with the regular user-level headers, so we explicitly
51  * declare them here. We get pointers to these functions from
52  * NTDLL.DLL at runtime, to avoid buildtime dependencies on any
53  * NTDLL import libraries.
54  */
55 
56 /*
57  * #ITS 8338 Workaround for build fail in MinGW/MSYS
58  *
59  */
60 #if !defined (_NTDEF_) && !defined (_NTDEF_H)
61 typedef LONG NTSTATUS;
62 #endif
63 
64 typedef NTSTATUS (WINAPI NtCreateSectionFunc)
65   (OUT PHANDLE sh, IN ACCESS_MASK acc,
66   IN void * oa OPTIONAL,
67   IN PLARGE_INTEGER ms OPTIONAL,
68   IN ULONG pp, IN ULONG aa, IN HANDLE fh OPTIONAL);
69 
70 static NtCreateSectionFunc *NtCreateSection;
71 
72 typedef enum _SECTION_INHERIT {
73 	ViewShare = 1,
74 	ViewUnmap = 2
75 } SECTION_INHERIT;
76 
77 typedef NTSTATUS (WINAPI NtMapViewOfSectionFunc)
78   (IN PHANDLE sh, IN HANDLE ph,
79   IN OUT PVOID *addr, IN ULONG_PTR zbits,
80   IN SIZE_T cs, IN OUT PLARGE_INTEGER off OPTIONAL,
81   IN OUT PSIZE_T vs, IN SECTION_INHERIT ih,
82   IN ULONG at, IN ULONG pp);
83 
84 static NtMapViewOfSectionFunc *NtMapViewOfSection;
85 
86 typedef NTSTATUS (WINAPI NtCloseFunc)(HANDLE h);
87 
88 static NtCloseFunc *NtClose;
89 
90 /** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it
91  *  as int64 which is wrong. MSVC doesn't define it at all, so just
92  *  don't use it.
93  */
94 #define MDB_PID_T	int
95 #define MDB_THR_T	DWORD
96 #include <sys/types.h>
97 #include <sys/stat.h>
98 #ifdef __GNUC__
99 # include <sys/param.h>
100 #else
101 # define LITTLE_ENDIAN	1234
102 # define BIG_ENDIAN	4321
103 # define BYTE_ORDER	LITTLE_ENDIAN
104 # ifndef SSIZE_MAX
105 #  define SSIZE_MAX	INT_MAX
106 # endif
107 #endif
108 #define MDB_OFF_T	int64_t
109 #else
110 #include <sys/types.h>
111 #include <sys/stat.h>
112 #define MDB_PID_T	pid_t
113 #define MDB_THR_T	pthread_t
114 #include <sys/param.h>
115 #include <sys/uio.h>
116 #include <sys/mman.h>
117 #ifdef HAVE_SYS_FILE_H
118 #include <sys/file.h>
119 #endif
120 #include <fcntl.h>
121 #define MDB_OFF_T	off_t
122 #endif
123 
124 #if defined(__mips) && defined(__linux)
125 /* MIPS has cache coherency issues, requires explicit cache control */
126 #include <asm/cachectl.h>
127 extern int cacheflush(char *addr, int nbytes, int cache);
128 #define CACHEFLUSH(addr, bytes, cache)	cacheflush(addr, bytes, cache)
129 #else
130 #define CACHEFLUSH(addr, bytes, cache)
131 #endif
132 
133 #if defined(__linux) && !defined(MDB_FDATASYNC_WORKS)
134 /** fdatasync is broken on ext3/ext4fs on older kernels, see
135  *	description in #mdb_env_open2 comments. You can safely
136  *	define MDB_FDATASYNC_WORKS if this code will only be run
137  *	on kernels 3.6 and newer.
138  */
139 #define	BROKEN_FDATASYNC
140 #endif
141 
142 #include <errno.h>
143 #include <limits.h>
144 #include <stddef.h>
145 #include <inttypes.h>
146 #include <stdio.h>
147 #include <stdlib.h>
148 #include <string.h>
149 #include <time.h>
150 
151 #ifdef _MSC_VER
152 #include <io.h>
153 typedef SSIZE_T	ssize_t;
154 #else
155 #include <unistd.h>
156 #endif
157 
158 #if defined(__sun) || defined(__ANDROID__)
159 /* Most platforms have posix_memalign, older may only have memalign */
160 #define HAVE_MEMALIGN	1
161 #include <malloc.h>
162 /* On Solaris, we need the POSIX sigwait function */
163 #if defined (__sun)
164 # define _POSIX_PTHREAD_SEMANTICS	1
165 #endif
166 #endif
167 
168 #if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER))
169 #include <netinet/in.h>
170 #include <resolv.h>	/* defines BYTE_ORDER on HPUX and Solaris */
171 #endif
172 
173 #if defined(__FreeBSD__) && defined(__FreeBSD_version) && __FreeBSD_version >= 1100110
174 # define MDB_USE_POSIX_MUTEX	1
175 # define MDB_USE_ROBUST	1
176 #elif defined(__APPLE__) || defined (BSD) || defined(__FreeBSD_kernel__)
177 # if !(defined(MDB_USE_POSIX_MUTEX) || defined(MDB_USE_POSIX_SEM))
178 # define MDB_USE_SYSV_SEM	1
179 # endif
180 # define MDB_FDATASYNC		fsync
181 #elif defined(__ANDROID__)
182 # define MDB_FDATASYNC		fsync
183 #endif
184 
185 #ifndef _WIN32
186 #include <pthread.h>
187 #include <signal.h>
188 #ifdef MDB_USE_POSIX_SEM
189 # define MDB_USE_HASH		1
190 #include <semaphore.h>
191 #elif defined(MDB_USE_SYSV_SEM)
192 #include <sys/ipc.h>
193 #include <sys/sem.h>
194 #ifdef _SEM_SEMUN_UNDEFINED
195 union semun {
196 	int val;
197 	struct semid_ds *buf;
198 	unsigned short *array;
199 };
200 #endif /* _SEM_SEMUN_UNDEFINED */
201 #else
202 #define MDB_USE_POSIX_MUTEX	1
203 #endif /* MDB_USE_POSIX_SEM */
204 #endif /* !_WIN32 */
205 
206 #if defined(_WIN32) + defined(MDB_USE_POSIX_SEM) + defined(MDB_USE_SYSV_SEM) \
207 	+ defined(MDB_USE_POSIX_MUTEX) != 1
208 # error "Ambiguous shared-lock implementation"
209 #endif
210 
211 #ifdef USE_VALGRIND
212 #include <valgrind/memcheck.h>
213 #define VGMEMP_CREATE(h,r,z)    VALGRIND_CREATE_MEMPOOL(h,r,z)
214 #define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s)
215 #define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a)
216 #define VGMEMP_DESTROY(h)	VALGRIND_DESTROY_MEMPOOL(h)
217 #define VGMEMP_DEFINED(a,s)	VALGRIND_MAKE_MEM_DEFINED(a,s)
218 #else
219 #define VGMEMP_CREATE(h,r,z)
220 #define VGMEMP_ALLOC(h,a,s)
221 #define VGMEMP_FREE(h,a)
222 #define VGMEMP_DESTROY(h)
223 #define VGMEMP_DEFINED(a,s)
224 #endif
225 
226 #ifndef BYTE_ORDER
227 # if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN))
228 /* Solaris just defines one or the other */
229 #  define LITTLE_ENDIAN	1234
230 #  define BIG_ENDIAN	4321
231 #  ifdef _LITTLE_ENDIAN
232 #   define BYTE_ORDER  LITTLE_ENDIAN
233 #  else
234 #   define BYTE_ORDER  BIG_ENDIAN
235 #  endif
236 # else
237 #  define BYTE_ORDER   __BYTE_ORDER
238 # endif
239 #endif
240 
241 #ifndef LITTLE_ENDIAN
242 #define LITTLE_ENDIAN	__LITTLE_ENDIAN
243 #endif
244 #ifndef BIG_ENDIAN
245 #define BIG_ENDIAN	__BIG_ENDIAN
246 #endif
247 
248 #if defined(__i386) || defined(__x86_64) || defined(_M_IX86)
249 #define MISALIGNED_OK	1
250 #endif
251 
252 #include "lmdb.h"
253 #include "midl.h"
254 
255 #if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN)
256 # error "Unknown or unsupported endianness (BYTE_ORDER)"
257 #elif (-6 & 5) || CHAR_BIT!=8 || UINT_MAX!=0xffffffff || MDB_SIZE_MAX%UINT_MAX
258 # error "Two's complement, reasonably sized integer types, please"
259 #endif
260 
261 #ifdef __GNUC__
262 /** Put infrequently used env functions in separate section */
263 # ifdef __APPLE__
264 #  define	ESECT	__attribute__ ((section("__TEXT,text_env")))
265 # else
266 #  define	ESECT	__attribute__ ((section("text_env")))
267 # endif
268 #else
269 #define ESECT
270 #endif
271 
272 #ifdef _WIN32
273 #define CALL_CONV WINAPI
274 #else
275 #define CALL_CONV
276 #endif
277 
278 /** @defgroup internal	LMDB Internals
279  *	@{
280  */
281 /** @defgroup compat	Compatibility Macros
282  *	A bunch of macros to minimize the amount of platform-specific ifdefs
283  *	needed throughout the rest of the code. When the features this library
284  *	needs are similar enough to POSIX to be hidden in a one-or-two line
285  *	replacement, this macro approach is used.
286  *	@{
287  */
288 
289 	/** Features under development */
290 #ifndef MDB_DEVEL
291 #define MDB_DEVEL 0
292 #endif
293 
294 	/** Wrapper around __func__, which is a C99 feature */
295 #if __STDC_VERSION__ >= 199901L
296 # define mdb_func_	__func__
297 #elif __GNUC__ >= 2 || _MSC_VER >= 1300
298 # define mdb_func_	__FUNCTION__
299 #else
300 /* If a debug message says <mdb_unknown>(), update the #if statements above */
301 # define mdb_func_	"<mdb_unknown>"
302 #endif
303 
304 /* Internal error codes, not exposed outside liblmdb */
305 #define	MDB_NO_ROOT		(MDB_LAST_ERRCODE + 10)
306 #ifdef _WIN32
307 #define MDB_OWNERDEAD	((int) WAIT_ABANDONED)
308 #elif defined MDB_USE_SYSV_SEM
309 #define MDB_OWNERDEAD	(MDB_LAST_ERRCODE + 11)
310 #elif defined(MDB_USE_POSIX_MUTEX) && defined(EOWNERDEAD)
311 #define MDB_OWNERDEAD	EOWNERDEAD	/**< #LOCK_MUTEX0() result if dead owner */
312 #endif
313 
314 #ifdef __GLIBC__
315 #define	GLIBC_VER	((__GLIBC__ << 16 )| __GLIBC_MINOR__)
316 #endif
317 /** Some platforms define the EOWNERDEAD error code
318  * even though they don't support Robust Mutexes.
319  * Compile with -DMDB_USE_ROBUST=0, or use some other
320  * mechanism like -DMDB_USE_SYSV_SEM instead of
321  * -DMDB_USE_POSIX_MUTEX. (SysV semaphores are
322  * also Robust, but some systems don't support them
323  * either.)
324  */
325 #ifndef MDB_USE_ROBUST
326 /* Android currently lacks Robust Mutex support. So does glibc < 2.4. */
327 # if defined(MDB_USE_POSIX_MUTEX) && (defined(__ANDROID__) || \
328 	(defined(__GLIBC__) && GLIBC_VER < 0x020004))
329 #  define MDB_USE_ROBUST	0
330 # else
331 #  define MDB_USE_ROBUST	1
332 # endif
333 #endif /* !MDB_USE_ROBUST */
334 
335 #if defined(MDB_USE_POSIX_MUTEX) && (MDB_USE_ROBUST)
336 /* glibc < 2.12 only provided _np API */
337 #  if (defined(__GLIBC__) && GLIBC_VER < 0x02000c) || \
338 	(defined(PTHREAD_MUTEX_ROBUST_NP) && !defined(PTHREAD_MUTEX_ROBUST))
339 #   define PTHREAD_MUTEX_ROBUST	PTHREAD_MUTEX_ROBUST_NP
340 #   define pthread_mutexattr_setrobust(attr, flag)	pthread_mutexattr_setrobust_np(attr, flag)
341 #   define pthread_mutex_consistent(mutex)	pthread_mutex_consistent_np(mutex)
342 #  endif
343 #endif /* MDB_USE_POSIX_MUTEX && MDB_USE_ROBUST */
344 
345 #if defined(MDB_OWNERDEAD) && (MDB_USE_ROBUST)
346 #define MDB_ROBUST_SUPPORTED	1
347 #endif
348 
349 #ifdef _WIN32
350 #define MDB_USE_HASH	1
351 #define MDB_PIDLOCK	0
352 #define THREAD_RET	DWORD
353 #define pthread_t	HANDLE
354 #define pthread_mutex_t	HANDLE
355 #define pthread_cond_t	HANDLE
356 typedef HANDLE mdb_mutex_t, mdb_mutexref_t;
357 #define pthread_key_t	DWORD
358 #define pthread_self()	GetCurrentThreadId()
359 #define pthread_key_create(x,y)	\
360 	((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? ErrCode() : 0)
361 #define pthread_key_delete(x)	TlsFree(x)
362 #define pthread_getspecific(x)	TlsGetValue(x)
363 #define pthread_setspecific(x,y)	(TlsSetValue(x,y) ? 0 : ErrCode())
364 #define pthread_mutex_unlock(x)	ReleaseMutex(*x)
365 #define pthread_mutex_lock(x)	WaitForSingleObject(*x, INFINITE)
366 #define pthread_cond_signal(x)	SetEvent(*x)
367 #define pthread_cond_wait(cond,mutex)	do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0)
368 #define THREAD_CREATE(thr,start,arg) \
369 	(((thr) = CreateThread(NULL, 0, start, arg, 0, NULL)) ? 0 : ErrCode())
370 #define THREAD_FINISH(thr) \
371 	(WaitForSingleObject(thr, INFINITE) ? ErrCode() : 0)
372 #define LOCK_MUTEX0(mutex)		WaitForSingleObject(mutex, INFINITE)
373 #define UNLOCK_MUTEX(mutex)		ReleaseMutex(mutex)
374 #define mdb_mutex_consistent(mutex)	0
375 #define getpid()	GetCurrentProcessId()
376 #define	MDB_FDATASYNC(fd)	(!FlushFileBuffers(fd))
377 #define	MDB_MSYNC(addr,len,flags)	(!FlushViewOfFile(addr,len))
378 #define	ErrCode()	GetLastError()
379 #define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;}
380 #define	close(fd)	(CloseHandle(fd) ? 0 : -1)
381 #define	munmap(ptr,len)	UnmapViewOfFile(ptr)
382 #ifdef PROCESS_QUERY_LIMITED_INFORMATION
383 #define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION
384 #else
385 #define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000
386 #endif
387 #else
388 #define THREAD_RET	void *
389 #define THREAD_CREATE(thr,start,arg)	pthread_create(&thr,NULL,start,arg)
390 #define THREAD_FINISH(thr)	pthread_join(thr,NULL)
391 
392 	/** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */
393 #define MDB_PIDLOCK			1
394 
395 #ifdef MDB_USE_POSIX_SEM
396 
397 typedef sem_t *mdb_mutex_t, *mdb_mutexref_t;
398 #define LOCK_MUTEX0(mutex)		mdb_sem_wait(mutex)
399 #define UNLOCK_MUTEX(mutex)		sem_post(mutex)
400 
static int
mdb_sem_wait(sem_t *sem)
{
	/* Block on the semaphore, restarting if interrupted by a signal.
	 * Returns 0 on success, otherwise the errno reported by sem_wait().
	 */
	for (;;) {
		if (!sem_wait(sem))
			return 0;
		if (errno != EINTR)
			return errno;
	}
}
408 
409 #elif defined MDB_USE_SYSV_SEM
410 
411 typedef struct mdb_mutex {
412 	int semid;
413 	int semnum;
414 	int *locked;
415 } mdb_mutex_t[1], *mdb_mutexref_t;
416 
417 #define LOCK_MUTEX0(mutex)		mdb_sem_wait(mutex)
418 #define UNLOCK_MUTEX(mutex)		do { \
419 	struct sembuf sb = { 0, 1, SEM_UNDO }; \
420 	sb.sem_num = (mutex)->semnum; \
421 	*(mutex)->locked = 0; \
422 	semop((mutex)->semid, &sb, 1); \
423 } while(0)
424 
425 static int
mdb_sem_wait(mdb_mutexref_t sem)426 mdb_sem_wait(mdb_mutexref_t sem)
427 {
428 	int rc, *locked = sem->locked;
429 	struct sembuf sb = { 0, -1, SEM_UNDO };
430 	sb.sem_num = sem->semnum;
431 	do {
432 		if (!semop(sem->semid, &sb, 1)) {
433 			rc = *locked ? MDB_OWNERDEAD : MDB_SUCCESS;
434 			*locked = 1;
435 			break;
436 		}
437 	} while ((rc = errno) == EINTR);
438 	return rc;
439 }
440 
441 #define mdb_mutex_consistent(mutex)	0
442 
443 #else	/* MDB_USE_POSIX_MUTEX: */
444 	/** Shared mutex/semaphore as the original is stored.
445 	 *
446 	 *	Not for copies.  Instead it can be assigned to an #mdb_mutexref_t.
447 	 *	When mdb_mutexref_t is a pointer and mdb_mutex_t is not, then it
448 	 *	is array[size 1] so it can be assigned to the pointer.
449 	 */
450 typedef pthread_mutex_t mdb_mutex_t[1];
451 	/** Reference to an #mdb_mutex_t */
452 typedef pthread_mutex_t *mdb_mutexref_t;
453 	/** Lock the reader or writer mutex.
454 	 *	Returns 0 or a code to give #mdb_mutex_failed(), as in #LOCK_MUTEX().
455 	 */
456 #define LOCK_MUTEX0(mutex)	pthread_mutex_lock(mutex)
457 	/** Unlock the reader or writer mutex.
458 	 */
459 #define UNLOCK_MUTEX(mutex)	pthread_mutex_unlock(mutex)
460 	/** Mark mutex-protected data as repaired, after death of previous owner.
461 	 */
462 #define mdb_mutex_consistent(mutex)	pthread_mutex_consistent(mutex)
463 #endif	/* MDB_USE_POSIX_SEM || MDB_USE_SYSV_SEM */
464 
465 	/** Get the error code for the last failed system function.
466 	 */
467 #define	ErrCode()	errno
468 
469 	/** An abstraction for a file handle.
470 	 *	On POSIX systems file handles are small integers. On Windows
471 	 *	they're opaque pointers.
472 	 */
473 #define	HANDLE	int
474 
475 	/**	A value for an invalid file handle.
476 	 *	Mainly used to initialize file variables and signify that they are
477 	 *	unused.
478 	 */
479 #define INVALID_HANDLE_VALUE	(-1)
480 
481 	/** Get the size of a memory page for the system.
482 	 *	This is the basic size that the platform's memory manager uses, and is
483 	 *	fundamental to the use of memory-mapped files.
484 	 */
485 #define	GET_PAGESIZE(x)	((x) = sysconf(_SC_PAGE_SIZE))
486 #endif
487 
488 #define	Z	MDB_FMT_Z	/**< printf/scanf format modifier for size_t */
489 #define	Yu	MDB_PRIy(u)	/**< printf format for #mdb_size_t */
490 #define	Yd	MDB_PRIy(d)	/**< printf format for 'signed #mdb_size_t' */
491 
492 #ifdef MDB_USE_SYSV_SEM
493 #define MNAME_LEN	(sizeof(int))
494 #else
495 #define MNAME_LEN	(sizeof(pthread_mutex_t))
496 #endif
497 
498 /** Initial part of #MDB_env.me_mutexname[].
499  *	Changes to this code must be reflected in #MDB_LOCK_FORMAT.
500  */
501 #ifdef _WIN32
502 #define MUTEXNAME_PREFIX		"Global\\MDB"
503 #elif defined MDB_USE_POSIX_SEM
504 #define MUTEXNAME_PREFIX		"/MDB"
505 #endif
506 
507 /** @} */
508 
509 #ifdef MDB_ROBUST_SUPPORTED
510 	/** Lock mutex, handle any error, set rc = result.
511 	 *	Return 0 on success, nonzero (not rc) on error.
512 	 */
513 #define LOCK_MUTEX(rc, env, mutex) \
514 	(((rc) = LOCK_MUTEX0(mutex)) && \
515 	 ((rc) = mdb_mutex_failed(env, mutex, rc)))
516 static int mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc);
517 #else
518 #define LOCK_MUTEX(rc, env, mutex) ((rc) = LOCK_MUTEX0(mutex))
519 #define mdb_mutex_failed(env, mutex, rc) (rc)
520 #endif
521 
522 #ifndef _WIN32
523 /**	A flag for opening a file and requesting synchronous data writes.
524  *	This is only used when writing a meta page. It's not strictly needed;
525  *	we could just do a normal write and then immediately perform a flush.
526  *	But if this flag is available it saves us an extra system call.
527  *
528  *	@note If O_DSYNC is undefined but exists in /usr/include,
529  * preferably set some compiler flag to get the definition.
530  */
531 #ifndef MDB_DSYNC
532 # ifdef O_DSYNC
533 # define MDB_DSYNC	O_DSYNC
534 # else
535 # define MDB_DSYNC	O_SYNC
536 # endif
537 #endif
538 #endif
539 
540 /** Function for flushing the data of a file. Define this to fsync
541  *	if fdatasync() is not supported.
542  */
543 #ifndef MDB_FDATASYNC
544 # define MDB_FDATASYNC	fdatasync
545 #endif
546 
547 #ifndef MDB_MSYNC
548 # define MDB_MSYNC(addr,len,flags)	msync(addr,len,flags)
549 #endif
550 
551 #ifndef MS_SYNC
552 #define	MS_SYNC	1
553 #endif
554 
555 #ifndef MS_ASYNC
556 #define	MS_ASYNC	0
557 #endif
558 
559 	/** A page number in the database.
560 	 *	Note that 64 bit page numbers are overkill, since pages themselves
561 	 *	already represent 12-13 bits of addressable memory, and the OS will
562 	 *	always limit applications to a maximum of 63 bits of address space.
563 	 *
564 	 *	@note In the #MDB_node structure, we only store 48 bits of this value,
565 	 *	which thus limits us to only 60 bits of addressable data.
566 	 */
567 typedef MDB_ID	pgno_t;
568 
569 	/** A transaction ID.
570 	 *	See struct MDB_txn.mt_txnid for details.
571 	 */
572 typedef MDB_ID	txnid_t;
573 
574 /** @defgroup debug	Debug Macros
575  *	@{
576  */
577 #ifndef MDB_DEBUG
578 	/**	Enable debug output.  Needs variable argument macros (a C99 feature).
579 	 *	Set this to 1 for copious tracing. Set to 2 to add dumps of all IDLs
580 	 *	read from and written to the database (used for free space management).
581 	 */
582 #define MDB_DEBUG 0
583 #endif
584 
585 #if MDB_DEBUG
586 static int mdb_debug;
587 static txnid_t mdb_debug_start;
588 
589 	/**	Print a debug message with printf formatting.
590 	 *	Requires double parenthesis around 2 or more args.
591 	 */
592 # define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args))
593 # define DPRINTF0(fmt, ...) \
594 	fprintf(stderr, "%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__)
595 #else
596 # define DPRINTF(args)	((void) 0)
597 #endif
598 	/**	Print a debug string.
599 	 *	The string is printed literally, with no format processing.
600 	 */
601 #define DPUTS(arg)	DPRINTF(("%s", arg))
602 	/** Debugging output value of a cursor DBI: Negative in a sub-cursor. */
603 #define DDBI(mc) \
604 	(((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi)
605 /** @} */
606 
607 	/**	@brief The maximum size of a database page.
608 	 *
609 	 *	It is 32k or 64k, since value-PAGEBASE must fit in
610 	 *	#MDB_page.%mp_upper.
611 	 *
612 	 *	LMDB will use database pages < OS pages if needed.
613 	 *	That causes more I/O in write transactions: The OS must
614 	 *	know (read) the whole page before writing a partial page.
615 	 *
616 	 *	Note that we don't currently support Huge pages. On Linux,
617 	 *	regular data files cannot use Huge pages, and in general
618 	 *	Huge pages aren't actually pageable. We rely on the OS
619 	 *	demand-pager to read our data and page it out when memory
620 	 *	pressure from other processes is high. So until OSs have
621 	 *	actual paging support for Huge pages, they're not viable.
622 	 */
623 #define MAX_PAGESIZE	 (PAGEBASE ? 0x10000 : 0x8000)
624 
625 	/** The minimum number of keys required in a database page.
626 	 *	Setting this to a larger value will place a smaller bound on the
627 	 *	maximum size of a data item. Data items larger than this size will
628 	 *	be pushed into overflow pages instead of being stored directly in
629 	 *	the B-tree node. This value used to default to 4. With a page size
630 	 *	of 4096 bytes that meant that any item larger than 1024 bytes would
631 	 *	go into an overflow page. That also meant that on average 2-3KB of
632 	 *	each overflow page was wasted space. The value cannot be lower than
633 	 *	2 because then there would no longer be a tree structure. With this
634 	 *	value, items larger than 2KB will go into overflow pages, and on
635 	 *	average only 1KB will be wasted.
636 	 */
637 #define MDB_MINKEYS	 2
638 
639 	/**	A stamp that identifies a file as an LMDB file.
640 	 *	There's nothing special about this value other than that it is easily
641 	 *	recognizable, and it will reflect any byte order mismatches.
642 	 */
643 #define MDB_MAGIC	 0xBEEFC0DE
644 
645 	/**	The version number for a database's datafile format. */
646 #define MDB_DATA_VERSION	 ((MDB_DEVEL) ? 999 : 1)
647 	/**	The version number for a database's lockfile format. */
648 #define MDB_LOCK_VERSION	 ((MDB_DEVEL) ? 999 : 2)
649 	/** Number of bits representing #MDB_LOCK_VERSION in #MDB_LOCK_FORMAT.
650 	 *	The remaining bits must leave room for #MDB_lock_desc.
651 	 */
652 #define MDB_LOCK_VERSION_BITS 12
653 
654 	/**	@brief The max size of a key we can write, or 0 for computed max.
655 	 *
656 	 *	This macro should normally be left alone or set to 0.
657 	 *	Note that a database with big keys or dupsort data cannot be
658 	 *	reliably modified by a liblmdb which uses a smaller max.
659 	 *	The default is 511 for backwards compat, or 0 when #MDB_DEVEL.
660 	 *
661 	 *	Other values are allowed, for backwards compat.  However:
662 	 *	A value bigger than the computed max can break if you do not
663 	 *	know what you are doing, and liblmdb <= 0.9.10 can break when
664 	 *	modifying a DB with keys/dupsort data bigger than its max.
665 	 *
666 	 *	Data items in an #MDB_DUPSORT database are also limited to
667 	 *	this size, since they're actually keys of a sub-DB.  Keys and
668 	 *	#MDB_DUPSORT data items must fit on a node in a regular page.
669 	 */
670 #ifndef MDB_MAXKEYSIZE
671 #define MDB_MAXKEYSIZE	 ((MDB_DEVEL) ? 0 : 511)
672 #endif
673 
674 	/**	The maximum size of a key we can write to the environment. */
675 #if MDB_MAXKEYSIZE
676 #define ENV_MAXKEY(env)	(MDB_MAXKEYSIZE)
677 #else
678 #define ENV_MAXKEY(env)	((env)->me_maxkey)
679 #endif
680 
681 	/**	@brief The maximum size of a data item.
682 	 *
683 	 *	We only store a 32 bit value for node sizes.
684 	 */
685 #define MAXDATASIZE	0xffffffffUL
686 
687 #if MDB_DEBUG
688 	/**	Key size which fits in a #DKBUF.
689 	 *	@ingroup debug
690 	 */
691 #define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511)
692 	/**	A key buffer.
693 	 *	@ingroup debug
694 	 *	This is used for printing a hex dump of a key's contents.
695 	 */
696 #define DKBUF	char kbuf[DKBUF_MAXKEYSIZE*2+1]
697 	/**	Display a key in hex.
698 	 *	@ingroup debug
699 	 *	Invoke a function to display a key in hex.
700 	 */
701 #define	DKEY(x)	mdb_dkey(x, kbuf)
702 #else
703 #define	DKBUF
704 #define DKEY(x)	0
705 #endif
706 
707 	/** An invalid page number.
708 	 *	Mainly used to denote an empty tree.
709 	 */
710 #define P_INVALID	 (~(pgno_t)0)
711 
712 	/** Test if the flags \b f are set in a flag word \b w. */
713 #define F_ISSET(w, f)	 (((w) & (f)) == (f))
714 
715 	/** Round \b n up to an even number. */
716 #define EVEN(n)		(((n) + 1U) & -2) /* sign-extending -2 to match n+1U */
717 
718 	/** Least significant 1-bit of \b n.  n must be of an unsigned type. */
719 #define LOW_BIT(n)		((n) & (-(n)))
720 
721 	/** (log2(\b p2) % \b n), for p2 = power of 2 and 0 < n < 8. */
722 #define LOG2_MOD(p2, n)	(7 - 86 / ((p2) % ((1U<<(n))-1) + 11))
723 	/* Explanation: Let p2 = 2**(n*y + x), x<n and M = (1U<<n)-1. Now p2 =
724 	 * (M+1)**y * 2**x = 2**x (mod M). Finally "/" "happens" to return 7-x.
725 	 */
726 
727 	/** Should be alignment of \b type. Ensure it is a power of 2. */
728 #define ALIGNOF2(type) \
729 	LOW_BIT(offsetof(struct { char ch_; type align_; }, align_))
730 
731 	/**	Used for offsets within a single page.
732 	 *	Since memory pages are typically 4 or 8KB in size, 12-13 bits,
733 	 *	this is plenty.
734 	 */
735 typedef uint16_t	 indx_t;
736 
737 typedef unsigned long long	mdb_hash_t;
738 
739 	/**	Default size of memory map.
740 	 *	This is certainly too small for any actual applications. Apps should always set
741 	 *	the size explicitly using #mdb_env_set_mapsize().
742 	 */
743 #define DEFAULT_MAPSIZE	1048576
744 
745 /**	@defgroup readers	Reader Lock Table
746  *	Readers don't acquire any locks for their data access. Instead, they
747  *	simply record their transaction ID in the reader table. The reader
748  *	mutex is needed just to find an empty slot in the reader table. The
749  *	slot's address is saved in thread-specific data so that subsequent read
750  *	transactions started by the same thread need no further locking to proceed.
751  *
752  *	If #MDB_NOTLS is set, the slot address is not saved in thread-specific data.
753  *
754  *	No reader table is used if the database is on a read-only filesystem, or
755  *	if #MDB_NOLOCK is set.
756  *
757  *	Since the database uses multi-version concurrency control, readers don't
758  *	actually need any locking. This table is used to keep track of which
759  *	readers are using data from which old transactions, so that we'll know
760  *	when a particular old transaction is no longer in use. Old transactions
761  *	that have discarded any data pages can then have those pages reclaimed
762  *	for use by a later write transaction.
763  *
764  *	The lock table is constructed such that reader slots are aligned with the
765  *	processor's cache line size. Any slot is only ever used by one thread.
766  *	This alignment guarantees that there will be no contention or cache
767  *	thrashing as threads update their own slot info, and also eliminates
768  *	any need for locking when accessing a slot.
769  *
770  *	A writer thread will scan every slot in the table to determine the oldest
771  *	outstanding reader transaction. Any freed pages older than this will be
772  *	reclaimed by the writer. The writer doesn't use any locks when scanning
773  *	this table. This means that there's no guarantee that the writer will
774  *	see the most up-to-date reader info, but that's not required for correct
775  *	operation - all we need is to know the upper bound on the oldest reader,
776  *	we don't care at all about the newest reader. So the only consequence of
777  *	reading stale information here is that old pages might hang around a
778  *	while longer before being reclaimed. That's actually good anyway, because
779  *	the longer we delay reclaiming old pages, the more likely it is that a
780  *	string of contiguous pages can be found after coalescing old pages from
781  *	many old transactions together.
782  *	@{
783  */
784 	/**	Number of slots in the reader table.
785 	 *	This value was chosen somewhat arbitrarily. 126 readers plus a
786 	 *	couple mutexes fit exactly into 8KB on my development machine.
787 	 *	Applications should set the table size using #mdb_env_set_maxreaders().
788 	 */
789 #define DEFAULT_READERS	126
790 
791 	/**	The size of a CPU cache line in bytes. We want our lock structures
792 	 *	aligned to this size to avoid false cache line sharing in the
793 	 *	lock table.
794 	 *	This value works for most CPUs. For Itanium this should be 128.
795 	 */
796 #ifndef CACHELINE
797 #define CACHELINE	64
798 #endif
799 
800 	/**	The information we store in a single slot of the reader table.
801 	 *	In addition to a transaction ID, we also record the process and
802 	 *	thread ID that owns a slot, so that we can detect stale information,
803 	 *	e.g. threads or processes that went away without cleaning up.
804 	 *	@note We currently don't check for stale records. We simply re-init
805 	 *	the table when we know that we're the only process opening the
806 	 *	lock file.
807 	 */
typedef struct MDB_rxbody {
	/**	Current Transaction ID when this transaction began, or (txnid_t)-1.
	 *	Multiple readers that start at the same time will probably have the
	 *	same ID here. Again, it's not important to exclude them from
	 *	anything; all we need to know is which version of the DB they
	 *	started from so we can avoid overwriting any data used in that
	 *	particular version.
	 *	Fields are volatile because this slot lives in the shared
	 *	memory-mapped lockfile and is read by other processes without locks.
	 */
	volatile txnid_t		mrb_txnid;
	/** The process ID of the process owning this reader txn. */
	volatile MDB_PID_T	mrb_pid;
	/** The thread ID of the thread owning this txn. */
	volatile MDB_THR_T	mrb_tid;
} MDB_rxbody;
822 
823 	/** The actual reader record, with cacheline padding. */
typedef struct MDB_reader {
	union {
		MDB_rxbody mrx;
		/** shorthand for mrb_txnid */
#define	mr_txnid	mru.mrx.mrb_txnid
#define	mr_pid	mru.mrx.mrb_pid
#define	mr_tid	mru.mrx.mrb_tid
		/** cache line alignment: round the slot size up to a multiple of
		 *	CACHELINE so adjacent reader slots never share a cache line. */
		char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)];
	} mru;
} MDB_reader;
835 
836 	/** The header for the reader table.
837 	 *	The table resides in a memory-mapped file. (This is a different file
838 	 *	than is used for the main database.)
839 	 *
840 	 *	For POSIX the actual mutexes reside in the shared memory of this
841 	 *	mapped file. On Windows, mutexes are named objects allocated by the
842 	 *	kernel; we store the mutex names in this mapped file so that other
843 	 *	processes can grab them. This same approach is also used on
844 	 *	MacOSX/Darwin (using named semaphores) since MacOSX doesn't support
845 	 *	process-shared POSIX mutexes. For these cases where a named object
846 	 *	is used, the object name is derived from a 64 bit FNV hash of the
847 	 *	environment pathname. As such, naming collisions are extremely
848 	 *	unlikely. If a collision occurs, the results are unpredictable.
849 	 */
typedef struct MDB_txbody {
		/** Stamp identifying this as an LMDB file. It must be set
		 *	to #MDB_MAGIC. */
	uint32_t	mtb_magic;
		/** Format of this lock file. Must be set to #MDB_LOCK_FORMAT. */
	uint32_t	mtb_format;
		/**	The ID of the last transaction committed to the database.
		 *	This is recorded here only for convenience; the value can always
		 *	be determined by reading the main database meta pages.
		 */
	volatile txnid_t		mtb_txnid;
		/** The number of slots that have been used in the reader table.
		 *	This always records the maximum count, it is not decremented
		 *	when readers release their slots.
		 */
	volatile unsigned	mtb_numreaders;
#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
		/** Binary form of names of the reader/writer locks */
	mdb_hash_t			mtb_mutexid;
#elif defined(MDB_USE_SYSV_SEM)
		/** SysV semaphore set ID — presumably holds both locks; confirm in env setup code. */
	int 	mtb_semid;
		/** Reader-lock "locked" flag used by #mdb_sem_wait() dead-owner detection. */
	int		mtb_rlocked;
#else
		/** Mutex protecting access to this table.
		 *	This is the reader table lock used with LOCK_MUTEX().
		 */
	mdb_mutex_t	mtb_rmutex;
#endif
} MDB_txbody;
879 
	/** The actual reader table definition.
	 *	This is the full layout of the lock file: the #MDB_txbody header
	 *	(padded to a cache line), an optional writer-lock section (also
	 *	padded), then a variable-length array of reader slots.
	 */
typedef struct MDB_txninfo {
	union {
		MDB_txbody mtb;
#define mti_magic	mt1.mtb.mtb_magic
#define mti_format	mt1.mtb.mtb_format
#define mti_rmutex	mt1.mtb.mtb_rmutex
#define mti_txnid	mt1.mtb.mtb_txnid
#define mti_numreaders	mt1.mtb.mtb_numreaders
#define mti_mutexid	mt1.mtb.mtb_mutexid
#ifdef MDB_USE_SYSV_SEM
#define	mti_semid	mt1.mtb.mtb_semid
#define	mti_rlocked	mt1.mtb.mtb_rlocked
#endif
		/* round the header up to a whole number of cache lines */
		char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)];
	} mt1;
#if !(defined(_WIN32) || defined(MDB_USE_POSIX_SEM))
	/* Writer lock, kept on its own cache line(s).  Absent on Win32 and
	 * POSIX-sem builds, where the writer lock is a named kernel object.
	 */
	union {
#ifdef MDB_USE_SYSV_SEM
		int mt2_wlocked;
#define mti_wlocked	mt2.mt2_wlocked
#else
		mdb_mutex_t	mt2_wmutex;
#define mti_wmutex	mt2.mt2_wmutex
#endif
		char pad[(MNAME_LEN+CACHELINE-1) & ~(CACHELINE-1)];
	} mt2;
#endif
	/* Dynamic size in the mapped file; [1] is the pre-C99 idiom for a
	 * flexible array member.  Actual count is bounded by me_maxreaders.
	 */
	MDB_reader	mti_readers[1];
} MDB_txninfo;
910 
	/** Lockfile format signature: version, features and field layout.
	 *	Low #MDB_LOCK_VERSION_BITS bits hold the version; the remaining
	 *	high bits hold #MDB_lock_desc, describing layout/locking options.
	 */
#define MDB_LOCK_FORMAT \
	((uint32_t)         \
	 (((MDB_LOCK_VERSION) % (1U << MDB_LOCK_VERSION_BITS)) \
	  + MDB_lock_desc     * (1U << MDB_LOCK_VERSION_BITS)))
916 
	/** Lock type and layout. Values 0-119. _WIN32 implies #MDB_PIDLOCK.
	 *	Some low values are reserved for future tweaks.
	 *	The per-type base (0/4/8/10) identifies the locking scheme; the
	 *	added terms fold in alignment/size details that affect layout.
	 */
#ifdef _WIN32
# define MDB_LOCK_TYPE	(0 + ALIGNOF2(mdb_hash_t)/8 % 2)
#elif defined MDB_USE_POSIX_SEM
# define MDB_LOCK_TYPE	(4 + ALIGNOF2(mdb_hash_t)/8 % 2)
#elif defined MDB_USE_SYSV_SEM
# define MDB_LOCK_TYPE	(8)
#elif defined MDB_USE_POSIX_MUTEX
/* We do not know the inside of a POSIX mutex and how to check if mutexes
 * used by two executables are compatible. Just check alignment and size.
 */
# define MDB_LOCK_TYPE	(10 + \
		LOG2_MOD(ALIGNOF2(pthread_mutex_t), 5) + \
		sizeof(pthread_mutex_t) / 4U % 22 * 5)
#endif
934 
enum {
	/** Magic number for lockfile layout and features.
	 *
	 *  This *attempts* to stop liblmdb variants compiled with conflicting
	 *	options from using the lockfile at the same time and thus breaking
	 *	it.  It describes locking types, and sizes and sometimes alignment
	 *	of the various lockfile items.
	 *
	 *	The detected ranges are mostly guesswork, or based simply on how
	 *	big they could be without using more bits.  So we can tweak them
	 *	in good conscience when updating #MDB_LOCK_VERSION.
	 *
	 *	Encoding: a mixed-radix sum of small layout facts (each multiplier
	 *	is the product of the preceding ranges), plus three single-bit
	 *	flags in bits 15-17.
	 */
	MDB_lock_desc =
	/* Default CACHELINE=64 vs. other values (have seen mention of 32-256) */
	(CACHELINE==64 ? 0 : 1 + LOG2_MOD(CACHELINE >> (CACHELINE>64), 5))
	+ 6  * (sizeof(MDB_PID_T)/4 % 3)    /* legacy(2) to word(4/8)? */
	+ 18 * (sizeof(pthread_t)/4 % 5)    /* can be struct{id, active data} */
	+ 90 * (sizeof(MDB_txbody) / CACHELINE % 3)
	+ 270 * (MDB_LOCK_TYPE % 120)
	/* The above is < 270*120 < 2**15 */
	+ ((sizeof(txnid_t) == 8) << 15)    /* 32bit/64bit */
	+ ((sizeof(MDB_reader) > CACHELINE) << 16)
	/* Not really needed - implied by MDB_LOCK_TYPE != (_WIN32 locking) */
	+ (((MDB_PIDLOCK) != 0)   << 17)
	/* 18 bits total: Must be <= (32 - MDB_LOCK_VERSION_BITS). */
};
961 /** @} */
962 
/** Common header for all page types. The page type depends on #mp_flags.
 *
 * #P_BRANCH and #P_LEAF pages have unsorted '#MDB_node's at the end, with
 * sorted #mp_ptrs[] entries referring to them. Exception: #P_LEAF2 pages
 * omit mp_ptrs and pack sorted #MDB_DUPFIXED values after the page header.
 *
 * #P_OVERFLOW records occupy one or more contiguous pages where only the
 * first has a page header. They hold the real data of #F_BIGDATA nodes.
 *
 * #P_SUBP sub-pages are small leaf "pages" with duplicate data.
 * A node with flag #F_DUPDATA but not #F_SUBDATA contains a sub-page.
 * (Duplicate data can also go in sub-databases, which use normal pages.)
 *
 * #P_META pages contain #MDB_meta, the start point of an LMDB snapshot.
 *
 * Each non-metapage up to #MDB_meta.%mm_last_pg is reachable exactly once
 * in the snapshot: Either used by a database or listed in a freeDB record.
 */
typedef struct MDB_page {
#define	mp_pgno	mp_p.p_pgno
#define	mp_next	mp_p.p_next
	union {
		pgno_t		p_pgno;	/**< page number */
		struct MDB_page *p_next; /**< for in-memory list of freed pages */
	} mp_p;
	uint16_t	mp_pad;			/**< key size if this is a LEAF2 page */
/**	@defgroup mdb_page	Page Flags
 *	@ingroup internal
 *	Flags for the page headers.
 *	@{
 */
#define	P_BRANCH	 0x01		/**< branch page */
#define	P_LEAF		 0x02		/**< leaf page */
#define	P_OVERFLOW	 0x04		/**< overflow page */
#define	P_META		 0x08		/**< meta page */
#define	P_DIRTY		 0x10		/**< dirty page, also set for #P_SUBP pages */
#define	P_LEAF2		 0x20		/**< for #MDB_DUPFIXED records */
#define	P_SUBP		 0x40		/**< for #MDB_DUPSORT sub-pages */
/* Note the gap: P_LOOSE/P_KEEP live in the high bits, leaving room for
 * future persistent page-type flags in the low byte.
 */
#define	P_LOOSE		 0x4000		/**< page was dirtied then freed, can be reused */
#define	P_KEEP		 0x8000		/**< leave this page alone during spill */
/** @} */
	uint16_t	mp_flags;		/**< @ref mdb_page */
#define mp_lower	mp_pb.pb.pb_lower
#define mp_upper	mp_pb.pb.pb_upper
#define mp_pages	mp_pb.pb_pages
	union {
		struct {
			indx_t		pb_lower;		/**< lower bound of free space */
			indx_t		pb_upper;		/**< upper bound of free space */
		} pb;
		uint32_t	pb_pages;	/**< number of overflow pages */
	} mp_pb;
	indx_t		mp_ptrs[1];		/**< dynamic size */
} MDB_page;
1017 
	/** Size of the page header, excluding dynamic data at the end */
#define PAGEHDRSZ	 ((unsigned) offsetof(MDB_page, mp_ptrs))

	/** Address of first usable data byte in a page, after the header */
#define METADATA(p)	 ((void *)((char *)(p) + PAGEHDRSZ))

	/** ITS#7713, change PAGEBASE to handle 65536 byte pages */
#define	PAGEBASE	((MDB_DEVEL) ? PAGEHDRSZ : 0)

	/** Number of nodes on a page.
	 *	mp_lower is the offset past the last mp_ptrs entry; subtracting the
	 *	header and dividing by 2 (sizeof(indx_t)) yields the entry count.
	 */
#define NUMKEYS(p)	 (((p)->mp_lower - (PAGEHDRSZ-PAGEBASE)) >> 1)

	/** The amount of space remaining in the page */
#define SIZELEFT(p)	 (indx_t)((p)->mp_upper - (p)->mp_lower)

	/** The percentage of space used in the page, in tenths of a percent. */
#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \
				((env)->me_psize - PAGEHDRSZ))
	/** The minimum page fill factor, in tenths of a percent.
	 *	Pages emptier than this are candidates for merging.
	 */
#define FILL_THRESHOLD	 250

	/** Test if a page is a leaf page */
#define IS_LEAF(p)	 F_ISSET((p)->mp_flags, P_LEAF)
	/** Test if a page is a LEAF2 page */
#define IS_LEAF2(p)	 F_ISSET((p)->mp_flags, P_LEAF2)
	/** Test if a page is a branch page */
#define IS_BRANCH(p)	 F_ISSET((p)->mp_flags, P_BRANCH)
	/** Test if a page is an overflow page */
#define IS_OVERFLOW(p)	 F_ISSET((p)->mp_flags, P_OVERFLOW)
	/** Test if a page is a sub page */
#define IS_SUBP(p)	 F_ISSET((p)->mp_flags, P_SUBP)

	/** The number of overflow pages needed to store the given size.
	 *	Accounts for the page header on the first overflow page only.
	 */
#define OVPAGES(size, psize)	((PAGEHDRSZ-1 + (size)) / (psize) + 1)

	/** Link in #MDB_txn.%mt_loose_pgs list.
	 *  Kept outside the page header, which is needed when reusing the page.
	 *	(p)+2 is MDB_page pointer arithmetic: the link is stored at byte
	 *	offset 2*sizeof(MDB_page), safely past the header fields.
	 */
#define NEXT_LOOSE_PAGE(p)		(*(MDB_page **)((p) + 2))
1059 
	/** Header for a single key/data pair within a page.
	 * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2.
	 * We guarantee 2-byte alignment for 'MDB_node's.
	 *
	 * #mn_lo and #mn_hi are used for data size on leaf nodes, and for child
	 * pgno on branch nodes.  On 64 bit platforms, #mn_flags is also used
	 * for pgno.  (Branch nodes have no flags).  Lo and hi are in host byte
	 * order in case some accesses can be optimized to 32-bit word access.
	 *
	 * Leaf node flags describe node contents.  #F_BIGDATA says the node's
	 * data part is the page number of an overflow page with actual data.
	 * #F_DUPDATA and #F_SUBDATA can be combined giving duplicate data in
	 * a sub-page/sub-database, and named databases (just #F_SUBDATA).
	 */
typedef struct MDB_node {
	/** part of data size or pgno
	 *	@{ */
#if BYTE_ORDER == LITTLE_ENDIAN
	unsigned short	mn_lo, mn_hi;
#else
	unsigned short	mn_hi, mn_lo;	/* declaration order swapped so a 32-bit read sees lo|hi<<16 */
#endif
	/** @} */
/** @defgroup mdb_node Node Flags
 *	@ingroup internal
 *	Flags for node headers.
 *	@{
 */
#define F_BIGDATA	 0x01			/**< data put on overflow page */
#define F_SUBDATA	 0x02			/**< data is a sub-database */
#define F_DUPDATA	 0x04			/**< data has duplicates */

/** valid flags for #mdb_node_add() */
#define	NODE_ADD_FLAGS	(F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND)

/** @} */
	unsigned short	mn_flags;		/**< @ref mdb_node */
	unsigned short	mn_ksize;		/**< key size */
	char		mn_data[1];			/**< key and data are appended here */
} MDB_node;
1100 
	/** Size of the node header, excluding dynamic data at the end */
#define NODESIZE	 offsetof(MDB_node, mn_data)

	/** Bit position of top word in page number, for shifting mn_flags.
	 *	0 when pgno_t is 32 bits (mn_lo/mn_hi suffice), 32 when 64 bits.
	 */
#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0)

	/** Size of a node in a branch page with a given key.
	 *	This is just the node header plus the key, there is no data.
	 */
#define INDXSIZE(k)	 (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size))

	/** Size of a node in a leaf page with a given key and data.
	 *	This is node header plus key plus data size.
	 */
#define LEAFSIZE(k, d)	 (NODESIZE + (k)->mv_size + (d)->mv_size)

	/** Address of node \b i in page \b p */
#define NODEPTR(p, i)	 ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEBASE))

	/** Address of the key for the node */
#define NODEKEY(node)	 (void *)((node)->mn_data)

	/** Address of the data for a node */
#define NODEDATA(node)	 (void *)((char *)(node)->mn_data + (node)->mn_ksize)

	/** Get the page number pointed to by a branch node.
	 *	Reassembles the pgno from mn_lo/mn_hi (and mn_flags on 64-bit).
	 */
#define NODEPGNO(node) \
	((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \
	 (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0))
	/** Set the page number in a branch node */
#define SETPGNO(node,pgno)	do { \
	(node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \
	if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0)

	/** Get the size of the data in a leaf node */
#define NODEDSZ(node)	 ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16))
	/** Set the size of the data for a leaf node */
#define SETDSZ(node,size)	do { \
	(node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0)
	/** The size of a key in a node */
#define NODEKSZ(node)	 ((node)->mn_ksize)

	/** Copy a page number from src to dst.
	 *	Done 16 bits at a time since pgno fields inside pages are only
	 *	guaranteed 2-byte aligned; a direct assignment could fault on
	 *	strict-alignment CPUs unless MISALIGNED_OK is defined.
	 */
#ifdef MISALIGNED_OK
#define COPY_PGNO(dst,src)	dst = src
#else
#if MDB_SIZE_MAX > 0xffffffffU
#define COPY_PGNO(dst,src)	do { \
	unsigned short *s, *d;	\
	s = (unsigned short *)&(src);	\
	d = (unsigned short *)&(dst);	\
	*d++ = *s++;	\
	*d++ = *s++;	\
	*d++ = *s++;	\
	*d = *s;	\
} while (0)
#else
#define COPY_PGNO(dst,src)	do { \
	unsigned short *s, *d;	\
	s = (unsigned short *)&(src);	\
	d = (unsigned short *)&(dst);	\
	*d++ = *s++;	\
	*d = *s;	\
} while (0)
#endif
#endif
	/** The address of a key in a LEAF2 page.
	 *	LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs.
	 *	There are no node headers, keys are stored contiguously.
	 */
#define LEAF2KEY(p, i, ks)	((char *)(p) + PAGEHDRSZ + ((i)*(ks)))

	/** Set the \b node's key into \b keyptr, if requested. */
#define MDB_GET_KEY(node, keyptr)	{ if ((keyptr) != NULL) { \
	(keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } }

	/** Set the \b node's key into \b key. */
#define MDB_GET_KEY2(node, key)	{ key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); }
1179 
	/** Information about a single database in the environment.
	 *	This record is persisted: it is the payload of a named-DB node
	 *	in the main DB, and #MDB_meta embeds two of them, so its layout
	 *	is part of the file format.
	 */
typedef struct MDB_db {
	uint32_t	md_pad;		/**< also ksize for LEAF2 pages */
	uint16_t	md_flags;	/**< @ref mdb_dbi_open */
	uint16_t	md_depth;	/**< depth of this tree */
	pgno_t		md_branch_pages;	/**< number of internal pages */
	pgno_t		md_leaf_pages;		/**< number of leaf pages */
	pgno_t		md_overflow_pages;	/**< number of overflow pages */
	mdb_size_t	md_entries;		/**< number of data items */
	pgno_t		md_root;		/**< the root page of this tree */
} MDB_db;
1191 
#define MDB_VALID	0x8000		/**< DB handle is valid, for me_dbflags */
	/** Flags stored in the file (all md_flags bits except #MDB_VALID) */
#define PERSISTENT_FLAGS	(0xffff & ~(MDB_VALID))
	/** #mdb_dbi_open() flags */
#define VALID_FLAGS	(MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\
	MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE)

	/** Handle for the DB used to track free pages. */
#define	FREE_DBI	0
	/** Handle for the default DB. */
#define	MAIN_DBI	1
	/** Number of DBs in metapage (free and main) - also hardcoded elsewhere */
#define CORE_DBS	2

	/** Number of meta pages - also hardcoded elsewhere */
#define NUM_METAS	2
1207 
	/** Meta page content.
	 *	A meta page is the start point for accessing a database snapshot.
	 *	Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2).
	 */
typedef struct MDB_meta {
		/** Stamp identifying this as an LMDB file. It must be set
		 *	to #MDB_MAGIC. */
	uint32_t	mm_magic;
		/** Version number of this file. Must be set to #MDB_DATA_VERSION. */
	uint32_t	mm_version;
#ifdef MDB_VL32
	union {		/* always zero since we don't support fixed mapping in MDB_VL32 */
		MDB_ID	mmun_ull;	/* forces the field to 64 bits even with 32-bit pointers */
		void *mmun_address;
	} mm_un;
#define	mm_address mm_un.mmun_address
#else
	void		*mm_address;		/**< address for fixed mapping */
#endif
	mdb_size_t	mm_mapsize;			/**< size of mmap region */
	MDB_db		mm_dbs[CORE_DBS];	/**< first is free space, 2nd is main db */
	/** The size of pages used in this DB */
#define	mm_psize	mm_dbs[FREE_DBI].md_pad
	/** Any persistent environment flags. @ref mdb_env */
#define	mm_flags	mm_dbs[FREE_DBI].md_flags
	/** Last used page in the datafile.
	 *	Actually the file may be shorter if the freeDB lists the final pages.
	 */
	pgno_t		mm_last_pg;
	volatile txnid_t	mm_txnid;	/**< txnid that committed this page */
} MDB_meta;
1239 
	/** Buffer for a stack-allocated meta page.
	 *	The members define size and alignment, and silence type
	 *	aliasing warnings.  They are not used directly; that could
	 *	mean incorrectly using several union members in parallel.
	 */
typedef union MDB_metabuf {
	MDB_page	mb_page;
	struct {
		char		mm_pad[PAGEHDRSZ];	/**< mirrors the page header preceding the meta */
		MDB_meta	mm_meta;
	} mb_metabuf;
} MDB_metabuf;
1252 
	/** Auxiliary DB info.
	 *	The information here is mostly static/read-only. There is
	 *	only a single copy of this record in the environment.
	 *	Unlike #MDB_db this is never persisted to the file.
	 */
typedef struct MDB_dbx {
	MDB_val		md_name;		/**< name of the database */
	MDB_cmp_func	*md_cmp;	/**< function for comparing keys */
	MDB_cmp_func	*md_dcmp;	/**< function for comparing data items */
	MDB_rel_func	*md_rel;	/**< user relocate function */
	void		*md_relctx;		/**< user-provided context for md_rel */
} MDB_dbx;
1264 
	/** A database transaction.
	 *	Every operation requires a transaction handle.
	 */
struct MDB_txn {
	MDB_txn		*mt_parent;		/**< parent of a nested txn */
	/** Nested txn under this txn, set together with flag #MDB_TXN_HAS_CHILD */
	MDB_txn		*mt_child;
	pgno_t		mt_next_pgno;	/**< next unallocated page */
#ifdef MDB_VL32
	pgno_t		mt_last_pgno;	/**< last written page */
#endif
	/** The ID of this transaction. IDs are integers incrementing from 1.
	 *	Only committed write transactions increment the ID. If a transaction
	 *	aborts, the ID may be re-used by the next writer.
	 */
	txnid_t		mt_txnid;
	MDB_env		*mt_env;		/**< the DB environment */
	/** The list of pages that became unused during this transaction.
	 */
	MDB_IDL		mt_free_pgs;
	/** The list of loose pages that became unused and may be reused
	 *	in this transaction, linked through #NEXT_LOOSE_PAGE(page).
	 */
	MDB_page	*mt_loose_pgs;
	/** Number of loose pages (#mt_loose_pgs) */
	int			mt_loose_count;
	/** The sorted list of dirty pages we temporarily wrote to disk
	 *	because the dirty list was full. page numbers in here are
	 *	shifted left by 1, deleted slots have the LSB set.
	 */
	MDB_IDL		mt_spill_pgs;
	/** Write and read txns never need both members, so share the space. */
	union {
		/** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */
		MDB_ID2L	dirty_list;
		/** For read txns: This thread/txn's reader table slot, or NULL. */
		MDB_reader	*reader;
	} mt_u;
	/** Array of records for each DB known in the environment. */
	MDB_dbx		*mt_dbxs;
	/** Array of MDB_db records for each known DB */
	MDB_db		*mt_dbs;
	/** Array of sequence numbers for each DB handle */
	unsigned int	*mt_dbiseqs;
/** @defgroup mt_dbflag	Transaction DB Flags
 *	@ingroup internal
 * @{
 */
#define DB_DIRTY	0x01		/**< DB was written in this txn */
#define DB_STALE	0x02		/**< Named-DB record is older than txnID */
#define DB_NEW		0x04		/**< Named-DB handle opened in this txn */
#define DB_VALID	0x08		/**< DB handle is valid, see also #MDB_VALID */
#define DB_USRVALID	0x10		/**< As #DB_VALID, but not set for #FREE_DBI */
#define DB_DUPDATA	0x20		/**< DB is #MDB_DUPSORT data */
/** @} */
	/** In write txns, array of cursors for each DB */
	MDB_cursor	**mt_cursors;
	/** Array of flags for each DB */
	unsigned char	*mt_dbflags;
#ifdef MDB_VL32
	/** List of read-only pages (actually chunks) */
	MDB_ID3L	mt_rpages;
	/** We map chunks of 16 pages. Even though Windows uses 4KB pages, all
	 * mappings must begin on 64KB boundaries. So we round off all pgnos to
	 * a chunk boundary. We do the same on Linux for symmetry, and also to
	 * reduce the frequency of mmap/munmap calls.
	 */
#define MDB_RPAGE_CHUNK	16
#define MDB_TRPAGE_SIZE	4096	/**< size of #mt_rpages array of chunks */
#define MDB_TRPAGE_MAX	(MDB_TRPAGE_SIZE-1)	/**< maximum chunk index */
	unsigned int mt_rpcheck;	/**< threshold for reclaiming unref'd chunks */
#endif
	/**	Number of DB records in use, or 0 when the txn is finished.
	 *	This number only ever increments until the txn finishes; we
	 *	don't decrement it when individual DB handles are closed.
	 */
	MDB_dbi		mt_numdbs;

/** @defgroup mdb_txn	Transaction Flags
 *	@ingroup internal
 *	@{
 */
	/** #mdb_txn_begin() flags */
#define MDB_TXN_BEGIN_FLAGS	(MDB_NOMETASYNC|MDB_NOSYNC|MDB_RDONLY)
#define MDB_TXN_NOMETASYNC	MDB_NOMETASYNC	/**< don't sync meta for this txn on commit */
#define MDB_TXN_NOSYNC		MDB_NOSYNC	/**< don't sync this txn on commit */
#define MDB_TXN_RDONLY		MDB_RDONLY	/**< read-only transaction */
	/* internal txn flags */
#define MDB_TXN_WRITEMAP	MDB_WRITEMAP	/**< copy of #MDB_env flag in writers */
#define MDB_TXN_FINISHED	0x01		/**< txn is finished or never began */
#define MDB_TXN_ERROR		0x02		/**< txn is unusable after an error */
#define MDB_TXN_DIRTY		0x04		/**< must write, even if dirty list is empty */
#define MDB_TXN_SPILLS		0x08		/**< txn or a parent has spilled pages */
#define MDB_TXN_HAS_CHILD	0x10		/**< txn has an #MDB_txn.%mt_child */
	/** most operations on the txn are currently illegal */
#define MDB_TXN_BLOCKED		(MDB_TXN_FINISHED|MDB_TXN_ERROR|MDB_TXN_HAS_CHILD)
/** @} */
	unsigned int	mt_flags;		/**< @ref mdb_txn */
	/** #dirty_list room: Array size - \#dirty pages visible to this txn.
	 *	Includes ancestor txns' dirty pages not hidden by other txns'
	 *	dirty/spilled pages. Thus commit(nested txn) has room to merge
	 *	dirty_list into mt_parent after freeing hidden mt_parent pages.
	 */
	unsigned int	mt_dirty_room;
};
1369 
/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty.
 * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to
 * raise this on a 64 bit machine.
 * (This is the maximum B-tree depth a cursor can track.)
 */
#define CURSOR_STACK		 32

struct MDB_xcursor;	/* forward declaration; cursors may own an xcursor */
1377 
	/** Cursors are used for all DB operations.
	 *	A cursor holds a path of (page pointer, key index) from the DB
	 *	root to a position in the DB, plus other state. #MDB_DUPSORT
	 *	cursors include an xcursor to the current data item. Write txns
	 *	track their cursors and keep them up to date when data moves.
	 *	Exception: An xcursor's pointer to a #P_SUBP page can be stale.
	 *	(A node with #F_DUPDATA but no #F_SUBDATA contains a subpage).
	 */
struct MDB_cursor {
	/** Next cursor on this DB in this txn */
	MDB_cursor	*mc_next;
	/** Backup of the original cursor if this cursor is a shadow */
	MDB_cursor	*mc_backup;
	/** Context used for databases with #MDB_DUPSORT, otherwise NULL */
	struct MDB_xcursor	*mc_xcursor;
	/** The transaction that owns this cursor */
	MDB_txn		*mc_txn;
	/** The database handle this cursor operates on */
	MDB_dbi		mc_dbi;
	/** The database record for this cursor */
	MDB_db		*mc_db;
	/** The database auxiliary record for this cursor */
	MDB_dbx		*mc_dbx;
	/** The @ref mt_dbflag for this database */
	unsigned char	*mc_dbflag;
	unsigned short 	mc_snum;	/**< number of pushed pages */
	unsigned short	mc_top;		/**< index of top page, normally mc_snum-1 */
/** @defgroup mdb_cursor	Cursor Flags
 *	@ingroup internal
 *	Cursor state flags.
 *	@{
 */
#define C_INITIALIZED	0x01	/**< cursor has been initialized and is valid */
#define C_EOF	0x02			/**< No more data */
#define C_SUB	0x04			/**< Cursor is a sub-cursor */
#define C_DEL	0x08			/**< last op was a cursor_del */
#define C_UNTRACK	0x40		/**< Un-track cursor when closing */
#define C_WRITEMAP	MDB_TXN_WRITEMAP /**< Copy of txn flag */
/** Read-only cursor into the txn's original snapshot in the map.
 *	Set for read-only txns, and in #mdb_page_alloc() for #FREE_DBI when
 *	#MDB_DEVEL & 2. Only implements code which is necessary for this.
 */
#define C_ORIG_RDONLY	MDB_TXN_RDONLY
/** @} */
	unsigned int	mc_flags;	/**< @ref mdb_cursor */
	MDB_page	*mc_pg[CURSOR_STACK];	/**< stack of pushed pages */
	indx_t		mc_ki[CURSOR_STACK];	/**< stack of page indices */
#ifdef MDB_VL32
	MDB_page	*mc_ovpg;		/**< a referenced overflow page */
#	define MC_OVPG(mc)			((mc)->mc_ovpg)
#	define MC_SET_OVPG(mc, pg)	((mc)->mc_ovpg = (pg))
#else
	/* Non-VL32 builds need no overflow-page tracking; the accessors
	 * collapse to constants so callers need no #ifdefs. */
#	define MC_OVPG(mc)			((MDB_page *)0)
#	define MC_SET_OVPG(mc, pg)	((void)0)
#endif
};
1434 
	/** Context for sorted-dup records.
	 *	We could have gone to a fully recursive design, with arbitrarily
	 *	deep nesting of sub-databases. But for now we only handle these
	 *	levels - main DB, optional sub-DB, sorted-duplicate DB.
	 */
typedef struct MDB_xcursor {
	/** A sub-cursor for traversing the Dup DB */
	MDB_cursor mx_cursor;
	/** The database record for this Dup DB */
	MDB_db	mx_db;
	/**	The auxiliary DB record for this Dup DB */
	MDB_dbx	mx_dbx;
	/** The @ref mt_dbflag for this Dup DB (value, not pointer as in MDB_cursor) */
	unsigned char mx_dbflag;
} MDB_xcursor;
1450 
	/** Check if there is an inited xcursor */
#define XCURSOR_INITED(mc) \
	((mc)->mc_xcursor && ((mc)->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))

	/** Update the xcursor's sub-page pointer, if any, in \b mc.  Needed
	 *	when the node which contains the sub-page may have moved.  Called
	 *	with leaf page \b mp = mc->mc_pg[\b top].
	 *	No-op unless the node at mc_ki[top] actually holds a sub-page
	 *	(F_DUPDATA set, F_SUBDATA clear).
	 */
#define XCURSOR_REFRESH(mc, top, mp) do { \
	MDB_page *xr_pg = (mp); \
	MDB_node *xr_node; \
	if (!XCURSOR_INITED(mc) || (mc)->mc_ki[top] >= NUMKEYS(xr_pg)) break; \
	xr_node = NODEPTR(xr_pg, (mc)->mc_ki[top]); \
	if ((xr_node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) \
		(mc)->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(xr_node); \
} while (0)
1467 
	/** State of FreeDB old pages, stored in the MDB_env */
typedef struct MDB_pgstate {
	pgno_t		*mf_pghead;	/**< Reclaimed freeDB pages, or NULL before use */
	txnid_t		mf_pglast;	/**< ID of last used record, or 0 if !mf_pghead */
} MDB_pgstate;
1473 
	/** The database environment.
	 *	One per open database file; shared by all transactions/cursors
	 *	created from it.
	 */
struct MDB_env {
	HANDLE		me_fd;		/**< The main data file */
	HANDLE		me_lfd;		/**< The lock file */
	HANDLE		me_mfd;		/**< For writing and syncing the meta pages */
#ifdef _WIN32
#ifdef MDB_VL32
	HANDLE		me_fmh;		/**< File Mapping handle */
#endif /* MDB_VL32 */
	HANDLE		me_ovfd;	/**< Overlapped/async with write-through file handle */
#endif /* _WIN32 */
	/** Failed to update the meta page. Probably an I/O error. */
#define	MDB_FATAL_ERROR	0x80000000U
	/** Some fields are initialized. */
#define	MDB_ENV_ACTIVE	0x20000000U
	/** me_txkey is set */
#define	MDB_ENV_TXKEY	0x10000000U
	/** fdatasync is unreliable */
#define	MDB_FSYNCONLY	0x08000000U
	uint32_t 	me_flags;		/**< @ref mdb_env */
	unsigned int	me_psize;	/**< DB page size, inited from me_os_psize */
	unsigned int	me_os_psize;	/**< OS page size, from #GET_PAGESIZE */
	unsigned int	me_maxreaders;	/**< size of the reader table */
	/** Max #MDB_txninfo.%mti_numreaders of interest to #mdb_env_close() */
	volatile int	me_close_readers;
	MDB_dbi		me_numdbs;		/**< number of DBs opened */
	MDB_dbi		me_maxdbs;		/**< size of the DB table */
	MDB_PID_T	me_pid;		/**< process ID of this env */
	char		*me_path;		/**< path to the DB files */
	char		*me_map;		/**< the memory map of the data file */
	MDB_txninfo	*me_txns;		/**< the memory map of the lock file or NULL */
	MDB_meta	*me_metas[NUM_METAS];	/**< pointers to the two meta pages */
	void		*me_pbuf;		/**< scratch area for DUPSORT put() */
	MDB_txn		*me_txn;		/**< current write transaction */
	MDB_txn		*me_txn0;		/**< prealloc'd write transaction */
	mdb_size_t	me_mapsize;		/**< size of the data memory map */
	MDB_OFF_T	me_size;		/**< current file size */
	pgno_t		me_maxpg;		/**< me_mapsize / me_psize */
	MDB_dbx		*me_dbxs;		/**< array of static DB info */
	uint16_t	*me_dbflags;	/**< array of flags from MDB_db.md_flags */
	unsigned int	*me_dbiseqs;	/**< array of dbi sequence numbers */
	pthread_key_t	me_txkey;	/**< thread-key for readers */
	txnid_t		me_pgoldest;	/**< ID of oldest reader last time we looked */
	MDB_pgstate	me_pgstate;		/**< state of old pages from freeDB */
#	define		me_pglast	me_pgstate.mf_pglast
#	define		me_pghead	me_pgstate.mf_pghead
	MDB_page	*me_dpages;		/**< list of malloc'd blocks for re-use */
	/** IDL of pages that became unused in a write txn */
	MDB_IDL		me_free_pgs;
	/** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. */
	MDB_ID2L	me_dirty_list;
	/** Max number of freelist items that can fit in a single overflow page */
	int			me_maxfree_1pg;
	/** Max size of a node on a page */
	unsigned int	me_nodemax;
#if !(MDB_MAXKEYSIZE)
	unsigned int	me_maxkey;	/**< max size of a key */
#endif
	int		me_live_reader;		/**< have liveness lock in reader table */
#ifdef _WIN32
	int		me_pidquery;		/**< Used in OpenProcess */
	OVERLAPPED	*ov;			/**< Used for for overlapping I/O requests */
	int		ovs;				/**< Count of OVERLAPPEDs */
#endif
#ifdef MDB_USE_POSIX_MUTEX	/* Posix mutexes reside in shared mem */
#	define		me_rmutex	me_txns->mti_rmutex /**< Shared reader lock */
#	define		me_wmutex	me_txns->mti_wmutex /**< Shared writer lock */
#else
	mdb_mutex_t	me_rmutex;	/**< reader lock handle (named object / SysV sem) */
	mdb_mutex_t	me_wmutex;	/**< writer lock handle (named object / SysV sem) */
# if defined(_WIN32) || defined(MDB_USE_POSIX_SEM)
	/** Half-initialized name of mutexes, to be completed by #MUTEXNAME() */
	char		me_mutexname[sizeof(MUTEXNAME_PREFIX) + 11];
# endif
#endif
#ifdef MDB_VL32
	MDB_ID3L	me_rpages;	/**< like #mt_rpages, but global to env */
	pthread_mutex_t	me_rpmutex;	/**< control access to #me_rpages */
#define MDB_ERPAGE_SIZE	16384
#define MDB_ERPAGE_MAX	(MDB_ERPAGE_SIZE-1)
	unsigned int me_rpcheck;	/**< threshold for reclaiming unref'd chunks */
#endif
	void		*me_userctx;	 /**< User-settable context */
	MDB_assert_func *me_assert_func; /**< Callback for assertion failures */
};
1559 
	/** Nested transaction.
	 *	Extends #MDB_txn with the parent's saved freeDB state so it can
	 *	be restored if the child aborts.
	 */
typedef struct MDB_ntxn {
	MDB_txn		mnt_txn;		/**< the transaction */
	MDB_pgstate	mnt_pgstate;	/**< parent transaction's saved freestate */
} MDB_ntxn;
1565 
	/** max number of pages to commit in one writev() call */
#define MDB_COMMIT_PAGES	 64
#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES
#undef MDB_COMMIT_PAGES
#define MDB_COMMIT_PAGES	IOV_MAX
#endif

	/** max bytes to write in one call.
	 *	1GiB, halved to 512MiB when ssize_t is 32 bits so the count
	 *	stays well inside the positive range.
	 */
#define MAX_WRITE		(0x40000000U >> (sizeof(ssize_t) == 4))

	/** Check \b txn and \b dbi arguments to a function */
#define TXN_DBI_EXIST(txn, dbi, validity) \
	((txn) && (dbi)<(txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity)))

	/** Check for misused \b dbi handles: true if the handle was closed
	 *	and reopened (sequence number mismatch) since the txn began. */
#define TXN_DBI_CHANGED(txn, dbi) \
	((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi])
1583 
/* Forward declarations of internal (static) functions. */
static int  mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp);
static int  mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp);
static int  mdb_page_touch(MDB_cursor *mc);

	/** Names matching the MDB_END_* operation numbers below, for logging */
#define MDB_END_NAMES {"committed", "empty-commit", "abort", "reset", \
	"reset-tmp", "fail-begin", "fail-beginchild"}
enum {
	/* mdb_txn_end operation number, for logging */
	MDB_END_COMMITTED, MDB_END_EMPTY_COMMIT, MDB_END_ABORT, MDB_END_RESET,
	MDB_END_RESET_TMP, MDB_END_FAIL_BEGIN, MDB_END_FAIL_BEGINCHILD
};
#define MDB_END_OPMASK	0x0F	/**< mask for #mdb_txn_end() operation number */
#define MDB_END_UPDATE	0x10	/**< update env state (DBIs) */
#define MDB_END_FREE	0x20	/**< free txn unless it is #MDB_env.%me_txn0 */
#define MDB_END_SLOT MDB_NOTLS	/**< release any reader slot if #MDB_NOTLS */
static void mdb_txn_end(MDB_txn *txn, unsigned mode);

static int  mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **mp, int *lvl);
static int  mdb_page_search_root(MDB_cursor *mc,
			    MDB_val *key, int modify);
	/* flags for mdb_page_search() */
#define MDB_PS_MODIFY	1
#define MDB_PS_ROOTONLY	2
#define MDB_PS_FIRST	4
#define MDB_PS_LAST		8
static int  mdb_page_search(MDB_cursor *mc,
			    MDB_val *key, int flags);
static int	mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst);

#define MDB_SPLIT_REPLACE	MDB_APPENDDUP	/**< newkey is not new */
static int	mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata,
				pgno_t newpgno, unsigned int nflags);

static int  mdb_env_read_header(MDB_env *env, int prev, MDB_meta *meta);
static MDB_meta *mdb_env_pick_meta(const MDB_env *env);
static int  mdb_env_write_meta(MDB_txn *txn);
#if defined(MDB_USE_POSIX_MUTEX) && !defined(MDB_ROBUST_SUPPORTED) /* Drop unused excl arg */
# define mdb_env_close0(env, excl) mdb_env_close1(env)
#endif
static void mdb_env_close0(MDB_env *env, int excl);

static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp);
static int  mdb_node_add(MDB_cursor *mc, indx_t indx,
			    MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags);
static void mdb_node_del(MDB_cursor *mc, int ksize);
static void mdb_node_shrink(MDB_page *mp, indx_t indx);
static int	mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft);
static int  mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data);
static size_t	mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data);
1632 static size_t	mdb_branch_size(MDB_env *env, MDB_val *key);
1633 
1634 static int	mdb_rebalance(MDB_cursor *mc);
1635 static int	mdb_update_key(MDB_cursor *mc, MDB_val *key);
1636 
1637 static void	mdb_cursor_pop(MDB_cursor *mc);
1638 static int	mdb_cursor_push(MDB_cursor *mc, MDB_page *mp);
1639 
1640 static int	mdb_cursor_del0(MDB_cursor *mc);
1641 static int	mdb_del0(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags);
1642 static int	mdb_cursor_sibling(MDB_cursor *mc, int move_right);
1643 static int	mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op);
1644 static int	mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op);
1645 static int	mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op,
1646 				int *exactp);
1647 static int	mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data);
1648 static int	mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data);
1649 
1650 static void	mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx);
1651 static void	mdb_xcursor_init0(MDB_cursor *mc);
1652 static void	mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node);
1653 static void	mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int force);
1654 
1655 static int	mdb_drop0(MDB_cursor *mc, int subs);
1656 static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi);
1657 static int mdb_reader_check0(MDB_env *env, int rlocked, int *dead);
1658 
1659 /** @cond */
1660 static MDB_cmp_func	mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long;
1661 /** @endcond */
1662 
1663 /** Compare two items pointing at '#mdb_size_t's of unknown alignment. */
1664 #ifdef MISALIGNED_OK
1665 # define mdb_cmp_clong mdb_cmp_long
1666 #else
1667 # define mdb_cmp_clong mdb_cmp_cint
1668 #endif
1669 
1670 /** True if we need #mdb_cmp_clong() instead of \b cmp for #MDB_INTEGERDUP */
1671 #define NEED_CMP_CLONG(cmp, ksize) \
1672 	(UINT_MAX < MDB_SIZE_MAX && \
1673 	 (cmp) == mdb_cmp_int && (ksize) == sizeof(mdb_size_t))
1674 
1675 #ifdef _WIN32
1676 static SECURITY_DESCRIPTOR mdb_null_sd;
1677 static SECURITY_ATTRIBUTES mdb_all_sa;
1678 static int mdb_sec_inited;
1679 
1680 struct MDB_name;
1681 static int utf8_to_utf16(const char *src, struct MDB_name *dst, int xtra);
1682 #endif
1683 
1684 /** Return the library version info. */
1685 char * ESECT
mdb_version(int * major,int * minor,int * patch)1686 mdb_version(int *major, int *minor, int *patch)
1687 {
1688 	if (major) *major = MDB_VERSION_MAJOR;
1689 	if (minor) *minor = MDB_VERSION_MINOR;
1690 	if (patch) *patch = MDB_VERSION_PATCH;
1691 	return MDB_VERSION_STRING;
1692 }
1693 
/** Table of descriptions for LMDB @ref errors.
 * Indexed by (err - MDB_KEYEXIST); the entry order must exactly match
 * the MDB_KEYEXIST..MDB_LAST_ERRCODE error-code sequence, since
 * mdb_strerror() indexes this array directly.
 */
static char *const mdb_errstr[] = {
	"MDB_KEYEXIST: Key/data pair already exists",
	"MDB_NOTFOUND: No matching key/data pair found",
	"MDB_PAGE_NOTFOUND: Requested page not found",
	"MDB_CORRUPTED: Located page was wrong type",
	"MDB_PANIC: Update of meta page failed or environment had fatal error",
	"MDB_VERSION_MISMATCH: Database environment version mismatch",
	"MDB_INVALID: File is not an LMDB file",
	"MDB_MAP_FULL: Environment mapsize limit reached",
	"MDB_DBS_FULL: Environment maxdbs limit reached",
	"MDB_READERS_FULL: Environment maxreaders limit reached",
	"MDB_TLS_FULL: Thread-local storage keys full - too many environments open",
	"MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big",
	"MDB_CURSOR_FULL: Internal error - cursor stack limit reached",
	"MDB_PAGE_FULL: Internal error - page has no more space",
	"MDB_MAP_RESIZED: Database contents grew beyond environment mapsize",
	"MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed",
	"MDB_BAD_RSLOT: Invalid reuse of reader locktable slot",
	"MDB_BAD_TXN: Transaction must abort, has a child, or is invalid",
	"MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size",
	"MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly",
	"MDB_PROBLEM: Unexpected problem - txn should abort",
};
1718 
/** Return a human-readable description for an LMDB or system error code.
 * LMDB's own codes are looked up in #mdb_errstr[]; 0 and C-runtime codes
 * fall through to strerror(3); on Windows anything else is treated as a
 * Win32 error code.
 */
char *
mdb_strerror(int err)
{
#ifdef _WIN32
	/** HACK: pad 4KB on stack over the buf. Return system msgs in buf.
	 *	This works as long as no function between the call to mdb_strerror
	 *	and the actual use of the message uses more than 4K of stack.
	 */
#define MSGSIZE	1024
#define PADSIZE	4096
	char buf[MSGSIZE+PADSIZE], *ptr = buf;
#endif
	int i;
	if (!err)
		return ("Successful return: 0");

	/* LMDB-specific error codes map directly into the table */
	if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) {
		i = err - MDB_KEYEXIST;
		return mdb_errstr[i];
	}

#ifdef _WIN32
	/* These are the C-runtime error codes we use. The comment indicates
	 * their numeric value, and the Win32 error they would correspond to
	 * if the error actually came from a Win32 API. A major mess, we should
	 * have used LMDB-specific error codes for everything.
	 */
	switch(err) {
	case ENOENT:	/* 2, FILE_NOT_FOUND */
	case EIO:		/* 5, ACCESS_DENIED */
	case ENOMEM:	/* 12, INVALID_ACCESS */
	case EACCES:	/* 13, INVALID_DATA */
	case EBUSY:		/* 16, CURRENT_DIRECTORY */
	case EINVAL:	/* 22, BAD_COMMAND */
	case ENOSPC:	/* 28, OUT_OF_PAPER */
		return strerror(err);
	default:
		;
	}
	buf[0] = 0;
	/* The final Arguments parameter is never read because of
	 * FORMAT_MESSAGE_IGNORE_INSERTS; the cast expression is only a
	 * dummy value. The message lands in buf (see HACK note above
	 * about its post-return lifetime).
	 */
	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
		FORMAT_MESSAGE_IGNORE_INSERTS,
		NULL, err, 0, ptr, MSGSIZE, (va_list *)buf+MSGSIZE);
	return ptr;
#else
	return strerror(err);
#endif
}
1767 
/** assert(3) variant in cursor context */
#define mdb_cassert(mc, expr)	mdb_assert0((mc)->mc_txn->mt_env, expr, #expr)
/** assert(3) variant in transaction context */
#define mdb_tassert(txn, expr)	mdb_assert0((txn)->mt_env, expr, #expr)
/** assert(3) variant in environment context */
#define mdb_eassert(env, expr)	mdb_assert0(env, expr, #expr)

#ifndef NDEBUG
# define mdb_assert0(env, expr, expr_txt) ((expr) ? (void)0 : \
		mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__))

/** Report an assertion failure and abort.
 * Formats the location and failed expression into a bounded buffer,
 * first invokes the application's #MDB_assert_func callback (if set),
 * then prints to stderr and calls abort(3). Never returns.
 */
static void ESECT
mdb_assert_fail(MDB_env *env, const char *expr_txt,
	const char *func, const char *file, int line)
{
	char buf[400];
	/* %.100s/%.200s/%.40s precisions keep the output within sizeof(buf) */
	sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()",
		file, line, expr_txt, func);
	if (env->me_assert_func)
		env->me_assert_func(env, buf);
	fprintf(stderr, "%s\n", buf);
	abort();
}
#else
# define mdb_assert0(env, expr, expr_txt) ((void) 0)
#endif /* NDEBUG */
1794 
#if MDB_DEBUG
/** Return the page number of \b mp which may be sub-page, for debug output */
static pgno_t
mdb_dbg_pgno(MDB_page *mp)
{
	pgno_t ret;
	/* COPY_PGNO handles a potentially unaligned mp_pgno in sub-pages */
	COPY_PGNO(ret, mp->mp_pgno);
	return ret;
}
1804 
1805 /** Display a key in hexadecimal and return the address of the result.
1806  * @param[in] key the key to display
1807  * @param[in] buf the buffer to write into. Should always be #DKBUF.
1808  * @return The key in hexadecimal form.
1809  */
1810 char *
mdb_dkey(MDB_val * key,char * buf)1811 mdb_dkey(MDB_val *key, char *buf)
1812 {
1813 	char *ptr = buf;
1814 	unsigned char *c = key->mv_data;
1815 	unsigned int i;
1816 
1817 	if (!key)
1818 		return "";
1819 
1820 	if (key->mv_size > DKBUF_MAXKEYSIZE)
1821 		return "MDB_MAXKEYSIZE";
1822 	/* may want to make this a dynamic check: if the key is mostly
1823 	 * printable characters, print it as-is instead of converting to hex.
1824 	 */
1825 #if 1
1826 	buf[0] = '\0';
1827 	for (i=0; i<key->mv_size; i++)
1828 		ptr += sprintf(ptr, "%02x", *c++);
1829 #else
1830 	sprintf(buf, "%.*s", key->mv_size, key->mv_data);
1831 #endif
1832 	return buf;
1833 }
1834 
1835 static const char *
mdb_leafnode_type(MDB_node * n)1836 mdb_leafnode_type(MDB_node *n)
1837 {
1838 	static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}};
1839 	return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" :
1840 		tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)];
1841 }
1842 
/** Display all the keys in the page. */
void
mdb_page_list(MDB_page *mp)
{
	pgno_t pgno = mdb_dbg_pgno(mp);
	const char *type, *state = (mp->mp_flags & P_DIRTY) ? ", dirty" : "";
	MDB_node *node;
	unsigned int i, nkeys, nsize, total = 0;
	MDB_val key;
	DKBUF;

	/* Classify the page. Overflow and meta pages are fully described
	 * by their headers, so print and return immediately.
	 */
	switch (mp->mp_flags & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) {
	case P_BRANCH:              type = "Branch page";		break;
	case P_LEAF:                type = "Leaf page";			break;
	case P_LEAF|P_SUBP:         type = "Sub-page";			break;
	case P_LEAF|P_LEAF2:        type = "LEAF2 page";		break;
	case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page";	break;
	case P_OVERFLOW:
		fprintf(stderr, "Overflow page %"Yu" pages %u%s\n",
			pgno, mp->mp_pages, state);
		return;
	case P_META:
		fprintf(stderr, "Meta-page %"Yu" txnid %"Yu"\n",
			pgno, ((MDB_meta *)METADATA(mp))->mm_txnid);
		return;
	default:
		fprintf(stderr, "Bad page %"Yu" flags 0x%X\n", pgno, mp->mp_flags);
		return;
	}

	nkeys = NUMKEYS(mp);
	fprintf(stderr, "%s %"Yu" numkeys %d%s\n", type, pgno, nkeys, state);

	for (i=0; i<nkeys; i++) {
		if (IS_LEAF2(mp)) {	/* LEAF2 pages have no mp_ptrs[] or node headers */
			key.mv_size = nsize = mp->mp_pad;
			key.mv_data = LEAF2KEY(mp, i, nsize);
			total += nsize;
			fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key));
			continue;
		}
		node = NODEPTR(mp, i);
		key.mv_size = node->mn_ksize;
		key.mv_data = node->mn_data;
		nsize = NODESIZE + key.mv_size;
		if (IS_BRANCH(mp)) {
			fprintf(stderr, "key %d: page %"Yu", %s\n", i, NODEPGNO(node),
				DKEY(&key));
			total += nsize;
		} else {
			if (F_ISSET(node->mn_flags, F_BIGDATA))
				nsize += sizeof(pgno_t);
			else
				nsize += NODEDSZ(node);
			total += nsize;
			/* the printed nsize also counts the mp_ptrs[] slot;
			 * 'total' does not */
			nsize += sizeof(indx_t);
			fprintf(stderr, "key %d: nsize %d, %s%s\n",
				i, nsize, DKEY(&key), mdb_leafnode_type(node));
		}
		total = EVEN(total);	/* node sizes are rounded up to even */
	}
	fprintf(stderr, "Total: header %d + contents %d + unused %d\n",
		IS_LEAF2(mp) ? PAGEHDRSZ : PAGEBASE + mp->mp_lower, total, SIZELEFT(mp));
}
1907 
/** Consistency-check a cursor's page stack (debug builds).
 * Verifies that each branch node along the path points at the page
 * recorded one level down, that the final page index is in range, and
 * that an initialized sub-cursor sits on its owning node's data.
 * Inconsistencies are only printed, never repaired.
 */
void
mdb_cursor_chk(MDB_cursor *mc)
{
	unsigned int i;
	MDB_node *node;
	MDB_page *mp;

	if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) return;
	for (i=0; i<mc->mc_top; i++) {
		mp = mc->mc_pg[i];
		node = NODEPTR(mp, mc->mc_ki[i]);
		if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno)
			printf("oops!\n");
	}
	/* here i == mc_top: check the bottom-level index as well */
	if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i]))
		printf("ack!\n");
	if (XCURSOR_INITED(mc)) {
		node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
		if (((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) &&
			mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) {
			printf("blah!\n");
		}
	}
}
1932 #endif
1933 
#if (MDB_DEBUG) > 2
/** Count all the pages in each DB and in the freelist
 *  and make sure it matches the actual number of pages
 *  being used.
 *  All named DBs must be open for a correct count.
 */
static void mdb_audit(MDB_txn *txn)
{
	MDB_cursor mc;
	MDB_val key, data;
	MDB_ID freecount, count;
	MDB_dbi i;
	int rc;

	/* Sum the page counts stored in every freeDB record */
	freecount = 0;
	mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
	while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0)
		freecount += *(MDB_ID *)data.mv_data;
	mdb_tassert(txn, rc == MDB_NOTFOUND);

	/* Sum the b-tree page stats of every valid DB, including sub-DBs
	 * reachable through DUPSORT leaf nodes.
	 */
	count = 0;
	for (i = 0; i<txn->mt_numdbs; i++) {
		MDB_xcursor mx;
		if (!(txn->mt_dbflags[i] & DB_VALID))
			continue;
		mdb_cursor_init(&mc, txn, i, &mx);
		if (txn->mt_dbs[i].md_root == P_INVALID)
			continue;
		count += txn->mt_dbs[i].md_branch_pages +
			txn->mt_dbs[i].md_leaf_pages +
			txn->mt_dbs[i].md_overflow_pages;
		if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) {
			/* Walk all leaf pages, adding in each sub-DB's stats */
			rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST);
			for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) {
				unsigned j;
				MDB_page *mp;
				mp = mc.mc_pg[mc.mc_top];
				for (j=0; j<NUMKEYS(mp); j++) {
					MDB_node *leaf = NODEPTR(mp, j);
					if (leaf->mn_flags & F_SUBDATA) {
						MDB_db db;
						/* memcpy: node data may be unaligned */
						memcpy(&db, NODEDATA(leaf), sizeof(db));
						count += db.md_branch_pages + db.md_leaf_pages +
							db.md_overflow_pages;
					}
				}
			}
			mdb_tassert(txn, rc == MDB_NOTFOUND);
		}
	}
	/* free + used + meta pages must account for the whole file */
	if (freecount + count + NUM_METAS != txn->mt_next_pgno) {
		fprintf(stderr, "audit: %"Yu" freecount: %"Yu" count: %"Yu" total: %"Yu" next_pgno: %"Yu"\n",
			txn->mt_txnid, freecount, count+NUM_METAS,
			freecount+count+NUM_METAS, txn->mt_next_pgno);
	}
}
#endif
1991 
1992 int
mdb_cmp(MDB_txn * txn,MDB_dbi dbi,const MDB_val * a,const MDB_val * b)1993 mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
1994 {
1995 	return txn->mt_dbxs[dbi].md_cmp(a, b);
1996 }
1997 
1998 int
mdb_dcmp(MDB_txn * txn,MDB_dbi dbi,const MDB_val * a,const MDB_val * b)1999 mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
2000 {
2001 	MDB_cmp_func *dcmp = txn->mt_dbxs[dbi].md_dcmp;
2002 	if (NEED_CMP_CLONG(dcmp, a->mv_size))
2003 		dcmp = mdb_cmp_clong;
2004 	return dcmp(a, b);
2005 }
2006 
/** Allocate memory for a page.
 * Re-use old malloc'd pages first for singletons, otherwise just malloc.
 * Set #MDB_TXN_ERROR on failure.
 */
static MDB_page *
mdb_page_malloc(MDB_txn *txn, unsigned num)
{
	MDB_env *env = txn->mt_env;
	MDB_page *ret = env->me_dpages;	/* head of the single-page reuse list */
	size_t psize = env->me_psize, sz = psize, off;
	/* For ! #MDB_NOMEMINIT, psize counts how much to init.
	 * For a single page alloc, we init everything after the page header.
	 * For multi-page, we init the final page; if the caller needed that
	 * many pages they will be filling in at least up to the last page.
	 */
	if (num == 1) {
		if (ret) {
			/* Pop a previously freed page from the reuse list */
			VGMEMP_ALLOC(env, ret, sz);
			VGMEMP_DEFINED(ret, sizeof(ret->mp_next));
			env->me_dpages = ret->mp_next;
			return ret;
		}
		/* Fresh single page: init region is everything past the header */
		psize -= off = PAGEHDRSZ;
	} else {
		sz *= num;
		/* Multi-page chunk: init region is only the final page */
		off = sz - psize;
	}
	if ((ret = malloc(sz)) != NULL) {
		VGMEMP_ALLOC(env, ret, sz);
		if (!(env->me_flags & MDB_NOMEMINIT)) {
			memset((char *)ret + off, 0, psize);
			ret->mp_pad = 0;
		}
	} else {
		/* out of memory poisons the transaction */
		txn->mt_flags |= MDB_TXN_ERROR;
	}
	return ret;
}
2045 /** Free a single page.
2046  * Saves single pages to a list, for future reuse.
2047  * (This is not used for multi-page overflow pages.)
2048  */
2049 static void
mdb_page_free(MDB_env * env,MDB_page * mp)2050 mdb_page_free(MDB_env *env, MDB_page *mp)
2051 {
2052 	mp->mp_next = env->me_dpages;
2053 	VGMEMP_FREE(env, mp);
2054 	env->me_dpages = mp;
2055 }
2056 
2057 /** Free a dirty page */
2058 static void
mdb_dpage_free(MDB_env * env,MDB_page * dp)2059 mdb_dpage_free(MDB_env *env, MDB_page *dp)
2060 {
2061 	if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) {
2062 		mdb_page_free(env, dp);
2063 	} else {
2064 		/* large pages just get freed directly */
2065 		VGMEMP_FREE(env, dp);
2066 		free(dp);
2067 	}
2068 }
2069 
2070 /**	Return all dirty pages to dpage list */
2071 static void
mdb_dlist_free(MDB_txn * txn)2072 mdb_dlist_free(MDB_txn *txn)
2073 {
2074 	MDB_env *env = txn->mt_env;
2075 	MDB_ID2L dl = txn->mt_u.dirty_list;
2076 	unsigned i, n = dl[0].mid;
2077 
2078 	for (i = 1; i <= n; i++) {
2079 		mdb_dpage_free(env, dl[i].mptr);
2080 	}
2081 	dl[0].mid = 0;
2082 }
2083 
#ifdef MDB_VL32
/** Drop one reference on the read-page chunk containing \b mp.
 * Dirty pages and sub-pages are not tracked in #mt_rpages and are
 * ignored here.
 */
static void
mdb_page_unref(MDB_txn *txn, MDB_page *mp)
{
	pgno_t pgno;
	MDB_ID3L tl = txn->mt_rpages;
	unsigned x, rem;
	if (mp->mp_flags & (P_SUBP|P_DIRTY))
		return;
	/* Round the pgno down to its MDB_RPAGE_CHUNK boundary */
	rem = mp->mp_pgno & (MDB_RPAGE_CHUNK-1);
	pgno = mp->mp_pgno ^ rem;
	x = mdb_mid3l_search(tl, pgno);
	/* NOTE(review): a page may also be tracked under its exact pgno in
	 * the following slot; confirm against the code that populates
	 * mt_rpages.
	 */
	if (x != tl[0].mid && tl[x+1].mid == mp->mp_pgno)
		x++;
	if (tl[x].mref)
		tl[x].mref--;
}
#define MDB_PAGE_UNREF(txn, mp)	mdb_page_unref(txn, mp)

/** Drop references on every page in a cursor's stack, and on its
 * overflow page if any, then reset the cursor to uninitialized.
 * Empty cursors and sub-page cursors are returned from untouched.
 */
static void
mdb_cursor_unref(MDB_cursor *mc)
{
	int i;
	if (mc->mc_txn->mt_rpages[0].mid) {
		if (!mc->mc_snum || !mc->mc_pg[0] || IS_SUBP(mc->mc_pg[0]))
			return;
		for (i=0; i<mc->mc_snum; i++)
			mdb_page_unref(mc->mc_txn, mc->mc_pg[i]);
		if (mc->mc_ovpg) {
			mdb_page_unref(mc->mc_txn, mc->mc_ovpg);
			mc->mc_ovpg = 0;
		}
	}
	mc->mc_snum = mc->mc_top = 0;
	mc->mc_pg[0] = NULL;
	mc->mc_flags &= ~C_INITIALIZED;
}
#define MDB_CURSOR_UNREF(mc, force) \
	(((force) || ((mc)->mc_flags & C_INITIALIZED)) \
	 ? mdb_cursor_unref(mc) \
	 : (void)0)

#else
#define MDB_PAGE_UNREF(txn, mp)
#define MDB_CURSOR_UNREF(mc, force) ((void)0)
#endif /* MDB_VL32 */
2130 
/** Loosen or free a single page.
 * Saves single pages to a list for future reuse
 * in this same txn. It has been pulled from the freeDB
 * and already resides on the dirty list, but has been
 * deleted. Use these pages first before pulling again
 * from the freeDB.
 *
 * If the page wasn't dirtied in this txn, just add it
 * to this txn's free list.
 * @return #MDB_SUCCESS, #MDB_PROBLEM on a bad cursor, or an
 * #mdb_midl_append() error.
 */
static int
mdb_page_loose(MDB_cursor *mc, MDB_page *mp)
{
	int loose = 0;
	pgno_t pgno = mp->mp_pgno;
	MDB_txn *txn = mc->mc_txn;

	/* Only pages we dirtied ourselves (and not freeDB pages) are
	 * candidates for loose reuse within this txn.
	 */
	if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) {
		if (txn->mt_parent) {
			MDB_ID2 *dl = txn->mt_u.dirty_list;
			/* If txn has a parent, make sure the page is in our
			 * dirty list.
			 */
			if (dl[0].mid) {
				unsigned x = mdb_mid2l_search(dl, pgno);
				if (x <= dl[0].mid && dl[x].mid == pgno) {
					if (mp != dl[x].mptr) { /* bad cursor? */
						mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
						txn->mt_flags |= MDB_TXN_ERROR;
						return MDB_PROBLEM;
					}
					/* ok, it's ours */
					loose = 1;
				}
			}
		} else {
			/* no parent txn, so it's just ours */
			loose = 1;
		}
	}
	if (loose) {
		/* Push onto the txn's loose-page list and mark the page */
		DPRINTF(("loosen db %d page %"Yu, DDBI(mc), mp->mp_pgno));
		NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs;
		txn->mt_loose_pgs = mp;
		txn->mt_loose_count++;
		mp->mp_flags |= P_LOOSE;
	} else {
		/* Not ours to recycle: record it on the txn's free list */
		int rc = mdb_midl_append(&txn->mt_free_pgs, pgno);
		if (rc)
			return rc;
	}

	return MDB_SUCCESS;
}
2185 
/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn.
 * @param[in] mc A cursor handle for the current operation.
 * @param[in] pflags Flags of the pages to update:
 * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it.
 * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush().
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all)
{
	enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP };
	MDB_txn *txn = mc->mc_txn;
	MDB_cursor *m3, *m0 = mc;
	MDB_xcursor *mx;
	MDB_page *dp, *mp;
	MDB_node *leaf;
	unsigned i, j;
	int rc = MDB_SUCCESS, level;

	/* Mark pages seen by cursors: First m0, then tracked cursors */
	for (i = txn->mt_numdbs;; ) {
		if (mc->mc_flags & C_INITIALIZED) {
			/* Walk this cursor and any chained sub-database cursors */
			for (m3 = mc;; m3 = &mx->mx_cursor) {
				mp = NULL;
				/* Toggle P_KEEP on every stack page whose masked
				 * flags exactly match pflags.
				 */
				for (j=0; j<m3->mc_snum; j++) {
					mp = m3->mc_pg[j];
					if ((mp->mp_flags & Mask) == pflags)
						mp->mp_flags ^= P_KEEP;
				}
				mx = m3->mc_xcursor;
				/* Proceed to mx if it is at a sub-database */
				if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED)))
					break;
				if (! (mp && (mp->mp_flags & P_LEAF)))
					break;
				leaf = NODEPTR(mp, m3->mc_ki[j-1]);
				if (!(leaf->mn_flags & F_SUBDATA))
					break;
			}
		}
		mc = mc->mc_next;
		/* Advance along the tracked-cursor chain; when it runs out
		 * (or loops back to m0), step down to the next DB's chain.
		 * i reaching 0 means all chains have been visited.
		 */
		for (; !mc || mc == m0; mc = txn->mt_cursors[--i])
			if (i == 0)
				goto mark_done;
	}

mark_done:
	if (all) {
		/* Mark dirty root pages */
		for (i=0; i<txn->mt_numdbs; i++) {
			if (txn->mt_dbflags[i] & DB_DIRTY) {
				pgno_t pgno = txn->mt_dbs[i].md_root;
				if (pgno == P_INVALID)
					continue;
				if ((rc = mdb_page_get(m0, pgno, &dp, &level)) != MDB_SUCCESS)
					break;
				/* NOTE(review): level <= 1 appears to select pages
				 * resident in this txn rather than a parent's —
				 * confirm against mdb_page_get()'s lvl semantics.
				 */
				if ((dp->mp_flags & Mask) == pflags && level <= 1)
					dp->mp_flags ^= P_KEEP;
			}
		}
	}

	return rc;
}
2250 
2251 static int mdb_page_flush(MDB_txn *txn, int keep);
2252 
2253 /**	Spill pages from the dirty list back to disk.
2254  * This is intended to prevent running into #MDB_TXN_FULL situations,
2255  * but note that they may still occur in a few cases:
2256  *	1) our estimate of the txn size could be too small. Currently this
2257  *	 seems unlikely, except with a large number of #MDB_MULTIPLE items.
2258  *	2) child txns may run out of space if their parents dirtied a
2259  *	 lot of pages and never spilled them. TODO: we probably should do
2260  *	 a preemptive spill during #mdb_txn_begin() of a child txn, if
2261  *	 the parent's dirty_room is below a given threshold.
2262  *
2263  * Otherwise, if not using nested txns, it is expected that apps will
2264  * not run into #MDB_TXN_FULL any more. The pages are flushed to disk
2265  * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared.
2266  * If the txn never references them again, they can be left alone.
2267  * If the txn only reads them, they can be used without any fuss.
2268  * If the txn writes them again, they can be dirtied immediately without
2269  * going thru all of the work of #mdb_page_touch(). Such references are
2270  * handled by #mdb_page_unspill().
2271  *
2272  * Also note, we never spill DB root pages, nor pages of active cursors,
2273  * because we'll need these back again soon anyway. And in nested txns,
2274  * we can't spill a page in a child txn if it was already spilled in a
2275  * parent txn. That would alter the parent txns' data even though
2276  * the child hasn't committed yet, and we'd have no way to undo it if
2277  * the child aborted.
2278  *
2279  * @param[in] m0 cursor A cursor handle identifying the transaction and
2280  *	database for which we are checking space.
2281  * @param[in] key For a put operation, the key being stored.
2282  * @param[in] data For a put operation, the data being stored.
2283  * @return 0 on success, non-zero on failure.
2284  */
static int
mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data)
{
	MDB_txn *txn = m0->mc_txn;
	MDB_page *dp;
	MDB_ID2L dl = txn->mt_u.dirty_list;
	unsigned int i, j, need;
	int rc;

	/* Sub-cursors share the parent cursor's pages; nothing to do */
	if (m0->mc_flags & C_SUB)
		return MDB_SUCCESS;

	/* Estimate how much space this op will take */
	i = m0->mc_db->md_depth;
	/* Named DBs also dirty the main DB */
	if (m0->mc_dbi >= CORE_DBS)
		i += txn->mt_dbs[MAIN_DBI].md_depth;
	/* For puts, roughly factor in the key+data size */
	if (key)
		i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize;
	i += i;	/* double it for good measure */
	need = i;

	/* Enough dirty-list room left: no spill required */
	if (txn->mt_dirty_room > i)
		return MDB_SUCCESS;

	if (!txn->mt_spill_pgs) {
		txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX);
		if (!txn->mt_spill_pgs)
			return ENOMEM;
	} else {
		/* purge deleted slots (entries with the low bit set) */
		MDB_IDL sl = txn->mt_spill_pgs;
		unsigned int num = sl[0];
		j=0;
		for (i=1; i<=num; i++) {
			if (!(sl[i] & 1))
				sl[++j] = sl[i];
		}
		sl[0] = j;
	}

	/* Preserve pages which may soon be dirtied again */
	if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS)
		goto done;

	/* Less aggressive spill - we originally spilled the entire dirty list,
	 * with a few exceptions for cursor pages and DB root pages. But this
	 * turns out to be a lot of wasted effort because in a large txn many
	 * of those pages will need to be used again. So now we spill only 1/8th
	 * of the dirty pages. Testing revealed this to be a good tradeoff,
	 * better than 1/2, 1/4, or 1/10.
	 */
	if (need < MDB_IDL_UM_MAX / 8)
		need = MDB_IDL_UM_MAX / 8;

	/* Save the page IDs of all the pages we're flushing */
	/* flush from the tail forward, this saves a lot of shifting later on. */
	for (i=dl[0].mid; i && need; i--) {
		/* pgno shifted left one bit; the low bit marks deleted slots */
		MDB_ID pn = dl[i].mid << 1;
		dp = dl[i].mptr;
		if (dp->mp_flags & (P_LOOSE|P_KEEP))
			continue;
		/* Can't spill twice, make sure it's not already in a parent's
		 * spill list.
		 */
		if (txn->mt_parent) {
			MDB_txn *tx2;
			for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) {
				if (tx2->mt_spill_pgs) {
					j = mdb_midl_search(tx2->mt_spill_pgs, pn);
					if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) {
						dp->mp_flags |= P_KEEP;
						break;
					}
				}
			}
			if (tx2)
				continue;
		}
		if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn)))
			goto done;
		need--;
	}
	mdb_midl_sort(txn->mt_spill_pgs);

	/* Flush the spilled part of dirty list */
	if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS)
		goto done;

	/* Reset any dirty pages we kept that page_flush didn't see */
	rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i);

done:
	txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS;
	return rc;
}
2382 
2383 /** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. */
2384 static txnid_t
mdb_find_oldest(MDB_txn * txn)2385 mdb_find_oldest(MDB_txn *txn)
2386 {
2387 	int i;
2388 	txnid_t mr, oldest = txn->mt_txnid - 1;
2389 	if (txn->mt_env->me_txns) {
2390 		MDB_reader *r = txn->mt_env->me_txns->mti_readers;
2391 		for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) {
2392 			if (r[i].mr_pid) {
2393 				mr = r[i].mr_txnid;
2394 				if (oldest > mr)
2395 					oldest = mr;
2396 			}
2397 		}
2398 	}
2399 	return oldest;
2400 }
2401 
/** Add a page to the txn's dirty list.
 * Chooses sorted insert vs. plain append per platform/flags, then
 * records the page and consumes one unit of dirty-list room.
 */
static void
mdb_page_dirty(MDB_txn *txn, MDB_page *mp)
{
	MDB_ID2 mid;
	int rc, (*insert)(MDB_ID2L, MDB_ID2 *);
#ifdef _WIN32	/* With Windows we always write dirty pages with WriteFile,
				 * so we always want them ordered */
	insert = mdb_mid2l_insert;
#else			/* but otherwise with writemaps, we just use msync, we
				 * don't need the ordering and just append */
	if (txn->mt_flags & MDB_TXN_WRITEMAP)
		insert = mdb_mid2l_append;
	else
		insert = mdb_mid2l_insert;
#endif
	mid.mid = mp->mp_pgno;
	mid.mptr = mp;
	rc = insert(txn->mt_u.dirty_list, &mid);
	/* insertion must succeed; capacity is guarded by mt_dirty_room */
	mdb_tassert(txn, rc == 0);
	txn->mt_dirty_room--;
}
2424 
/** Allocate page numbers and memory for writing.  Maintain me_pglast,
 * me_pghead and mt_next_pgno.  Set #MDB_TXN_ERROR on failure.
 *
 * If there are free pages available from older transactions, they
 * are re-used first. Otherwise allocate a new page at mt_next_pgno.
 * Do not modify the freeDB, just merge freeDB records into me_pghead[]
 * and move me_pglast to say which records were consumed.  Only this
 * function can create me_pghead and move me_pglast/mt_next_pgno.
 * When #MDB_DEVEL & 2, it is not affected by #mdb_freelist_save(): it
 * then uses the transaction's original snapshot of the freeDB.
 * @param[in] mc cursor A cursor handle identifying the transaction and
 *	database for which we are allocating.
 * @param[in] num the number of pages to allocate.
 * @param[out] mp Address of the allocated page(s). Requests for multiple pages
 *  will always be satisfied by a single contiguous chunk of memory.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp)
{
#ifdef MDB_PARANOID	/* Seems like we can ignore this now */
	/* Get at most <Max_retries> more freeDB records once me_pghead
	 * has enough pages.  If not enough, use new pages from the map.
	 * If <Paranoid> and mc is updating the freeDB, only get new
	 * records if me_pghead is empty. Then the freelist cannot play
	 * catch-up with itself by growing while trying to save it.
	 */
	enum { Paranoid = 1, Max_retries = 500 };
#else
	enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ };
#endif
	int rc, retry = num * 60;
	MDB_txn *txn = mc->mc_txn;
	MDB_env *env = txn->mt_env;
	pgno_t pgno, *mop = env->me_pghead;	/* mop is an IDL: mop[0] holds its length */
	unsigned i, j, mop_len = mop ? mop[0] : 0, n2 = num-1;
	MDB_page *np;
	txnid_t oldest = 0, last;
	MDB_cursor_op op;
	MDB_cursor m2;
	int found_old = 0;	/* set once mdb_find_oldest() has been (re)consulted */

	/* If there are any loose pages, just use them */
	if (num == 1 && txn->mt_loose_pgs) {
		np = txn->mt_loose_pgs;
		txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np);
		txn->mt_loose_count--;
		DPRINTF(("db %d use loose page %"Yu, DDBI(mc), np->mp_pgno));
		*mp = np;
		return MDB_SUCCESS;
	}

	*mp = NULL;

	/* If our dirty list is already full, we can't do anything */
	if (txn->mt_dirty_room == 0) {
		rc = MDB_TXN_FULL;
		goto fail;
	}

	for (op = MDB_FIRST;; op = MDB_NEXT) {
		MDB_val key, data;
		MDB_node *leaf;
		pgno_t *idl;

		/* Seek a big enough contiguous page range. Prefer
		 * pages at the tail, just truncating the list.
		 */
		if (mop_len > n2) {
			i = mop_len;
			do {
				pgno = mop[i];
				/* mop[] is kept in descending order (see xmerge below),
				 * so this tests for num consecutive pgnos ending at mop[i].
				 */
				if (mop[i-n2] == pgno+n2)
					goto search_done;
			} while (--i > n2);
			if (--retry < 0)
				break;
		}

		if (op == MDB_FIRST) {	/* 1st iteration */
			/* Prepare to fetch more and coalesce */
			last = env->me_pglast;
			oldest = env->me_pgoldest;
			mdb_cursor_init(&m2, txn, FREE_DBI, NULL);
#if (MDB_DEVEL) & 2	/* "& 2" so MDB_DEVEL=1 won't hide bugs breaking freeDB */
			/* Use original snapshot. TODO: Should need less care in code
			 * which modifies the database. Maybe we can delete some code?
			 */
			m2.mc_flags |= C_ORIG_RDONLY;
			m2.mc_db = &env->me_metas[(txn->mt_txnid-1) & 1]->mm_dbs[FREE_DBI];
			m2.mc_dbflag = (unsigned char *)""; /* probably unnecessary */
#endif
			if (last) {
				op = MDB_SET_RANGE;
				key.mv_data = &last; /* will look up last+1 */
				key.mv_size = sizeof(last);
			}
			if (Paranoid && mc->mc_dbi == FREE_DBI)
				retry = -1;
		}
		if (Paranoid && retry < 0 && mop_len)
			break;

		last++;
		/* Do not fetch more if the record will be too recent */
		if (oldest <= last) {
			if (!found_old) {
				/* Re-scan the reader table once: a reader may have
				 * exited since me_pgoldest was cached.
				 */
				oldest = mdb_find_oldest(txn);
				env->me_pgoldest = oldest;
				found_old = 1;
			}
			if (oldest <= last)
				break;
		}
		rc = mdb_cursor_get(&m2, &key, NULL, op);
		if (rc) {
			if (rc == MDB_NOTFOUND)
				break;
			goto fail;
		}
		last = *(txnid_t*)key.mv_data;
		if (oldest <= last) {
			if (!found_old) {
				oldest = mdb_find_oldest(txn);
				env->me_pgoldest = oldest;
				found_old = 1;
			}
			if (oldest <= last)
				break;
		}
		np = m2.mc_pg[m2.mc_top];
		leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]);
		if ((rc = mdb_node_read(&m2, leaf, &data)) != MDB_SUCCESS)
			goto fail;

		idl = (MDB_ID *) data.mv_data;
		i = idl[0];
		if (!mop) {
			if (!(env->me_pghead = mop = mdb_midl_alloc(i))) {
				rc = ENOMEM;
				goto fail;
			}
		} else {
			/* Grow me_pghead to hold the incoming record */
			if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0)
				goto fail;
			mop = env->me_pghead;
		}
		env->me_pglast = last;	/* this freeDB record is now consumed */
#if (MDB_DEBUG) > 1
		DPRINTF(("IDL read txn %"Yu" root %"Yu" num %u",
			last, txn->mt_dbs[FREE_DBI].md_root, i));
		for (j = i; j; j--)
			DPRINTF(("IDL %"Yu, idl[j]));
#endif
		/* Merge in descending sorted order */
		mdb_midl_xmerge(mop, idl);
		mop_len = mop[0];
	}

	/* Use new pages from the map when nothing suitable in the freeDB */
	i = 0;	/* i != 0 below means the pages came from me_pghead */
	pgno = txn->mt_next_pgno;
	if (pgno + num >= env->me_maxpg) {
			DPUTS("DB size maxed out");
			rc = MDB_MAP_FULL;
			goto fail;
	}
#if defined(_WIN32) && !defined(MDB_VL32)
	if (!(env->me_flags & MDB_RDONLY)) {
		void *p;
		p = (MDB_page *)(env->me_map + env->me_psize * pgno);
		p = VirtualAlloc(p, env->me_psize * num, MEM_COMMIT,
			(env->me_flags & MDB_WRITEMAP) ? PAGE_READWRITE:
			PAGE_READONLY);
		if (!p) {
			DPUTS("VirtualAlloc failed");
			rc = ErrCode();
			goto fail;
		}
	}
#endif

search_done:
	if (env->me_flags & MDB_WRITEMAP) {
		np = (MDB_page *)(env->me_map + env->me_psize * pgno);
	} else {
		if (!(np = mdb_page_malloc(txn, num))) {
			rc = ENOMEM;
			goto fail;
		}
	}
	if (i) {
		/* Pages were taken from me_pghead: shrink the list */
		mop[0] = mop_len -= num;
		/* Move any stragglers down */
		for (j = i-num; j < mop_len; )
			mop[++j] = mop[++i];
	} else {
		txn->mt_next_pgno = pgno + num;
	}
	np->mp_pgno = pgno;
	mdb_page_dirty(txn, np);
	*mp = np;

	return MDB_SUCCESS;

fail:
	txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
2634 
/** Copy the used portions of a non-overflow page.
 * A page has two used regions: the header plus entry offsets below
 * mp_lower, and the entry data above mp_upper.  When the unused gap
 * between them is at least one word wide, copy the two regions
 * separately and skip the gap.
 * @param[in] dst page to copy into
 * @param[in] src page to copy from
 * @param[in] psize size of a page
 */
static void
mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize)
{
	enum { Align = sizeof(pgno_t) };
	indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower;

	/* If page isn't full, just copy the used portion. Adjust
	 * alignment so memcpy may copy words instead of bytes.
	 */
	if ((unused &= -Align) && !IS_LEAF2(src)) {
		/* & -Align clears the low bits: round upper down and
		 * lower up to word boundaries for the two copies.
		 */
		upper = (upper + PAGEBASE) & -Align;
		memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align);
		memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper),
			psize - upper);
	} else {
		/* Gap too small (or LEAF2 page): one straight copy */
		memcpy(dst, src, psize - unused);
	}
}
2658 
/** Pull a page off the txn's spill list, if present.
 * If a page being referenced was spilled to disk in this txn, bring
 * it back and make it dirty/writable again.
 * @param[in] txn the transaction handle.
 * @param[in] mp the page being referenced. It must not be dirty.
 * @param[out] ret the writable page, if any. ret is unchanged if
 * mp wasn't spilled.
 * @return 0 on success (including "not spilled"), MDB_TXN_FULL if the
 * dirty list is full, ENOMEM if the page copy cannot be allocated.
 */
static int
mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret)
{
	MDB_env *env = txn->mt_env;
	const MDB_txn *tx2;
	unsigned x;
	/* Spill list entries are pgno<<1; the LSB is a "deleted" flag */
	pgno_t pgno = mp->mp_pgno, pn = pgno << 1;

	/* Look for the page in this txn's spill list and its ancestors' */
	for (tx2 = txn; tx2; tx2=tx2->mt_parent) {
		if (!tx2->mt_spill_pgs)
			continue;
		x = mdb_midl_search(tx2->mt_spill_pgs, pn);
		if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) {
			MDB_page *np;
			int num;
			if (txn->mt_dirty_room == 0)
				return MDB_TXN_FULL;
			if (IS_OVERFLOW(mp))
				num = mp->mp_pages;
			else
				num = 1;
			if (env->me_flags & MDB_WRITEMAP) {
				/* Writable map: the spilled page is reused in place */
				np = mp;
			} else {
				np = mdb_page_malloc(txn, num);
				if (!np)
					return ENOMEM;
				if (num > 1)
					memcpy(np, mp, num * env->me_psize);
				else
					mdb_page_copy(np, mp, env->me_psize);
			}
			if (tx2 == txn) {
				/* If in current txn, this page is no longer spilled.
				 * If it happens to be the last page, truncate the spill list.
				 * Otherwise mark it as deleted by setting the LSB.
				 */
				if (x == txn->mt_spill_pgs[0])
					txn->mt_spill_pgs[0]--;
				else
					txn->mt_spill_pgs[x] |= 1;
			}	/* otherwise, if belonging to a parent txn, the
				 * page remains spilled until child commits
				 */

			mdb_page_dirty(txn, np);
			np->mp_flags |= P_DIRTY;
			*ret = np;
			break;
		}
	}
	return MDB_SUCCESS;
}
2720 
/** Touch a page: make it dirty and re-insert into tree with updated pgno.
 * Set #MDB_TXN_ERROR on failure.
 * Three cases: a clean page is copy-on-written to a newly allocated pgno
 * (or un-spilled); a page dirtied by a parent txn is shadow-copied into
 * this txn's dirty list keeping its pgno; a page already dirty in this
 * txn needs nothing.
 * @param[in] mc cursor pointing to the page to be touched
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_touch(MDB_cursor *mc)
{
	MDB_page *mp = mc->mc_pg[mc->mc_top], *np;
	MDB_txn *txn = mc->mc_txn;
	MDB_cursor *m2, *m3;
	pgno_t	pgno;
	int rc;

	if (!F_ISSET(mp->mp_flags, P_DIRTY)) {
		if (txn->mt_flags & MDB_TXN_SPILLS) {
			/* If the page was spilled by this txn, just bring it back */
			np = NULL;
			rc = mdb_page_unspill(txn, mp, &np);
			if (rc)
				goto fail;
			if (np)
				goto done;
		}
		/* Reserve freelist room first: mdb_midl_xappend below
		 * has no failure path of its own.
		 */
		if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) ||
			(rc = mdb_page_alloc(mc, 1, &np)))
			goto fail;
		pgno = np->mp_pgno;
		DPRINTF(("touched db %d page %"Yu" -> %"Yu, DDBI(mc),
			mp->mp_pgno, pgno));
		mdb_cassert(mc, mp->mp_pgno != pgno);
		mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno);
		/* Update the parent page, if any, to point to the new page */
		if (mc->mc_top) {
			MDB_page *parent = mc->mc_pg[mc->mc_top-1];
			MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]);
			SETPGNO(node, pgno);
		} else {
			mc->mc_db->md_root = pgno;
		}
	} else if (txn->mt_parent && !IS_SUBP(mp)) {
		MDB_ID2 mid, *dl = txn->mt_u.dirty_list;
		pgno = mp->mp_pgno;
		/* If txn has a parent, make sure the page is in our
		 * dirty list.
		 */
		if (dl[0].mid) {
			unsigned x = mdb_mid2l_search(dl, pgno);
			if (x <= dl[0].mid && dl[x].mid == pgno) {
				if (mp != dl[x].mptr) { /* bad cursor? */
					mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
					txn->mt_flags |= MDB_TXN_ERROR;
					return MDB_PROBLEM;
				}
				return 0;	/* already dirty in this child txn */
			}
		}
		mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX);
		/* No - copy it */
		np = mdb_page_malloc(txn, 1);
		if (!np)
			return ENOMEM;
		mid.mid = pgno;	/* shadow copy keeps the parent's pgno */
		mid.mptr = np;
		rc = mdb_mid2l_insert(dl, &mid);
		mdb_cassert(mc, rc == 0);
	} else {
		return 0;	/* already writable in this txn */
	}

	mdb_page_copy(np, mp, txn->mt_env->me_psize);
	np->mp_pgno = pgno;
	np->mp_flags |= P_DIRTY;

done:
	/* Adjust cursors pointing to mp */
	mc->mc_pg[mc->mc_top] = np;
	m2 = txn->mt_cursors[mc->mc_dbi];
	if (mc->mc_flags & C_SUB) {
		for (; m2; m2=m2->mc_next) {
			m3 = &m2->mc_xcursor->mx_cursor;
			if (m3->mc_snum < mc->mc_snum) continue;
			if (m3->mc_pg[mc->mc_top] == mp)
				m3->mc_pg[mc->mc_top] = np;
		}
	} else {
		for (; m2; m2=m2->mc_next) {
			if (m2->mc_snum < mc->mc_snum) continue;
			if (m2 == mc) continue;
			if (m2->mc_pg[mc->mc_top] == mp) {
				m2->mc_pg[mc->mc_top] = np;
				if (IS_LEAF(np))
					XCURSOR_REFRESH(m2, mc->mc_top, np);
			}
		}
	}
	MDB_PAGE_UNREF(mc->mc_txn, mp);
	return 0;

fail:
	txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
2823 
/** Flush data buffers to disk.
 * @param[in] env the environment handle
 * @param[in] force nonzero to sync even when MDB_NOSYNC/MDB_MAPASYNC is set
 * @param[in] numpgs number of pages, from the start of the map, to sync
 * @return 0 on success, EACCES on a read-only environment, otherwise a
 * system error code.
 */
int
mdb_env_sync0(MDB_env *env, int force, pgno_t numpgs)
{
	int rc = 0;
	if (env->me_flags & MDB_RDONLY)
		return EACCES;
	if (force
#ifndef _WIN32	/* Sync is normally achieved in Windows by doing WRITE_THROUGH writes */
		|| !(env->me_flags & MDB_NOSYNC)
#endif
		) {
		if (env->me_flags & MDB_WRITEMAP) {
			/* Async only when MDB_MAPASYNC is set and not forced */
			int flags = ((env->me_flags & MDB_MAPASYNC) && !force)
				? MS_ASYNC : MS_SYNC;
			if (MDB_MSYNC(env->me_map, env->me_psize * numpgs, flags))
				rc = ErrCode();
#ifdef _WIN32
			else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd))
				rc = ErrCode();
#endif
		} else {
#ifdef BROKEN_FDATASYNC
			if (env->me_flags & MDB_FSYNCONLY) {
				if (fsync(env->me_fd))
					rc = ErrCode();
			} else
#endif
			if (MDB_FDATASYNC(env->me_fd))
				rc = ErrCode();
		}
	}
	return rc;
}
2857 
2858 int
mdb_env_sync(MDB_env * env,int force)2859 mdb_env_sync(MDB_env *env, int force)
2860 {
2861 	MDB_meta *m = mdb_env_pick_meta(env);
2862 	return mdb_env_sync0(env, force, m->mm_last_pg+1);
2863 }
2864 
/** Back up parent txn's cursors, then grab the originals for tracking.
 * Each tracked cursor of src gets a malloc'd backup copy (its xcursor,
 * if any, is stored immediately after the backup); the original is then
 * re-pointed at dst so it keeps working inside the child txn.
 * @param[in] src the parent transaction
 * @param[in] dst the child transaction taking over the cursors
 * @return 0 on success, ENOMEM if a backup allocation fails.
 */
static int
mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst)
{
	MDB_cursor *mc, *bk;
	MDB_xcursor *mx;
	size_t size;
	int i;

	for (i = src->mt_numdbs; --i >= 0; ) {
		if ((mc = src->mt_cursors[i]) != NULL) {
			size = sizeof(MDB_cursor);
			if (mc->mc_xcursor)
				size += sizeof(MDB_xcursor);
			/* Advance via bk->mc_next: mc->mc_next is overwritten
			 * below, but bk still holds the original value.
			 */
			for (; mc; mc = bk->mc_next) {
				bk = malloc(size);
				if (!bk)
					return ENOMEM;
				*bk = *mc;
				mc->mc_backup = bk;
				mc->mc_db = &dst->mt_dbs[i];
				/* Kill pointers into src to reduce abuse: The
				 * user may not use mc until dst ends. But we need a valid
				 * txn pointer here for cursor fixups to keep working.
				 */
				mc->mc_txn    = dst;
				mc->mc_dbflag = &dst->mt_dbflags[i];
				if ((mx = mc->mc_xcursor) != NULL) {
					*(MDB_xcursor *)(bk+1) = *mx;
					mx->mx_cursor.mc_txn = dst;
				}
				mc->mc_next = dst->mt_cursors[i];
				dst->mt_cursors[i] = mc;
			}
		}
	}
	return MDB_SUCCESS;
}
2903 
/** Close this write txn's cursors, give parent txn's cursors back to parent.
 * Frees the backups created by #mdb_cursor_shadow() and the txn's own
 * malloced cursors.
 * @param[in] txn the transaction handle.
 * @param[in] merge true to keep changes to parent cursors, false to revert.
 */
static void
mdb_cursors_close(MDB_txn *txn, unsigned merge)
{
	MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk;
	MDB_xcursor *mx;
	int i;

	for (i = txn->mt_numdbs; --i >= 0; ) {
		for (mc = cursors[i]; mc; mc = next) {
			next = mc->mc_next;
			if ((bk = mc->mc_backup) != NULL) {
				if (merge) {
					/* Commit changes to parent txn */
					mc->mc_next = bk->mc_next;
					mc->mc_backup = bk->mc_backup;
					mc->mc_txn = bk->mc_txn;
					mc->mc_db = bk->mc_db;
					mc->mc_dbflag = bk->mc_dbflag;
					if ((mx = mc->mc_xcursor) != NULL)
						mx->mx_cursor.mc_txn = bk->mc_txn;
				} else {
					/* Abort nested txn */
					*mc = *bk;
					if ((mx = mc->mc_xcursor) != NULL)
						*mx = *(MDB_xcursor *)(bk+1);
				}
				mc = bk;	/* free the backup, not the user's cursor */
			}
			/* Only malloced cursors are permanently tracked. */
			free(mc);
		}
		cursors[i] = NULL;
	}
}
2943 
#if !(MDB_PIDLOCK)		/* Currently the same as defined(_WIN32) */
/** Ops for #mdb_reader_pid(): plain tags, no fcntl locking here */
enum Pidlock_op {
	Pidset, Pidcheck
};
#else
/** Ops for #mdb_reader_pid(), mapped directly to fcntl() lock commands */
enum Pidlock_op {
	Pidset = F_SETLK, Pidcheck = F_GETLK
};
#endif
2953 
/** Set or check a pid lock. Set returns 0 on success.
 * Check returns 0 if the process is certainly dead, nonzero if it may
 * be alive (the lock exists or an error happened so we do not know).
 *
 * On Windows Pidset is a no-op, we merely check for the existence
 * of the process with the given pid. On POSIX we use a single byte
 * lock on the lockfile, set at an offset equal to the pid.
 */
static int
mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid)
{
#if !(MDB_PIDLOCK)		/* Currently the same as defined(_WIN32) */
	int ret = 0;
	HANDLE h;
	if (op == Pidcheck) {
		h = OpenProcess(env->me_pidquery, FALSE, pid);
		/* No documented "no such process" code, but other program use this: */
		if (!h)
			return ErrCode() != ERROR_INVALID_PARAMETER;
		/* A process exists until all handles to it close. Has it exited? */
		ret = WaitForSingleObject(h, 0) != 0;
		CloseHandle(h);
	}
	return ret;
#else
	for (;;) {
		int rc;
		struct flock lock_info;
		memset(&lock_info, 0, sizeof(lock_info));
		lock_info.l_type = F_WRLCK;
		lock_info.l_whence = SEEK_SET;
		lock_info.l_start = pid;	/* one lock byte per pid */
		lock_info.l_len = 1;
		if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) {
			/* F_GETLK reports F_UNLCK in l_type iff no one holds the lock */
			if (op == F_GETLK && lock_info.l_type != F_UNLCK)
				rc = -1;
		} else if ((rc = ErrCode()) == EINTR) {
			continue;	/* interrupted by a signal: retry */
		}
		return rc;
	}
#endif
}
2997 
/** Common code for #mdb_txn_begin() and #mdb_txn_renew().
 * Acquires a reader table slot (read-only txn) or the writer mutex
 * (write txn), picks the meta page that fixes mt_txnid, then
 * (re)initializes the txn's DB table and page-allocation state.
 * @param[in] txn the transaction handle to initialize
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_txn_renew0(MDB_txn *txn)
{
	MDB_env *env = txn->mt_env;
	MDB_txninfo *ti = env->me_txns;
	MDB_meta *meta;
	unsigned int i, nr, flags = txn->mt_flags;
	uint16_t x;
	int rc, new_notls = 0;

	if ((flags &= MDB_TXN_RDONLY) != 0) {
		/* Read-only txn */
		if (!ti) {
			/* No lockfile, hence no reader table to register in */
			meta = mdb_env_pick_meta(env);
			txn->mt_txnid = meta->mm_txnid;
			txn->mt_u.reader = NULL;
		} else {
			MDB_reader *r = (env->me_flags & MDB_NOTLS) ? txn->mt_u.reader :
				pthread_getspecific(env->me_txkey);
			if (r) {
				/* Reusing a slot: it must be ours and currently idle */
				if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1)
					return MDB_BAD_RSLOT;
			} else {
				MDB_PID_T pid = env->me_pid;
				MDB_THR_T tid = pthread_self();
				mdb_mutexref_t rmutex = env->me_rmutex;

				if (!env->me_live_reader) {
					/* Take the per-pid lockfile byte, once per process */
					rc = mdb_reader_pid(env, Pidset, pid);
					if (rc)
						return rc;
					env->me_live_reader = 1;
				}

				if (LOCK_MUTEX(rc, env, rmutex))
					return rc;
				nr = ti->mti_numreaders;
				/* Find a free slot (mr_pid == 0) among published slots */
				for (i=0; i<nr; i++)
					if (ti->mti_readers[i].mr_pid == 0)
						break;
				if (i == env->me_maxreaders) {
					UNLOCK_MUTEX(rmutex);
					return MDB_READERS_FULL;
				}
				r = &ti->mti_readers[i];
				/* Claim the reader slot, carefully since other code
				 * uses the reader table un-mutexed: First reset the
				 * slot, next publish it in mti_numreaders.  After
				 * that, it is safe for mdb_env_close() to touch it.
				 * When it will be closed, we can finally claim it.
				 */
				r->mr_pid = 0;
				r->mr_txnid = (txnid_t)-1;
				r->mr_tid = tid;
				if (i == nr)
					ti->mti_numreaders = ++nr;
				env->me_close_readers = nr;
				r->mr_pid = pid;
				UNLOCK_MUTEX(rmutex);

				new_notls = (env->me_flags & MDB_NOTLS);
				if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) {
					r->mr_pid = 0;
					return rc;
				}
			}
			do /* LY: Retry on a race, ITS#7970. */
				r->mr_txnid = ti->mti_txnid;
			while(r->mr_txnid != ti->mti_txnid);
			txn->mt_txnid = r->mr_txnid;
			txn->mt_u.reader = r;
			meta = env->me_metas[txn->mt_txnid & 1];
		}

	} else {
		/* Write txn */
		/* Not yet touching txn == env->me_txn0, it may be active */
		if (ti) {
			if (LOCK_MUTEX(rc, env, env->me_wmutex))
				return rc;
			txn->mt_txnid = ti->mti_txnid;
			meta = env->me_metas[txn->mt_txnid & 1];
		} else {
			meta = mdb_env_pick_meta(env);
			txn->mt_txnid = meta->mm_txnid;
		}
		txn->mt_txnid++;	/* writers get the next txnid */
#if MDB_DEBUG
		if (txn->mt_txnid == mdb_debug_start)
			mdb_debug = 1;
#endif
		txn->mt_child = NULL;
		txn->mt_loose_pgs = NULL;
		txn->mt_loose_count = 0;
		txn->mt_dirty_room = MDB_IDL_UM_MAX;
		txn->mt_u.dirty_list = env->me_dirty_list;
		txn->mt_u.dirty_list[0].mid = 0;
		txn->mt_free_pgs = env->me_free_pgs;
		txn->mt_free_pgs[0] = 0;
		txn->mt_spill_pgs = NULL;
		env->me_txn = txn;
		memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int));
	}

	/* Copy the DB info and flags */
	memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDB_db));

	/* Moved to here to avoid a data race in read TXNs */
	txn->mt_next_pgno = meta->mm_last_pg+1;
#ifdef MDB_VL32
	txn->mt_last_pgno = txn->mt_next_pgno - 1;
#endif

	txn->mt_flags = flags;

	/* Setup db info */
	txn->mt_numdbs = env->me_numdbs;
	for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
		x = env->me_dbflags[i];
		txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS;
		txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_USRVALID|DB_STALE : 0;
	}
	txn->mt_dbflags[MAIN_DBI] = DB_VALID|DB_USRVALID;
	txn->mt_dbflags[FREE_DBI] = DB_VALID;

	if (env->me_flags & MDB_FATAL_ERROR) {
		DPUTS("environment had fatal error, must shutdown!");
		rc = MDB_PANIC;
	} else if (env->me_maxpg < txn->mt_next_pgno) {
		rc = MDB_MAP_RESIZED;
	} else {
		return MDB_SUCCESS;
	}
	mdb_txn_end(txn, new_notls /*0 or MDB_END_SLOT*/ | MDB_END_FAIL_BEGIN);
	return rc;
}
3136 
3137 int
mdb_txn_renew(MDB_txn * txn)3138 mdb_txn_renew(MDB_txn *txn)
3139 {
3140 	int rc;
3141 
3142 	if (!txn || !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY|MDB_TXN_FINISHED))
3143 		return EINVAL;
3144 
3145 	rc = mdb_txn_renew0(txn);
3146 	if (rc == MDB_SUCCESS) {
3147 		DPRINTF(("renew txn %"Yu"%c %p on mdbenv %p, root page %"Yu,
3148 			txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
3149 			(void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root));
3150 	}
3151 	return rc;
3152 }
3153 
/** Begin a transaction (public API).
 * Allocates a fresh txn struct (read-only or nested), or reuses the
 * env's preallocated write txn, then initializes it via
 * mdb_txn_renew0() / the nested-txn setup below.
 * @param[in] env the environment handle
 * @param[in] parent parent txn for a nested (write-only) txn, or NULL
 * @param[in] flags txn flags, masked with #MDB_TXN_BEGIN_FLAGS
 * @param[out] ret address where the new txn handle is stored on success
 * @return 0 on success, non-zero on failure.
 */
int
mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret)
{
	MDB_txn *txn;
	MDB_ntxn *ntxn;
	int rc, size, tsize;

	flags &= MDB_TXN_BEGIN_FLAGS;
	flags |= env->me_flags & MDB_WRITEMAP;

	if (env->me_flags & MDB_RDONLY & ~flags) /* write txn in RDONLY env */
		return EACCES;

	if (parent) {
		/* Nested transactions: Max 1 child, write txns only, no writemap */
		flags |= parent->mt_flags;
		if (flags & (MDB_RDONLY|MDB_WRITEMAP|MDB_TXN_BLOCKED)) {
			return (parent->mt_flags & MDB_TXN_RDONLY) ? EINVAL : MDB_BAD_TXN;
		}
		/* Child txns save MDB_pgstate and use own copy of cursors */
		size = env->me_maxdbs * (sizeof(MDB_db)+sizeof(MDB_cursor *)+1);
		size += tsize = sizeof(MDB_ntxn);
	} else if (flags & MDB_RDONLY) {
		size = env->me_maxdbs * (sizeof(MDB_db)+1);
		size += tsize = sizeof(MDB_txn);
	} else {
		/* Reuse preallocated write txn. However, do not touch it until
		 * mdb_txn_renew0() succeeds, since it currently may be active.
		 */
		txn = env->me_txn0;
		goto renew;
	}
	if ((txn = calloc(1, size)) == NULL) {
		DPRINTF(("calloc: %s", strerror(errno)));
		return ENOMEM;
	}
#ifdef MDB_VL32
	if (!parent) {
		txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3));
		if (!txn->mt_rpages) {
			free(txn);
			return ENOMEM;
		}
		txn->mt_rpages[0].mid = 0;
		txn->mt_rpcheck = MDB_TRPAGE_SIZE/2;
	}
#endif
	/* DB table and dbflags array live inside the same allocation,
	 * right after the txn struct itself (see size above).
	 */
	txn->mt_dbxs = env->me_dbxs;	/* static */
	txn->mt_dbs = (MDB_db *) ((char *)txn + tsize);
	txn->mt_dbflags = (unsigned char *)txn + size - env->me_maxdbs;
	txn->mt_flags = flags;
	txn->mt_env = env;

	if (parent) {
		unsigned int i;
		txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
		txn->mt_dbiseqs = parent->mt_dbiseqs;
		txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE);
		if (!txn->mt_u.dirty_list ||
			!(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)))
		{
			free(txn->mt_u.dirty_list);
			free(txn);
			return ENOMEM;
		}
		txn->mt_txnid = parent->mt_txnid;
		txn->mt_dirty_room = parent->mt_dirty_room;
		txn->mt_u.dirty_list[0].mid = 0;
		txn->mt_spill_pgs = NULL;
		txn->mt_next_pgno = parent->mt_next_pgno;
		parent->mt_flags |= MDB_TXN_HAS_CHILD;
		parent->mt_child = txn;
		txn->mt_parent = parent;
		txn->mt_numdbs = parent->mt_numdbs;
#ifdef MDB_VL32
		txn->mt_rpages = parent->mt_rpages;
#endif
		memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
		/* Copy parent's mt_dbflags, but clear DB_NEW */
		for (i=0; i<txn->mt_numdbs; i++)
			txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW;
		rc = 0;
		ntxn = (MDB_ntxn *)txn;
		ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */
		if (env->me_pghead) {
			/* Child works on its own copy of the freelist head */
			size = MDB_IDL_SIZEOF(env->me_pghead);
			env->me_pghead = mdb_midl_alloc(env->me_pghead[0]);
			if (env->me_pghead)
				memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size);
			else
				rc = ENOMEM;
		}
		if (!rc)
			rc = mdb_cursor_shadow(parent, txn);
		if (rc)
			mdb_txn_end(txn, MDB_END_FAIL_BEGINCHILD);
	} else { /* MDB_RDONLY */
		txn->mt_dbiseqs = env->me_dbiseqs;
renew:
		rc = mdb_txn_renew0(txn);
	}
	if (rc) {
		if (txn != env->me_txn0) {
#ifdef MDB_VL32
			free(txn->mt_rpages);
#endif
			free(txn);
		}
	} else {
		txn->mt_flags |= flags;	/* could not change txn=me_txn0 earlier */
		*ret = txn;
		DPRINTF(("begin txn %"Yu"%c %p on mdbenv %p, root page %"Yu,
			txn->mt_txnid, (flags & MDB_RDONLY) ? 'r' : 'w',
			(void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root));
	}

	return rc;
}
3272 
3273 MDB_env *
mdb_txn_env(MDB_txn * txn)3274 mdb_txn_env(MDB_txn *txn)
3275 {
3276 	if(!txn) return NULL;
3277 	return txn->mt_env;
3278 }
3279 
3280 mdb_size_t
mdb_txn_id(MDB_txn * txn)3281 mdb_txn_id(MDB_txn *txn)
3282 {
3283     if(!txn) return 0;
3284     return txn->mt_txnid;
3285 }
3286 
/** Export or close DBI handles opened in this txn.
 * @param[in] txn the ending transaction
 * @param[in] keep nonzero: publish DBs opened in this txn (DB_NEW) to the
 *	environment; zero: free their name slots and bump me_dbiseqs.
 */
static void
mdb_dbis_update(MDB_txn *txn, int keep)
{
	int i;
	MDB_dbi n = txn->mt_numdbs;
	MDB_env *env = txn->mt_env;
	unsigned char *tdbflags = txn->mt_dbflags;

	/* Walk only the named DBs; the CORE_DBS slots are untouched */
	for (i = n; --i >= CORE_DBS;) {
		if (tdbflags[i] & DB_NEW) {
			if (keep) {
				env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID;
			} else {
				char *ptr = env->me_dbxs[i].md_name.mv_data;
				if (ptr) {
					env->me_dbxs[i].md_name.mv_data = NULL;
					env->me_dbxs[i].md_name.mv_size = 0;
					env->me_dbflags[i] = 0;
					env->me_dbiseqs[i]++;
					free(ptr);
				}
			}
		}
	}
	if (keep && env->me_numdbs < n)
		env->me_numdbs = n;
}
3315 
/** End a transaction, except successful commit of a nested transaction.
 * May be called twice for readonly txns: First reset it, then abort.
 * Backend for #mdb_txn_reset(), #mdb_txn_abort() and failed begins.
 * @param[in] txn the transaction handle to end
 * @param[in] mode why and how to end the transaction (MDB_END_* bits)
 */
static void
mdb_txn_end(MDB_txn *txn, unsigned mode)
{
	MDB_env	*env = txn->mt_env;
#if MDB_DEBUG
	static const char *const names[] = MDB_END_NAMES;
#endif

	/* Export or close DBI handles opened in this txn */
	mdb_dbis_update(txn, mode & MDB_END_UPDATE);

	DPRINTF(("%s txn %"Yu"%c %p on mdbenv %p, root page %"Yu,
		names[mode & MDB_END_OPMASK],
		txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w',
		(void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root));

	if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
		if (txn->mt_u.reader) {
			/* Mark the reader slot idle; release it entirely only
			 * when the caller asked for MDB_END_SLOT under MDB_NOTLS.
			 */
			txn->mt_u.reader->mr_txnid = (txnid_t)-1;
			if (!(env->me_flags & MDB_NOTLS)) {
				txn->mt_u.reader = NULL; /* txn does not own reader */
			} else if (mode & MDB_END_SLOT) {
				txn->mt_u.reader->mr_pid = 0;
				txn->mt_u.reader = NULL;
			} /* else txn owns the slot until it does MDB_END_SLOT */
		}
		txn->mt_numdbs = 0;		/* prevent further DBI activity */
		txn->mt_flags |= MDB_TXN_FINISHED;

	} else if (!F_ISSET(txn->mt_flags, MDB_TXN_FINISHED)) {
		pgno_t *pghead = env->me_pghead;

		if (!(mode & MDB_END_UPDATE)) /* !(already closed cursors) */
			mdb_cursors_close(txn, 0);
		if (!(env->me_flags & MDB_WRITEMAP)) {
			mdb_dlist_free(txn);
		}

		txn->mt_numdbs = 0;
		txn->mt_flags = MDB_TXN_FINISHED;

		if (!txn->mt_parent) {
			/* Top-level write txn: give freelist state back to the env */
			mdb_midl_shrink(&txn->mt_free_pgs);
			env->me_free_pgs = txn->mt_free_pgs;
			/* me_pgstate: */
			env->me_pghead = NULL;
			env->me_pglast = 0;

			env->me_txn = NULL;
			mode = 0;	/* txn == env->me_txn0, do not free() it */

			/* The writer mutex was locked in mdb_txn_begin. */
			if (env->me_txns)
				UNLOCK_MUTEX(env->me_wmutex);
		} else {
			/* Nested txn: restore the parent's saved page state */
			txn->mt_parent->mt_child = NULL;
			txn->mt_parent->mt_flags &= ~MDB_TXN_HAS_CHILD;
			env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate;
			mdb_midl_free(txn->mt_free_pgs);
			free(txn->mt_u.dirty_list);
		}
		mdb_midl_free(txn->mt_spill_pgs);

		mdb_midl_free(pghead);
	}
#ifdef MDB_VL32
	if (!txn->mt_parent) {
		/* Drop this txn's references on mapped read pages */
		MDB_ID3L el = env->me_rpages, tl = txn->mt_rpages;
		unsigned i, x, n = tl[0].mid;
		pthread_mutex_lock(&env->me_rpmutex);
		for (i = 1; i <= n; i++) {
			if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) {
				/* tmp overflow pages that we didn't share in env */
				munmap(tl[i].mptr, tl[i].mcnt * env->me_psize);
			} else {
				x = mdb_mid3l_search(el, tl[i].mid);
				if (tl[i].mptr == el[x].mptr) {
					el[x].mref--;
				} else {
					/* another tmp overflow page */
					munmap(tl[i].mptr, tl[i].mcnt * env->me_psize);
				}
			}
		}
		pthread_mutex_unlock(&env->me_rpmutex);
		tl[0].mid = 0;
		if (mode & MDB_END_FREE)
			free(tl);
	}
#endif
	if (mode & MDB_END_FREE)
		free(txn);
}
3414 
3415 void
mdb_txn_reset(MDB_txn * txn)3416 mdb_txn_reset(MDB_txn *txn)
3417 {
3418 	if (txn == NULL)
3419 		return;
3420 
3421 	/* This call is only valid for read-only txns */
3422 	if (!(txn->mt_flags & MDB_TXN_RDONLY))
3423 		return;
3424 
3425 	mdb_txn_end(txn, MDB_END_RESET);
3426 }
3427 
3428 void
mdb_txn_abort(MDB_txn * txn)3429 mdb_txn_abort(MDB_txn *txn)
3430 {
3431 	if (txn == NULL)
3432 		return;
3433 
3434 	if (txn->mt_child)
3435 		mdb_txn_abort(txn->mt_child);
3436 
3437 	mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE);
3438 }
3439 
/** Save the freelist as of this transaction to the freeDB.
 * This changes the freelist. Keep trying until it stabilizes.
 *
 * When (MDB_DEVEL) & 2, the changes do not affect #mdb_page_alloc(),
 * it then uses the transaction's original snapshot of the freeDB.
 *
 * The function runs in three phases: (1) a fixed-point loop that
 * deletes consumed freeDB records, writes this txn's freed pages,
 * and reserves space for me_pghead; (2) merging any remaining loose
 * pages into me_pghead; (3) filling the reserved records with the
 * final me_pghead contents.
 */
static int
mdb_freelist_save(MDB_txn *txn)
{
	/* env->me_pghead[] can grow and shrink during this call.
	 * env->me_pglast and txn->mt_free_pgs[] can only grow.
	 * Page numbers cannot disappear from txn->mt_free_pgs[].
	 */
	MDB_cursor mc;
	MDB_env	*env = txn->mt_env;
	int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1;
	txnid_t	pglast = 0, head_id = 0;
	pgno_t	freecnt = 0, *free_pgs, *mop;
	/* head_room/total_room track IDL slots reserved so far for me_pghead */
	ssize_t	head_room = 0, total_room = 0, mop_len, clean_limit;

	mdb_cursor_init(&mc, txn, FREE_DBI, NULL);

	if (env->me_pghead) {
		/* Make sure first page of freeDB is touched and on freelist */
		rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY);
		if (rc && rc != MDB_NOTFOUND)
			return rc;
	}

	if (!env->me_pghead && txn->mt_loose_pgs) {
		/* Put loose page numbers in mt_free_pgs, since
		 * we may be unable to return them to me_pghead.
		 */
		MDB_page *mp = txn->mt_loose_pgs;
		MDB_ID2 *dl = txn->mt_u.dirty_list;
		unsigned x;
		if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0)
			return rc;
		for (; mp; mp = NEXT_LOOSE_PAGE(mp)) {
			mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno);
			/* must also remove from dirty list */
			if (txn->mt_flags & MDB_TXN_WRITEMAP) {
				/* WRITEMAP: dirty list is unsorted, linear scan */
				for (x=1; x<=dl[0].mid; x++)
					if (dl[x].mid == mp->mp_pgno)
						break;
				mdb_tassert(txn, x <= dl[0].mid);
			} else {
				x = mdb_mid2l_search(dl, mp->mp_pgno);
				mdb_tassert(txn, dl[x].mid == mp->mp_pgno);
				mdb_dpage_free(env, mp);
			}
			/* NULL mptr marks the slot for the squash pass below */
			dl[x].mptr = NULL;
		}
		{
			/* squash freed slots out of the dirty list */
			unsigned y;
			for (y=1; dl[y].mptr && y <= dl[0].mid; y++);
			if (y <= dl[0].mid) {
				/* x writes, y reads; skip NULLed entries */
				for(x=y, y++;;) {
					while (!dl[y].mptr && y <= dl[0].mid) y++;
					if (y > dl[0].mid) break;
					dl[x++] = dl[y++];
				}
				dl[0].mid = x-1;
			} else {
				/* all slots freed */
				dl[0].mid = 0;
			}
		}
		txn->mt_loose_pgs = NULL;
		txn->mt_loose_count = 0;
	}

	/* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */
	clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP))
		? SSIZE_MAX : maxfree_1pg;

	for (;;) {
		/* Come back here after each Put() in case freelist changed */
		MDB_val key, data;
		pgno_t *pgs;
		ssize_t j;

		/* If using records from freeDB which we have not yet
		 * deleted, delete them and any we reserved for me_pghead.
		 */
		while (pglast < env->me_pglast) {
			rc = mdb_cursor_first(&mc, &key, NULL);
			if (rc)
				return rc;
			pglast = head_id = *(txnid_t *)key.mv_data;
			/* Deleting invalidates prior reservations; start over */
			total_room = head_room = 0;
			mdb_tassert(txn, pglast <= env->me_pglast);
			rc = mdb_cursor_del(&mc, 0);
			if (rc)
				return rc;
		}

		/* Save the IDL of pages freed by this txn, to a single record */
		if (freecnt < txn->mt_free_pgs[0]) {
			if (!freecnt) {
				/* Make sure last page of freeDB is touched and on freelist */
				rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY);
				if (rc && rc != MDB_NOTFOUND)
					return rc;
			}
			free_pgs = txn->mt_free_pgs;
			/* Write to last page of freeDB */
			key.mv_size = sizeof(txn->mt_txnid);
			key.mv_data = &txn->mt_txnid;
			do {
				freecnt = free_pgs[0];
				data.mv_size = MDB_IDL_SIZEOF(free_pgs);
				rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE);
				if (rc)
					return rc;
				/* Retry if mt_free_pgs[] grew during the Put() */
				free_pgs = txn->mt_free_pgs;
			} while (freecnt < free_pgs[0]);
			mdb_midl_sort(free_pgs);
			memcpy(data.mv_data, free_pgs, data.mv_size);
#if (MDB_DEBUG) > 1
			{
				unsigned int i = free_pgs[0];
				DPRINTF(("IDL write txn %"Yu" root %"Yu" num %u",
					txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i));
				for (; i; i--)
					DPRINTF(("IDL %"Yu, free_pgs[i]));
			}
#endif
			/* The Put() may have changed the freelist; loop again */
			continue;
		}

		mop = env->me_pghead;
		mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count;

		/* Reserve records for me_pghead[]. Split it if multi-page,
		 * to avoid searching freeDB for a page range. Use keys in
		 * range [1,me_pglast]: Smaller than txnid of oldest reader.
		 */
		if (total_room >= mop_len) {
			/* Enough room reserved; exit once it exactly fits, or
			 * after one extra pass in case the freelist shrank.
			 */
			if (total_room == mop_len || --more < 0)
				break;
		} else if (head_room >= maxfree_1pg && head_id > 1) {
			/* Keep current record (overflow page), add a new one */
			head_id--;
			head_room = 0;
		}
		/* (Re)write {key = head_id, IDL length = head_room} */
		total_room -= head_room;
		head_room = mop_len - total_room;
		if (head_room > maxfree_1pg && head_id > 1) {
			/* Overflow multi-page for part of me_pghead */
			head_room /= head_id; /* amortize page sizes */
			head_room += maxfree_1pg - head_room % (maxfree_1pg + 1);
		} else if (head_room < 0) {
			/* Rare case, not bothering to delete this record */
			head_room = 0;
		}
		key.mv_size = sizeof(head_id);
		key.mv_data = &head_id;
		data.mv_size = (head_room + 1) * sizeof(pgno_t);
		rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE);
		if (rc)
			return rc;
		/* IDL is initially empty, zero out at least the length */
		pgs = (pgno_t *)data.mv_data;
		/* Past clean_limit the buffer is known-zeroed; only clear pgs[0] */
		j = head_room > clean_limit ? head_room : 0;
		do {
			pgs[j] = 0;
		} while (--j >= 0);
		total_room += head_room;
	}

	/* Return loose page numbers to me_pghead, though usually none are
	 * left at this point.  The pages themselves remain in dirty_list.
	 */
	if (txn->mt_loose_pgs) {
		MDB_page *mp = txn->mt_loose_pgs;
		unsigned count = txn->mt_loose_count;
		MDB_IDL loose;
		/* Room for loose pages + temp IDL with same */
		if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0)
			return rc;
		mop = env->me_pghead;
		/* Build the temp IDL in the unused tail of the me_pghead buffer */
		loose = mop + MDB_IDL_ALLOCLEN(mop) - count;
		for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp))
			loose[ ++count ] = mp->mp_pgno;
		loose[0] = count;
		mdb_midl_sort(loose);
		mdb_midl_xmerge(mop, loose);
		txn->mt_loose_pgs = NULL;
		txn->mt_loose_count = 0;
		mop_len = mop[0];
	}

	/* Fill in the reserved me_pghead records */
	rc = MDB_SUCCESS;
	if (mop_len) {
		MDB_val key, data;

		/* Walk records in key order, consuming mop from its tail down */
		mop += mop_len;
		rc = mdb_cursor_first(&mc, &key, &data);
		for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) {
			txnid_t id = *(txnid_t *)key.mv_data;
			ssize_t	len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1;
			MDB_ID save;

			mdb_tassert(txn, len >= 0 && id <= env->me_pglast);
			key.mv_data = &id;
			if (len > mop_len) {
				len = mop_len;
				data.mv_size = (len + 1) * sizeof(MDB_ID);
			}
			data.mv_data = mop -= len;
			/* Temporarily stamp the IDL length into mop[0] for the Put */
			save = mop[0];
			mop[0] = len;
			rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT);
			mop[0] = save;
			if (rc || !(mop_len -= len))
				break;
		}
	}
	return rc;
}
3665 
/** Flush (some) dirty pages to the map, after clearing their dirty flag.
 * @param[in] txn the transaction that's being committed
 * @param[in] keep number of initial pages in dirty_list to keep dirty.
 * @return 0 on success, non-zero on failure.
 *
 * On POSIX the pages are batched into writev()/pwritev() calls; on
 * Windows each run is issued as an overlapped WriteFile() and the
 * completions are reaped at the end.  Pages flagged P_LOOSE or P_KEEP
 * are skipped and retained in the dirty list.
 */
static int
mdb_page_flush(MDB_txn *txn, int keep)
{
	MDB_env		*env = txn->mt_env;
	MDB_ID2L	dl = txn->mt_u.dirty_list;
	unsigned	psize = env->me_psize, j;
	int			i, pagecount = dl[0].mid, rc;
	size_t		size = 0;
	MDB_OFF_T	pos = 0;
	pgno_t		pgno = 0;
	MDB_page	*dp = NULL;
#ifdef _WIN32
	OVERLAPPED	*ov = env->ov;
	MDB_page	*wdp;
	int async_i = 0;
	HANDLE fd = (env->me_flags & MDB_NOSYNC) ? env->me_fd : env->me_ovfd;
#else
	struct iovec iov[MDB_COMMIT_PAGES];
	HANDLE fd = env->me_fd;
#endif
	ssize_t		wsize = 0, wres;
	MDB_OFF_T	wpos = 0, next_pos = 1; /* impossible pos, so pos != next_pos */
	int			n = 0;	/* number of pages batched in the current run */

	/* i scans the dirty list; j counts entries kept dirty */
	j = i = keep;
	if (env->me_flags & MDB_WRITEMAP
#ifdef _WIN32
		/* In windows, we still do writes to the file (with write-through enabled in sync mode),
		 * as this is faster than FlushViewOfFile/FlushFileBuffers */
		&& (env->me_flags & MDB_NOSYNC)
#endif
		) {
		/* Writemap: data is already in the map, no write calls needed.
		 * Clear dirty flags */
		while (++i <= pagecount) {
			dp = dl[i].mptr;
			/* Don't flush this page yet */
			if (dp->mp_flags & (P_LOOSE|P_KEEP)) {
				dp->mp_flags &= ~P_KEEP;
				dl[++j] = dl[i];
				continue;
			}
			dp->mp_flags &= ~P_DIRTY;
		}
		goto done;
	}

#ifdef _WIN32
	if (pagecount - keep >= env->ovs) {
		/* ran out of room in ov array, and re-malloc, copy handles and free previous */
		int ovs = (pagecount - keep) * 1.5; /* provide extra padding to reduce number of re-allocations */
		int new_size = ovs * sizeof(OVERLAPPED);
		ov = malloc(new_size);
		if (ov == NULL)
			return ENOMEM;
		int previous_size = env->ovs * sizeof(OVERLAPPED);
		memcpy(ov, env->ov, previous_size); /* Copy previous OVERLAPPED data to retain event handles */
		/* And clear rest of memory */
		memset(&ov[env->ovs], 0, new_size - previous_size);
		if (env->ovs > 0) {
			free(env->ov); /* release previous allocation */
		}

		env->ov = ov;
		env->ovs = ovs;
	}
#endif

	/* Write the pages */
	for (;;) {
		if (++i <= pagecount) {
			dp = dl[i].mptr;
			/* Don't flush this page yet */
			if (dp->mp_flags & (P_LOOSE|P_KEEP)) {
				dp->mp_flags &= ~P_KEEP;
				/* mid=0 marks it for the keep-pass after the loop */
				dl[i].mid = 0;
				continue;
			}
			pgno = dl[i].mid;
			/* clear dirty flag */
			dp->mp_flags &= ~P_DIRTY;
			pos = pgno * psize;
			size = psize;
			if (IS_OVERFLOW(dp)) size *= dp->mp_pages;
		}
		/* Write up to MDB_COMMIT_PAGES dirty pages at a time. */
		if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE
#ifdef _WIN32
			/* If writemap is enabled, consecutive page positions infer
			 * contiguous (mapped) memory.
			 * Otherwise force write pages one at a time.
			 * Windows actually supports scatter/gather I/O, but only on
			 * unbuffered file handles. Since we're relying on the OS page
			 * cache for all our data, that's self-defeating. So we just
			 * write pages one at a time. We use the ov structure to set
			 * the write offset, to at least save the overhead of a Seek
			 * system call.
			 */
			|| !(env->me_flags & MDB_WRITEMAP)
#endif
			) {
			if (n) {
retry_write:
				/* Write previous page(s) */
				DPRINTF(("committing page %"Z"u", pgno));
#ifdef _WIN32
				OVERLAPPED *this_ov = &ov[async_i];
				/* Clear status, and keep hEvent, we reuse that */
				this_ov->Internal = 0;
				this_ov->Offset = wpos & 0xffffffff;
				this_ov->OffsetHigh = wpos >> 16 >> 16;
				if (!F_ISSET(env->me_flags, MDB_NOSYNC) && !this_ov->hEvent) {
					/* Lazily create the per-slot completion event */
					HANDLE event = CreateEvent(NULL, FALSE, FALSE, NULL);
					if (!event) {
						rc = ErrCode();
						DPRINTF(("CreateEvent: %s", strerror(rc)));
						return rc;
					}
					this_ov->hEvent = event;
				}
				if (!WriteFile(fd, wdp, wsize, NULL, this_ov)) {
					rc = ErrCode();
					if (rc != ERROR_IO_PENDING) {
						DPRINTF(("WriteFile: %d", rc));
						return rc;
					}
				}
				async_i++;
#else
#ifdef MDB_USE_PWRITEV
				wres = pwritev(fd, iov, n, wpos);
#else
				if (n == 1) {
					wres = pwrite(fd, iov[0].iov_base, wsize, wpos);
				} else {
retry_seek:
					if (lseek(fd, wpos, SEEK_SET) == -1) {
						rc = ErrCode();
						if (rc == EINTR)
							goto retry_seek;
						DPRINTF(("lseek: %s", strerror(rc)));
						return rc;
					}
					wres = writev(fd, iov, n);
				}
#endif
				if (wres != wsize) {
					if (wres < 0) {
						rc = ErrCode();
						if (rc == EINTR)
							goto retry_write;
						DPRINTF(("Write error: %s", strerror(rc)));
					} else {
						rc = EIO; /* TODO: Use which error code? */
						DPUTS("short write, filesystem full?");
					}
					return rc;
				}
#endif /* _WIN32 */
				n = 0;
			}
			/* Loop runs one extra iteration (i > pagecount) to flush
			 * the final batch */
			if (i > pagecount)
				break;
			/* Start a new run at the current page */
			wpos = pos;
			wsize = 0;
#ifdef _WIN32
			wdp = dp;
		}
#else
		}
		iov[n].iov_len = size;
		iov[n].iov_base = (char *)dp;
#endif	/* _WIN32 */
		DPRINTF(("committing page %"Yu, pgno));
		next_pos = pos + size;
		wsize += size;
		n++;
	}
#ifdef MDB_VL32
	if (pgno > txn->mt_last_pgno)
		txn->mt_last_pgno = pgno;
#endif

#ifdef _WIN32
	if (!F_ISSET(env->me_flags, MDB_NOSYNC)) {
		/* Now wait for all the asynchronous/overlapped sync/write-through writes to complete.
		* We start with the last one so that all the others should already be complete and
		* we reduce thread suspend/resuming (in practice, typically about 99.5% of writes are
		* done after the last write is done) */
		rc = 0;
		while (--async_i >= 0) {
			if (ov[async_i].hEvent) {
				if (!GetOverlappedResult(fd, &ov[async_i], &wres, TRUE)) {
					rc = ErrCode(); /* Continue on so that all the event signals are reset */
				}
			}
		}
		if (rc) { /* any error on GetOverlappedResult, exit now */
			return rc;
		}
	}
#endif	/* _WIN32 */

	if (!(env->me_flags & MDB_WRITEMAP)) {
		/* Don't free pages when using writemap (can only get here in NOSYNC mode in Windows)
		 * MIPS has cache coherency issues, this is a no-op everywhere else
		 * Note: for any size >= on-chip cache size, entire on-chip cache is
		 * flushed.
		 */
		CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE);

		for (i = keep; ++i <= pagecount; ) {
			dp = dl[i].mptr;
			/* This is a page we skipped above */
			if (!dl[i].mid) {
				/* Restore the pgno we zeroed and keep the entry dirty */
				dl[++j] = dl[i];
				dl[j].mid = dp->mp_pgno;
				continue;
			}
			mdb_dpage_free(env, dp);
		}
	}

done:
	/* Account for the pages actually flushed and truncate dirty list
	 * to the kept entries */
	i--;
	txn->mt_dirty_room += i - j;
	dl[0].mid = j;
	return MDB_SUCCESS;
}
3899 
3900 static int ESECT mdb_env_share_locks(MDB_env *env, int *excl);
3901 
int
mdb_txn_commit(MDB_txn *txn)
{
	int		rc;
	unsigned int i, end_mode;
	MDB_env	*env;

	if (txn == NULL)
		return EINVAL;

	/* mdb_txn_end() mode for a commit which writes nothing */
	end_mode = MDB_END_EMPTY_COMMIT|MDB_END_UPDATE|MDB_END_SLOT|MDB_END_FREE;

	/* A child txn must be committed (or have failed) first */
	if (txn->mt_child) {
		rc = mdb_txn_commit(txn->mt_child);
		if (rc)
			goto fail;
	}

	env = txn->mt_env;

	/* Read-only txns have nothing to write; just release the slot */
	if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
		goto done;
	}

	if (txn->mt_flags & (MDB_TXN_FINISHED|MDB_TXN_ERROR)) {
		DPUTS("txn has failed/finished, can't commit");
		if (txn->mt_parent)
			txn->mt_parent->mt_flags |= MDB_TXN_ERROR;
		rc = MDB_BAD_TXN;
		goto fail;
	}

	/* Nested txn: fold all of our state into the parent instead of
	 * writing anything to disk.
	 */
	if (txn->mt_parent) {
		MDB_txn *parent = txn->mt_parent;
		MDB_page **lp;
		MDB_ID2L dst, src;
		MDB_IDL pspill;
		unsigned x, y, len, ps_len;

		/* Append our free list to parent's */
		rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs);
		if (rc)
			goto fail;
		mdb_midl_free(txn->mt_free_pgs);
		/* Failures after this must either undo the changes
		 * to the parent or set MDB_TXN_ERROR in the parent.
		 */

		parent->mt_next_pgno = txn->mt_next_pgno;
		parent->mt_flags = txn->mt_flags;

		/* Merge our cursors into parent's and close them */
		mdb_cursors_close(txn, 1);

		/* Update parent's DB table. */
		memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db));
		parent->mt_numdbs = txn->mt_numdbs;
		parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI];
		parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI];
		for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
			/* preserve parent's DB_NEW status */
			x = parent->mt_dbflags[i] & DB_NEW;
			parent->mt_dbflags[i] = txn->mt_dbflags[i] | x;
		}

		dst = parent->mt_u.dirty_list;
		src = txn->mt_u.dirty_list;
		/* Remove anything in our dirty list from parent's spill list */
		if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) {
			x = y = ps_len;
			/* Sentinel so the "while (pn > pspill[x])" scan terminates */
			pspill[0] = (pgno_t)-1;
			/* Mark our dirty pages as deleted in parent spill list */
			for (i=0, len=src[0].mid; ++i <= len; ) {
				/* Spill entries store pgno<<1; low bit set means deleted */
				MDB_ID pn = src[i].mid << 1;
				while (pn > pspill[x])
					x--;
				if (pn == pspill[x]) {
					pspill[x] = 1;
					y = --x;
				}
			}
			/* Squash deleted pagenums if we deleted any */
			for (x=y; ++x <= ps_len; )
				if (!(pspill[x] & 1))
					pspill[++y] = pspill[x];
			pspill[0] = y;
		}

		/* Remove anything in our spill list from parent's dirty list */
		if (txn->mt_spill_pgs && txn->mt_spill_pgs[0]) {
			for (i=1; i<=txn->mt_spill_pgs[0]; i++) {
				MDB_ID pn = txn->mt_spill_pgs[i];
				if (pn & 1)
					continue;	/* deleted spillpg */
				pn >>= 1;
				y = mdb_mid2l_search(dst, pn);
				if (y <= dst[0].mid && dst[y].mid == pn) {
					free(dst[y].mptr);
					/* Shift remaining entries down over the hole */
					while (y < dst[0].mid) {
						dst[y] = dst[y+1];
						y++;
					}
					dst[0].mid--;
				}
			}
		}

		/* Find len = length of merging our dirty list with parent's */
		x = dst[0].mid;
		dst[0].mid = 0;		/* simplify loops */
		if (parent->mt_parent) {
			/* Count duplicates so the merged length is exact */
			len = x + src[0].mid;
			y = mdb_mid2l_search(src, dst[x].mid + 1) - 1;
			for (i = x; y && i; y--) {
				pgno_t yp = src[y].mid;
				while (yp < dst[i].mid)
					i--;
				if (yp == dst[i].mid) {
					i--;
					len--;
				}
			}
		} else { /* Simplify the above for single-ancestor case */
			len = MDB_IDL_UM_MAX - txn->mt_dirty_room;
		}
		/* Merge our dirty list with parent's: in-place backward merge
		 * of two sorted ID2 lists, child's entry wins on duplicates.
		 */
		y = src[0].mid;
		for (i = len; y; dst[i--] = src[y--]) {
			pgno_t yp = src[y].mid;
			while (yp < dst[x].mid)
				dst[i--] = dst[x--];
			if (yp == dst[x].mid)
				free(dst[x--].mptr);
		}
		mdb_tassert(txn, i == x);
		dst[0].mid = len;
		free(txn->mt_u.dirty_list);
		parent->mt_dirty_room = txn->mt_dirty_room;
		if (txn->mt_spill_pgs) {
			if (parent->mt_spill_pgs) {
				/* TODO: Prevent failure here, so parent does not fail */
				rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
				if (rc)
					parent->mt_flags |= MDB_TXN_ERROR;
				mdb_midl_free(txn->mt_spill_pgs);
				mdb_midl_sort(parent->mt_spill_pgs);
			} else {
				parent->mt_spill_pgs = txn->mt_spill_pgs;
			}
		}

		/* Append our loose page list to parent's */
		for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp))
			;
		*lp = txn->mt_loose_pgs;
		parent->mt_loose_count += txn->mt_loose_count;

		parent->mt_child = NULL;
		mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
		free(txn);
		return rc;
	}

	if (txn != env->me_txn) {
		DPUTS("attempt to commit unknown transaction");
		rc = EINVAL;
		goto fail;
	}

	mdb_cursors_close(txn, 0);

	/* Nothing was written: skip the flush/meta-update entirely */
	if (!txn->mt_u.dirty_list[0].mid &&
		!(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS)))
		goto done;

	DPRINTF(("committing txn %"Yu" %p on mdbenv %p, root page %"Yu,
	    txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root));

	/* Update DB root pointers */
	if (txn->mt_numdbs > CORE_DBS) {
		MDB_cursor mc;
		MDB_dbi i;
		MDB_val data;
		data.mv_size = sizeof(MDB_db);

		mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
		for (i = CORE_DBS; i < txn->mt_numdbs; i++) {
			if (txn->mt_dbflags[i] & DB_DIRTY) {
				if (TXN_DBI_CHANGED(txn, i)) {
					rc = MDB_BAD_DBI;
					goto fail;
				}
				data.mv_data = &txn->mt_dbs[i];
				rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data,
					F_SUBDATA);
				if (rc)
					goto fail;
			}
		}
	}

	rc = mdb_freelist_save(txn);
	if (rc)
		goto fail;

	mdb_midl_free(env->me_pghead);
	env->me_pghead = NULL;
	mdb_midl_shrink(&txn->mt_free_pgs);

#if (MDB_DEBUG) > 2
	mdb_audit(txn);
#endif

	/* Data pages first, then sync, then the meta page: ordering is
	 * what makes the commit durable/atomic.
	 */
	if ((rc = mdb_page_flush(txn, 0)))
		goto fail;
	if (!F_ISSET(txn->mt_flags, MDB_TXN_NOSYNC) &&
		(rc = mdb_env_sync0(env, 0, txn->mt_next_pgno)))
		goto fail;
	if ((rc = mdb_env_write_meta(txn)))
		goto fail;
	end_mode = MDB_END_COMMITTED|MDB_END_UPDATE;
	if (env->me_flags & MDB_PREVSNAPSHOT) {
		if (!(env->me_flags & MDB_NOLOCK)) {
			int excl;
			rc = mdb_env_share_locks(env, &excl);
			if (rc)
				goto fail;
		}
		env->me_flags ^= MDB_PREVSNAPSHOT;
	}

done:
	mdb_txn_end(txn, end_mode);
	return MDB_SUCCESS;

fail:
	mdb_txn_abort(txn);
	return rc;
}
4142 
/** Read the environment parameters of a DB environment before
 * mapping it into memory.
 * @param[in] env the environment handle
 * @param[in] prev whether to read the backup meta page
 * @param[out] meta address of where to store the meta information
 * @return 0 on success, non-zero on failure.
 */
static int ESECT
mdb_env_read_header(MDB_env *env, int prev, MDB_meta *meta)
{
	MDB_metabuf	pbuf;
	MDB_page	*p;
	MDB_meta	*m;
	int			i, rc, off;
	enum { Size = sizeof(pbuf) };

	/* We don't know the page size yet, so use a minimum value.
	 * Read both meta pages so we can use the latest one.
	 */

	/* Note: off for the second iteration uses meta->mm_psize, which
	 * was copied from the first meta page (off == 0 always copies).
	 */
	for (i=off=0; i<NUM_METAS; i++, off += meta->mm_psize) {
#ifdef _WIN32
		DWORD len;
		OVERLAPPED ov;
		memset(&ov, 0, sizeof(ov));
		ov.Offset = off;
		rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ? (int)len : -1;
		if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF)
			rc = 0;
#else
		rc = pread(env->me_fd, &pbuf, Size, off);
#endif
		if (rc != Size) {
			/* Empty file at offset 0 means a brand-new environment */
			if (rc == 0 && off == 0)
				return ENOENT;
			rc = rc < 0 ? (int) ErrCode() : MDB_INVALID;
			DPRINTF(("read: %s", mdb_strerror(rc)));
			return rc;
		}

		p = (MDB_page *)&pbuf;

		if (!F_ISSET(p->mp_flags, P_META)) {
			DPRINTF(("page %"Yu" not a meta page", p->mp_pgno));
			return MDB_INVALID;
		}

		m = METADATA(p);
		if (m->mm_magic != MDB_MAGIC) {
			DPUTS("meta has invalid magic");
			return MDB_INVALID;
		}

		if (m->mm_version != MDB_DATA_VERSION) {
			DPRINTF(("database is version %u, expected version %u",
				m->mm_version, MDB_DATA_VERSION));
			return MDB_VERSION_MISMATCH;
		}

		/* Keep the newer meta, or the older one when prev is set */
		if (off == 0 || (prev ? m->mm_txnid < meta->mm_txnid : m->mm_txnid > meta->mm_txnid))
			*meta = *m;
	}
	return 0;
}
4207 
4208 /** Fill in most of the zeroed #MDB_meta for an empty database environment */
4209 static void ESECT
mdb_env_init_meta0(MDB_env * env,MDB_meta * meta)4210 mdb_env_init_meta0(MDB_env *env, MDB_meta *meta)
4211 {
4212 	meta->mm_magic = MDB_MAGIC;
4213 	meta->mm_version = MDB_DATA_VERSION;
4214 	meta->mm_mapsize = env->me_mapsize;
4215 	meta->mm_psize = env->me_psize;
4216 	meta->mm_last_pg = NUM_METAS-1;
4217 	meta->mm_flags = env->me_flags & 0xffff;
4218 	meta->mm_flags |= MDB_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */
4219 	meta->mm_dbs[FREE_DBI].md_root = P_INVALID;
4220 	meta->mm_dbs[MAIN_DBI].md_root = P_INVALID;
4221 }
4222 
/** Write the environment parameters of a freshly created DB environment.
 * Writes both meta pages (page 0 and page 1) in a single call,
 * each carrying an identical copy of @a meta.
 * @param[in] env the environment handle
 * @param[in] meta the #MDB_meta to write
 * @return 0 on success, non-zero on failure.
 */
static int ESECT
mdb_env_init_meta(MDB_env *env, MDB_meta *meta)
{
	MDB_page *p, *q;
	int rc;
	unsigned int	 psize;
#ifdef _WIN32
	DWORD len;
	OVERLAPPED ov;
	memset(&ov, 0, sizeof(ov));
/* DO_PWRITE: positional write abstraction; Win32 uses OVERLAPPED offset,
 * POSIX uses pwrite() with an EINTR retry loop. */
#define DO_PWRITE(rc, fd, ptr, size, len, pos)	do { \
	ov.Offset = pos;	\
	rc = WriteFile(fd, ptr, size, &len, &ov);	} while(0)
#else
	int len;
#define DO_PWRITE(rc, fd, ptr, size, len, pos)	do { \
	len = pwrite(fd, ptr, size, pos);	\
	if (len == -1 && ErrCode() == EINTR) continue; \
	rc = (len >= 0); break; } while(1)
#endif
	DPUTS("writing new meta page");

	psize = env->me_psize;

	/* One zeroed buffer holding both meta pages, written in one call */
	p = calloc(NUM_METAS, psize);
	if (!p)
		return ENOMEM;
	p->mp_pgno = 0;
	p->mp_flags = P_META;
	*(MDB_meta *)METADATA(p) = *meta;

	q = (MDB_page *)((char *)p + psize);
	q->mp_pgno = 1;
	q->mp_flags = P_META;
	*(MDB_meta *)METADATA(q) = *meta;

	DO_PWRITE(rc, env->me_fd, p, psize * NUM_METAS, len, 0);
	if (!rc)
		rc = ErrCode();
	else if ((unsigned) len == psize * NUM_METAS)
		rc = MDB_SUCCESS;
	else
		rc = ENOSPC;	/* short write */
	free(p);
	return rc;
}
4274 
/** Update the environment info to commit a transaction.
 * Writes the new snapshot's roots and txnid into the meta page
 * selected by (txnid & 1).  On write failure the old meta contents
 * are written back so a torn/new meta is never trusted.
 * @param[in] txn the transaction that's being committed
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_env_write_meta(MDB_txn *txn)
{
	MDB_env *env;
	MDB_meta	meta, metab, *mp;
	unsigned flags;
	mdb_size_t mapsize;
	MDB_OFF_T off;
	int rc, len, toggle;
	char *ptr;
	HANDLE mfd;
#ifdef _WIN32
	OVERLAPPED ov;
#else
	int r2;
#endif

	/* Meta pages alternate by txnid parity */
	toggle = txn->mt_txnid & 1;
	DPRINTF(("writing meta page %d for root page %"Yu,
		toggle, txn->mt_dbs[MAIN_DBI].md_root));

	env = txn->mt_env;
	flags = txn->mt_flags | env->me_flags;
	mp = env->me_metas[toggle];
	mapsize = env->me_metas[toggle ^ 1]->mm_mapsize;
	/* Persist any increases of mapsize config */
	if (mapsize < env->me_mapsize)
		mapsize = env->me_mapsize;

#ifndef _WIN32 /* We don't want to ever use MSYNC/FlushViewOfFile in Windows */
	if (flags & MDB_WRITEMAP) {
		/* Writemap: update the mapped meta page directly */
		mp->mm_mapsize = mapsize;
		mp->mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI];
		mp->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
		mp->mm_last_pg = txn->mt_next_pgno - 1;
#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 404) && /* TODO: portability */	\
	!(defined(__i386__) || defined(__x86_64__))
		/* LY: issue a memory barrier, if not x86. ITS#7969 */
		__sync_synchronize();
#endif
		/* txnid is written last; it is what validates the meta page */
		mp->mm_txnid = txn->mt_txnid;
		if (!(flags & (MDB_NOMETASYNC|MDB_NOSYNC))) {
			unsigned meta_size = env->me_psize;
			rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC;
			ptr = (char *)mp - PAGEHDRSZ;
			/* POSIX msync() requires ptr = start of OS page */
			r2 = (ptr - env->me_map) & (env->me_os_psize - 1);
			ptr -= r2;
			meta_size += r2;
			if (MDB_MSYNC(ptr, meta_size, rc)) {
				rc = ErrCode();
				goto fail;
			}
		}
		goto done;
	}
#endif
	/* Save the old meta values for write-back on failure below */
	metab.mm_txnid = mp->mm_txnid;
	metab.mm_last_pg = mp->mm_last_pg;

	meta.mm_mapsize = mapsize;
	meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI];
	meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
	meta.mm_last_pg = txn->mt_next_pgno - 1;
	meta.mm_txnid = txn->mt_txnid;

	/* Only write the fields from mm_mapsize onward; the header
	 * (magic/version/psize/flags) never changes.
	 */
	off = offsetof(MDB_meta, mm_mapsize);
	ptr = (char *)&meta + off;
	len = sizeof(MDB_meta) - off;
	off += (char *)mp - env->me_map;

	/* Write to the SYNC fd unless MDB_NOSYNC/MDB_NOMETASYNC.
	 * (me_mfd goes to the same file as me_fd, but writing to it
	 * also syncs to disk.  Avoids a separate fdatasync() call.)
	 */
	mfd = (flags & (MDB_NOSYNC|MDB_NOMETASYNC)) ? env->me_fd : env->me_mfd;
#ifdef _WIN32
	{
		memset(&ov, 0, sizeof(ov));
		ov.Offset = off;
		if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov))
			rc = -1;
	}
#else
retry_write:
	rc = pwrite(mfd, ptr, len, off);
#endif
	if (rc != len) {
		rc = rc < 0 ? ErrCode() : EIO;
#ifndef _WIN32
		if (rc == EINTR)
			goto retry_write;
#endif
		DPUTS("write failed, disk error?");
		/* On a failure, the pagecache still contains the new data.
		 * Write some old data back, to prevent it from being used.
		 * Use the non-SYNC fd; we know it will fail anyway.
		 */
		meta.mm_last_pg = metab.mm_last_pg;
		meta.mm_txnid = metab.mm_txnid;
#ifdef _WIN32
		memset(&ov, 0, sizeof(ov));
		ov.Offset = off;
		WriteFile(env->me_fd, ptr, len, NULL, &ov);
#else
		r2 = pwrite(env->me_fd, ptr, len, off);
		(void)r2;	/* Silence warnings. We don't care about pwrite's return value */
#endif
fail:
		env->me_flags |= MDB_FATAL_ERROR;
		return rc;
	}
	/* MIPS has cache coherency issues, this is a no-op everywhere else */
	CACHEFLUSH(env->me_map + off, len, DCACHE);
done:
	/* Memory ordering issues are irrelevant; since the entire writer
	 * is wrapped by wmutex, all of these changes will become visible
	 * after the wmutex is unlocked. Since the DB is multi-version,
	 * readers will get consistent data regardless of how fresh or
	 * how stale their view of these values is.
	 */
	if (env->me_txns)
		env->me_txns->mti_txnid = txn->mt_txnid;

	return MDB_SUCCESS;
}
4405 
4406 /** Check both meta pages to see which one is newer.
4407  * @param[in] env the environment handle
4408  * @return newest #MDB_meta.
4409  */
4410 static MDB_meta *
mdb_env_pick_meta(const MDB_env * env)4411 mdb_env_pick_meta(const MDB_env *env)
4412 {
4413 	MDB_meta *const *metas = env->me_metas;
4414 	return metas[ (metas[0]->mm_txnid < metas[1]->mm_txnid) ^
4415 		((env->me_flags & MDB_PREVSNAPSHOT) != 0) ];
4416 }
4417 
int ESECT
mdb_env_create(MDB_env **env)
{
	MDB_env *e;

	e = calloc(1, sizeof(MDB_env));
	if (!e)
		return ENOMEM;

	/* Defaults; callers may adjust these before mdb_env_open() */
	e->me_maxreaders = DEFAULT_READERS;
	e->me_maxdbs = e->me_numdbs = CORE_DBS;
	/* No files are open yet */
	e->me_fd = INVALID_HANDLE_VALUE;
	e->me_lfd = INVALID_HANDLE_VALUE;
	e->me_mfd = INVALID_HANDLE_VALUE;
#ifdef MDB_USE_POSIX_SEM
	e->me_rmutex = SEM_FAILED;
	e->me_wmutex = SEM_FAILED;
#elif defined MDB_USE_SYSV_SEM
	e->me_rmutex->semid = -1;
	e->me_wmutex->semid = -1;
#endif
	e->me_pid = getpid();	/* cache our pid */
	GET_PAGESIZE(e->me_os_psize);
	VGMEMP_CREATE(e,0,0);
	*env = e;
	return MDB_SUCCESS;
}
4445 
#ifdef _WIN32
/** @brief Map a result from an NTAPI call to WIN32. */
static DWORD
mdb_nt2win32(NTSTATUS st)
{
	OVERLAPPED o = {0};
	DWORD br;
	/* Stuff the NTSTATUS into an OVERLAPPED and let the Win32 layer
	 * translate it: GetOverlappedResult() fails, converting the status
	 * into the thread's last-error value, which we then fetch.
	 */
	o.Internal = st;
	GetOverlappedResult(NULL, &o, &br, FALSE);
	return GetLastError();
}
#endif
4458 
/** Memory-map the DB file into #MDB_env.%me_map and locate the meta pages.
 * @param[in] env the environment handle
 * @param[in] addr Desired mapping address (from a prior MDB_FIXEDMAP
 *	meta page), or NULL to let the OS choose.
 * @return 0 on success, non-zero on failure.
 */
static int ESECT
mdb_env_map(MDB_env *env, void *addr)
{
	MDB_page *p;
	unsigned int flags = env->me_flags;
#ifdef _WIN32
	int rc;
	int access = SECTION_MAP_READ;
	HANDLE mh;
	void *map;
	SIZE_T msize;
	ULONG pageprot = PAGE_READONLY, secprot, alloctype;

	if (flags & MDB_WRITEMAP) {
		access |= SECTION_MAP_WRITE;
		pageprot = PAGE_READWRITE;
	}
	if (flags & MDB_RDONLY) {
		secprot = PAGE_READONLY;
		msize = 0;
		alloctype = 0;
	} else {
		secprot = PAGE_READWRITE;
		msize = env->me_mapsize;
		alloctype = MEM_RESERVE;
	}

	/** Some users are afraid of seeing their disk space getting used
	 * all at once, so the default is now to do incremental file growth.
	 * But that has a large performance impact, so give the option of
	 * allocating the file up front.
	 */
#ifdef MDB_FIXEDSIZE
	LARGE_INTEGER fsize;
	fsize.LowPart = msize & 0xffffffff;
	/* Two 16-bit shifts instead of one >>32: a single 32-bit shift
	 * would be undefined behavior when SIZE_T is 32 bits wide.
	 */
	fsize.HighPart = msize >> 16 >> 16;
	rc = NtCreateSection(&mh, access, NULL, &fsize, secprot, SEC_RESERVE, env->me_fd);
#else
	rc = NtCreateSection(&mh, access, NULL, NULL, secprot, SEC_RESERVE, env->me_fd);
#endif
	if (rc)
		return mdb_nt2win32(rc);
	map = addr;
#ifdef MDB_VL32
	/* VL32 maps only the meta pages up front; data is mapped in
	 * chunks on demand elsewhere.
	 */
	msize = NUM_METAS * env->me_psize;
#endif
	rc = NtMapViewOfSection(mh, GetCurrentProcess(), &map, 0, 0, NULL, &msize, ViewUnmap, alloctype, pageprot);
#ifdef MDB_VL32
	env->me_fmh = mh;	/* keep the section handle for later chunk maps */
#else
	NtClose(mh);
#endif
	if (rc)
		return mdb_nt2win32(rc);
	env->me_map = map;
#else
	int mmap_flags = MAP_SHARED;
	int prot = PROT_READ;
#ifdef MAP_NOSYNC	/* Used on FreeBSD */
	if (flags & MDB_NOSYNC)
		mmap_flags |= MAP_NOSYNC;
#endif
#ifdef MDB_VL32
	(void) flags;
	env->me_map = mmap(addr, NUM_METAS * env->me_psize, prot, mmap_flags,
		env->me_fd, 0);
	if (env->me_map == MAP_FAILED) {
		env->me_map = NULL;
		return ErrCode();
	}
#else
	if (flags & MDB_WRITEMAP) {
		prot |= PROT_WRITE;
		/* Grow the file now; stores through the map cannot extend it */
		if (ftruncate(env->me_fd, env->me_mapsize) < 0)
			return ErrCode();
	}
	env->me_map = mmap(addr, env->me_mapsize, prot, mmap_flags,
		env->me_fd, 0);
	if (env->me_map == MAP_FAILED) {
		env->me_map = NULL;
		return ErrCode();
	}

	if (flags & MDB_NORDAHEAD) {
		/* Turn off readahead. It's harmful when the DB is larger than RAM. */
#ifdef MADV_RANDOM
		madvise(env->me_map, env->me_mapsize, MADV_RANDOM);
#else
#ifdef POSIX_MADV_RANDOM
		posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM);
#endif /* POSIX_MADV_RANDOM */
#endif /* MADV_RANDOM */
	}
#endif /* _WIN32 */

	/* Can happen because the address argument to mmap() is just a
	 * hint.  mmap() can pick another, e.g. if the range is in use.
	 * The MAP_FIXED flag would prevent that, but then mmap could
	 * instead unmap existing pages to make room for the new map.
	 */
	if (addr && env->me_map != addr)
		return EBUSY;	/* TODO: Make a new MDB_* error code? */
#endif

	/* The two meta pages live at the very front of the map */
	p = (MDB_page *)env->me_map;
	env->me_metas[0] = METADATA(p);
	env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize);

	return MDB_SUCCESS;
}
4569 
/* Set or adopt the map size; remaps an already-open env (non-VL32). */
int ESECT
mdb_env_set_mapsize(MDB_env *env, mdb_size_t size)
{
	/* If env is already open, caller is responsible for making
	 * sure there are no active txns.
	 */
	if (env->me_map) {
		MDB_meta *meta;
#ifndef MDB_VL32
		void *old;
		int rc;
#endif
		if (env->me_txn)
			return EINVAL;
		/* size == 0 means "adopt the persistent size from the file" */
		meta = mdb_env_pick_meta(env);
		if (!size)
			size = meta->mm_mapsize;
		{
			/* Silently round up to minimum if the size is too small */
			mdb_size_t minsize = (meta->mm_last_pg + 1) * env->me_psize;
			if (size < minsize)
				size = minsize;
		}
#ifndef MDB_VL32
		/* For MDB_VL32 this bit is a noop since we dynamically remap
		 * chunks of the DB anyway.
		 */
		munmap(env->me_map, env->me_mapsize);
		env->me_mapsize = size;
		/* Keep the old address only when a fixed mapping is demanded */
		old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL;
		rc = mdb_env_map(env, old);
		if (rc)
			return rc;
#endif /* !MDB_VL32 */
	}
	env->me_mapsize = size;
	if (env->me_psize)
		env->me_maxpg = env->me_mapsize / env->me_psize;
	return MDB_SUCCESS;
}
4610 
4611 int ESECT
mdb_env_set_maxdbs(MDB_env * env,MDB_dbi dbs)4612 mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs)
4613 {
4614 	if (env->me_map)
4615 		return EINVAL;
4616 	env->me_maxdbs = dbs + CORE_DBS;
4617 	return MDB_SUCCESS;
4618 }
4619 
4620 int ESECT
mdb_env_set_maxreaders(MDB_env * env,unsigned int readers)4621 mdb_env_set_maxreaders(MDB_env *env, unsigned int readers)
4622 {
4623 	if (env->me_map || readers < 1)
4624 		return EINVAL;
4625 	env->me_maxreaders = readers;
4626 	return MDB_SUCCESS;
4627 }
4628 
4629 int ESECT
mdb_env_get_maxreaders(MDB_env * env,unsigned int * readers)4630 mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers)
4631 {
4632 	if (!env || !readers)
4633 		return EINVAL;
4634 	*readers = env->me_maxreaders;
4635 	return MDB_SUCCESS;
4636 }
4637 
/** Get the size of an open file.
 * @param[in] fd File handle.
 * @param[out] size Receives the current file size in bytes.
 * @return 0 on success, non-zero on failure.
 */
static int ESECT
mdb_fsize(HANDLE fd, mdb_size_t *size)
{
#ifdef _WIN32
	LARGE_INTEGER fsize;

	if (!GetFileSizeEx(fd, &fsize))
		return ErrCode();

	*size = fsize.QuadPart;
#else
	struct stat st;

	if (fstat(fd, &st))
		return ErrCode();

	*size = st.st_size;
#endif
	return MDB_SUCCESS;
}
4658 
4659 
#ifdef _WIN32
/** Character type for file names: char on Unix, wchar_t on Windows */
typedef wchar_t	mdb_nchar_t;
# define MDB_NAME(str)	L##str	/**< #mdb_nchar_t[] string literal */
# define mdb_name_cpy	wcscpy	/**< Copy name (#mdb_nchar_t string) */
#else
/** Character type for file names: char on Unix, wchar_t on Windows */
typedef char	mdb_nchar_t;
# define MDB_NAME(str)	str		/**< #mdb_nchar_t[] string literal */
# define mdb_name_cpy	strcpy	/**< Copy name (#mdb_nchar_t string) */
#endif

/** Filename - string of #mdb_nchar_t[] */
typedef struct MDB_name {
	int mn_len;					/**< Length  */
	int mn_alloced;				/**< True if #mn_val was malloced */
	mdb_nchar_t	*mn_val;		/**< Contents */
} MDB_name;

/** Filename suffixes [datafile,lockfile][without,with MDB_NOSUBDIR] */
static const mdb_nchar_t *const mdb_suffixes[2][2] = {
	{ MDB_NAME("/data.mdb"), MDB_NAME("")      },
	{ MDB_NAME("/lock.mdb"), MDB_NAME("-lock") }
};

#define MDB_SUFFLEN 9	/**< Max string length in #mdb_suffixes[] */
4685 
4686 /** Set up filename + scratch area for filename suffix, for opening files.
4687  * It should be freed with #mdb_fname_destroy().
4688  * On Windows, paths are converted from char *UTF-8 to wchar_t *UTF-16.
4689  *
4690  * @param[in] path Pathname for #mdb_env_open().
4691  * @param[in] envflags Whether a subdir and/or lockfile will be used.
4692  * @param[out] fname Resulting filename, with room for a suffix if necessary.
4693  */
4694 static int ESECT
mdb_fname_init(const char * path,unsigned envflags,MDB_name * fname)4695 mdb_fname_init(const char *path, unsigned envflags, MDB_name *fname)
4696 {
4697 	int no_suffix = F_ISSET(envflags, MDB_NOSUBDIR|MDB_NOLOCK);
4698 	fname->mn_alloced = 0;
4699 #ifdef _WIN32
4700 	return utf8_to_utf16(path, fname, no_suffix ? 0 : MDB_SUFFLEN);
4701 #else
4702 	fname->mn_len = strlen(path);
4703 	if (no_suffix)
4704 		fname->mn_val = (char *) path;
4705 	else if ((fname->mn_val = malloc(fname->mn_len + MDB_SUFFLEN+1)) != NULL) {
4706 		fname->mn_alloced = 1;
4707 		strcpy(fname->mn_val, path);
4708 	}
4709 	else
4710 		return ENOMEM;
4711 	return MDB_SUCCESS;
4712 #endif
4713 }
4714 
/** Destroy \b fname from #mdb_fname_init() */
#define mdb_fname_destroy(fname) \
	do { if ((fname).mn_alloced) free((fname).mn_val); } while (0)

#ifdef O_CLOEXEC /* POSIX.1-2008: Set FD_CLOEXEC atomically at open() */
# define MDB_CLOEXEC		O_CLOEXEC
#else
# define MDB_CLOEXEC		0	/**< No O_CLOEXEC; FD_CLOEXEC is set via fcntl() in mdb_fopen() */
#endif
4724 
/** File type, access mode etc. for #mdb_fopen() */
enum mdb_fopen_type {
#ifdef _WIN32
	/* Distinct tags only; the actual CreateFileW() parameters are
	 * selected in mdb_fopen()'s switch statement.
	 */
	MDB_O_RDONLY, MDB_O_RDWR, MDB_O_OVERLAPPED, MDB_O_META, MDB_O_COPY, MDB_O_LOCKS
#else
	/* A comment in mdb_fopen() explains some O_* flag choices. */
	MDB_O_RDONLY= O_RDONLY,                            /**< for RDONLY me_fd */
	MDB_O_RDWR  = O_RDWR  |O_CREAT,                    /**< for me_fd */
	MDB_O_META  = O_WRONLY|MDB_DSYNC     |MDB_CLOEXEC, /**< for me_mfd */
	MDB_O_COPY  = O_WRONLY|O_CREAT|O_EXCL|MDB_CLOEXEC, /**< for #mdb_env_copy() */
	/** Bitmask for open() flags in enum #mdb_fopen_type.  The other bits
	 * distinguish otherwise-equal MDB_O_* constants from each other.
	 */
	MDB_O_MASK  = MDB_O_RDWR|MDB_CLOEXEC | MDB_O_RDONLY|MDB_O_META|MDB_O_COPY,
	MDB_O_LOCKS = MDB_O_RDWR|MDB_CLOEXEC | ((MDB_O_MASK+1) & ~MDB_O_MASK) /**< for me_lfd */
#endif
};
4742 
/** Open an LMDB file.
 * @param[in] env	The LMDB environment.
 * @param[in,out] fname	Path from from #mdb_fname_init().  A suffix is
 * appended if necessary to create the filename, without changing mn_len.
 * @param[in] which	Determines file type, access mode, etc.
 * @param[in] mode	The Unix permissions for the file, if we create it.
 * @param[out] res	Resulting file handle.
 * @return 0 on success, non-zero on failure.
 */
static int ESECT
mdb_fopen(const MDB_env *env, MDB_name *fname,
	enum mdb_fopen_type which, mdb_mode_t mode,
	HANDLE *res)
{
	int rc = MDB_SUCCESS;
	HANDLE fd;
#ifdef _WIN32
	DWORD acc, share, disp, attrs;
#else
	int flags;
#endif

	if (fname->mn_alloced)		/* modifiable copy */
		mdb_name_cpy(fname->mn_val + fname->mn_len,
			mdb_suffixes[which==MDB_O_LOCKS][F_ISSET(env->me_flags, MDB_NOSUBDIR)]);

	/* The directory must already exist.  Usually the file need not.
	 * MDB_O_META requires the file because we already created it using
	 * MDB_O_RDWR.  MDB_O_COPY must not overwrite an existing file.
	 *
	 * With MDB_O_COPY we do not want the OS to cache the writes, since
	 * the source data is already in the OS cache.
	 *
	 * The lockfile needs FD_CLOEXEC (close file descriptor on exec*())
	 * to avoid the flock() issues noted under Caveats in lmdb.h.
	 * Also set it for other filehandles which the user cannot get at
	 * and close himself, which he may need after fork().  I.e. all but
	 * me_fd, which programs do use via mdb_env_get_fd().
	 */

#ifdef _WIN32
	acc = GENERIC_READ|GENERIC_WRITE;
	share = FILE_SHARE_READ|FILE_SHARE_WRITE;
	disp = OPEN_ALWAYS;
	attrs = FILE_ATTRIBUTE_NORMAL;
	switch (which) {
	case MDB_O_OVERLAPPED: 	/* for unbuffered asynchronous writes (write-through mode)*/
		acc = GENERIC_WRITE;
		disp = OPEN_EXISTING;
		attrs = FILE_FLAG_OVERLAPPED|FILE_FLAG_WRITE_THROUGH;
		break;
	case MDB_O_RDONLY:			/* read-only datafile */
		acc = GENERIC_READ;
		disp = OPEN_EXISTING;
		break;
	case MDB_O_META:			/* for writing metapages */
		acc = GENERIC_WRITE;
		disp = OPEN_EXISTING;
		attrs = FILE_ATTRIBUTE_NORMAL|FILE_FLAG_WRITE_THROUGH;
		break;
	case MDB_O_COPY:			/* mdb_env_copy() & co */
		acc = GENERIC_WRITE;
		share = 0;
		disp = CREATE_NEW;
		attrs = FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH;
		break;
	default: break;	/* silence gcc -Wswitch (not all enum values handled) */
	}
	fd = CreateFileW(fname->mn_val, acc, share, NULL, disp, attrs, NULL);
#else
	/* The low bits of `which` are the O_* flags; see enum mdb_fopen_type */
	fd = open(fname->mn_val, which & MDB_O_MASK, mode);
#endif

	if (fd == INVALID_HANDLE_VALUE)
		rc = ErrCode();
#ifndef _WIN32
	else {
		if (which != MDB_O_RDONLY && which != MDB_O_RDWR) {
			/* Set CLOEXEC if we could not pass it to open() */
			if (!MDB_CLOEXEC && (flags = fcntl(fd, F_GETFD)) != -1)
				(void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
		}
		if (which == MDB_O_COPY && env->me_psize >= env->me_os_psize) {
			/* This may require buffer alignment.  There is no portable
			 * way to ask how much, so we require OS pagesize alignment.
			 */
# ifdef F_NOCACHE	/* __APPLE__ */
			(void) fcntl(fd, F_NOCACHE, 1);
# elif defined O_DIRECT
			/* open(...O_DIRECT...) would break on filesystems without
			 * O_DIRECT support (ITS#7682). Try to set it here instead.
			 */
			if ((flags = fcntl(fd, F_GETFL)) != -1)
				(void) fcntl(fd, F_SETFL, flags | O_DIRECT);
# endif
		}
	}
#endif	/* !_WIN32 */

	*res = fd;
	return rc;
}
4845 
4846 
4847 #ifdef BROKEN_FDATASYNC
4848 #include <sys/utsname.h>
4849 #include <sys/vfs.h>
4850 #endif
4851 
/** Further setup required for opening an LMDB environment
 */
static int ESECT
mdb_env_open2(MDB_env *env, int prev)
{
	unsigned int flags = env->me_flags;
	int i, newenv = 0, rc;
	MDB_meta meta;

#ifdef _WIN32
	/* See if we should use QueryLimited */
	rc = GetVersion();
	if ((rc & 0xff) > 5)
		env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION;
	else
		env->me_pidquery = PROCESS_QUERY_INFORMATION;
	/* Grab functions we need from NTDLL */
	if (!NtCreateSection) {
		HMODULE h = GetModuleHandleW(L"NTDLL.DLL");
		if (!h)
			return MDB_PROBLEM;
		NtClose = (NtCloseFunc *)GetProcAddress(h, "NtClose");
		if (!NtClose)
			return MDB_PROBLEM;
		NtMapViewOfSection = (NtMapViewOfSectionFunc *)GetProcAddress(h, "NtMapViewOfSection");
		if (!NtMapViewOfSection)
			return MDB_PROBLEM;
		NtCreateSection = (NtCreateSectionFunc *)GetProcAddress(h, "NtCreateSection");
		if (!NtCreateSection)
			return MDB_PROBLEM;
	}
	env->ovs = 0;
#endif /* _WIN32 */

#ifdef BROKEN_FDATASYNC
	/* ext3/ext4 fdatasync is broken on some older Linux kernels.
	 * https://lkml.org/lkml/2012/9/3/83
	 * Kernels after 3.6-rc6 are known good.
	 * https://lkml.org/lkml/2012/9/10/556
	 * See if the DB is on ext3/ext4, then check for new enough kernel
	 * Kernels 2.6.32.60, 2.6.34.15, 3.2.30, and 3.5.4 are also known
	 * to be patched.
	 */
	{
		struct statfs st;
		fstatfs(env->me_fd, &st);
		/* 0xEF53 is the ext2/3/4 superblock magic.  The "while" is
		 * only a breakable scope; it never actually loops.
		 */
		while (st.f_type == 0xEF53) {
			struct utsname uts;
			int i;
			uname(&uts);
			if (uts.release[0] < '3') {
				if (!strncmp(uts.release, "2.6.32.", 7)) {
					i = atoi(uts.release+7);
					if (i >= 60)
						break;	/* 2.6.32.60 and newer is OK */
				} else if (!strncmp(uts.release, "2.6.34.", 7)) {
					i = atoi(uts.release+7);
					if (i >= 15)
						break;	/* 2.6.34.15 and newer is OK */
				}
			} else if (uts.release[0] == '3') {
				i = atoi(uts.release+2);
				if (i > 5)
					break;	/* 3.6 and newer is OK */
				if (i == 5) {
					i = atoi(uts.release+4);
					if (i >= 4)
						break;	/* 3.5.4 and newer is OK */
				} else if (i == 2) {
					i = atoi(uts.release+4);
					if (i >= 30)
						break;	/* 3.2.30 and newer is OK */
				}
			} else {	/* 4.x and newer is OK */
				break;
			}
			/* Old broken kernel: force full fsync instead of fdatasync */
			env->me_flags |= MDB_FSYNCONLY;
			break;
		}
	}
#endif

	if ((i = mdb_env_read_header(env, prev, &meta)) != 0) {
		if (i != ENOENT)
			return i;
		/* No meta page yet: brand-new environment */
		DPUTS("new mdbenv");
		newenv = 1;
		env->me_psize = env->me_os_psize;
		if (env->me_psize > MAX_PAGESIZE)
			env->me_psize = MAX_PAGESIZE;
		memset(&meta, 0, sizeof(meta));
		mdb_env_init_meta0(env, &meta);
		meta.mm_mapsize = DEFAULT_MAPSIZE;
	} else {
		env->me_psize = meta.mm_psize;
	}

	/* Was a mapsize configured? */
	if (!env->me_mapsize) {
		env->me_mapsize = meta.mm_mapsize;
	}
	{
		/* Make sure mapsize >= committed data size.  Even when using
		 * mm_mapsize, which could be broken in old files (ITS#7789).
		 */
		mdb_size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize;
		if (env->me_mapsize < minsize)
			env->me_mapsize = minsize;
	}
	meta.mm_mapsize = env->me_mapsize;

	if (newenv && !(flags & MDB_FIXEDMAP)) {
		/* mdb_env_map() may grow the datafile.  Write the metapages
		 * first, so the file will be valid if initialization fails.
		 * Except with FIXEDMAP, since we do not yet know mm_address.
		 * We could fill in mm_address later, but then a different
		 * program might end up doing that - one with a memory layout
		 * and map address which does not suit the main program.
		 */
		rc = mdb_env_init_meta(env, &meta);
		if (rc)
			return rc;
		newenv = 0;
	}
#ifdef _WIN32
	/* For FIXEDMAP, make sure the file is non-empty before we attempt to map it */
	if (newenv) {
		char dummy = 0;
		DWORD len;
		rc = WriteFile(env->me_fd, &dummy, 1, &len, NULL);
		if (!rc) {
			rc = ErrCode();
			return rc;
		}
	}
#endif

	rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? meta.mm_address : NULL);
	if (rc)
		return rc;

	if (newenv) {
		if (flags & MDB_FIXEDMAP)
			meta.mm_address = env->me_map;
		i = mdb_env_init_meta(env, &meta);
		if (i != MDB_SUCCESS) {
			return i;
		}
	}

	/* Derive the limits that depend on the page size */
	env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
	env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2)
		- sizeof(indx_t);
#if !(MDB_MAXKEYSIZE)
	env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db));
#endif
	env->me_maxpg = env->me_mapsize / env->me_psize;

#if MDB_DEBUG
	{
		MDB_meta *meta = mdb_env_pick_meta(env);
		MDB_db *db = &meta->mm_dbs[MAIN_DBI];

		DPRINTF(("opened database version %u, pagesize %u",
			meta->mm_version, env->me_psize));
		DPRINTF(("using meta page %d",  (int) (meta->mm_txnid & 1)));
		DPRINTF(("depth: %u",           db->md_depth));
		DPRINTF(("entries: %"Yu,        db->md_entries));
		DPRINTF(("branch pages: %"Yu,   db->md_branch_pages));
		DPRINTF(("leaf pages: %"Yu,     db->md_leaf_pages));
		DPRINTF(("overflow pages: %"Yu, db->md_overflow_pages));
		DPRINTF(("root: %"Yu,           db->md_root));
	}
#endif

	return MDB_SUCCESS;
}
5029 
5030 
/** Release a reader thread's slot in the reader lock table.
 *	This function is called automatically when a thread exits.
 * @param[in] ptr This points to the slot in the reader lock table.
 */
static void
mdb_env_reader_dest(void *ptr)
{
	MDB_reader *reader = ptr;

	/* Only the owning process may release the slot */
#ifndef _WIN32
	if (reader->mr_pid == getpid()) /* catch pthread_exit() in child process */
#endif
		/* We omit the mutex, so do this atomically (i.e. skip mr_txnid) */
		reader->mr_pid = 0;
}
5046 
#ifdef _WIN32
/** Junk for arranging thread-specific callbacks on Windows. This is
 *	necessarily platform and compiler-specific. Windows supports up
 *	to 1088 keys. Let's assume nobody opens more than 64 environments
 *	in a single process, for now. They can override this if needed.
 */
#ifndef MAX_TLS_KEYS
#define MAX_TLS_KEYS	64
#endif
/* Registry of TLS keys, one per open environment using TLS */
static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS];
static int mdb_tls_nkeys;

/** TLS image callback: emulate pthread key destructors on thread exit */
static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr)
{
	int i;
	switch(reason) {
	case DLL_PROCESS_ATTACH: break;
	case DLL_THREAD_ATTACH: break;
	case DLL_THREAD_DETACH:
		/* Run the reader-slot destructor for every registered key */
		for (i=0; i<mdb_tls_nkeys; i++) {
			MDB_reader *r = pthread_getspecific(mdb_tls_keys[i]);
			if (r) {
				mdb_env_reader_dest(r);
			}
		}
		break;
	case DLL_PROCESS_DETACH: break;
	}
}
#ifdef __GNUC__
#ifdef _WIN64
const PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback;
#else
PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback;
#endif
#else
#ifdef _WIN64
/* Force some symbol references.
 *	_tls_used forces the linker to create the TLS directory if not already done
 *	mdb_tls_cbp prevents whole-program-optimizer from dropping the symbol.
 */
#pragma comment(linker, "/INCLUDE:_tls_used")
#pragma comment(linker, "/INCLUDE:mdb_tls_cbp")
#pragma const_seg(".CRT$XLB")
extern const PIMAGE_TLS_CALLBACK mdb_tls_cbp;
const PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
#pragma const_seg()
#else	/* _WIN32 */
#pragma comment(linker, "/INCLUDE:__tls_used")
#pragma comment(linker, "/INCLUDE:_mdb_tls_cbp")
#pragma data_seg(".CRT$XLB")
PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback;
#pragma data_seg()
#endif	/* WIN 32/64 */
#endif	/* !__GNUC__ */
#endif
5103 
/** Downgrade the exclusive lock on the region back to shared */
static int ESECT
mdb_env_share_locks(MDB_env *env, int *excl)
{
	int rc = 0;
	MDB_meta *meta = mdb_env_pick_meta(env);

	/* Publish the newest committed txnid before letting others in */
	env->me_txns->mti_txnid = meta->mm_txnid;

#ifdef _WIN32
	{
		OVERLAPPED ov;
		/* First acquire a shared lock. The Unlock will
		 * then release the existing exclusive lock.
		 */
		memset(&ov, 0, sizeof(ov));
		if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
			rc = ErrCode();
		} else {
			UnlockFile(env->me_lfd, 0, 0, 1, 0);
			*excl = 0;
		}
	}
#else
	{
		struct flock lock_info;
		/* The shared lock replaces the existing lock */
		memset((void *)&lock_info, 0, sizeof(lock_info));
		lock_info.l_type = F_RDLCK;
		lock_info.l_whence = SEEK_SET;
		lock_info.l_start = 0;
		lock_info.l_len = 1;
		/* Retry if interrupted by a signal */
		while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
				(rc = ErrCode()) == EINTR) ;
		*excl = rc ? -1 : 0;	/* error may mean we lost the lock */
	}
#endif

	return rc;
}
5144 
/** Try to get exclusive lock, otherwise shared.
 *	Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive.
 */
static int ESECT
mdb_env_excl_lock(MDB_env *env, int *excl)
{
	int rc = 0;
#ifdef _WIN32
	if (LockFile(env->me_lfd, 0, 0, 1, 0)) {
		*excl = 1;
	} else {
		OVERLAPPED ov;
		memset(&ov, 0, sizeof(ov));
		/* Exclusive attempt failed; take a shared lock instead */
		if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) {
			*excl = 0;
		} else {
			rc = ErrCode();
		}
	}
#else
	struct flock lock_info;
	memset((void *)&lock_info, 0, sizeof(lock_info));
	lock_info.l_type = F_WRLCK;
	lock_info.l_whence = SEEK_SET;
	lock_info.l_start = 0;
	lock_info.l_len = 1;
	/* Non-blocking exclusive attempt, retried on EINTR */
	while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) &&
			(rc = ErrCode()) == EINTR) ;
	if (!rc) {
		*excl = 1;
	} else
# ifndef MDB_USE_POSIX_MUTEX
	if (*excl < 0) /* always true when MDB_USE_POSIX_MUTEX */
# endif
	{
		/* Fall back to a blocking shared lock */
		lock_info.l_type = F_RDLCK;
		while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) &&
				(rc = ErrCode()) == EINTR) ;
		if (rc == 0)
			*excl = 0;
	}
#endif
	return rc;
}
5189 
5190 #ifdef MDB_USE_HASH
5191 /*
5192  * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code
5193  *
5194  * @(#) $Revision: 5.1 $
5195  * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $
5196  * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $
5197  *
5198  *	  http://www.isthe.com/chongo/tech/comp/fnv/index.html
5199  *
5200  ***
5201  *
5202  * Please do not copyright this code.  This code is in the public domain.
5203  *
5204  * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
5205  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
5206  * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
5207  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
5208  * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
5209  * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
5210  * PERFORMANCE OF THIS SOFTWARE.
5211  *
5212  * By:
5213  *	chongo <Landon Curt Noll> /\oo/\
5214  *	  http://www.isthe.com/chongo/
5215  *
5216  * Share and Enjoy!	:-)
5217  */
5218 
5219 /** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer
5220  * @param[in] val	value to hash
5221  * @param[in] len	length of value
5222  * @return 64 bit hash
5223  */
5224 static mdb_hash_t
mdb_hash(const void * val,size_t len)5225 mdb_hash(const void *val, size_t len)
5226 {
5227 	const unsigned char *s = (const unsigned char *) val, *end = s + len;
5228 	mdb_hash_t hval = 0xcbf29ce484222325ULL;
5229 	/*
5230 	 * FNV-1a hash each octet of the buffer
5231 	 */
5232 	while (s < end) {
5233 		hval = (hval ^ *s++) * 0x100000001b3ULL;
5234 	}
5235 	/* return our new hash value */
5236 	return hval;
5237 }
5238 
5239 /** Hash the string and output the encoded hash.
5240  * This uses modified RFC1924 Ascii85 encoding to accommodate systems with
5241  * very short name limits. We don't care about the encoding being reversible,
5242  * we just want to preserve as many bits of the input as possible in a
5243  * small printable string.
5244  * @param[in] str string to hash
5245  * @param[out] encbuf an array of 11 chars to hold the hash
5246  */
5247 static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
5248 
5249 static void ESECT
mdb_pack85(unsigned long long l,char * out)5250 mdb_pack85(unsigned long long l, char *out)
5251 {
5252 	int i;
5253 
5254 	for (i=0; i<10 && l; i++) {
5255 		*out++ = mdb_a85[l % 85];
5256 		l /= 85;
5257 	}
5258 	*out = '\0';
5259 }
5260 
/** Init #MDB_env.me_mutexname[] except the char which #MUTEXNAME() will set.
 *	Changes to this code must be reflected in #MDB_LOCK_FORMAT.
 */
static void ESECT
mdb_env_mname_init(MDB_env *env)
{
	char *nm = env->me_mutexname;
	strcpy(nm, MUTEXNAME_PREFIX);
	/* Encode the per-file mutex id after the fixed prefix */
	mdb_pack85(env->me_txns->mti_mutexid, nm + sizeof(MUTEXNAME_PREFIX));
}

/** Return env->me_mutexname after filling in ch ('r'/'w') for convenience */
#define MUTEXNAME(env, ch) ( \
		(void) ((env)->me_mutexname[sizeof(MUTEXNAME_PREFIX)-1] = (ch)), \
		(env)->me_mutexname)
5276 
5277 #endif
5278 
5279 /** Open and/or initialize the lock region for the environment.
5280  * @param[in] env The LMDB environment.
5281  * @param[in] fname Filename + scratch area, from #mdb_fname_init().
5282  * @param[in] mode The Unix permissions for the file, if we create it.
5283  * @param[in,out] excl In -1, out lock type: -1 none, 0 shared, 1 exclusive
5284  * @return 0 on success, non-zero on failure.
5285  */
5286 static int ESECT
mdb_env_setup_locks(MDB_env * env,MDB_name * fname,int mode,int * excl)5287 mdb_env_setup_locks(MDB_env *env, MDB_name *fname, int mode, int *excl)
5288 {
5289 #ifdef _WIN32
5290 #	define MDB_ERRCODE_ROFS	ERROR_WRITE_PROTECT
5291 #else
5292 #	define MDB_ERRCODE_ROFS	EROFS
5293 #endif
5294 #ifdef MDB_USE_SYSV_SEM
5295 	int semid;
5296 	union semun semu;
5297 #endif
5298 	int rc;
5299 	MDB_OFF_T size, rsize;
5300 
5301 	rc = mdb_fopen(env, fname, MDB_O_LOCKS, mode, &env->me_lfd);
5302 	if (rc) {
5303 		/* Omit lockfile if read-only env on read-only filesystem */
5304 		if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) {
5305 			return MDB_SUCCESS;
5306 		}
5307 		goto fail;
5308 	}
5309 
5310 	if (!(env->me_flags & MDB_NOTLS)) {
5311 		rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest);
5312 		if (rc)
5313 			goto fail;
5314 		env->me_flags |= MDB_ENV_TXKEY;
5315 #ifdef _WIN32
5316 		/* Windows TLS callbacks need help finding their TLS info. */
5317 		if (mdb_tls_nkeys >= MAX_TLS_KEYS) {
5318 			rc = MDB_TLS_FULL;
5319 			goto fail;
5320 		}
5321 		mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey;
5322 #endif
5323 	}
5324 
5325 	/* Try to get exclusive lock. If we succeed, then
5326 	 * nobody is using the lock region and we should initialize it.
5327 	 */
5328 	if ((rc = mdb_env_excl_lock(env, excl))) goto fail;
5329 
5330 #ifdef _WIN32
5331 	size = GetFileSize(env->me_lfd, NULL);
5332 #else
5333 	size = lseek(env->me_lfd, 0, SEEK_END);
5334 	if (size == -1) goto fail_errno;
5335 #endif
5336 	rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo);
5337 	if (size < rsize && *excl > 0) {
5338 #ifdef _WIN32
5339 		if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize
5340 			|| !SetEndOfFile(env->me_lfd))
5341 			goto fail_errno;
5342 #else
5343 		if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno;
5344 #endif
5345 	} else {
5346 		rsize = size;
5347 		size = rsize - sizeof(MDB_txninfo);
5348 		env->me_maxreaders = size/sizeof(MDB_reader) + 1;
5349 	}
5350 	{
5351 #ifdef _WIN32
5352 		HANDLE mh;
5353 		mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE,
5354 			0, 0, NULL);
5355 		if (!mh) goto fail_errno;
5356 		env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL);
5357 		CloseHandle(mh);
5358 		if (!env->me_txns) goto fail_errno;
5359 #else
5360 		void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED,
5361 			env->me_lfd, 0);
5362 		if (m == MAP_FAILED) goto fail_errno;
5363 		env->me_txns = m;
5364 #endif
5365 	}
5366 	if (*excl > 0) {
5367 #ifdef _WIN32
5368 		BY_HANDLE_FILE_INFORMATION stbuf;
5369 		struct {
5370 			DWORD volume;
5371 			DWORD nhigh;
5372 			DWORD nlow;
5373 		} idbuf;
5374 
5375 		if (!mdb_sec_inited) {
5376 			InitializeSecurityDescriptor(&mdb_null_sd,
5377 				SECURITY_DESCRIPTOR_REVISION);
5378 			SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE);
5379 			mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES);
5380 			mdb_all_sa.bInheritHandle = FALSE;
5381 			mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd;
5382 			mdb_sec_inited = 1;
5383 		}
5384 		if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno;
5385 		idbuf.volume = stbuf.dwVolumeSerialNumber;
5386 		idbuf.nhigh  = stbuf.nFileIndexHigh;
5387 		idbuf.nlow   = stbuf.nFileIndexLow;
5388 		env->me_txns->mti_mutexid = mdb_hash(&idbuf, sizeof(idbuf));
5389 		mdb_env_mname_init(env);
5390 		env->me_rmutex = CreateMutexA(&mdb_all_sa, FALSE, MUTEXNAME(env, 'r'));
5391 		if (!env->me_rmutex) goto fail_errno;
5392 		env->me_wmutex = CreateMutexA(&mdb_all_sa, FALSE, MUTEXNAME(env, 'w'));
5393 		if (!env->me_wmutex) goto fail_errno;
5394 #elif defined(MDB_USE_POSIX_SEM)
5395 		struct stat stbuf;
5396 		struct {
5397 			dev_t dev;
5398 			ino_t ino;
5399 		} idbuf;
5400 
5401 #if defined(__NetBSD__)
5402 #define	MDB_SHORT_SEMNAMES	1	/* limited to 14 chars */
5403 #endif
5404 		if (fstat(env->me_lfd, &stbuf)) goto fail_errno;
5405 		memset(&idbuf, 0, sizeof(idbuf));
5406 		idbuf.dev = stbuf.st_dev;
5407 		idbuf.ino = stbuf.st_ino;
5408 		env->me_txns->mti_mutexid = mdb_hash(&idbuf, sizeof(idbuf))
5409 #ifdef MDB_SHORT_SEMNAMES
5410 			/* Max 9 base85-digits.  We truncate here instead of in
5411 			 * mdb_env_mname_init() to keep the latter portable.
5412 			 */
5413 			% ((mdb_hash_t)85*85*85*85*85*85*85*85*85)
5414 #endif
5415 			;
5416 		mdb_env_mname_init(env);
5417 		/* Clean up after a previous run, if needed:  Try to
5418 		 * remove both semaphores before doing anything else.
5419 		 */
5420 		sem_unlink(MUTEXNAME(env, 'r'));
5421 		sem_unlink(MUTEXNAME(env, 'w'));
5422 		env->me_rmutex = sem_open(MUTEXNAME(env, 'r'), O_CREAT|O_EXCL, mode, 1);
5423 		if (env->me_rmutex == SEM_FAILED) goto fail_errno;
5424 		env->me_wmutex = sem_open(MUTEXNAME(env, 'w'), O_CREAT|O_EXCL, mode, 1);
5425 		if (env->me_wmutex == SEM_FAILED) goto fail_errno;
5426 #elif defined(MDB_USE_SYSV_SEM)
5427 		unsigned short vals[2] = {1, 1};
5428 		key_t key = ftok(fname->mn_val, 'M'); /* fname is lockfile path now */
5429 		if (key == -1)
5430 			goto fail_errno;
5431 		semid = semget(key, 2, (mode & 0777) | IPC_CREAT);
5432 		if (semid < 0)
5433 			goto fail_errno;
5434 		semu.array = vals;
5435 		if (semctl(semid, 0, SETALL, semu) < 0)
5436 			goto fail_errno;
5437 		env->me_txns->mti_semid = semid;
5438 		env->me_txns->mti_rlocked = 0;
5439 		env->me_txns->mti_wlocked = 0;
5440 #else	/* MDB_USE_POSIX_MUTEX: */
5441 		pthread_mutexattr_t mattr;
5442 
5443 		/* Solaris needs this before initing a robust mutex.  Otherwise
5444 		 * it may skip the init and return EBUSY "seems someone already
5445 		 * inited" or EINVAL "it was inited differently".
5446 		 */
5447 		memset(env->me_txns->mti_rmutex, 0, sizeof(*env->me_txns->mti_rmutex));
5448 		memset(env->me_txns->mti_wmutex, 0, sizeof(*env->me_txns->mti_wmutex));
5449 
5450 		if ((rc = pthread_mutexattr_init(&mattr)) != 0)
5451 			goto fail;
5452 		rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
5453 #ifdef MDB_ROBUST_SUPPORTED
5454 		if (!rc) rc = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
5455 #endif
5456 		if (!rc) rc = pthread_mutex_init(env->me_txns->mti_rmutex, &mattr);
5457 		if (!rc) rc = pthread_mutex_init(env->me_txns->mti_wmutex, &mattr);
5458 		pthread_mutexattr_destroy(&mattr);
5459 		if (rc)
5460 			goto fail;
5461 #endif	/* _WIN32 || ... */
5462 
5463 		env->me_txns->mti_magic = MDB_MAGIC;
5464 		env->me_txns->mti_format = MDB_LOCK_FORMAT;
5465 		env->me_txns->mti_txnid = 0;
5466 		env->me_txns->mti_numreaders = 0;
5467 
5468 	} else {
5469 #ifdef MDB_USE_SYSV_SEM
5470 		struct semid_ds buf;
5471 #endif
5472 		if (env->me_txns->mti_magic != MDB_MAGIC) {
5473 			DPUTS("lock region has invalid magic");
5474 			rc = MDB_INVALID;
5475 			goto fail;
5476 		}
5477 		if (env->me_txns->mti_format != MDB_LOCK_FORMAT) {
5478 			DPRINTF(("lock region has format+version 0x%x, expected 0x%x",
5479 				env->me_txns->mti_format, MDB_LOCK_FORMAT));
5480 			rc = MDB_VERSION_MISMATCH;
5481 			goto fail;
5482 		}
5483 		rc = ErrCode();
5484 		if (rc && rc != EACCES && rc != EAGAIN) {
5485 			goto fail;
5486 		}
5487 #ifdef _WIN32
5488 		mdb_env_mname_init(env);
5489 		env->me_rmutex = OpenMutexA(SYNCHRONIZE, FALSE, MUTEXNAME(env, 'r'));
5490 		if (!env->me_rmutex) goto fail_errno;
5491 		env->me_wmutex = OpenMutexA(SYNCHRONIZE, FALSE, MUTEXNAME(env, 'w'));
5492 		if (!env->me_wmutex) goto fail_errno;
5493 #elif defined(MDB_USE_POSIX_SEM)
5494 		mdb_env_mname_init(env);
5495 		env->me_rmutex = sem_open(MUTEXNAME(env, 'r'), 0);
5496 		if (env->me_rmutex == SEM_FAILED) goto fail_errno;
5497 		env->me_wmutex = sem_open(MUTEXNAME(env, 'w'), 0);
5498 		if (env->me_wmutex == SEM_FAILED) goto fail_errno;
5499 #elif defined(MDB_USE_SYSV_SEM)
5500 		semid = env->me_txns->mti_semid;
5501 		semu.buf = &buf;
5502 		/* check for read access */
5503 		if (semctl(semid, 0, IPC_STAT, semu) < 0)
5504 			goto fail_errno;
5505 		/* check for write access */
5506 		if (semctl(semid, 0, IPC_SET, semu) < 0)
5507 			goto fail_errno;
5508 #endif
5509 	}
5510 #ifdef MDB_USE_SYSV_SEM
5511 	env->me_rmutex->semid = semid;
5512 	env->me_wmutex->semid = semid;
5513 	env->me_rmutex->semnum = 0;
5514 	env->me_wmutex->semnum = 1;
5515 	env->me_rmutex->locked = &env->me_txns->mti_rlocked;
5516 	env->me_wmutex->locked = &env->me_txns->mti_wlocked;
5517 #endif
5518 
5519 	return MDB_SUCCESS;
5520 
5521 fail_errno:
5522 	rc = ErrCode();
5523 fail:
5524 	return rc;
5525 }
5526 
5527 	/** Only a subset of the @ref mdb_env flags can be changed
5528 	 *	at runtime. Changing other flags requires closing the
5529 	 *	environment and re-opening it with the new flags.
5530 	 */
5531 #define	CHANGEABLE	(MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT)
5532 #define	CHANGELESS	(MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \
5533 	MDB_WRITEMAP|MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD|MDB_PREVSNAPSHOT)
5534 
5535 #if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS)
5536 # error "Persistent DB flags & env flags overlap, but both go in mm_flags"
5537 #endif
5538 
/** Open an environment handle.
 * Sequence: validate flags, build the filesystem name, allocate writer
 * bookkeeping, set up the lock region, open the data file, read/verify
 * the meta pages via mdb_env_open2(), then (for writers) preallocate the
 * reusable write transaction.  On any failure, everything allocated so
 * far is torn down through mdb_env_close0() at the "leave" label.
 */
int ESECT
mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode)
{
	int rc, excl = -1;	/* excl > 0 while we hold the exclusive lockfile lock */
	MDB_name fname;

	/* Reject a second open on the same handle and any unknown flag bits */
	if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS)))
		return EINVAL;

#ifdef MDB_VL32
	if (flags & MDB_WRITEMAP) {
		/* silently ignore WRITEMAP in 32 bit mode */
		flags ^= MDB_WRITEMAP;
	}
	if (flags & MDB_FIXEDMAP) {
		/* cannot support FIXEDMAP */
		return EINVAL;
	}
#endif
	/* Merge with flags set earlier (e.g. by mdb_env_set_flags()) */
	flags |= env->me_flags;

	rc = mdb_fname_init(path, flags, &fname);
	if (rc)
		return rc;

#ifdef MDB_VL32
#ifdef _WIN32
	env->me_rpmutex = CreateMutex(NULL, FALSE, NULL);
	if (!env->me_rpmutex) {
		rc = ErrCode();
		goto leave;
	}
#else
	rc = pthread_mutex_init(&env->me_rpmutex, NULL);
	if (rc)
		goto leave;
#endif
#endif
	flags |= MDB_ENV_ACTIVE;	/* tell mdb_env_close0() to clean up */

	if (flags & MDB_RDONLY) {
		/* silently ignore WRITEMAP when we're only getting read access */
		flags &= ~MDB_WRITEMAP;
	} else {
		/* Writers need the free-page IDL and the dirty-page list */
		if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) &&
			  (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2)))))
			rc = ENOMEM;
	}

	env->me_flags = flags;
	if (rc)
		goto leave;

#ifdef MDB_VL32
	{
		/* Per-env cache of mapped read-only page chunks */
		env->me_rpages = malloc(MDB_ERPAGE_SIZE * sizeof(MDB_ID3));
		if (!env->me_rpages) {
			rc = ENOMEM;
			goto leave;
		}
		env->me_rpages[0].mid = 0;
		env->me_rpcheck = MDB_ERPAGE_SIZE/2;
	}
#endif

	/* Per-DBI tables, sized by the configured me_maxdbs */
	env->me_path = strdup(path);
	env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx));
	env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t));
	env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int));
	if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) {
		rc = ENOMEM;
		goto leave;
	}
	env->me_dbxs[FREE_DBI].md_cmp = mdb_cmp_long; /* aligned MDB_INTEGERKEY */

	/* For RDONLY, get lockfile after we know datafile exists */
	if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) {
		rc = mdb_env_setup_locks(env, &fname, mode, &excl);
		if (rc)
			goto leave;
		if ((flags & MDB_PREVSNAPSHOT) && !excl) {
			/* PREVSNAPSHOT requires exclusive use of the env */
			rc = EAGAIN;
			goto leave;
		}
	}

	rc = mdb_fopen(env, &fname,
		(flags & MDB_RDONLY) ? MDB_O_RDONLY : MDB_O_RDWR,
		mode, &env->me_fd);
	if (rc)
		goto leave;
#ifdef _WIN32
	/* Separate handle for overlapped (async) writes */
	rc = mdb_fopen(env, &fname, MDB_O_OVERLAPPED, mode, &env->me_ovfd);
	if (rc)
		goto leave;
#endif

	if ((flags & (MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) {
		rc = mdb_env_setup_locks(env, &fname, mode, &excl);
		if (rc)
			goto leave;
	}

	if ((rc = mdb_env_open2(env, flags & MDB_PREVSNAPSHOT)) == MDB_SUCCESS) {
		/* Synchronous fd for meta writes. Needed even with
		 * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset.
		 */
		rc = mdb_fopen(env, &fname, MDB_O_META, mode, &env->me_mfd);
		if (rc)
			goto leave;

		DPRINTF(("opened dbenv %p", (void *) env));
		if (excl > 0 && !(flags & MDB_PREVSNAPSHOT)) {
			/* Downgrade our exclusive lockfile lock to shared */
			rc = mdb_env_share_locks(env, &excl);
			if (rc)
				goto leave;
		}
		if (!(flags & MDB_RDONLY)) {
			MDB_txn *txn;
			/* Allocate the reusable write txn plus its per-DBI arrays
			 * (one MDB_db, cursor pointer, seq number and flag byte
			 * each) in a single contiguous chunk.
			 */
			int tsize = sizeof(MDB_txn), size = tsize + env->me_maxdbs *
				(sizeof(MDB_db)+sizeof(MDB_cursor *)+sizeof(unsigned int)+1);
			if ((env->me_pbuf = calloc(1, env->me_psize)) &&
				(txn = calloc(1, size)))
			{
				/* Carve the per-DBI arrays out of the single allocation */
				txn->mt_dbs = (MDB_db *)((char *)txn + tsize);
				txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
				txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs);
				txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs);
				txn->mt_env = env;
#ifdef MDB_VL32
				txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3));
				if (!txn->mt_rpages) {
					free(txn);
					rc = ENOMEM;
					goto leave;
				}
				txn->mt_rpages[0].mid = 0;
				txn->mt_rpcheck = MDB_TRPAGE_SIZE/2;
#endif
				txn->mt_dbxs = env->me_dbxs;
				txn->mt_flags = MDB_TXN_FINISHED;
				env->me_txn0 = txn;
			} else {
				rc = ENOMEM;
			}
		}
	}

leave:
	if (rc) {
		mdb_env_close0(env, excl);
	}
	mdb_fname_destroy(fname);
	return rc;
}
5694 
/** Destroy resources from mdb_env_open(), clear our readers & DBIs.
 * Safe to call on a partially-initialized env; does nothing unless
 * MDB_ENV_ACTIVE is set.  @a excl is the lockfile state from open
 * (-1 unknown, 0 shared, >0 exclusive), used to decide whether we
 * may delete shared mutexes/semaphores.
 */
static void ESECT
mdb_env_close0(MDB_env *env, int excl)
{
	int i;

	if (!(env->me_flags & MDB_ENV_ACTIVE))
		return;

	/* Doing this here since me_dbxs may not exist during mdb_env_close */
	if (env->me_dbxs) {
		for (i = env->me_maxdbs; --i >= CORE_DBS; )
			free(env->me_dbxs[i].md_name.mv_data);
		free(env->me_dbxs);
	}

	/* Release heap allocations made in mdb_env_open() */
	free(env->me_pbuf);
	free(env->me_dbiseqs);
	free(env->me_dbflags);
	free(env->me_path);
	free(env->me_dirty_list);
#ifdef MDB_VL32
	if (env->me_txn0 && env->me_txn0->mt_rpages)
		free(env->me_txn0->mt_rpages);
	if (env->me_rpages) {
		/* Unmap every cached read-only page chunk */
		MDB_ID3L el = env->me_rpages;
		unsigned int x;
		for (x=1; x<=el[0].mid; x++)
			munmap(el[x].mptr, el[x].mcnt * env->me_psize);
		free(el);
	}
#endif
	free(env->me_txn0);
	mdb_midl_free(env->me_free_pgs);

	if (env->me_flags & MDB_ENV_TXKEY) {
		pthread_key_delete(env->me_txkey);
#ifdef _WIN32
		/* Delete our key from the global list */
		for (i=0; i<mdb_tls_nkeys; i++)
			if (mdb_tls_keys[i] == env->me_txkey) {
				mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1];
				mdb_tls_nkeys--;
				break;
			}
#endif
	}

	if (env->me_map) {
#ifdef MDB_VL32
		/* VL32 only maps the meta pages persistently */
		munmap(env->me_map, NUM_METAS*env->me_psize);
#else
		munmap(env->me_map, env->me_mapsize);
#endif
	}
	if (env->me_mfd != INVALID_HANDLE_VALUE)
		(void) close(env->me_mfd);
#ifdef _WIN32
	if (env->ovs > 0) {
		/* Release events used for overlapped writes */
		for (i = 0; i < env->ovs; i++) {
			CloseHandle(env->ov[i].hEvent);
		}
		free(env->ov);
	}
	if (env->me_ovfd != INVALID_HANDLE_VALUE)
		(void) close(env->me_ovfd);
#endif
	if (env->me_fd != INVALID_HANDLE_VALUE)
		(void) close(env->me_fd);
	if (env->me_txns) {
		MDB_PID_T pid = getpid();
		/* Clearing readers is done in this function because
		 * me_txkey with its destructor must be disabled first.
		 *
		 * We skip the reader mutex, so we touch only
		 * data owned by this process (me_close_readers and
		 * our readers), and clear each reader atomically.
		 */
		for (i = env->me_close_readers; --i >= 0; )
			if (env->me_txns->mti_readers[i].mr_pid == pid)
				env->me_txns->mti_readers[i].mr_pid = 0;
#ifdef _WIN32
		if (env->me_rmutex) {
			CloseHandle(env->me_rmutex);
			if (env->me_wmutex) CloseHandle(env->me_wmutex);
		}
		/* Windows automatically destroys the mutexes when
		 * the last handle closes.
		 */
#elif defined(MDB_USE_POSIX_SEM)
		if (env->me_rmutex != SEM_FAILED) {
			sem_close(env->me_rmutex);
			if (env->me_wmutex != SEM_FAILED)
				sem_close(env->me_wmutex);
			/* If we have the filelock:  If we are the
			 * only remaining user, clean up semaphores.
			 */
			if (excl == 0)
				mdb_env_excl_lock(env, &excl);
			if (excl > 0) {
				sem_unlink(MUTEXNAME(env, 'r'));
				sem_unlink(MUTEXNAME(env, 'w'));
			}
		}
#elif defined(MDB_USE_SYSV_SEM)
		if (env->me_rmutex->semid != -1) {
			/* If we have the filelock:  If we are the
			 * only remaining user, clean up semaphores.
			 */
			if (excl == 0)
				mdb_env_excl_lock(env, &excl);
			if (excl > 0)
				semctl(env->me_rmutex->semid, 0, IPC_RMID);
		}
#elif defined(MDB_ROBUST_SUPPORTED)
		/* If we have the filelock:  If we are the
		 * only remaining user, clean up robust
		 * mutexes.
		 */
		if (excl == 0)
			mdb_env_excl_lock(env, &excl);
		if (excl > 0) {
			pthread_mutex_destroy(env->me_txns->mti_rmutex);
			pthread_mutex_destroy(env->me_txns->mti_wmutex);
		}
#endif
		munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo));
	}
	if (env->me_lfd != INVALID_HANDLE_VALUE) {
#ifdef _WIN32
		if (excl >= 0) {
			/* Unlock the lockfile.  Windows would have unlocked it
			 * after closing anyway, but not necessarily at once.
			 */
			UnlockFile(env->me_lfd, 0, 0, 1, 0);
		}
#endif
		(void) close(env->me_lfd);
	}
#ifdef MDB_VL32
#ifdef _WIN32
	if (env->me_fmh) CloseHandle(env->me_fmh);
	if (env->me_rpmutex) CloseHandle(env->me_rpmutex);
#else
	pthread_mutex_destroy(&env->me_rpmutex);
#endif
#endif

	env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY);
}
5845 
5846 void ESECT
mdb_env_close(MDB_env * env)5847 mdb_env_close(MDB_env *env)
5848 {
5849 	MDB_page *dp;
5850 
5851 	if (env == NULL)
5852 		return;
5853 
5854 	VGMEMP_DESTROY(env);
5855 	while ((dp = env->me_dpages) != NULL) {
5856 		VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next));
5857 		env->me_dpages = dp->mp_next;
5858 		free(dp);
5859 	}
5860 
5861 	mdb_env_close0(env, 0);
5862 	free(env);
5863 }
5864 
5865 /** Compare two items pointing at aligned #mdb_size_t's */
5866 static int
mdb_cmp_long(const MDB_val * a,const MDB_val * b)5867 mdb_cmp_long(const MDB_val *a, const MDB_val *b)
5868 {
5869 	return (*(mdb_size_t *)a->mv_data < *(mdb_size_t *)b->mv_data) ? -1 :
5870 		*(mdb_size_t *)a->mv_data > *(mdb_size_t *)b->mv_data;
5871 }
5872 
5873 /** Compare two items pointing at aligned unsigned int's.
5874  *
5875  *	This is also set as #MDB_INTEGERDUP|#MDB_DUPFIXED's #MDB_dbx.%md_dcmp,
5876  *	but #mdb_cmp_clong() is called instead if the data type is #mdb_size_t.
5877  */
5878 static int
mdb_cmp_int(const MDB_val * a,const MDB_val * b)5879 mdb_cmp_int(const MDB_val *a, const MDB_val *b)
5880 {
5881 	return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 :
5882 		*(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data;
5883 }
5884 
/** Compare two items pointing at unsigned ints of unknown alignment.
 *	Nodes and keys are guaranteed to be 2-byte aligned.
 *	NOTE: only a->mv_size is consulted — both items are read over the
 *	same byte count, so callers are expected to pass equal-sized items
 *	(this is the fixed-size MDB_INTEGERDUP case).
 */
static int
mdb_cmp_cint(const MDB_val *a, const MDB_val *b)
{
#if BYTE_ORDER == LITTLE_ENDIAN
	unsigned short *u, *c;
	int x;

	/* Little-endian: the most significant 16-bit word is stored last,
	 * so compare word-by-word from the tail toward the head.
	 */
	u = (unsigned short *) ((char *) a->mv_data + a->mv_size);
	c = (unsigned short *) ((char *) b->mv_data + a->mv_size);
	do {
		x = *--u - *--c;
	} while(!x && u > (unsigned short *)a->mv_data);
	return x;
#else
	/* Big-endian: most significant word first, scan forward. */
	unsigned short *u, *c, *end;
	int x;

	end = (unsigned short *) ((char *) a->mv_data + a->mv_size);
	u = (unsigned short *)a->mv_data;
	c = (unsigned short *)b->mv_data;
	do {
		x = *u++ - *c++;
	} while(!x && u < end);
	return x;
#endif
}
5914 
5915 /** Compare two items lexically */
5916 static int
mdb_cmp_memn(const MDB_val * a,const MDB_val * b)5917 mdb_cmp_memn(const MDB_val *a, const MDB_val *b)
5918 {
5919 	int diff;
5920 	ssize_t len_diff;
5921 	unsigned int len;
5922 
5923 	len = a->mv_size;
5924 	len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size;
5925 	if (len_diff > 0) {
5926 		len = b->mv_size;
5927 		len_diff = 1;
5928 	}
5929 
5930 	diff = memcmp(a->mv_data, b->mv_data, len);
5931 	return diff ? diff : len_diff<0 ? -1 : len_diff;
5932 }
5933 
5934 /** Compare two items in reverse byte order */
5935 static int
mdb_cmp_memnr(const MDB_val * a,const MDB_val * b)5936 mdb_cmp_memnr(const MDB_val *a, const MDB_val *b)
5937 {
5938 	const unsigned char	*p1, *p2, *p1_lim;
5939 	ssize_t len_diff;
5940 	int diff;
5941 
5942 	p1_lim = (const unsigned char *)a->mv_data;
5943 	p1 = (const unsigned char *)a->mv_data + a->mv_size;
5944 	p2 = (const unsigned char *)b->mv_data + b->mv_size;
5945 
5946 	len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size;
5947 	if (len_diff > 0) {
5948 		p1_lim += len_diff;
5949 		len_diff = 1;
5950 	}
5951 
5952 	while (p1 > p1_lim) {
5953 		diff = *--p1 - *--p2;
5954 		if (diff)
5955 			return diff;
5956 	}
5957 	return len_diff<0 ? -1 : len_diff;
5958 }
5959 
/** Search for key within a page, using binary search.
 * Returns the smallest entry larger or equal to the key.
 * If exactp is non-null, stores whether the found entry was an exact match
 * in *exactp (1 or 0).
 * Updates the cursor index with the index of the found entry.
 * If no entry larger or equal to the key is found, returns NULL.
 * For LEAF2 pages the returned node pointer is a fake (the page stores
 * raw fixed-size keys, not MDB_node structures).
 */
static MDB_node *
mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp)
{
	unsigned int	 i = 0, nkeys;
	int		 low, high;
	int		 rc = 0;
	MDB_page *mp = mc->mc_pg[mc->mc_top];
	MDB_node	*node = NULL;
	MDB_val	 nodekey;
	MDB_cmp_func *cmp;
	DKBUF;

	nkeys = NUMKEYS(mp);

	DPRINTF(("searching %u keys in %s %spage %"Yu,
	    nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "",
	    mdb_dbg_pgno(mp)));

	/* Branch pages never compare against their first (lowest) key */
	low = IS_LEAF(mp) ? 0 : 1;
	high = nkeys - 1;
	cmp = mc->mc_dbx->md_cmp;

	/* Branch pages have no data, so if using integer keys,
	 * alignment is guaranteed. Use faster mdb_cmp_int.
	 */
	if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) {
		if (NODEPTR(mp, 1)->mn_ksize == sizeof(mdb_size_t))
			cmp = mdb_cmp_long;
		else
			cmp = mdb_cmp_int;
	}

	if (IS_LEAF2(mp)) {
		/* LEAF2: fixed-size keys packed back to back, no node headers */
		nodekey.mv_size = mc->mc_db->md_pad;
		node = NODEPTR(mp, 0);	/* fake */
		while (low <= high) {
			i = (low + high) >> 1;
			nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size);
			rc = cmp(key, &nodekey);
			DPRINTF(("found leaf index %u [%s], rc = %i",
			    i, DKEY(&nodekey), rc));
			if (rc == 0)
				break;
			if (rc > 0)
				low = i + 1;
			else
				high = i - 1;
		}
	} else {
		/* Regular page: keys reached through the node headers */
		while (low <= high) {
			i = (low + high) >> 1;

			node = NODEPTR(mp, i);
			nodekey.mv_size = NODEKSZ(node);
			nodekey.mv_data = NODEKEY(node);

			rc = cmp(key, &nodekey);
#if MDB_DEBUG
			if (IS_LEAF(mp))
				DPRINTF(("found leaf index %u [%s], rc = %i",
				    i, DKEY(&nodekey), rc));
			else
				DPRINTF(("found branch index %u [%s -> %"Yu"], rc = %i",
				    i, DKEY(&nodekey), NODEPGNO(node), rc));
#endif
			if (rc == 0)
				break;
			if (rc > 0)
				low = i + 1;
			else
				high = i - 1;
		}
	}

	if (rc > 0) {	/* Found entry is less than the key. */
		i++;	/* Skip to get the smallest entry larger than key. */
		if (!IS_LEAF2(mp))
			node = NODEPTR(mp, i);
	}
	if (exactp)
		*exactp = (rc == 0 && nkeys > 0);
	/* store the key index */
	mc->mc_ki[mc->mc_top] = i;
	if (i >= nkeys)
		/* There is no entry larger or equal to the key. */
		return NULL;

	/* nodeptr is fake for LEAF2 */
	return node;
}
6057 
#if 0
/* NOTE: dead code, compiled out.  Would apply func() to every tracked
 * cursor of this txn/DB that is positioned on the same page as mc.
 */
static void
mdb_cursor_adjust(MDB_cursor *mc, func)
{
	MDB_cursor *m2;

	for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
		if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) {
			func(mc, m2);
		}
	}
}
#endif
6071 
6072 /** Pop a page off the top of the cursor's stack. */
6073 static void
mdb_cursor_pop(MDB_cursor * mc)6074 mdb_cursor_pop(MDB_cursor *mc)
6075 {
6076 	if (mc->mc_snum) {
6077 		DPRINTF(("popping page %"Yu" off db %d cursor %p",
6078 			mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *) mc));
6079 
6080 		mc->mc_snum--;
6081 		if (mc->mc_snum) {
6082 			mc->mc_top--;
6083 		} else {
6084 			mc->mc_flags &= ~C_INITIALIZED;
6085 		}
6086 	}
6087 }
6088 
6089 /** Push a page onto the top of the cursor's stack.
6090  * Set #MDB_TXN_ERROR on failure.
6091  */
6092 static int
mdb_cursor_push(MDB_cursor * mc,MDB_page * mp)6093 mdb_cursor_push(MDB_cursor *mc, MDB_page *mp)
6094 {
6095 	DPRINTF(("pushing page %"Yu" on db %d cursor %p", mp->mp_pgno,
6096 		DDBI(mc), (void *) mc));
6097 
6098 	if (mc->mc_snum >= CURSOR_STACK) {
6099 		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
6100 		return MDB_CURSOR_FULL;
6101 	}
6102 
6103 	mc->mc_top = mc->mc_snum++;
6104 	mc->mc_pg[mc->mc_top] = mp;
6105 	mc->mc_ki[mc->mc_top] = 0;
6106 
6107 	return MDB_SUCCESS;
6108 }
6109 
6110 #ifdef MDB_VL32
6111 /** Map a read-only page.
6112  * There are two levels of tracking in use, a per-txn list and a per-env list.
6113  * ref'ing and unref'ing the per-txn list is faster since it requires no
6114  * locking. Pages are cached in the per-env list for global reuse, and a lock
6115  * is required. Pages are not immediately unmapped when their refcnt goes to
6116  * zero; they hang around in case they will be reused again soon.
6117  *
6118  * When the per-txn list gets full, all pages with refcnt=0 are purged from the
6119  * list and their refcnts in the per-env list are decremented.
6120  *
6121  * When the per-env list gets full, all pages with refcnt=0 are purged from the
6122  * list and their pages are unmapped.
6123  *
6124  * @note "full" means the list has reached its respective rpcheck threshold.
6125  * This threshold slowly raises if no pages could be purged on a given check,
6126  * and returns to its original value when enough pages were purged.
6127  *
6128  * If purging doesn't free any slots, filling the per-txn list will return
6129  * MDB_TXN_FULL, and filling the per-env list returns MDB_MAP_FULL.
6130  *
6131  * Reference tracking in a txn is imperfect, pages can linger with non-zero
6132  * refcnt even without active references. It was deemed to be too invasive
6133  * to add unrefs in every required location. However, all pages are unref'd
6134  * at the end of the transaction. This guarantees that no stale references
6135  * linger in the per-env list.
6136  *
6137  * Usually we map chunks of 16 pages at a time, but if an overflow page begins
6138  * at the tail of the chunk we extend the chunk to include the entire overflow
6139  * page. Unfortunately, pages can be turned into overflow pages after their
6140  * chunk was already mapped. In that case we must remap the chunk if the
6141  * overflow page is referenced. If the chunk's refcnt is 0 we can just remap
6142  * it, otherwise we temporarily map a new chunk just for the overflow page.
6143  *
6144  * @note this chunk handling means we cannot guarantee that a data item
6145  * returned from the DB will stay alive for the duration of the transaction:
6146  *   We unref pages as soon as a cursor moves away from the page
6147  *   A subsequent op may cause a purge, which may unmap any unref'd chunks
6148  * The caller must copy the data if it must be used later in the same txn.
6149  *
6150  * Also - our reference counting revolves around cursors, but overflow pages
6151  * aren't pointed to by a cursor's page stack. We have to remember them
6152  * explicitly, in the added mc_ovpg field. A single cursor can only hold a
6153  * reference to one overflow page at a time.
6154  *
6155  * @param[in] txn the transaction for this access.
6156  * @param[in] pgno the page number for the page to retrieve.
6157  * @param[out] ret address of a pointer where the page's address will be stored.
6158  * @return 0 on success, non-zero on failure.
6159  */
6160 static int
mdb_rpage_get(MDB_txn * txn,pgno_t pg0,MDB_page ** ret)6161 mdb_rpage_get(MDB_txn *txn, pgno_t pg0, MDB_page **ret)
6162 {
6163 	MDB_env *env = txn->mt_env;
6164 	MDB_page *p;
6165 	MDB_ID3L tl = txn->mt_rpages;
6166 	MDB_ID3L el = env->me_rpages;
6167 	MDB_ID3 id3;
6168 	unsigned x, rem;
6169 	pgno_t pgno;
6170 	int rc, retries = 1;
6171 #ifdef _WIN32
6172 	LARGE_INTEGER off;
6173 	SIZE_T len;
6174 #define SET_OFF(off,val)	off.QuadPart = val
6175 #define MAP(rc,env,addr,len,off)	\
6176 	addr = NULL; \
6177 	rc = NtMapViewOfSection(env->me_fmh, GetCurrentProcess(), &addr, 0, \
6178 		len, &off, &len, ViewUnmap, (env->me_flags & MDB_RDONLY) ? 0 : MEM_RESERVE, PAGE_READONLY); \
6179 	if (rc) rc = mdb_nt2win32(rc)
6180 #else
6181 	off_t off;
6182 	size_t len;
6183 #define SET_OFF(off,val)	off = val
6184 #define MAP(rc,env,addr,len,off)	\
6185 	addr = mmap(NULL, len, PROT_READ, MAP_SHARED, env->me_fd, off); \
6186 	rc = (addr == MAP_FAILED) ? errno : 0
6187 #endif
6188 
6189 	/* remember the offset of the actual page number, so we can
6190 	 * return the correct pointer at the end.
6191 	 */
6192 	rem = pg0 & (MDB_RPAGE_CHUNK-1);
6193 	pgno = pg0 ^ rem;
6194 
6195 	id3.mid = 0;
6196 	x = mdb_mid3l_search(tl, pgno);
6197 	if (x <= tl[0].mid && tl[x].mid == pgno) {
6198 		if (x != tl[0].mid && tl[x+1].mid == pg0)
6199 			x++;
6200 		/* check for overflow size */
6201 		p = (MDB_page *)((char *)tl[x].mptr + rem * env->me_psize);
6202 		if (IS_OVERFLOW(p) && p->mp_pages + rem > tl[x].mcnt) {
6203 			id3.mcnt = p->mp_pages + rem;
6204 			len = id3.mcnt * env->me_psize;
6205 			SET_OFF(off, pgno * env->me_psize);
6206 			MAP(rc, env, id3.mptr, len, off);
6207 			if (rc)
6208 				return rc;
6209 			/* check for local-only page */
6210 			if (rem) {
6211 				mdb_tassert(txn, tl[x].mid != pg0);
6212 				/* hope there's room to insert this locally.
6213 				 * setting mid here tells later code to just insert
6214 				 * this id3 instead of searching for a match.
6215 				 */
6216 				id3.mid = pg0;
6217 				goto notlocal;
6218 			} else {
6219 				/* ignore the mapping we got from env, use new one */
6220 				tl[x].mptr = id3.mptr;
6221 				tl[x].mcnt = id3.mcnt;
6222 				/* if no active ref, see if we can replace in env */
6223 				if (!tl[x].mref) {
6224 					unsigned i;
6225 					pthread_mutex_lock(&env->me_rpmutex);
6226 					i = mdb_mid3l_search(el, tl[x].mid);
6227 					if (el[i].mref == 1) {
6228 						/* just us, replace it */
6229 						munmap(el[i].mptr, el[i].mcnt * env->me_psize);
6230 						el[i].mptr = tl[x].mptr;
6231 						el[i].mcnt = tl[x].mcnt;
6232 					} else {
6233 						/* there are others, remove ourself */
6234 						el[i].mref--;
6235 					}
6236 					pthread_mutex_unlock(&env->me_rpmutex);
6237 				}
6238 			}
6239 		}
6240 		id3.mptr = tl[x].mptr;
6241 		id3.mcnt = tl[x].mcnt;
6242 		tl[x].mref++;
6243 		goto ok;
6244 	}
6245 
6246 notlocal:
6247 	if (tl[0].mid >= MDB_TRPAGE_MAX - txn->mt_rpcheck) {
6248 		unsigned i, y;
6249 		/* purge unref'd pages from our list and unref in env */
6250 		pthread_mutex_lock(&env->me_rpmutex);
6251 retry:
6252 		y = 0;
6253 		for (i=1; i<=tl[0].mid; i++) {
6254 			if (!tl[i].mref) {
6255 				if (!y) y = i;
6256 				/* tmp overflow pages don't go to env */
6257 				if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) {
6258 					munmap(tl[i].mptr, tl[i].mcnt * env->me_psize);
6259 					continue;
6260 				}
6261 				x = mdb_mid3l_search(el, tl[i].mid);
6262 				el[x].mref--;
6263 			}
6264 		}
6265 		pthread_mutex_unlock(&env->me_rpmutex);
6266 		if (!y) {
6267 			/* we didn't find any unref'd chunks.
6268 			 * if we're out of room, fail.
6269 			 */
6270 			if (tl[0].mid >= MDB_TRPAGE_MAX)
6271 				return MDB_TXN_FULL;
6272 			/* otherwise, raise threshold for next time around
6273 			 * and let this go.
6274 			 */
6275 			txn->mt_rpcheck /= 2;
6276 		} else {
6277 			/* we found some unused; consolidate the list */
6278 			for (i=y+1; i<= tl[0].mid; i++)
6279 				if (tl[i].mref)
6280 					tl[y++] = tl[i];
6281 			tl[0].mid = y-1;
6282 			/* decrease the check threshold toward its original value */
6283 			if (!txn->mt_rpcheck)
6284 				txn->mt_rpcheck = 1;
6285 			while (txn->mt_rpcheck < tl[0].mid && txn->mt_rpcheck < MDB_TRPAGE_SIZE/2)
6286 				txn->mt_rpcheck *= 2;
6287 		}
6288 	}
6289 	if (tl[0].mid < MDB_TRPAGE_SIZE) {
6290 		id3.mref = 1;
6291 		if (id3.mid)
6292 			goto found;
6293 		/* don't map past last written page in read-only envs */
6294 		if ((env->me_flags & MDB_RDONLY) && pgno + MDB_RPAGE_CHUNK-1 > txn->mt_last_pgno)
6295 			id3.mcnt = txn->mt_last_pgno + 1 - pgno;
6296 		else
6297 			id3.mcnt = MDB_RPAGE_CHUNK;
6298 		len = id3.mcnt * env->me_psize;
6299 		id3.mid = pgno;
6300 
6301 		/* search for page in env */
6302 		pthread_mutex_lock(&env->me_rpmutex);
6303 		x = mdb_mid3l_search(el, pgno);
6304 		if (x <= el[0].mid && el[x].mid == pgno) {
6305 			id3.mptr = el[x].mptr;
6306 			id3.mcnt = el[x].mcnt;
6307 			/* check for overflow size */
6308 			p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize);
6309 			if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) {
6310 				id3.mcnt = p->mp_pages + rem;
6311 				len = id3.mcnt * env->me_psize;
6312 				SET_OFF(off, pgno * env->me_psize);
6313 				MAP(rc, env, id3.mptr, len, off);
6314 				if (rc)
6315 					goto fail;
6316 				if (!el[x].mref) {
6317 					munmap(el[x].mptr, env->me_psize * el[x].mcnt);
6318 					el[x].mptr = id3.mptr;
6319 					el[x].mcnt = id3.mcnt;
6320 				} else {
6321 					id3.mid = pg0;
6322 					pthread_mutex_unlock(&env->me_rpmutex);
6323 					goto found;
6324 				}
6325 			}
6326 			el[x].mref++;
6327 			pthread_mutex_unlock(&env->me_rpmutex);
6328 			goto found;
6329 		}
6330 		if (el[0].mid >= MDB_ERPAGE_MAX - env->me_rpcheck) {
6331 			/* purge unref'd pages */
6332 			unsigned i, y = 0;
6333 			for (i=1; i<=el[0].mid; i++) {
6334 				if (!el[i].mref) {
6335 					if (!y) y = i;
6336 					munmap(el[i].mptr, env->me_psize * el[i].mcnt);
6337 				}
6338 			}
6339 			if (!y) {
6340 				if (retries) {
6341 					/* see if we can unref some local pages */
6342 					retries--;
6343 					id3.mid = 0;
6344 					goto retry;
6345 				}
6346 				if (el[0].mid >= MDB_ERPAGE_MAX) {
6347 					pthread_mutex_unlock(&env->me_rpmutex);
6348 					return MDB_MAP_FULL;
6349 				}
6350 				env->me_rpcheck /= 2;
6351 			} else {
6352 				for (i=y+1; i<= el[0].mid; i++)
6353 					if (el[i].mref)
6354 						el[y++] = el[i];
6355 				el[0].mid = y-1;
6356 				if (!env->me_rpcheck)
6357 					env->me_rpcheck = 1;
6358 				while (env->me_rpcheck < el[0].mid && env->me_rpcheck < MDB_ERPAGE_SIZE/2)
6359 					env->me_rpcheck *= 2;
6360 			}
6361 		}
6362 		SET_OFF(off, pgno * env->me_psize);
6363 		MAP(rc, env, id3.mptr, len, off);
6364 		if (rc) {
6365 fail:
6366 			pthread_mutex_unlock(&env->me_rpmutex);
6367 			return rc;
6368 		}
6369 		/* check for overflow size */
6370 		p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize);
6371 		if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) {
6372 			id3.mcnt = p->mp_pages + rem;
6373 			munmap(id3.mptr, len);
6374 			len = id3.mcnt * env->me_psize;
6375 			MAP(rc, env, id3.mptr, len, off);
6376 			if (rc)
6377 				goto fail;
6378 		}
6379 		mdb_mid3l_insert(el, &id3);
6380 		pthread_mutex_unlock(&env->me_rpmutex);
6381 found:
6382 		mdb_mid3l_insert(tl, &id3);
6383 	} else {
6384 		return MDB_TXN_FULL;
6385 	}
6386 ok:
6387 	p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize);
6388 #if MDB_DEBUG	/* we don't need this check any more */
6389 	if (IS_OVERFLOW(p)) {
6390 		mdb_tassert(txn, p->mp_pages + rem <= id3.mcnt);
6391 	}
6392 #endif
6393 	*ret = p;
6394 	return MDB_SUCCESS;
6395 }
6396 #endif
6397 
/** Find the address of the page corresponding to a given page number.
 * Set #MDB_TXN_ERROR on failure.
 * @param[in] mc the cursor accessing the page.
 * @param[in] pgno the page number for the page to retrieve.
 * @param[out] ret address of a pointer where the page's address will be stored.
 * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **ret, int *lvl)
{
	MDB_txn *txn = mc->mc_txn;
	MDB_page *p = NULL;
	int level;

	/* For writable (non-WRITEMAP) txns, the page may be in this txn's
	 * or an ancestor's dirty list; check those before the map.
	 */
	if (! (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP))) {
		MDB_txn *tx2 = txn;
		level = 1;
		do {
			MDB_ID2L dl = tx2->mt_u.dirty_list;
			unsigned x;
			/* Spilled pages were dirtied in this txn and flushed
			 * because the dirty list got full. Bring this page
			 * back in from the map (but don't unspill it here,
			 * leave that unless page_touch happens again).
			 */
			if (tx2->mt_spill_pgs) {
				MDB_ID pn = pgno << 1;	/* spill list entries are pgno<<1 */
				x = mdb_midl_search(tx2->mt_spill_pgs, pn);
				if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) {
					goto mapped;
				}
			}
			if (dl[0].mid) {
				unsigned x = mdb_mid2l_search(dl, pgno);
				if (x <= dl[0].mid && dl[x].mid == pgno) {
					p = dl[x].mptr;
					goto done;
				}
			}
			/* level counts how many ancestors up the dirty page was found */
			level++;
		} while ((tx2 = tx2->mt_parent) != NULL);
	}

	if (pgno >= txn->mt_next_pgno) {
		DPRINTF(("page %"Yu" not found", pgno));
		txn->mt_flags |= MDB_TXN_ERROR;
		return MDB_PAGE_NOTFOUND;
	}

	level = 0;	/* 0 == page comes straight from the memory map */

mapped:
	{
#ifdef MDB_VL32
		/* 32-bit builds map read pages in chunks on demand */
		int rc = mdb_rpage_get(txn, pgno, &p);
		if (rc) {
			txn->mt_flags |= MDB_TXN_ERROR;
			return rc;
		}
#else
		MDB_env *env = txn->mt_env;
		p = (MDB_page *)(env->me_map + env->me_psize * pgno);
#endif
	}

done:
	*ret = p;
	if (lvl)
		*lvl = level;
	return MDB_SUCCESS;
}
6470 
/** Finish #mdb_page_search() / #mdb_page_search_lowest().
 *	The cursor is at the root page, set up the rest of it.
 *	Walks down the tree, pushing each visited branch page onto the
 *	cursor stack, until a leaf is reached.
 * @param[in,out] mc cursor positioned at the root page.
 * @param[in] key key to descend toward, or NULL with MDB_PS_FIRST/LAST.
 * @param[in] flags MDB_PS_* controls (see #mdb_page_search()).
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags)
{
	MDB_page	*mp = mc->mc_pg[mc->mc_top];
	int rc;
	DKBUF;

	while (IS_BRANCH(mp)) {
		MDB_node	*node;
		indx_t		i;

		DPRINTF(("branch page %"Yu" has %u keys", mp->mp_pgno, NUMKEYS(mp)));
		/* Don't assert on branch pages in the FreeDB. We can get here
		 * while in the process of rebalancing a FreeDB branch page; we must
		 * let that proceed. ITS#8336
		 */
		mdb_cassert(mc, !mc->mc_dbi || NUMKEYS(mp) > 1);
		DPRINTF(("found index 0 to page %"Yu, NODEPGNO(NODEPTR(mp, 0))));

		if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) {
			i = 0;	/* MDB_PS_FIRST: always take the leftmost child */
			if (flags & MDB_PS_LAST) {
				i = NUMKEYS(mp) - 1;
				/* if already init'd, see if we're already in right place */
				if (mc->mc_flags & C_INITIALIZED) {
					if (mc->mc_ki[mc->mc_top] == i) {
						/* re-use the child already on the stack */
						mc->mc_top = mc->mc_snum++;
						mp = mc->mc_pg[mc->mc_top];
						goto ready;
					}
				}
			}
		} else {
			int	 exact;
			node = mdb_node_search(mc, key, &exact);
			/* NULL: key sorts after all keys here, follow rightmost child */
			if (node == NULL)
				i = NUMKEYS(mp) - 1;
			else {
				i = mc->mc_ki[mc->mc_top];
				if (!exact) {
					/* inexact match points past the child to descend into */
					mdb_cassert(mc, i > 0);
					i--;
				}
			}
			DPRINTF(("following index %u for key [%s]", i, DKEY(key)));
		}

		mdb_cassert(mc, i < NUMKEYS(mp));
		node = NODEPTR(mp, i);

		if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)
			return rc;

		mc->mc_ki[mc->mc_top] = i;
		if ((rc = mdb_cursor_push(mc, mp)))
			return rc;

ready:
		/* make the page writable if the caller intends to modify it */
		if (flags & MDB_PS_MODIFY) {
			if ((rc = mdb_page_touch(mc)) != 0)
				return rc;
			mp = mc->mc_pg[mc->mc_top];
		}
	}

	if (!IS_LEAF(mp)) {
		DPRINTF(("internal error, index points to a %02X page!?",
		    mp->mp_flags));
		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
		return MDB_CORRUPTED;
	}

	DPRINTF(("found leaf page %"Yu" for key [%s]", mp->mp_pgno,
	    key ? DKEY(key) : "null"));
	mc->mc_flags |= C_INITIALIZED;
	mc->mc_flags &= ~C_EOF;

	return MDB_SUCCESS;
}
6553 
6554 /** Search for the lowest key under the current branch page.
6555  * This just bypasses a NUMKEYS check in the current page
6556  * before calling mdb_page_search_root(), because the callers
6557  * are all in situations where the current page is known to
6558  * be underfilled.
6559  */
6560 static int
mdb_page_search_lowest(MDB_cursor * mc)6561 mdb_page_search_lowest(MDB_cursor *mc)
6562 {
6563 	MDB_page	*mp = mc->mc_pg[mc->mc_top];
6564 	MDB_node	*node = NODEPTR(mp, 0);
6565 	int rc;
6566 
6567 	if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)
6568 		return rc;
6569 
6570 	mc->mc_ki[mc->mc_top] = 0;
6571 	if ((rc = mdb_cursor_push(mc, mp)))
6572 		return rc;
6573 	return mdb_page_search_root(mc, NULL, MDB_PS_FIRST);
6574 }
6575 
/** Search for the page a given key should be in.
 * Push it and its parent pages on the cursor stack.
 * @param[in,out] mc the cursor for this operation.
 * @param[in] key the key to search for, or NULL for first/last page.
 * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB
 *   are touched (updated with new page numbers).
 *   If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf.
 *   This is used by #mdb_cursor_first() and #mdb_cursor_last().
 *   If MDB_PS_ROOTONLY set, just fetch root node, no further lookups.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags)
{
	int		 rc;
	pgno_t		 root;

	/* Make sure the txn is still viable, then find the root from
	 * the txn's db table and set it as the root of the cursor's stack.
	 */
	if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) {
		DPUTS("transaction may not be used now");
		return MDB_BAD_TXN;
	} else {
		/* Make sure we're using an up-to-date root */
		if (*mc->mc_dbflag & DB_STALE) {
				MDB_cursor mc2;
				if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi))
					return MDB_BAD_DBI;
				/* Re-read this named DB's record from the main DB */
				mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL);
				rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0);
				if (rc)
					return rc;
				{
					MDB_val data;
					int exact = 0;
					uint16_t flags;
					MDB_node *leaf = mdb_node_search(&mc2,
						&mc->mc_dbx->md_name, &exact);
					if (!exact)
						return MDB_NOTFOUND;
					if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA)
						return MDB_INCOMPATIBLE; /* not a named DB */
					rc = mdb_node_read(&mc2, leaf, &data);
					if (rc)
						return rc;
					/* copy out the persistent flags word from the stored MDB_db */
					memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)),
						sizeof(uint16_t));
					/* The txn may not know this DBI, or another process may
					 * have dropped and recreated the DB with other flags.
					 */
					if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags)
						return MDB_INCOMPATIBLE;
					/* refresh the cached MDB_db record */
					memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db));
				}
				*mc->mc_dbflag &= ~DB_STALE;
		}
		root = mc->mc_db->md_root;

		if (root == P_INVALID) {		/* Tree is empty. */
			DPUTS("tree is empty");
			return MDB_NOTFOUND;
		}
	}

	mdb_cassert(mc, root > 1);
	/* Re-fetch the root page only if the cursor isn't already on it */
	if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) {
#ifdef MDB_VL32
		if (mc->mc_pg[0])
			MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[0]);
#endif
		if ((rc = mdb_page_get(mc, root, &mc->mc_pg[0], NULL)) != 0)
			return rc;
	}

#ifdef MDB_VL32
	{
		/* drop refs on any deeper pages left on the stack */
		int i;
		for (i=1; i<mc->mc_snum; i++)
			MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[i]);
	}
#endif
	/* reset the cursor stack to just the root */
	mc->mc_snum = 1;
	mc->mc_top = 0;

	DPRINTF(("db %d root page %"Yu" has flags 0x%X",
		DDBI(mc), root, mc->mc_pg[0]->mp_flags));

	if (flags & MDB_PS_MODIFY) {
		if ((rc = mdb_page_touch(mc)))
			return rc;
	}

	if (flags & MDB_PS_ROOTONLY)
		return MDB_SUCCESS;

	return mdb_page_search_root(mc, key, flags);
}
6674 
/** Release a chain of overflow pages (mp .. mp+mp_pages-1).
 * The pages go back to the txn's reclaimable free list (me_pghead)
 * when possible, otherwise onto the freed-in-this-txn list.
 * @param[in] mc cursor whose txn owns the page.
 * @param[in] mp first page of the overflow chain.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp)
{
	MDB_txn *txn = mc->mc_txn;
	pgno_t pg = mp->mp_pgno;
	unsigned x = 0, ovpages = mp->mp_pages;
	MDB_env *env = txn->mt_env;
	MDB_IDL sl = txn->mt_spill_pgs;
	MDB_ID pn = pg << 1;	/* spill list entries are pgno<<1 */
	int rc;

	DPRINTF(("free ov page %"Yu" (%d)", pg, ovpages));
	/* If the page is dirty or on the spill list we just acquired it,
	 * so we should give it back to our current free list, if any.
	 * Otherwise put it onto the list of pages we freed in this txn.
	 *
	 * Won't create me_pghead: me_pglast must be inited along with it.
	 * Unsupported in nested txns: They would need to hide the page
	 * range in ancestor txns' dirty and spilled lists.
	 */
	if (env->me_pghead &&
		!txn->mt_parent &&
		((mp->mp_flags & P_DIRTY) ||
		 (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn)))
	{
		unsigned i, j;
		pgno_t *mop;
		MDB_ID2 *dl, ix, iy;
		/* make room in me_pghead for the returned pages */
		rc = mdb_midl_need(&env->me_pghead, ovpages);
		if (rc)
			return rc;
		if (!(mp->mp_flags & P_DIRTY)) {
			/* This page is no longer spilled */
			if (x == sl[0])
				sl[0]--;
			else
				sl[x] |= 1;	/* set low bit so the entry no longer matches pgno<<1 lookups */
			goto release;
		}
		/* Remove from dirty list: scan from the end, shifting entries
		 * up by one slot until mp is found.
		 */
		dl = txn->mt_u.dirty_list;
		x = dl[0].mid--;
		for (ix = dl[x]; ix.mptr != mp; ix = iy) {
			if (x > 1) {
				x--;
				iy = dl[x];
				dl[x] = ix;
			} else {
				/* mp was not on the dirty list: internal inconsistency */
				mdb_cassert(mc, x > 1);
				j = ++(dl[0].mid);
				dl[j] = ix;		/* Unsorted. OK when MDB_TXN_ERROR. */
				txn->mt_flags |= MDB_TXN_ERROR;
				return MDB_PROBLEM;
			}
		}
		txn->mt_dirty_room++;
		if (!(env->me_flags & MDB_WRITEMAP))
			mdb_dpage_free(env, mp);
release:
		/* Insert in me_pghead: shift smaller entries up, then fill
		 * the gap with pg .. pg+ovpages-1 (list kept sorted).
		 */
		mop = env->me_pghead;
		j = mop[0] + ovpages;
		for (i = mop[0]; i && mop[i] < pg; i--)
			mop[j--] = mop[i];
		while (j>i)
			mop[j--] = pg++;
		mop[0] += ovpages;
	} else {
		rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages);
		if (rc)
			return rc;
	}
#ifdef MDB_VL32
	/* the cursor must not keep a stale overflow-page reference */
	if (mc->mc_ovpg == mp)
		mc->mc_ovpg = NULL;
#endif
	mc->mc_db->md_overflow_pages -= ovpages;
	return 0;
}
6754 
6755 /** Return the data associated with a given node.
6756  * @param[in] mc The cursor for this operation.
6757  * @param[in] leaf The node being read.
6758  * @param[out] data Updated to point to the node's data.
6759  * @return 0 on success, non-zero on failure.
6760  */
6761 static int
mdb_node_read(MDB_cursor * mc,MDB_node * leaf,MDB_val * data)6762 mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data)
6763 {
6764 	MDB_page	*omp;		/* overflow page */
6765 	pgno_t		 pgno;
6766 	int rc;
6767 
6768 	if (MC_OVPG(mc)) {
6769 		MDB_PAGE_UNREF(mc->mc_txn, MC_OVPG(mc));
6770 		MC_SET_OVPG(mc, NULL);
6771 	}
6772 	if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) {
6773 		data->mv_size = NODEDSZ(leaf);
6774 		data->mv_data = NODEDATA(leaf);
6775 		return MDB_SUCCESS;
6776 	}
6777 
6778 	/* Read overflow data.
6779 	 */
6780 	data->mv_size = NODEDSZ(leaf);
6781 	memcpy(&pgno, NODEDATA(leaf), sizeof(pgno));
6782 	if ((rc = mdb_page_get(mc, pgno, &omp, NULL)) != 0) {
6783 		DPRINTF(("read overflow page %"Yu" failed", pgno));
6784 		return rc;
6785 	}
6786 	data->mv_data = METADATA(omp);
6787 	MC_SET_OVPG(mc, omp);
6788 
6789 	return MDB_SUCCESS;
6790 }
6791 
6792 int
mdb_get(MDB_txn * txn,MDB_dbi dbi,MDB_val * key,MDB_val * data)6793 mdb_get(MDB_txn *txn, MDB_dbi dbi,
6794     MDB_val *key, MDB_val *data)
6795 {
6796 	MDB_cursor	mc;
6797 	MDB_xcursor	mx;
6798 	int exact = 0, rc;
6799 	DKBUF;
6800 
6801 	DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key)));
6802 
6803 	if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
6804 		return EINVAL;
6805 
6806 	if (txn->mt_flags & MDB_TXN_BLOCKED)
6807 		return MDB_BAD_TXN;
6808 
6809 	mdb_cursor_init(&mc, txn, dbi, &mx);
6810 	rc = mdb_cursor_set(&mc, key, data, MDB_SET, &exact);
6811 	/* unref all the pages when MDB_VL32 - caller must copy the data
6812 	 * before doing anything else
6813 	 */
6814 	MDB_CURSOR_UNREF(&mc, 1);
6815 	return rc;
6816 }
6817 
/** Find a sibling for a page.
 * Replaces the page at the top of the cursor's stack with the
 * specified sibling, if one exists. Recurses up the tree when the
 * parent page is itself at its first/last key.
 * @param[in] mc The cursor for this operation.
 * @param[in] move_right Non-zero if the right sibling is requested,
 * otherwise the left sibling.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_cursor_sibling(MDB_cursor *mc, int move_right)
{
	int		 rc;
	MDB_node	*indx;
	MDB_page	*mp;
#ifdef MDB_VL32
	MDB_page	*op;
#endif

	if (mc->mc_snum < 2) {
		return MDB_NOTFOUND;		/* root has no siblings */
	}

#ifdef MDB_VL32
	/* remember the page being replaced so its ref can be dropped */
	op = mc->mc_pg[mc->mc_top];
#endif
	mdb_cursor_pop(mc);
	DPRINTF(("parent page is page %"Yu", index %u",
		mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top]));

	if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top]))
		       : (mc->mc_ki[mc->mc_top] == 0)) {
		DPRINTF(("no more keys left, moving to %s sibling",
		    move_right ? "right" : "left"));
		/* parent is also at its edge: recurse one level up */
		if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) {
			/* undo cursor_pop before returning */
			mc->mc_top++;
			mc->mc_snum++;
			return rc;
		}
	} else {
		if (move_right)
			mc->mc_ki[mc->mc_top]++;
		else
			mc->mc_ki[mc->mc_top]--;
		DPRINTF(("just moving to %s index key %u",
		    move_right ? "right" : "left", mc->mc_ki[mc->mc_top]));
	}
	mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top]));

	/* op exists only under MDB_VL32; MDB_PAGE_UNREF is a no-op otherwise */
	MDB_PAGE_UNREF(mc->mc_txn, op);

	indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
	if ((rc = mdb_page_get(mc, NODEPGNO(indx), &mp, NULL)) != 0) {
		/* mc will be inconsistent if caller does mc_snum++ as above */
		mc->mc_flags &= ~(C_INITIALIZED|C_EOF);
		return rc;
	}

	mdb_cursor_push(mc, mp);
	/* when moving left, land on the sibling's last key */
	if (!move_right)
		mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1;

	return MDB_SUCCESS;
}
6882 
/** Move the cursor to the next data item.
 * @param[in,out] mc the cursor.
 * @param[out] key updated to the item's key (unless LEAF2/NULL rules apply).
 * @param[out] data updated to the item's data, may be NULL.
 * @param[in] op MDB_NEXT, MDB_NEXT_DUP or MDB_NEXT_NODUP.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op)
{
	MDB_page	*mp;
	MDB_node	*leaf;
	int rc;

	/* after a delete there is no next duplicate to move to */
	if ((mc->mc_flags & C_DEL && op == MDB_NEXT_DUP))
		return MDB_NOTFOUND;

	/* an unpositioned cursor starts at the first item */
	if (!(mc->mc_flags & C_INITIALIZED))
		return mdb_cursor_first(mc, key, data);

	mp = mc->mc_pg[mc->mc_top];

	if (mc->mc_flags & C_EOF) {
		if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mp)-1)
			return MDB_NOTFOUND;
		mc->mc_flags ^= C_EOF;
	}

	if (mc->mc_db->md_flags & MDB_DUPSORT) {
		leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
		if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
			if (op == MDB_NEXT || op == MDB_NEXT_DUP) {
				/* try to advance within the duplicate set first */
				rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT);
				if (op != MDB_NEXT || rc != MDB_NOTFOUND) {
					if (rc == MDB_SUCCESS)
						MDB_GET_KEY(leaf, key);
					return rc;
				}
				/* MDB_NEXT and dups exhausted: fall through to next key */
			}
			else {
				MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0);
			}
		} else {
			mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
			if (op == MDB_NEXT_DUP)
				return MDB_NOTFOUND;
		}
	}

	DPRINTF(("cursor_next: top page is %"Yu" in cursor %p",
		mdb_dbg_pgno(mp), (void *) mc));
	if (mc->mc_flags & C_DEL) {
		/* the deleted item's slot already holds the next item */
		mc->mc_flags ^= C_DEL;
		goto skip;
	}

	if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) {
		DPUTS("=====> move to next sibling page");
		if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) {
			mc->mc_flags |= C_EOF;
			return rc;
		}
		mp = mc->mc_pg[mc->mc_top];
		DPRINTF(("next page is %"Yu", key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top]));
	} else
		mc->mc_ki[mc->mc_top]++;

skip:
	DPRINTF(("==> cursor points to page %"Yu" with %u keys, key index %u",
	    mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top]));

	if (IS_LEAF2(mp)) {
		/* LEAF2 pages hold fixed-size keys only, no nodes */
		key->mv_size = mc->mc_db->md_pad;
		key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
		return MDB_SUCCESS;
	}

	mdb_cassert(mc, IS_LEAF(mp));
	leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);

	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
		/* new key with duplicates: position sub-cursor at first dup */
		mdb_xcursor_init1(mc, leaf);
		rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
		if (rc != MDB_SUCCESS)
			return rc;
	} else if (data) {
		if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
			return rc;
	}

	MDB_GET_KEY(leaf, key);
	return MDB_SUCCESS;
}
6970 
/** Move the cursor to the previous data item.
 * @param[in,out] mc the cursor.
 * @param[out] key updated to the item's key.
 * @param[out] data updated to the item's data, may be NULL.
 * @param[in] op MDB_PREV, MDB_PREV_DUP or MDB_PREV_NODUP.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op)
{
	MDB_page	*mp;
	MDB_node	*leaf;
	int rc;

	if (!(mc->mc_flags & C_INITIALIZED)) {
		/* unpositioned cursor: go to the last item, then step past it
		 * so the decrement below lands back on it
		 */
		rc = mdb_cursor_last(mc, key, data);
		if (rc)
			return rc;
		mc->mc_ki[mc->mc_top]++;
	}

	mp = mc->mc_pg[mc->mc_top];

	if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
		mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) {
		leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
		if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
			if (op == MDB_PREV || op == MDB_PREV_DUP) {
				/* try to step back within the duplicate set first */
				rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV);
				if (op != MDB_PREV || rc != MDB_NOTFOUND) {
					if (rc == MDB_SUCCESS) {
						MDB_GET_KEY(leaf, key);
						mc->mc_flags &= ~C_EOF;
					}
					return rc;
				}
				/* MDB_PREV and dups exhausted: fall through to prev key */
			}
			else {
				MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0);
			}
		} else {
			mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
			if (op == MDB_PREV_DUP)
				return MDB_NOTFOUND;
		}
	}

	DPRINTF(("cursor_prev: top page is %"Yu" in cursor %p",
		mdb_dbg_pgno(mp), (void *) mc));

	mc->mc_flags &= ~(C_EOF|C_DEL);

	if (mc->mc_ki[mc->mc_top] == 0)  {
		DPUTS("=====> move to prev sibling page");
		if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) {
			return rc;
		}
		mp = mc->mc_pg[mc->mc_top];
		mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1;
		DPRINTF(("prev page is %"Yu", key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top]));
	} else
		mc->mc_ki[mc->mc_top]--;

	DPRINTF(("==> cursor points to page %"Yu" with %u keys, key index %u",
	    mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top]));

	if (!IS_LEAF(mp))
		return MDB_CORRUPTED;

	if (IS_LEAF2(mp)) {
		/* LEAF2 pages hold fixed-size keys only, no nodes */
		key->mv_size = mc->mc_db->md_pad;
		key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
		return MDB_SUCCESS;
	}

	leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);

	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
		/* new key with duplicates: position sub-cursor at last dup */
		mdb_xcursor_init1(mc, leaf);
		rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL);
		if (rc != MDB_SUCCESS)
			return rc;
	} else if (data) {
		if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
			return rc;
	}

	MDB_GET_KEY(leaf, key);
	return MDB_SUCCESS;
}
7055 
/** Set the cursor on a specific data item.
 * @param[in,out] mc the cursor.
 * @param[in,out] key the key to position on; updated for SET_KEY/SET_RANGE.
 * @param[in,out] data for GET_BOTH/GET_BOTH_RANGE the data to match,
 *   otherwise updated to the found item's data; may be NULL.
 * @param[in] op MDB_SET, MDB_SET_KEY, MDB_SET_RANGE, MDB_GET_BOTH or
 *   MDB_GET_BOTH_RANGE.
 * @param[out] exactp if non-NULL, set non-zero on an exact key match;
 *   NULL exactp also relaxes SET_RANGE to accept an inexact match.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data,
    MDB_cursor_op op, int *exactp)
{
	int		 rc;
	MDB_page	*mp;
	MDB_node	*leaf = NULL;
	DKBUF;

	if (key->mv_size == 0)
		return MDB_BAD_VALSIZE;

	/* reset any sub-cursor positioning */
	if (mc->mc_xcursor) {
		MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0);
		mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
	}

	/* See if we're already on the right page */
	if (mc->mc_flags & C_INITIALIZED) {
		MDB_val nodekey;

		mp = mc->mc_pg[mc->mc_top];
		if (!NUMKEYS(mp)) {
			mc->mc_ki[mc->mc_top] = 0;
			return MDB_NOTFOUND;
		}
		/* compare against the first key on the current page */
		if (mp->mp_flags & P_LEAF2) {
			nodekey.mv_size = mc->mc_db->md_pad;
			nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size);
		} else {
			leaf = NODEPTR(mp, 0);
			MDB_GET_KEY2(leaf, nodekey);
		}
		rc = mc->mc_dbx->md_cmp(key, &nodekey);
		if (rc == 0) {
			/* Probably happens rarely, but first node on the page
			 * was the one we wanted.
			 */
			mc->mc_ki[mc->mc_top] = 0;
			if (exactp)
				*exactp = 1;
			goto set1;
		}
		if (rc > 0) {
			/* sought key sorts after this page's first key */
			unsigned int i;
			unsigned int nkeys = NUMKEYS(mp);
			if (nkeys > 1) {
				/* compare against the last key on the page */
				if (mp->mp_flags & P_LEAF2) {
					nodekey.mv_data = LEAF2KEY(mp,
						 nkeys-1, nodekey.mv_size);
				} else {
					leaf = NODEPTR(mp, nkeys-1);
					MDB_GET_KEY2(leaf, nodekey);
				}
				rc = mc->mc_dbx->md_cmp(key, &nodekey);
				if (rc == 0) {
					/* last node was the one we wanted */
					mc->mc_ki[mc->mc_top] = nkeys-1;
					if (exactp)
						*exactp = 1;
					goto set1;
				}
				if (rc < 0) {
					/* key falls within this page's key range */
					if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) {
						/* This is definitely the right page, skip search_page */
						if (mp->mp_flags & P_LEAF2) {
							nodekey.mv_data = LEAF2KEY(mp,
								 mc->mc_ki[mc->mc_top], nodekey.mv_size);
						} else {
							leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
							MDB_GET_KEY2(leaf, nodekey);
						}
						rc = mc->mc_dbx->md_cmp(key, &nodekey);
						if (rc == 0) {
							/* current node was the one we wanted */
							if (exactp)
								*exactp = 1;
							goto set1;
						}
					}
					rc = 0;
					mc->mc_flags &= ~C_EOF;
					goto set2;
				}
			}
			/* If any parents have right-sibs, search.
			 * Otherwise, there's nothing further.
			 */
			for (i=0; i<mc->mc_top; i++)
				if (mc->mc_ki[i] <
					NUMKEYS(mc->mc_pg[i])-1)
					break;
			if (i == mc->mc_top) {
				/* There are no other pages */
				mc->mc_ki[mc->mc_top] = nkeys;
				return MDB_NOTFOUND;
			}
		}
		if (!mc->mc_top) {
			/* There are no other pages */
			mc->mc_ki[mc->mc_top] = 0;
			if (op == MDB_SET_RANGE && !exactp) {
				rc = 0;
				goto set1;
			} else
				return MDB_NOTFOUND;
		}
	} else {
		mc->mc_pg[0] = 0;
	}

	/* fall back to a full tree search */
	rc = mdb_page_search(mc, key, 0);
	if (rc != MDB_SUCCESS)
		return rc;

	mp = mc->mc_pg[mc->mc_top];
	mdb_cassert(mc, IS_LEAF(mp));

set2:
	leaf = mdb_node_search(mc, key, exactp);
	if (exactp != NULL && !*exactp) {
		/* MDB_SET specified and not an exact match. */
		return MDB_NOTFOUND;
	}

	if (leaf == NULL) {
		/* key sorts past the last key on this leaf: continue on the
		 * right sibling's first node
		 */
		DPUTS("===> inexact leaf not found, goto sibling");
		if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) {
			mc->mc_flags |= C_EOF;
			return rc;		/* no entries matched */
		}
		mp = mc->mc_pg[mc->mc_top];
		mdb_cassert(mc, IS_LEAF(mp));
		leaf = NODEPTR(mp, 0);
	}

set1:
	mc->mc_flags |= C_INITIALIZED;
	mc->mc_flags &= ~C_EOF;

	if (IS_LEAF2(mp)) {
		/* LEAF2 pages hold fixed-size keys only, no nodes or data */
		if (op == MDB_SET_RANGE || op == MDB_SET_KEY) {
			key->mv_size = mc->mc_db->md_pad;
			key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
		}
		return MDB_SUCCESS;
	}

	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
		/* duplicates: position the sub-cursor within the dup set */
		mdb_xcursor_init1(mc, leaf);
		if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) {
			rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
		} else {
			int ex2, *ex2p;
			if (op == MDB_GET_BOTH) {
				/* GET_BOTH requires an exact data match */
				ex2p = &ex2;
				ex2 = 0;
			} else {
				ex2p = NULL;
			}
			rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p);
			if (rc != MDB_SUCCESS)
				return rc;
		}
	} else if (data) {
		if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) {
			/* single data item: compare it against the sought data */
			MDB_val olddata;
			MDB_cmp_func *dcmp;
			if ((rc = mdb_node_read(mc, leaf, &olddata)) != MDB_SUCCESS)
				return rc;
			dcmp = mc->mc_dbx->md_dcmp;
			if (NEED_CMP_CLONG(dcmp, olddata.mv_size))
				dcmp = mdb_cmp_clong;
			rc = dcmp(data, &olddata);
			if (rc) {
				/* GET_BOTH_RANGE accepts stored data that sorts after */
				if (op == MDB_GET_BOTH || rc > 0)
					return MDB_NOTFOUND;
				rc = 0;
			}
			*data = olddata;

		} else {
			if (mc->mc_xcursor)
				mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
			if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
				return rc;
		}
	}

	/* The key already matches in all other cases */
	if (op == MDB_SET_RANGE || op == MDB_SET_KEY)
		MDB_GET_KEY(leaf, key);
	DPRINTF(("==> cursor placed on key [%s]", DKEY(key)));

	return rc;
}
7253 
7254 /** Move the cursor to the first item in the database. */
7255 static int
mdb_cursor_first(MDB_cursor * mc,MDB_val * key,MDB_val * data)7256 mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data)
7257 {
7258 	int		 rc;
7259 	MDB_node	*leaf;
7260 
7261 	if (mc->mc_xcursor) {
7262 		MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0);
7263 		mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
7264 	}
7265 
7266 	if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
7267 		rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
7268 		if (rc != MDB_SUCCESS)
7269 			return rc;
7270 	}
7271 	mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top]));
7272 
7273 	leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0);
7274 	mc->mc_flags |= C_INITIALIZED;
7275 	mc->mc_flags &= ~C_EOF;
7276 
7277 	mc->mc_ki[mc->mc_top] = 0;
7278 
7279 	if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
7280 		if ( key ) {
7281 			key->mv_size = mc->mc_db->md_pad;
7282 			key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size);
7283 		}
7284 		return MDB_SUCCESS;
7285 	}
7286 
7287 	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
7288 		mdb_xcursor_init1(mc, leaf);
7289 		rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL);
7290 		if (rc)
7291 			return rc;
7292 	} else if (data) {
7293 		if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
7294 			return rc;
7295 	}
7296 
7297 	MDB_GET_KEY(leaf, key);
7298 	return MDB_SUCCESS;
7299 }
7300 
7301 /** Move the cursor to the last item in the database. */
7302 static int
mdb_cursor_last(MDB_cursor * mc,MDB_val * key,MDB_val * data)7303 mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data)
7304 {
7305 	int		 rc;
7306 	MDB_node	*leaf;
7307 
7308 	if (mc->mc_xcursor) {
7309 		MDB_CURSOR_UNREF(&mc->mc_xcursor->mx_cursor, 0);
7310 		mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
7311 	}
7312 
7313 	if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) {
7314 		rc = mdb_page_search(mc, NULL, MDB_PS_LAST);
7315 		if (rc != MDB_SUCCESS)
7316 			return rc;
7317 	}
7318 	mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top]));
7319 
7320 	mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1;
7321 	mc->mc_flags |= C_INITIALIZED|C_EOF;
7322 	leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
7323 
7324 	if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
7325 		if (key) {
7326 			key->mv_size = mc->mc_db->md_pad;
7327 			key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size);
7328 		}
7329 		return MDB_SUCCESS;
7330 	}
7331 
7332 	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
7333 		mdb_xcursor_init1(mc, leaf);
7334 		rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL);
7335 		if (rc)
7336 			return rc;
7337 	} else if (data) {
7338 		if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS)
7339 			return rc;
7340 	}
7341 
7342 	MDB_GET_KEY(leaf, key);
7343 	return MDB_SUCCESS;
7344 }
7345 
/** Retrieve by cursor: position the cursor per the requested
 * #MDB_cursor_op and return the key/data found there.
 * @param[in] mc Cursor to operate on.
 * @param[in,out] key Key in/out, meaning depends on op.
 * @param[in,out] data Data in/out, meaning depends on op.
 * @param[in] op The cursor operation to perform.
 * @return 0 on success, non-zero (errno or MDB_* code) on failure.
 */
int
mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data,
    MDB_cursor_op op)
{
	int		 rc;
	int		 exact = 0;
	int		 (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data);

	if (mc == NULL)
		return EINVAL;

	if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED)
		return MDB_BAD_TXN;

	switch (op) {
	case MDB_GET_CURRENT:
		if (!(mc->mc_flags & C_INITIALIZED)) {
			rc = EINVAL;
		} else {
			MDB_page *mp = mc->mc_pg[mc->mc_top];
			int nkeys = NUMKEYS(mp);
			/* Cursor may be past the last item (e.g. after a delete) */
			if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) {
				mc->mc_ki[mc->mc_top] = nkeys;
				rc = MDB_NOTFOUND;
				break;
			}
			rc = MDB_SUCCESS;
			if (IS_LEAF2(mp)) {
				key->mv_size = mc->mc_db->md_pad;
				key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size);
			} else {
				MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
				MDB_GET_KEY(leaf, key);
				if (data) {
					if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
						/* dup data: current item lives under the sub-cursor */
						rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT);
					} else {
						rc = mdb_node_read(mc, leaf, data);
					}
				}
			}
		}
		break;
	case MDB_GET_BOTH:
	case MDB_GET_BOTH_RANGE:
		if (data == NULL) {
			rc = EINVAL;
			break;
		}
		if (mc->mc_xcursor == NULL) {
			rc = MDB_INCOMPATIBLE;
			break;
		}
		/* FALLTHRU */
	case MDB_SET:
	case MDB_SET_KEY:
	case MDB_SET_RANGE:
		if (key == NULL) {
			rc = EINVAL;
		} else {
			rc = mdb_cursor_set(mc, key, data, op,
				op == MDB_SET_RANGE ? NULL : &exact);
		}
		break;
	case MDB_GET_MULTIPLE:
		if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) {
			rc = EINVAL;
			break;
		}
		if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
			rc = MDB_INCOMPATIBLE;
			break;
		}
		rc = MDB_SUCCESS;
		if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) ||
			(mc->mc_xcursor->mx_cursor.mc_flags & C_EOF))
			break;
		goto fetchm;
	case MDB_NEXT_MULTIPLE:
		if (data == NULL) {
			rc = EINVAL;
			break;
		}
		if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
			rc = MDB_INCOMPATIBLE;
			break;
		}
		rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP);
		if (rc == MDB_SUCCESS) {
			if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) {
				MDB_cursor *mx;
fetchm:
				/* Return the whole fixed-size dup page as one blob
				 * and park the sub-cursor on its last entry.
				 */
				mx = &mc->mc_xcursor->mx_cursor;
				data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) *
					mx->mc_db->md_pad;
				data->mv_data = METADATA(mx->mc_pg[mx->mc_top]);
				mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1;
			} else {
				rc = MDB_NOTFOUND;
			}
		}
		break;
	case MDB_PREV_MULTIPLE:
		if (data == NULL) {
			rc = EINVAL;
			break;
		}
		if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
			rc = MDB_INCOMPATIBLE;
			break;
		}
		if (!(mc->mc_flags & C_INITIALIZED))
			rc = mdb_cursor_last(mc, key, data);
		else
			rc = MDB_SUCCESS;
		if (rc == MDB_SUCCESS) {
			MDB_cursor *mx = &mc->mc_xcursor->mx_cursor;
			if (mx->mc_flags & C_INITIALIZED) {
				/* step the sub-cursor back one dup page */
				rc = mdb_cursor_sibling(mx, 0);
				if (rc == MDB_SUCCESS)
					goto fetchm;
			} else {
				rc = MDB_NOTFOUND;
			}
		}
		break;
	case MDB_NEXT:
	case MDB_NEXT_DUP:
	case MDB_NEXT_NODUP:
		rc = mdb_cursor_next(mc, key, data, op);
		break;
	case MDB_PREV:
	case MDB_PREV_DUP:
	case MDB_PREV_NODUP:
		rc = mdb_cursor_prev(mc, key, data, op);
		break;
	case MDB_FIRST:
		rc = mdb_cursor_first(mc, key, data);
		break;
	case MDB_FIRST_DUP:
		mfunc = mdb_cursor_first;
	mmove:
		/* Shared tail for FIRST_DUP/LAST_DUP: validate position,
		 * then apply mfunc to the dup sub-cursor.
		 */
		if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) {
			rc = EINVAL;
			break;
		}
		if (mc->mc_xcursor == NULL) {
			rc = MDB_INCOMPATIBLE;
			break;
		}
		if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) {
			mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]);
			rc = MDB_NOTFOUND;
			break;
		}
		{
			MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
			if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
				/* single data item: first dup == last dup == the item */
				MDB_GET_KEY(leaf, key);
				rc = mdb_node_read(mc, leaf, data);
				break;
			}
		}
		if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) {
			rc = EINVAL;
			break;
		}
		rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL);
		break;
	case MDB_LAST:
		rc = mdb_cursor_last(mc, key, data);
		break;
	case MDB_LAST_DUP:
		mfunc = mdb_cursor_last;
		goto mmove;
	default:
		DPRINTF(("unhandled/unimplemented cursor operation %u", op));
		rc = EINVAL;
		break;
	}

	/* The just-deleted flag is consumed by any get operation */
	if (mc->mc_flags & C_DEL)
		mc->mc_flags ^= C_DEL;

	return rc;
}
7532 
7533 /** Touch all the pages in the cursor stack. Set mc_top.
7534  *	Makes sure all the pages are writable, before attempting a write operation.
7535  * @param[in] mc The cursor to operate on.
7536  */
7537 static int
mdb_cursor_touch(MDB_cursor * mc)7538 mdb_cursor_touch(MDB_cursor *mc)
7539 {
7540 	int rc = MDB_SUCCESS;
7541 
7542 	if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & (DB_DIRTY|DB_DUPDATA))) {
7543 		/* Touch DB record of named DB */
7544 		MDB_cursor mc2;
7545 		MDB_xcursor mcx;
7546 		if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi))
7547 			return MDB_BAD_DBI;
7548 		mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx);
7549 		rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY);
7550 		if (rc)
7551 			 return rc;
7552 		*mc->mc_dbflag |= DB_DIRTY;
7553 	}
7554 	mc->mc_top = 0;
7555 	if (mc->mc_snum) {
7556 		do {
7557 			rc = mdb_page_touch(mc);
7558 		} while (!rc && ++(mc->mc_top) < mc->mc_snum);
7559 		mc->mc_top = mc->mc_snum-1;
7560 	}
7561 	return rc;
7562 }
7563 
7564 /** Do not spill pages to disk if txn is getting full, may fail instead */
7565 #define MDB_NOSPILL	0x8000
7566 
/** Store by cursor: insert or overwrite the item at/near the cursor.
 * Handles plain items, overflow (big) data, and the DUPSORT machinery
 * (inline sub-pages that may be converted to sub-DBs as they grow).
 * @param[in] mc Cursor to operate on.
 * @param[in] key The key to store.
 * @param[in,out] data The data to store; for MDB_MULTIPLE an array of
 *  two MDB_vals (data[1].mv_size carries the item count in/out).
 * @param[in] flags Write option flags (MDB_CURRENT, MDB_NOOVERWRITE, ...).
 * @return 0 on success, non-zero (errno or MDB_* code) on failure.
 */
int
mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data,
    unsigned int flags)
{
	MDB_env		*env;
	MDB_node	*leaf = NULL;
	MDB_page	*fp, *mp, *sub_root = NULL;
	uint16_t	fp_flags;
	MDB_val		xdata, *rdata, dkey, olddata;
	MDB_db dummy;
	int do_sub = 0, insert_key, insert_data;
	unsigned int mcount = 0, dcount = 0, nospill;
	size_t nsize;
	int rc, rc2;
	unsigned int nflags;
	DKBUF;

	if (mc == NULL || key == NULL)
		return EINVAL;

	env = mc->mc_txn->mt_env;

	/* Check this first so counter will always be zero on any
	 * early failures.
	 */
	if (flags & MDB_MULTIPLE) {
		dcount = data[1].mv_size;
		data[1].mv_size = 0;
		if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED))
			return MDB_INCOMPATIBLE;
	}

	nospill = flags & MDB_NOSPILL;
	flags &= ~MDB_NOSPILL;

	if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
		return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;

	/* unsigned wrap: rejects both size 0 and size > max key size */
	if (key->mv_size-1 >= ENV_MAXKEY(env))
		return MDB_BAD_VALSIZE;

#if SIZE_MAX > MAXDATASIZE
	if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE))
		return MDB_BAD_VALSIZE;
#else
	if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env))
		return MDB_BAD_VALSIZE;
#endif

	DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u",
		DDBI(mc), DKEY(key), key ? key->mv_size : 0, data->mv_size));

	/* dkey.mv_size != 0 later means "single item converted to dup page,
	 * original data backed up in dkey and still to be re-inserted".
	 */
	dkey.mv_size = 0;

	if (flags & MDB_CURRENT) {
		if (!(mc->mc_flags & C_INITIALIZED))
			return EINVAL;
		rc = MDB_SUCCESS;
	} else if (mc->mc_db->md_root == P_INVALID) {
		/* new database, cursor has nothing to point to */
		mc->mc_snum = 0;
		mc->mc_top = 0;
		mc->mc_flags &= ~C_INITIALIZED;
		rc = MDB_NO_ROOT;
	} else {
		int exact = 0;
		MDB_val d2;
		if (flags & MDB_APPEND) {
			/* fast path: verify key sorts after the current last key */
			MDB_val k2;
			rc = mdb_cursor_last(mc, &k2, &d2);
			if (rc == 0) {
				rc = mc->mc_dbx->md_cmp(key, &k2);
				if (rc > 0) {
					rc = MDB_NOTFOUND;
					mc->mc_ki[mc->mc_top]++;
				} else {
					/* new key is <= last key */
					rc = MDB_KEYEXIST;
				}
			}
		} else {
			rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact);
		}
		if ((flags & MDB_NOOVERWRITE) && rc == 0) {
			DPRINTF(("duplicate key [%s]", DKEY(key)));
			*data = d2;
			return MDB_KEYEXIST;
		}
		if (rc && rc != MDB_NOTFOUND)
			return rc;
	}

	if (mc->mc_flags & C_DEL)
		mc->mc_flags ^= C_DEL;

	/* Cursor is positioned, check for room in the dirty list */
	if (!nospill) {
		if (flags & MDB_MULTIPLE) {
			rdata = &xdata;
			xdata.mv_size = data->mv_size * dcount;
		} else {
			rdata = data;
		}
		if ((rc2 = mdb_page_spill(mc, key, rdata)))
			return rc2;
	}

	if (rc == MDB_NO_ROOT) {
		MDB_page *np;
		/* new database, write a root leaf page */
		DPUTS("allocating new root leaf page");
		if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) {
			return rc2;
		}
		mdb_cursor_push(mc, np);
		mc->mc_db->md_root = np->mp_pgno;
		mc->mc_db->md_depth++;
		*mc->mc_dbflag |= DB_DIRTY;
		if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED))
			== MDB_DUPFIXED)
			np->mp_flags |= P_LEAF2;
		mc->mc_flags |= C_INITIALIZED;
	} else {
		/* make sure all cursor pages are writable */
		rc2 = mdb_cursor_touch(mc);
		if (rc2)
			return rc2;
	}

	/* rc is 0 (key found) or MDB_NOTFOUND/MDB_NO_ROOT (key is new) */
	insert_key = insert_data = rc;
	if (insert_key) {
		/* The key does not exist */
		DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top]));
		if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
			LEAFSIZE(key, data) > env->me_nodemax)
		{
			/* Too big for a node, insert in sub-DB.  Set up an empty
			 * "old sub-page" for prep_subDB to expand to a full page.
			 */
			fp_flags = P_LEAF|P_DIRTY;
			fp = env->me_pbuf;
			fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */
			fp->mp_lower = fp->mp_upper = (PAGEHDRSZ-PAGEBASE);
			olddata.mv_size = PAGEHDRSZ;
			goto prep_subDB;
		}
	} else {
		/* there's only a key anyway, so this is a no-op */
		if (IS_LEAF2(mc->mc_pg[mc->mc_top])) {
			char *ptr;
			unsigned int ksize = mc->mc_db->md_pad;
			if (key->mv_size != ksize)
				return MDB_BAD_VALSIZE;
			ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize);
			memcpy(ptr, key->mv_data, ksize);
fix_parent:
			/* if overwriting slot 0 of leaf, need to
			 * update branch key if there is a parent page
			 */
			if (mc->mc_top && !mc->mc_ki[mc->mc_top]) {
				unsigned short dtop = 1;
				mc->mc_top--;
				/* slot 0 is always an empty key, find real slot */
				while (mc->mc_top && !mc->mc_ki[mc->mc_top]) {
					mc->mc_top--;
					dtop++;
				}
				if (mc->mc_ki[mc->mc_top])
					rc2 = mdb_update_key(mc, key);
				else
					rc2 = MDB_SUCCESS;
				mc->mc_top += dtop;
				if (rc2)
					return rc2;
			}
			return MDB_SUCCESS;
		}

more:
		/* overwrite path; also the MDB_MULTIPLE re-entry point */
		leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
		olddata.mv_size = NODEDSZ(leaf);
		olddata.mv_data = NODEDATA(leaf);

		/* DB has dups? */
		if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) {
			/* Prepare (sub-)page/sub-DB to accept the new item,
			 * if needed.  fp: old sub-page or a header faking
			 * it.  mp: new (sub-)page.  offset: growth in page
			 * size.  xdata: node data with new page or DB.
			 */
			unsigned	i, offset = 0;
			mp = fp = xdata.mv_data = env->me_pbuf;
			mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno;

			/* Was a single item before, must convert now */
			if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
				MDB_cmp_func *dcmp;
				/* Just overwrite the current item */
				if (flags == MDB_CURRENT)
					goto current;
				dcmp = mc->mc_dbx->md_dcmp;
				if (NEED_CMP_CLONG(dcmp, olddata.mv_size))
					dcmp = mdb_cmp_clong;
				/* does data match? */
				if (!dcmp(data, &olddata)) {
					if (flags & (MDB_NODUPDATA|MDB_APPENDDUP))
						return MDB_KEYEXIST;
					/* overwrite it */
					goto current;
				}

				/* Back up original data item */
				dkey.mv_size = olddata.mv_size;
				dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size);

				/* Make sub-page header for the dup items, with dummy body */
				fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP;
				fp->mp_lower = (PAGEHDRSZ-PAGEBASE);
				xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size;
				if (mc->mc_db->md_flags & MDB_DUPFIXED) {
					fp->mp_flags |= P_LEAF2;
					fp->mp_pad = data->mv_size;
					xdata.mv_size += 2 * data->mv_size;	/* leave space for 2 more */
				} else {
					xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) +
						(dkey.mv_size & 1) + (data->mv_size & 1);
				}
				fp->mp_upper = xdata.mv_size - PAGEBASE;
				olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */
			} else if (leaf->mn_flags & F_SUBDATA) {
				/* Data is on sub-DB, just store it */
				flags |= F_DUPDATA|F_SUBDATA;
				goto put_sub;
			} else {
				/* Data is on sub-page */
				fp = olddata.mv_data;
				switch (flags) {
				default:
					if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
						offset = EVEN(NODESIZE + sizeof(indx_t) +
							data->mv_size);
						break;
					}
					offset = fp->mp_pad;
					if (SIZELEFT(fp) < offset) {
						offset *= 4; /* space for 4 more */
						break;
					}
					/* FALLTHRU */ /* Big enough MDB_DUPFIXED sub-page */
				case MDB_CURRENT:
					fp->mp_flags |= P_DIRTY;
					COPY_PGNO(fp->mp_pgno, mp->mp_pgno);
					mc->mc_xcursor->mx_cursor.mc_pg[0] = fp;
					flags |= F_DUPDATA;
					goto put_sub;
				}
				xdata.mv_size = olddata.mv_size + offset;
			}

			fp_flags = fp->mp_flags;
			if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) {
					/* Too big for a sub-page, convert to sub-DB */
					fp_flags &= ~P_SUBP;
prep_subDB:
					/* Build an MDB_db record (dummy) for the new sub-DB */
					if (mc->mc_db->md_flags & MDB_DUPFIXED) {
						fp_flags |= P_LEAF2;
						dummy.md_pad = fp->mp_pad;
						dummy.md_flags = MDB_DUPFIXED;
						if (mc->mc_db->md_flags & MDB_INTEGERDUP)
							dummy.md_flags |= MDB_INTEGERKEY;
					} else {
						dummy.md_pad = 0;
						dummy.md_flags = 0;
					}
					dummy.md_depth = 1;
					dummy.md_branch_pages = 0;
					dummy.md_leaf_pages = 1;
					dummy.md_overflow_pages = 0;
					dummy.md_entries = NUMKEYS(fp);
					xdata.mv_size = sizeof(MDB_db);
					xdata.mv_data = &dummy;
					if ((rc = mdb_page_alloc(mc, 1, &mp)))
						return rc;
					offset = env->me_psize - olddata.mv_size;
					flags |= F_DUPDATA|F_SUBDATA;
					dummy.md_root = mp->mp_pgno;
					sub_root = mp;
			}
			if (mp != fp) {
				/* Copy the old sub-page contents into the grown copy */
				mp->mp_flags = fp_flags | P_DIRTY;
				mp->mp_pad   = fp->mp_pad;
				mp->mp_lower = fp->mp_lower;
				mp->mp_upper = fp->mp_upper + offset;
				if (fp_flags & P_LEAF2) {
					memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad);
				} else {
					memcpy((char *)mp + mp->mp_upper + PAGEBASE, (char *)fp + fp->mp_upper + PAGEBASE,
						olddata.mv_size - fp->mp_upper - PAGEBASE);
					memcpy((char *)(&mp->mp_ptrs), (char *)(&fp->mp_ptrs), NUMKEYS(fp) * sizeof(mp->mp_ptrs[0]));
					for (i=0; i<NUMKEYS(fp); i++)
						mp->mp_ptrs[i] += offset;
				}
			}

			rdata = &xdata;
			flags |= F_DUPDATA;
			do_sub = 1;
			if (!insert_key)
				mdb_node_del(mc, 0);
			goto new_sub;
		}
current:
		/* LMDB passes F_SUBDATA in 'flags' to write a DB record */
		if ((leaf->mn_flags ^ flags) & F_SUBDATA)
			return MDB_INCOMPATIBLE;
		/* overflow page overwrites need special handling */
		if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
			MDB_page *omp;
			pgno_t pg;
			int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize);

			memcpy(&pg, olddata.mv_data, sizeof(pg));
			if ((rc2 = mdb_page_get(mc, pg, &omp, &level)) != 0)
				return rc2;
			ovpages = omp->mp_pages;

			/* Is the ov page large enough? */
			if (ovpages >= dpages) {
			  if (!(omp->mp_flags & P_DIRTY) &&
				  (level || (env->me_flags & MDB_WRITEMAP)))
			  {
				rc = mdb_page_unspill(mc->mc_txn, omp, &omp);
				if (rc)
					return rc;
				level = 0;		/* dirty in this txn or clean */
			  }
			  /* Is it dirty? */
			  if (omp->mp_flags & P_DIRTY) {
				/* yes, overwrite it. Note in this case we don't
				 * bother to try shrinking the page if the new data
				 * is smaller than the overflow threshold.
				 */
				if (level > 1) {
					/* It is writable only in a parent txn */
					size_t sz = (size_t) env->me_psize * ovpages, off;
					MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages);
					MDB_ID2 id2;
					if (!np)
						return ENOMEM;
					id2.mid = pg;
					id2.mptr = np;
					/* Note - this page is already counted in parent's dirty_room */
					rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2);
					mdb_cassert(mc, rc2 == 0);
					/* Currently we make the page look as with put() in the
					 * parent txn, in case the user peeks at MDB_RESERVEd
					 * or unused parts. Some users treat ovpages specially.
					 */
					if (!(flags & MDB_RESERVE)) {
						/* Skip the part where LMDB will put *data.
						 * Copy end of page, adjusting alignment so
						 * compiler may copy words instead of bytes.
						 */
						off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t);
						memcpy((size_t *)((char *)np + off),
							(size_t *)((char *)omp + off), sz - off);
						sz = PAGEHDRSZ;
					}
					memcpy(np, omp, sz); /* Copy beginning of page */
					omp = np;
				}
				SETDSZ(leaf, data->mv_size);
				if (F_ISSET(flags, MDB_RESERVE))
					data->mv_data = METADATA(omp);
				else
					memcpy(METADATA(omp), data->mv_data, data->mv_size);
				return MDB_SUCCESS;
			  }
			}
			if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
				return rc2;
		} else if (data->mv_size == olddata.mv_size) {
			/* same size, just replace it. Note that we could
			 * also reuse this node if the new data is smaller,
			 * but instead we opt to shrink the node in that case.
			 */
			if (F_ISSET(flags, MDB_RESERVE))
				data->mv_data = olddata.mv_data;
			else if (!(mc->mc_flags & C_SUB))
				memcpy(olddata.mv_data, data->mv_data, data->mv_size);
			else {
				memcpy(NODEKEY(leaf), key->mv_data, key->mv_size);
				goto fix_parent;
			}
			return MDB_SUCCESS;
		}
		mdb_node_del(mc, 0);
	}

	rdata = data;

new_sub:
	/* Add the (possibly replacement) node, splitting the page if full */
	nflags = flags & NODE_ADD_FLAGS;
	nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata);
	if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) {
		if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA )
			nflags &= ~MDB_APPEND; /* sub-page may need room to grow */
		if (!insert_key)
			nflags |= MDB_SPLIT_REPLACE;
		rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags);
	} else {
		/* There is room already in this leaf page. */
		rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags);
		if (rc == 0) {
			/* Adjust other cursors pointing to mp */
			MDB_cursor *m2, *m3;
			MDB_dbi dbi = mc->mc_dbi;
			unsigned i = mc->mc_top;
			MDB_page *mp = mc->mc_pg[i];

			for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
				if (mc->mc_flags & C_SUB)
					m3 = &m2->mc_xcursor->mx_cursor;
				else
					m3 = m2;
				if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) continue;
				if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) {
					m3->mc_ki[i]++;
				}
				XCURSOR_REFRESH(m3, i, mp);
			}
		}
	}

	if (rc == MDB_SUCCESS) {
		/* Now store the actual data in the child DB. Note that we're
		 * storing the user data in the keys field, so there are strict
		 * size limits on dupdata. The actual data fields of the child
		 * DB are all zero size.
		 */
		if (do_sub) {
			int xflags, new_dupdata;
			mdb_size_t ecount;
put_sub:
			xdata.mv_size = 0;
			xdata.mv_data = "";
			leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
			if ((flags & (MDB_CURRENT|MDB_APPENDDUP)) == MDB_CURRENT) {
				xflags = MDB_CURRENT|MDB_NOSPILL;
			} else {
				mdb_xcursor_init1(mc, leaf);
				xflags = (flags & MDB_NODUPDATA) ?
					MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL;
			}
			if (sub_root)
				mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root;
			new_dupdata = (int)dkey.mv_size;
			/* converted, write the original data first */
			if (dkey.mv_size) {
				rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags);
				if (rc)
					goto bad_sub;
				/* we've done our job */
				dkey.mv_size = 0;
			}
			if (!(leaf->mn_flags & F_SUBDATA) || sub_root) {
				/* Adjust other cursors pointing to mp */
				MDB_cursor *m2;
				MDB_xcursor *mx = mc->mc_xcursor;
				unsigned i = mc->mc_top;
				MDB_page *mp = mc->mc_pg[i];

				for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
					if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
					if (!(m2->mc_flags & C_INITIALIZED)) continue;
					if (m2->mc_pg[i] == mp) {
						if (m2->mc_ki[i] == mc->mc_ki[i]) {
							mdb_xcursor_init2(m2, mx, new_dupdata);
						} else if (!insert_key) {
							XCURSOR_REFRESH(m2, i, mp);
						}
					}
				}
			}
			ecount = mc->mc_xcursor->mx_db.md_entries;
			if (flags & MDB_APPENDDUP)
				xflags |= MDB_APPEND;
			rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags);
			if (flags & F_SUBDATA) {
				void *db = NODEDATA(leaf);
				memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
			}
			insert_data = mc->mc_xcursor->mx_db.md_entries - ecount;
		}
		/* Increment count unless we just replaced an existing item. */
		if (insert_data)
			mc->mc_db->md_entries++;
		if (insert_key) {
			/* Invalidate txn if we created an empty sub-DB */
			if (rc)
				goto bad_sub;
			/* If we succeeded and the key didn't exist before,
			 * make sure the cursor is marked valid.
			 */
			mc->mc_flags |= C_INITIALIZED;
		}
		if (flags & MDB_MULTIPLE) {
			if (!rc) {
				mcount++;
				/* let caller know how many succeeded, if any */
				data[1].mv_size = mcount;
				if (mcount < dcount) {
					data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size;
					insert_key = insert_data = 0;
					goto more;
				}
			}
		}
		return rc;
bad_sub:
		if (rc == MDB_KEYEXIST)	/* should not happen, we deleted that item */
			rc = MDB_PROBLEM;
	}
	mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
8093 
/** Delete the item at the cursor's current position.
 * Handles plain items, dup items (via the sub-cursor), whole sub-DBs
 * (MDB_NODUPDATA), and freeing of overflow pages.
 * @param[in] mc Cursor to operate on.
 * @param[in] flags MDB_NODUPDATA to delete all dups of the current key.
 * @return 0 on success, non-zero (errno or MDB_* code) on failure.
 */
int
mdb_cursor_del(MDB_cursor *mc, unsigned int flags)
{
	MDB_node	*leaf;
	MDB_page	*mp;
	int rc;

	if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
		return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;

	if (!(mc->mc_flags & C_INITIALIZED))
		return EINVAL;

	if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top]))
		return MDB_NOTFOUND;

	/* MDB_NOSPILL is set on internal recursive calls; spill only once */
	if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL)))
		return rc;

	/* make sure all cursor pages are writable */
	rc = mdb_cursor_touch(mc);
	if (rc)
		return rc;

	mp = mc->mc_pg[mc->mc_top];
	if (!IS_LEAF(mp))
		return MDB_CORRUPTED;
	if (IS_LEAF2(mp))
		goto del_key;
	leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);

	if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
		if (flags & MDB_NODUPDATA) {
			/* mdb_cursor_del0() will subtract the final entry */
			mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1;
			mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED;
		} else {
			if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) {
				/* sub-page: point the sub-cursor at the node body */
				mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf);
			}
			rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL);
			if (rc)
				return rc;
			/* If sub-DB still has entries, we're done */
			if (mc->mc_xcursor->mx_db.md_entries) {
				if (leaf->mn_flags & F_SUBDATA) {
					/* update subDB info */
					void *db = NODEDATA(leaf);
					memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
				} else {
					MDB_cursor *m2;
					/* shrink fake page */
					mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]);
					leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
					mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf);
					/* fix other sub-DB cursors pointed at fake pages on this page */
					for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
						if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
						if (!(m2->mc_flags & C_INITIALIZED)) continue;
						if (m2->mc_pg[mc->mc_top] == mp) {
							XCURSOR_REFRESH(m2, mc->mc_top, mp);
						}
					}
				}
				mc->mc_db->md_entries--;
				return rc;
			} else {
				mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED;
			}
			/* otherwise fall thru and delete the sub-DB */
		}

		if (leaf->mn_flags & F_SUBDATA) {
			/* add all the child DB's pages to the free list */
			rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
			if (rc)
				goto fail;
		}
	}
	/* LMDB passes F_SUBDATA in 'flags' to delete a DB record */
	else if ((leaf->mn_flags ^ flags) & F_SUBDATA) {
		rc = MDB_INCOMPATIBLE;
		goto fail;
	}

	/* add overflow pages to free list */
	if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
		MDB_page *omp;
		pgno_t pg;

		memcpy(&pg, NODEDATA(leaf), sizeof(pg));
		if ((rc = mdb_page_get(mc, pg, &omp, NULL)) ||
			(rc = mdb_ovpage_free(mc, omp)))
			goto fail;
	}

del_key:
	return mdb_cursor_del0(mc);

fail:
	mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
8196 
8197 /** Allocate and initialize new pages for a database.
8198  * Set #MDB_TXN_ERROR on failure.
8199  * @param[in] mc a cursor on the database being added to.
8200  * @param[in] flags flags defining what type of page is being allocated.
8201  * @param[in] num the number of pages to allocate. This is usually 1,
8202  * unless allocating overflow pages for a large record.
8203  * @param[out] mp Address of a page, or NULL on failure.
8204  * @return 0 on success, non-zero on failure.
8205  */
8206 static int
mdb_page_new(MDB_cursor * mc,uint32_t flags,int num,MDB_page ** mp)8207 mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp)
8208 {
8209 	MDB_page	*np;
8210 	int rc;
8211 
8212 	if ((rc = mdb_page_alloc(mc, num, &np)))
8213 		return rc;
8214 	DPRINTF(("allocated new mpage %"Yu", page size %u",
8215 	    np->mp_pgno, mc->mc_txn->mt_env->me_psize));
8216 	np->mp_flags = flags | P_DIRTY;
8217 	np->mp_lower = (PAGEHDRSZ-PAGEBASE);
8218 	np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE;
8219 
8220 	if (IS_BRANCH(np))
8221 		mc->mc_db->md_branch_pages++;
8222 	else if (IS_LEAF(np))
8223 		mc->mc_db->md_leaf_pages++;
8224 	else if (IS_OVERFLOW(np)) {
8225 		mc->mc_db->md_overflow_pages += num;
8226 		np->mp_pages = num;
8227 	}
8228 	*mp = np;
8229 
8230 	return 0;
8231 }
8232 
8233 /** Calculate the size of a leaf node.
8234  * The size depends on the environment's page size; if a data item
8235  * is too large it will be put onto an overflow page and the node
8236  * size will only include the key and not the data. Sizes are always
8237  * rounded up to an even number of bytes, to guarantee 2-byte alignment
8238  * of the #MDB_node headers.
8239  * @param[in] env The environment handle.
8240  * @param[in] key The key for the node.
8241  * @param[in] data The data for the node.
8242  * @return The number of bytes needed to store the node.
8243  */
8244 static size_t
mdb_leaf_size(MDB_env * env,MDB_val * key,MDB_val * data)8245 mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data)
8246 {
8247 	size_t		 sz;
8248 
8249 	sz = LEAFSIZE(key, data);
8250 	if (sz > env->me_nodemax) {
8251 		/* put on overflow page */
8252 		sz -= data->mv_size - sizeof(pgno_t);
8253 	}
8254 
8255 	return EVEN(sz + sizeof(indx_t));
8256 }
8257 
8258 /** Calculate the size of a branch node.
8259  * The size should depend on the environment's page size but since
8260  * we currently don't support spilling large keys onto overflow
8261  * pages, it's simply the size of the #MDB_node header plus the
8262  * size of the key. Sizes are always rounded up to an even number
8263  * of bytes, to guarantee 2-byte alignment of the #MDB_node headers.
8264  * @param[in] env The environment handle.
8265  * @param[in] key The key for the node.
8266  * @return The number of bytes needed to store the node.
8267  */
8268 static size_t
mdb_branch_size(MDB_env * env,MDB_val * key)8269 mdb_branch_size(MDB_env *env, MDB_val *key)
8270 {
8271 	size_t		 sz;
8272 
8273 	sz = INDXSIZE(key);
8274 	if (sz > env->me_nodemax) {
8275 		/* put on overflow page */
8276 		/* not implemented */
8277 		/* sz -= key->size - sizeof(pgno_t); */
8278 	}
8279 
8280 	return sz + sizeof(indx_t);
8281 }
8282 
/** Add a node to the page pointed to by the cursor.
 * Set #MDB_TXN_ERROR on failure.
 * @param[in] mc The cursor for this operation.
 * @param[in] indx The index on the page where the new node should be added.
 * @param[in] key The key for the new node.
 * @param[in] data The data for the new node, if any.
 * @param[in] pgno The page number, if adding a branch node.
 * @param[in] flags Flags for the node.
 * @return 0 on success, non-zero on failure. Possible errors are:
 * <ul>
 *	<li>ENOMEM - failed to allocate overflow pages for the node.
 *	<li>MDB_PAGE_FULL - there is insufficient room in the page. This error
 *	should never happen since all callers already calculate the
 *	page's free space before calling this function.
 * </ul>
 */
static int
mdb_node_add(MDB_cursor *mc, indx_t indx,
    MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags)
{
	unsigned int	 i;
	size_t		 node_size = NODESIZE;
	ssize_t		 room;
	indx_t		 ofs;
	MDB_node	*node;
	MDB_page	*mp = mc->mc_pg[mc->mc_top];
	MDB_page	*ofp = NULL;		/* overflow page */
	void		*ndata;
	DKBUF;

	mdb_cassert(mc, mp->mp_upper >= mp->mp_lower);

	DPRINTF(("add to %s %spage %"Yu" index %i, data size %"Z"u key size %"Z"u [%s]",
	    IS_LEAF(mp) ? "leaf" : "branch",
		IS_SUBP(mp) ? "sub-" : "",
		mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0,
		key ? key->mv_size : 0, key ? DKEY(key) : "null"));

	if (IS_LEAF2(mp)) {
		/* LEAF2 pages hold fixed-size keys only, no node headers.
		 * Move higher keys up one slot. */
		int ksize = mc->mc_db->md_pad, dif;
		char *ptr = LEAF2KEY(mp, indx, ksize);
		dif = NUMKEYS(mp) - indx;
		if (dif > 0)
			memmove(ptr+ksize, ptr, dif*ksize);
		/* insert new key */
		memcpy(ptr, key->mv_data, ksize);

		/* Just using these for counting */
		mp->mp_lower += sizeof(indx_t);
		mp->mp_upper -= ksize - sizeof(indx_t);
		return MDB_SUCCESS;
	}

	/* Room left after reserving one new pointer slot */
	room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t);
	if (key != NULL)
		node_size += key->mv_size;
	if (IS_LEAF(mp)) {
		mdb_cassert(mc, key && data);
		if (F_ISSET(flags, F_BIGDATA)) {
			/* Data already on overflow page. */
			node_size += sizeof(pgno_t);
		} else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) {
			int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize);
			int rc;
			/* Put data on overflow page. */
			DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page",
			    data->mv_size, node_size+data->mv_size));
			node_size = EVEN(node_size + sizeof(pgno_t));
			if ((ssize_t)node_size > room)
				goto full;
			if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp)))
				return rc;
			DPRINTF(("allocated overflow page %"Yu, ofp->mp_pgno));
			flags |= F_BIGDATA;
			goto update;
		} else {
			node_size += data->mv_size;
		}
	}
	node_size = EVEN(node_size);
	if ((ssize_t)node_size > room)
		goto full;

update:
	/* Move higher pointers up one slot. */
	for (i = NUMKEYS(mp); i > indx; i--)
		mp->mp_ptrs[i] = mp->mp_ptrs[i - 1];

	/* Adjust free space offsets. */
	ofs = mp->mp_upper - node_size;
	mdb_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t));
	mp->mp_ptrs[indx] = ofs;
	mp->mp_upper = ofs;
	mp->mp_lower += sizeof(indx_t);

	/* Write the node data. */
	node = NODEPTR(mp, indx);
	node->mn_ksize = (key == NULL) ? 0 : key->mv_size;
	node->mn_flags = flags;
	if (IS_LEAF(mp))
		SETDSZ(node,data->mv_size);
	else
		SETPGNO(node,pgno);

	if (key)
		memcpy(NODEKEY(node), key->mv_data, key->mv_size);

	if (IS_LEAF(mp)) {
		ndata = NODEDATA(node);
		if (ofp == NULL) {
			if (F_ISSET(flags, F_BIGDATA))
				/* Inline payload is just the overflow page number */
				memcpy(ndata, data->mv_data, sizeof(pgno_t));
			else if (F_ISSET(flags, MDB_RESERVE))
				/* Caller will fill the reserved space later */
				data->mv_data = ndata;
			else
				memcpy(ndata, data->mv_data, data->mv_size);
		} else {
			/* Newly allocated overflow page: record its page number
			 * in the node, then write the data into the page itself. */
			memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t));
			ndata = METADATA(ofp);
			if (F_ISSET(flags, MDB_RESERVE))
				data->mv_data = ndata;
			else
				memcpy(ndata, data->mv_data, data->mv_size);
		}
	}

	return MDB_SUCCESS;

full:
	DPRINTF(("not enough room in page %"Yu", got %u ptrs",
		mdb_dbg_pgno(mp), NUMKEYS(mp)));
	DPRINTF(("upper-lower = %u - %u = %"Z"d", mp->mp_upper,mp->mp_lower,room));
	DPRINTF(("node size = %"Z"u", node_size));
	mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
	return MDB_PAGE_FULL;
}
8420 
/** Delete the specified node from a page.
 * Closes the gap in both the pointer array and the node data area,
 * adjusting the offsets of the nodes that shift.
 * @param[in] mc Cursor pointing to the node to delete.
 * @param[in] ksize The size of a node. Only used if the page is
 * part of a #MDB_DUPFIXED database.
 */
static void
mdb_node_del(MDB_cursor *mc, int ksize)
{
	MDB_page *mp = mc->mc_pg[mc->mc_top];
	indx_t	indx = mc->mc_ki[mc->mc_top];
	unsigned int	 sz;
	indx_t		 i, j, numkeys, ptr;
	MDB_node	*node;
	char		*base;

	DPRINTF(("delete node %u on %s page %"Yu, indx,
	    IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp)));
	numkeys = NUMKEYS(mp);
	mdb_cassert(mc, indx < numkeys);

	if (IS_LEAF2(mp)) {
		/* Fixed-size keys, no node headers: just close the gap */
		int x = numkeys - 1 - indx;
		base = LEAF2KEY(mp, indx, ksize);
		if (x)
			memmove(base, base + ksize, x * ksize);
		mp->mp_lower -= sizeof(indx_t);
		mp->mp_upper += ksize - sizeof(indx_t);
		return;
	}

	/* Total on-page size of the node being removed */
	node = NODEPTR(mp, indx);
	sz = NODESIZE + node->mn_ksize;
	if (IS_LEAF(mp)) {
		if (F_ISSET(node->mn_flags, F_BIGDATA))
			sz += sizeof(pgno_t);
		else
			sz += NODEDSZ(node);
	}
	sz = EVEN(sz);

	/* Compact the pointer array; nodes stored below the deleted one
	 * will move up by sz, so bump their offsets accordingly. */
	ptr = mp->mp_ptrs[indx];
	for (i = j = 0; i < numkeys; i++) {
		if (i != indx) {
			mp->mp_ptrs[j] = mp->mp_ptrs[i];
			if (mp->mp_ptrs[i] < ptr)
				mp->mp_ptrs[j] += sz;
			j++;
		}
	}

	/* Slide the node data region up over the deleted node */
	base = (char *)mp + mp->mp_upper + PAGEBASE;
	memmove(base + sz, base, ptr - mp->mp_upper);

	mp->mp_lower -= sizeof(indx_t);
	mp->mp_upper += sz;
}
8477 
/** Compact the main page after deleting a node on a subpage.
 * The free space inside the subpage is squeezed out and the
 * enclosing node is shrunk to the packed size.
 * @param[in] mp The main page to operate on.
 * @param[in] indx The index of the subpage on the main page.
 */
static void
mdb_node_shrink(MDB_page *mp, indx_t indx)
{
	MDB_node *node;
	MDB_page *sp, *xp;
	char *base;
	indx_t delta, nsize, len, ptr;
	int i;

	node = NODEPTR(mp, indx);
	sp = (MDB_page *)NODEDATA(node);
	delta = SIZELEFT(sp);		/* unused bytes inside the subpage */
	nsize = NODEDSZ(node) - delta;	/* new (packed) subpage size */

	/* Prepare to shift upward, set len = length(subpage part to shift) */
	if (IS_LEAF2(sp)) {
		len = nsize;
		if (nsize & 1)
			return;		/* do not make the node uneven-sized */
	} else {
		/* Rebase the subpage's node offsets for its new location */
		xp = (MDB_page *)((char *)sp + delta); /* destination subpage */
		for (i = NUMKEYS(sp); --i >= 0; )
			xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta;
		len = PAGEHDRSZ;
	}
	sp->mp_upper = sp->mp_lower;
	COPY_PGNO(sp->mp_pgno, mp->mp_pgno);
	SETDSZ(node, nsize);

	/* Shift <lower nodes...initial part of subpage> upward */
	base = (char *)mp + mp->mp_upper + PAGEBASE;
	memmove(base + delta, base, (char *)sp + len - base);

	/* Fix up offsets of every node that moved (those at or below ptr) */
	ptr = mp->mp_ptrs[indx];
	for (i = NUMKEYS(mp); --i >= 0; ) {
		if (mp->mp_ptrs[i] <= ptr)
			mp->mp_ptrs[i] += delta;
	}
	mp->mp_upper += delta;
}
8522 
8523 /** Initial setup of a sorted-dups cursor.
8524  * Sorted duplicates are implemented as a sub-database for the given key.
8525  * The duplicate data items are actually keys of the sub-database.
8526  * Operations on the duplicate data items are performed using a sub-cursor
8527  * initialized when the sub-database is first accessed. This function does
8528  * the preliminary setup of the sub-cursor, filling in the fields that
8529  * depend only on the parent DB.
8530  * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized.
8531  */
8532 static void
mdb_xcursor_init0(MDB_cursor * mc)8533 mdb_xcursor_init0(MDB_cursor *mc)
8534 {
8535 	MDB_xcursor *mx = mc->mc_xcursor;
8536 
8537 	mx->mx_cursor.mc_xcursor = NULL;
8538 	mx->mx_cursor.mc_txn = mc->mc_txn;
8539 	mx->mx_cursor.mc_db = &mx->mx_db;
8540 	mx->mx_cursor.mc_dbx = &mx->mx_dbx;
8541 	mx->mx_cursor.mc_dbi = mc->mc_dbi;
8542 	mx->mx_cursor.mc_dbflag = &mx->mx_dbflag;
8543 	mx->mx_cursor.mc_snum = 0;
8544 	mx->mx_cursor.mc_top = 0;
8545 	MC_SET_OVPG(&mx->mx_cursor, NULL);
8546 	mx->mx_cursor.mc_flags = C_SUB | (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP));
8547 	mx->mx_dbx.md_name.mv_size = 0;
8548 	mx->mx_dbx.md_name.mv_data = NULL;
8549 	mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp;
8550 	mx->mx_dbx.md_dcmp = NULL;
8551 	mx->mx_dbx.md_rel = mc->mc_dbx->md_rel;
8552 }
8553 
/** Final setup of a sorted-dups cursor.
 *	Sets up the fields that depend on the data from the main cursor.
 * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized.
 * @param[in] node The data containing the #MDB_db record for the
 * sorted-dup database.
 */
static void
mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node)
{
	MDB_xcursor *mx = mc->mc_xcursor;

	/* Keep only the inherited flag bits; position state is reset below */
	mx->mx_cursor.mc_flags &= C_SUB|C_ORIG_RDONLY|C_WRITEMAP;
	if (node->mn_flags & F_SUBDATA) {
		/* Full sub-database: the node data is an MDB_db record */
		memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db));
		mx->mx_cursor.mc_pg[0] = 0;
		mx->mx_cursor.mc_snum = 0;
		mx->mx_cursor.mc_top = 0;
	} else {
		/* Inline sub-page: synthesize an MDB_db describing a
		 * one-leaf-page database rooted at the sub-page. */
		MDB_page *fp = NODEDATA(node);
		mx->mx_db.md_pad = 0;
		mx->mx_db.md_flags = 0;
		mx->mx_db.md_depth = 1;
		mx->mx_db.md_branch_pages = 0;
		mx->mx_db.md_leaf_pages = 1;
		mx->mx_db.md_overflow_pages = 0;
		mx->mx_db.md_entries = NUMKEYS(fp);
		COPY_PGNO(mx->mx_db.md_root, fp->mp_pgno);
		mx->mx_cursor.mc_snum = 1;
		mx->mx_cursor.mc_top = 0;
		mx->mx_cursor.mc_flags |= C_INITIALIZED;
		mx->mx_cursor.mc_pg[0] = fp;
		mx->mx_cursor.mc_ki[0] = 0;
		if (mc->mc_db->md_flags & MDB_DUPFIXED) {
			mx->mx_db.md_flags = MDB_DUPFIXED;
			mx->mx_db.md_pad = fp->mp_pad;
			if (mc->mc_db->md_flags & MDB_INTEGERDUP)
				mx->mx_db.md_flags |= MDB_INTEGERKEY;
		}
	}
	DPRINTF(("Sub-db -%u root page %"Yu, mx->mx_cursor.mc_dbi,
		mx->mx_db.md_root));
	mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA;
	/* Switch to the fast integer compare when applicable */
	if (NEED_CMP_CLONG(mx->mx_dbx.md_cmp, mx->mx_db.md_pad))
		mx->mx_dbx.md_cmp = mdb_cmp_clong;
}
8599 
8600 
/** Fixup a sorted-dups cursor due to underlying update.
 *	Sets up some fields that depend on the data from the main cursor.
 *	Almost the same as init1, but skips initialization steps if the
 *	xcursor had already been used.
 * @param[in] mc The main cursor whose sorted-dups cursor is to be fixed up.
 * @param[in] src_mx The xcursor of an up-to-date cursor.
 * @param[in] new_dupdata True if converting from a non-#F_DUPDATA item.
 */
static void
mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int new_dupdata)
{
	MDB_xcursor *mx = mc->mc_xcursor;

	if (new_dupdata) {
		/* Item just became dup data: (re)initialize position state */
		mx->mx_cursor.mc_snum = 1;
		mx->mx_cursor.mc_top = 0;
		mx->mx_cursor.mc_flags |= C_INITIALIZED;
		mx->mx_cursor.mc_ki[0] = 0;
		mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DUPDATA;
#if UINT_MAX < MDB_SIZE_MAX	/* matches mdb_xcursor_init1:NEED_CMP_CLONG() */
		mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp;
#endif
	} else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) {
		/* Never used and not newly converted: nothing to fix up */
		return;
	}
	/* Refresh the sub-DB record and root page from the source xcursor */
	mx->mx_db = src_mx->mx_db;
	mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0];
	DPRINTF(("Sub-db -%u root page %"Yu, mx->mx_cursor.mc_dbi,
		mx->mx_db.md_root));
}
8631 
8632 /** Initialize a cursor for a given transaction and database. */
8633 static void
mdb_cursor_init(MDB_cursor * mc,MDB_txn * txn,MDB_dbi dbi,MDB_xcursor * mx)8634 mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx)
8635 {
8636 	mc->mc_next = NULL;
8637 	mc->mc_backup = NULL;
8638 	mc->mc_dbi = dbi;
8639 	mc->mc_txn = txn;
8640 	mc->mc_db = &txn->mt_dbs[dbi];
8641 	mc->mc_dbx = &txn->mt_dbxs[dbi];
8642 	mc->mc_dbflag = &txn->mt_dbflags[dbi];
8643 	mc->mc_snum = 0;
8644 	mc->mc_top = 0;
8645 	mc->mc_pg[0] = 0;
8646 	mc->mc_ki[0] = 0;
8647 	MC_SET_OVPG(mc, NULL);
8648 	mc->mc_flags = txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP);
8649 	if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) {
8650 		mdb_tassert(txn, mx != NULL);
8651 		mc->mc_xcursor = mx;
8652 		mdb_xcursor_init0(mc);
8653 	} else {
8654 		mc->mc_xcursor = NULL;
8655 	}
8656 	if (*mc->mc_dbflag & DB_STALE) {
8657 		mdb_page_search(mc, NULL, MDB_PS_ROOTONLY);
8658 	}
8659 }
8660 
8661 int
mdb_cursor_open(MDB_txn * txn,MDB_dbi dbi,MDB_cursor ** ret)8662 mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret)
8663 {
8664 	MDB_cursor	*mc;
8665 	size_t size = sizeof(MDB_cursor);
8666 
8667 	if (!ret || !TXN_DBI_EXIST(txn, dbi, DB_VALID))
8668 		return EINVAL;
8669 
8670 	if (txn->mt_flags & MDB_TXN_BLOCKED)
8671 		return MDB_BAD_TXN;
8672 
8673 	if (dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
8674 		return EINVAL;
8675 
8676 	if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT)
8677 		size += sizeof(MDB_xcursor);
8678 
8679 	if ((mc = malloc(size)) != NULL) {
8680 		mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1));
8681 		if (txn->mt_cursors) {
8682 			mc->mc_next = txn->mt_cursors[dbi];
8683 			txn->mt_cursors[dbi] = mc;
8684 			mc->mc_flags |= C_UNTRACK;
8685 		}
8686 	} else {
8687 		return ENOMEM;
8688 	}
8689 
8690 	*ret = mc;
8691 
8692 	return MDB_SUCCESS;
8693 }
8694 
8695 int
mdb_cursor_renew(MDB_txn * txn,MDB_cursor * mc)8696 mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc)
8697 {
8698 	if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID))
8699 		return EINVAL;
8700 
8701 	if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors)
8702 		return EINVAL;
8703 
8704 	if (txn->mt_flags & MDB_TXN_BLOCKED)
8705 		return MDB_BAD_TXN;
8706 
8707 	mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor);
8708 	return MDB_SUCCESS;
8709 }
8710 
8711 /* Return the count of duplicate data items for the current key */
8712 int
mdb_cursor_count(MDB_cursor * mc,mdb_size_t * countp)8713 mdb_cursor_count(MDB_cursor *mc, mdb_size_t *countp)
8714 {
8715 	MDB_node	*leaf;
8716 
8717 	if (mc == NULL || countp == NULL)
8718 		return EINVAL;
8719 
8720 	if (mc->mc_xcursor == NULL)
8721 		return MDB_INCOMPATIBLE;
8722 
8723 	if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED)
8724 		return MDB_BAD_TXN;
8725 
8726 	if (!(mc->mc_flags & C_INITIALIZED))
8727 		return EINVAL;
8728 
8729 	if (!mc->mc_snum)
8730 		return MDB_NOTFOUND;
8731 
8732 	if (mc->mc_flags & C_EOF) {
8733 		if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top]))
8734 			return MDB_NOTFOUND;
8735 		mc->mc_flags ^= C_EOF;
8736 	}
8737 
8738 	leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
8739 	if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
8740 		*countp = 1;
8741 	} else {
8742 		if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))
8743 			return EINVAL;
8744 
8745 		*countp = mc->mc_xcursor->mx_db.md_entries;
8746 	}
8747 	return MDB_SUCCESS;
8748 }
8749 
8750 void
mdb_cursor_close(MDB_cursor * mc)8751 mdb_cursor_close(MDB_cursor *mc)
8752 {
8753 	if (mc) {
8754 		MDB_CURSOR_UNREF(mc, 0);
8755 	}
8756 	if (mc && !mc->mc_backup) {
8757 		/* Remove from txn, if tracked.
8758 		 * A read-only txn (!C_UNTRACK) may have been freed already,
8759 		 * so do not peek inside it.  Only write txns track cursors.
8760 		 */
8761 		if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) {
8762 			MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi];
8763 			while (*prev && *prev != mc) prev = &(*prev)->mc_next;
8764 			if (*prev == mc)
8765 				*prev = mc->mc_next;
8766 		}
8767 		free(mc);
8768 	}
8769 }
8770 
8771 MDB_txn *
mdb_cursor_txn(MDB_cursor * mc)8772 mdb_cursor_txn(MDB_cursor *mc)
8773 {
8774 	if (!mc) return NULL;
8775 	return mc->mc_txn;
8776 }
8777 
MDB_dbi
mdb_cursor_dbi(MDB_cursor *mc)
{
	/* Note: unlike mdb_cursor_txn(), there is no NULL check here;
	 * the caller must pass a valid cursor. */
	return mc->mc_dbi;
}
8783 
/** Replace the key for a branch node with a new key.
 * Set #MDB_TXN_ERROR on failure.
 * @param[in] mc Cursor pointing to the node to operate on.
 * @param[in] key The new key to use.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_update_key(MDB_cursor *mc, MDB_val *key)
{
	MDB_page		*mp;
	MDB_node		*node;
	char			*base;
	size_t			 len;
	int				 delta, ksize, oksize;
	indx_t			 ptr, i, numkeys, indx;
	DKBUF;

	indx = mc->mc_ki[mc->mc_top];
	mp = mc->mc_pg[mc->mc_top];
	node = NODEPTR(mp, indx);
	ptr = mp->mp_ptrs[indx];
#if MDB_DEBUG
	{
		MDB_val	k2;
		char kbuf2[DKBUF_MAXKEYSIZE*2+1];
		k2.mv_data = NODEKEY(node);
		k2.mv_size = node->mn_ksize;
		DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Yu,
			indx, ptr,
			mdb_dkey(&k2, kbuf2),
			DKEY(key),
			mp->mp_pgno));
	}
#endif

	/* Sizes must be 2-byte aligned. */
	ksize = EVEN(key->mv_size);
	oksize = EVEN(node->mn_ksize);
	delta = ksize - oksize;

	/* Shift node contents if EVEN(key length) changed. */
	if (delta) {
		if (delta > 0 && SIZELEFT(mp) < delta) {
			pgno_t pgno;
			/* not enough space left, do a delete and split */
			DPRINTF(("Not enough room, delta = %d, splitting...", delta));
			pgno = NODEPGNO(node);
			mdb_node_del(mc, 0);
			return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE);
		}

		/* Nodes stored at or below this one shift by delta */
		numkeys = NUMKEYS(mp);
		for (i = 0; i < numkeys; i++) {
			if (mp->mp_ptrs[i] <= ptr)
				mp->mp_ptrs[i] -= delta;
		}

		/* Move everything from mp_upper up to and including this
		 * node's header, making room for the resized key. */
		base = (char *)mp + mp->mp_upper + PAGEBASE;
		len = ptr - mp->mp_upper + NODESIZE;
		memmove(base - delta, base, len);
		mp->mp_upper -= delta;

		node = NODEPTR(mp, indx);
	}

	/* But even if no shift was needed, update ksize */
	if (node->mn_ksize != key->mv_size)
		node->mn_ksize = key->mv_size;

	if (key->mv_size)
		memcpy(NODEKEY(node), key->mv_data, key->mv_size);

	return MDB_SUCCESS;
}
8858 
8859 static void
8860 mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst);
8861 
/** Perform \b act while tracking temporary cursor \b mn.
 * The temporary cursor is linked onto the txn's cursor list for the
 * duration of \b act (so fixups such as mdb_rebalance can find it),
 * then unlinked again. A sub-cursor (C_SUB) cannot be linked directly:
 * it is wrapped in a dummy parent cursor whose xcursor points at it.
 */
#define WITH_CURSOR_TRACKING(mn, act) do { \
	MDB_cursor dummy, *tracked, **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \
	if ((mn).mc_flags & C_SUB) { \
		dummy.mc_flags =  C_INITIALIZED; \
		dummy.mc_xcursor = (MDB_xcursor *)&(mn);	\
		tracked = &dummy; \
	} else { \
		tracked = &(mn); \
	} \
	tracked->mc_next = *tp; \
	*tp = tracked; \
	{ act; } \
	*tp = tracked->mc_next; \
} while (0)
8877 
/** Move a node from csrc to cdst.
 * Also updates the parent separator keys on both sides and fixes up
 * any other cursors positioned on the affected pages.
 * @param[in] csrc Cursor pointing to the node to move.
 * @param[in] cdst Cursor pointing to the destination slot.
 * @param[in] fromleft Nonzero if the node is moving in from the left.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft)
{
	MDB_node		*srcnode;
	MDB_val		 key, data;
	pgno_t	srcpg;
	MDB_cursor mn;
	int			 rc;
	unsigned short flags;

	DKBUF;

	/* Mark src and dst as dirty. */
	if ((rc = mdb_page_touch(csrc)) ||
	    (rc = mdb_page_touch(cdst)))
		return rc;

	if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
		/* Fixed-size key page: the key is the entire node */
		key.mv_size = csrc->mc_db->md_pad;
		key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size);
		data.mv_size = 0;
		data.mv_data = NULL;
		srcpg = 0;
		flags = 0;
	} else {
		srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]);
		mdb_cassert(csrc, !((size_t)srcnode & 1));
		srcpg = NODEPGNO(srcnode);
		flags = srcnode->mn_flags;
		if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) {
			unsigned int snum = csrc->mc_snum;
			MDB_node *s2;
			/* must find the lowest key below src */
			rc = mdb_page_search_lowest(csrc);
			if (rc)
				return rc;
			if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
				key.mv_size = csrc->mc_db->md_pad;
				key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size);
			} else {
				s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0);
				key.mv_size = NODEKSZ(s2);
				key.mv_data = NODEKEY(s2);
			}
			/* restore the cursor to the original branch level */
			csrc->mc_snum = snum--;
			csrc->mc_top = snum;
		} else {
			key.mv_size = NODEKSZ(srcnode);
			key.mv_data = NODEKEY(srcnode);
		}
		data.mv_size = NODEDSZ(srcnode);
		data.mv_data = NODEDATA(srcnode);
	}
	mn.mc_xcursor = NULL;
	if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) {
		unsigned int snum = cdst->mc_snum;
		MDB_node *s2;
		MDB_val bkey;
		/* must find the lowest key below dst */
		mdb_cursor_copy(cdst, &mn);
		rc = mdb_page_search_lowest(&mn);
		if (rc)
			return rc;
		if (IS_LEAF2(mn.mc_pg[mn.mc_top])) {
			bkey.mv_size = mn.mc_db->md_pad;
			bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size);
		} else {
			s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0);
			bkey.mv_size = NODEKSZ(s2);
			bkey.mv_data = NODEKEY(s2);
		}
		/* pop mn back to the branch level and update its slot 0 key */
		mn.mc_snum = snum--;
		mn.mc_top = snum;
		mn.mc_ki[snum] = 0;
		rc = mdb_update_key(&mn, &bkey);
		if (rc)
			return rc;
	}

	DPRINTF(("moving %s node %u [%s] on page %"Yu" to node %u on page %"Yu,
	    IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch",
	    csrc->mc_ki[csrc->mc_top],
		DKEY(&key),
	    csrc->mc_pg[csrc->mc_top]->mp_pgno,
	    cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno));

	/* Add the node to the destination page.
	 */
	rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags);
	if (rc != MDB_SUCCESS)
		return rc;

	/* Delete the node from the source page.
	 */
	mdb_node_del(csrc, key.mv_size);

	{
		/* Adjust other cursors pointing to mp */
		MDB_cursor *m2, *m3;
		MDB_dbi dbi = csrc->mc_dbi;
		MDB_page *mpd, *mps;

		mps = csrc->mc_pg[csrc->mc_top];
		/* If we're adding on the left, bump others up */
		if (fromleft) {
			mpd = cdst->mc_pg[csrc->mc_top];
			for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
				if (csrc->mc_flags & C_SUB)
					m3 = &m2->mc_xcursor->mx_cursor;
				else
					m3 = m2;
				if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top)
					continue;
				/* Cursors on dst at/after the insert point shift right */
				if (m3 != cdst &&
					m3->mc_pg[csrc->mc_top] == mpd &&
					m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) {
					m3->mc_ki[csrc->mc_top]++;
				}
				/* Cursors on the moved node itself follow it to dst */
				if (m3 !=csrc &&
					m3->mc_pg[csrc->mc_top] == mps &&
					m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) {
					m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top];
					m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top];
					m3->mc_ki[csrc->mc_top-1]++;
				}
				if (IS_LEAF(mps))
					XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]);
			}
		} else
		/* Adding on the right, bump others down */
		{
			for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
				if (csrc->mc_flags & C_SUB)
					m3 = &m2->mc_xcursor->mx_cursor;
				else
					m3 = m2;
				if (m3 == csrc) continue;
				if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top)
					continue;
				if (m3->mc_pg[csrc->mc_top] == mps) {
					if (!m3->mc_ki[csrc->mc_top]) {
						/* Was on the moved node: follow it to dst */
						m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top];
						m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top];
						m3->mc_ki[csrc->mc_top-1]--;
					} else {
						m3->mc_ki[csrc->mc_top]--;
					}
					if (IS_LEAF(mps))
						XCURSOR_REFRESH(m3, csrc->mc_top, m3->mc_pg[csrc->mc_top]);
				}
			}
		}
	}

	/* Update the parent separators.
	 */
	if (csrc->mc_ki[csrc->mc_top] == 0) {
		if (csrc->mc_ki[csrc->mc_top-1] != 0) {
			if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
				key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size);
			} else {
				srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0);
				key.mv_size = NODEKSZ(srcnode);
				key.mv_data = NODEKEY(srcnode);
			}
			DPRINTF(("update separator for source page %"Yu" to [%s]",
				csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key)));
			mdb_cursor_copy(csrc, &mn);
			mn.mc_snum--;
			mn.mc_top--;
			/* We want mdb_rebalance to find mn when doing fixups */
			WITH_CURSOR_TRACKING(mn,
				rc = mdb_update_key(&mn, &key));
			if (rc)
				return rc;
		}
		if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) {
			/* First key of a branch page is always implicit (empty) */
			MDB_val	 nullkey;
			indx_t	ix = csrc->mc_ki[csrc->mc_top];
			nullkey.mv_size = 0;
			csrc->mc_ki[csrc->mc_top] = 0;
			rc = mdb_update_key(csrc, &nullkey);
			csrc->mc_ki[csrc->mc_top] = ix;
			mdb_cassert(csrc, rc == MDB_SUCCESS);
		}
	}

	if (cdst->mc_ki[cdst->mc_top] == 0) {
		if (cdst->mc_ki[cdst->mc_top-1] != 0) {
			if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) {
				key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size);
			} else {
				srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0);
				key.mv_size = NODEKSZ(srcnode);
				key.mv_data = NODEKEY(srcnode);
			}
			DPRINTF(("update separator for destination page %"Yu" to [%s]",
				cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key)));
			mdb_cursor_copy(cdst, &mn);
			mn.mc_snum--;
			mn.mc_top--;
			/* We want mdb_rebalance to find mn when doing fixups */
			WITH_CURSOR_TRACKING(mn,
				rc = mdb_update_key(&mn, &key));
			if (rc)
				return rc;
		}
		if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) {
			MDB_val	 nullkey;
			indx_t	ix = cdst->mc_ki[cdst->mc_top];
			nullkey.mv_size = 0;
			cdst->mc_ki[cdst->mc_top] = 0;
			rc = mdb_update_key(cdst, &nullkey);
			cdst->mc_ki[cdst->mc_top] = ix;
			mdb_cassert(cdst, rc == MDB_SUCCESS);
		}
	}

	return MDB_SUCCESS;
}
9100 
/** Merge one page into another.
 *  The nodes from the page pointed to by \b csrc will
 *	be copied to the page pointed to by \b cdst and then
 *	the \b csrc page will be freed.
 * @param[in] csrc Cursor pointing to the source page.
 * @param[in] cdst Cursor pointing to the destination page.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst)
{
	MDB_page	*psrc, *pdst;
	MDB_node	*srcnode;
	MDB_val		 key, data;
	unsigned	 nkeys;
	int			 rc;
	indx_t		 i, j;

	psrc = csrc->mc_pg[csrc->mc_top];
	pdst = cdst->mc_pg[cdst->mc_top];

	DPRINTF(("merging page %"Yu" into %"Yu, psrc->mp_pgno, pdst->mp_pgno));

	mdb_cassert(csrc, csrc->mc_snum > 1);	/* can't merge root page */
	mdb_cassert(csrc, cdst->mc_snum > 1);

	/* Mark dst as dirty. */
	if ((rc = mdb_page_touch(cdst)))
		return rc;

	/* get dst page again now that we've touched it. */
	pdst = cdst->mc_pg[cdst->mc_top];

	/* Move all nodes from src to dst.
	 */
	j = nkeys = NUMKEYS(pdst);
	if (IS_LEAF2(psrc)) {
		/* Fixed-size keys: append them one by one after dst's keys */
		key.mv_size = csrc->mc_db->md_pad;
		key.mv_data = METADATA(psrc);
		for (i = 0; i < NUMKEYS(psrc); i++, j++) {
			rc = mdb_node_add(cdst, j, &key, NULL, 0, 0);
			if (rc != MDB_SUCCESS)
				return rc;
			key.mv_data = (char *)key.mv_data + key.mv_size;
		}
	} else {
		for (i = 0; i < NUMKEYS(psrc); i++, j++) {
			srcnode = NODEPTR(psrc, i);
			if (i == 0 && IS_BRANCH(psrc)) {
				/* A branch page's slot-0 key is implicit; fetch the
				 * real separator from the lowest leaf below it. */
				MDB_cursor mn;
				MDB_node *s2;
				mdb_cursor_copy(csrc, &mn);
				mn.mc_xcursor = NULL;
				/* must find the lowest key below src */
				rc = mdb_page_search_lowest(&mn);
				if (rc)
					return rc;
				if (IS_LEAF2(mn.mc_pg[mn.mc_top])) {
					key.mv_size = mn.mc_db->md_pad;
					key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size);
				} else {
					s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0);
					key.mv_size = NODEKSZ(s2);
					key.mv_data = NODEKEY(s2);
				}
			} else {
				key.mv_size = srcnode->mn_ksize;
				key.mv_data = NODEKEY(srcnode);
			}

			data.mv_size = NODEDSZ(srcnode);
			data.mv_data = NODEDATA(srcnode);
			rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags);
			if (rc != MDB_SUCCESS)
				return rc;
		}
	}

	DPRINTF(("dst page %"Yu" now has %u keys (%.1f%% filled)",
	    pdst->mp_pgno, NUMKEYS(pdst),
		(float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10));

	/* Unlink the src page from parent and add to free list.
	 */
	csrc->mc_top--;
	mdb_node_del(csrc, 0);
	if (csrc->mc_ki[csrc->mc_top] == 0) {
		/* Re-blank the new slot-0 separator in the parent */
		key.mv_size = 0;
		rc = mdb_update_key(csrc, &key);
		if (rc) {
			csrc->mc_top++;
			return rc;
		}
	}
	csrc->mc_top++;

	psrc = csrc->mc_pg[csrc->mc_top];
	/* If not operating on FreeDB, allow this page to be reused
	 * in this txn. Otherwise just add to free list.
	 */
	rc = mdb_page_loose(csrc, psrc);
	if (rc)
		return rc;
	if (IS_LEAF(psrc))
		csrc->mc_db->md_leaf_pages--;
	else
		csrc->mc_db->md_branch_pages--;
	{
		/* Adjust other cursors pointing to mp */
		MDB_cursor *m2, *m3;
		MDB_dbi dbi = csrc->mc_dbi;
		unsigned int top = csrc->mc_top;

		for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
			if (csrc->mc_flags & C_SUB)
				m3 = &m2->mc_xcursor->mx_cursor;
			else
				m3 = m2;
			if (m3 == csrc) continue;
			if (m3->mc_snum < csrc->mc_snum) continue;
			if (m3->mc_pg[top] == psrc) {
				/* Cursor was on the freed src page: shift onto dst,
				 * offsetting by the number of keys dst already had. */
				m3->mc_pg[top] = pdst;
				m3->mc_ki[top] += nkeys;
				m3->mc_ki[top-1] = cdst->mc_ki[top-1];
			} else if (m3->mc_pg[top-1] == csrc->mc_pg[top-1] &&
				m3->mc_ki[top-1] > csrc->mc_ki[top-1]) {
				/* Parent slot of src was removed: shift left */
				m3->mc_ki[top-1]--;
			}
			if (IS_LEAF(psrc))
				XCURSOR_REFRESH(m3, top, m3->mc_pg[top]);
		}
	}
	{
		/* Rebalance the parent; restore cdst's stack depth after,
		 * accounting for any change in tree height. */
		unsigned int snum = cdst->mc_snum;
		uint16_t depth = cdst->mc_db->md_depth;
		mdb_cursor_pop(cdst);
		rc = mdb_rebalance(cdst);
		/* Did the tree height change? */
		if (depth != cdst->mc_db->md_depth)
			snum += cdst->mc_db->md_depth - depth;
		cdst->mc_snum = snum;
		cdst->mc_top = snum-1;
	}
	return rc;
}
9246 
9247 /** Copy the contents of a cursor.
9248  * @param[in] csrc The cursor to copy from.
9249  * @param[out] cdst The cursor to copy to.
9250  */
9251 static void
mdb_cursor_copy(const MDB_cursor * csrc,MDB_cursor * cdst)9252 mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst)
9253 {
9254 	unsigned int i;
9255 
9256 	cdst->mc_txn = csrc->mc_txn;
9257 	cdst->mc_dbi = csrc->mc_dbi;
9258 	cdst->mc_db  = csrc->mc_db;
9259 	cdst->mc_dbx = csrc->mc_dbx;
9260 	cdst->mc_snum = csrc->mc_snum;
9261 	cdst->mc_top = csrc->mc_top;
9262 	cdst->mc_flags = csrc->mc_flags;
9263 	MC_SET_OVPG(cdst, MC_OVPG(csrc));
9264 
9265 	for (i=0; i<csrc->mc_snum; i++) {
9266 		cdst->mc_pg[i] = csrc->mc_pg[i];
9267 		cdst->mc_ki[i] = csrc->mc_ki[i];
9268 	}
9269 }
9270 
/** Rebalance the tree after a delete operation.
 * If the page at the cursor's top is still full enough, this is a no-op.
 * Otherwise keys are borrowed from a sibling page, or the page is merged
 * with a sibling. Root-page special cases (empty tree, root collapse)
 * are handled here as well.
 * @param[in] mc Cursor pointing to the page where rebalancing
 * should begin.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_rebalance(MDB_cursor *mc)
{
	MDB_node	*node;
	int rc, fromleft;
	unsigned int ptop, minkeys, thresh;
	MDB_cursor	mn;
	indx_t oldki;

	/* Per page type: a branch must keep at least 2 keys to stay valid
	 * (so any fill level, thresh = 1, is acceptable); a leaf only needs
	 * 1 key but is rebalanced when below FILL_THRESHOLD percent full.
	 */
	if (IS_BRANCH(mc->mc_pg[mc->mc_top])) {
		minkeys = 2;
		thresh = 1;
	} else {
		minkeys = 1;
		thresh = FILL_THRESHOLD;
	}
	DPRINTF(("rebalancing %s page %"Yu" (has %u keys, %.1f%% full)",
	    IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch",
	    mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]),
		(float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10));

	/* Page is full enough and has enough keys: nothing to do */
	if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh &&
		NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) {
		DPRINTF(("no need to rebalance page %"Yu", above fill threshold",
		    mdb_dbg_pgno(mc->mc_pg[mc->mc_top])));
		return MDB_SUCCESS;
	}

	/* Cursor is at the root page: there are no siblings to borrow
	 * from or merge with, only the root special cases below.
	 */
	if (mc->mc_snum < 2) {
		MDB_page *mp = mc->mc_pg[0];
		if (IS_SUBP(mp)) {
			DPUTS("Can't rebalance a subpage, ignoring");
			return MDB_SUCCESS;
		}
		if (NUMKEYS(mp) == 0) {
			/* Last key was deleted: DB is now empty; free the root
			 * page and mark the DB rootless.
			 */
			DPUTS("tree is completely empty");
			mc->mc_db->md_root = P_INVALID;
			mc->mc_db->md_depth = 0;
			mc->mc_db->md_leaf_pages = 0;
			rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
			if (rc)
				return rc;
			/* Adjust cursors pointing to mp */
			mc->mc_snum = 0;
			mc->mc_top = 0;
			mc->mc_flags &= ~C_INITIALIZED;
			{
				/* Uninitialize every other cursor rooted at mp */
				MDB_cursor *m2, *m3;
				MDB_dbi dbi = mc->mc_dbi;

				for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
					if (mc->mc_flags & C_SUB)
						m3 = &m2->mc_xcursor->mx_cursor;
					else
						m3 = m2;
					if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum))
						continue;
					if (m3->mc_pg[0] == mp) {
						m3->mc_snum = 0;
						m3->mc_top = 0;
						m3->mc_flags &= ~C_INITIALIZED;
					}
				}
			}
		} else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) {
			/* Root branch with a single child: the child becomes the
			 * new root and the tree height shrinks by one.
			 */
			int i;
			DPUTS("collapsing root page!");
			rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno);
			if (rc)
				return rc;
			mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0));
			rc = mdb_page_get(mc, mc->mc_db->md_root, &mc->mc_pg[0], NULL);
			if (rc)
				return rc;
			mc->mc_db->md_depth--;
			mc->mc_db->md_branch_pages--;
			/* Shift this cursor's page/index stack down one level */
			mc->mc_ki[0] = mc->mc_ki[1];
			for (i = 1; i<mc->mc_db->md_depth; i++) {
				mc->mc_pg[i] = mc->mc_pg[i+1];
				mc->mc_ki[i] = mc->mc_ki[i+1];
			}
			{
				/* Adjust other cursors pointing to mp */
				MDB_cursor *m2, *m3;
				MDB_dbi dbi = mc->mc_dbi;

				for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
					if (mc->mc_flags & C_SUB)
						m3 = &m2->mc_xcursor->mx_cursor;
					else
						m3 = m2;
					if (m3 == mc) continue;
					if (!(m3->mc_flags & C_INITIALIZED))
						continue;
					if (m3->mc_pg[0] == mp) {
						/* Same shift-down for the tracked cursor */
						for (i=0; i<mc->mc_db->md_depth; i++) {
							m3->mc_pg[i] = m3->mc_pg[i+1];
							m3->mc_ki[i] = m3->mc_ki[i+1];
						}
						m3->mc_snum--;
						m3->mc_top--;
					}
				}
			}
		} else
			DPUTS("root page doesn't need rebalancing");
		return MDB_SUCCESS;
	}

	/* The parent (branch page) must have at least 2 pointers,
	 * otherwise the tree is invalid.
	 */
	ptop = mc->mc_top-1;
	mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1);

	/* Leaf page fill factor is below the threshold.
	 * Try to move keys from left or right neighbor, or
	 * merge with a neighbor page.
	 */

	/* Find neighbors.
	 */
	mdb_cursor_copy(mc, &mn);
	mn.mc_xcursor = NULL;

	oldki = mc->mc_ki[mc->mc_top];
	if (mc->mc_ki[ptop] == 0) {
		/* We're the leftmost leaf in our parent.
		 */
		DPUTS("reading right neighbor");
		mn.mc_ki[ptop]++;
		node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]);
		rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL);
		if (rc)
			return rc;
		mn.mc_ki[mn.mc_top] = 0;
		mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]);
		fromleft = 0;
	} else {
		/* There is at least one neighbor to the left.
		 */
		DPUTS("reading left neighbor");
		mn.mc_ki[ptop]--;
		node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]);
		rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL);
		if (rc)
			return rc;
		mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1;
		mc->mc_ki[mc->mc_top] = 0;
		fromleft = 1;
	}

	DPRINTF(("found neighbor page %"Yu" (%u keys, %.1f%% full)",
	    mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]),
		(float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10));

	/* If the neighbor page is above threshold and has enough keys,
	 * move one key from it. Otherwise we should try to merge them.
	 * (A branch page must never have less than 2 keys.)
	 */
	if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) {
		rc = mdb_node_move(&mn, mc, fromleft);
		if (fromleft) {
			/* if we inserted on left, bump position up */
			oldki++;
		}
	} else {
		if (!fromleft) {
			/* Merge the right neighbor into this page */
			rc = mdb_page_merge(&mn, mc);
		} else {
			/* Merge this page into the left neighbor; the cursor
			 * then refers to the surviving (left) page.
			 */
			oldki += NUMKEYS(mn.mc_pg[mn.mc_top]);
			mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1;
			/* We want mdb_rebalance to find mn when doing fixups */
			WITH_CURSOR_TRACKING(mn,
				rc = mdb_page_merge(mc, &mn));
			mdb_cursor_copy(&mn, mc);
		}
		mc->mc_flags &= ~C_EOF;
	}
	/* Restore the cursor's (possibly shifted) position on the page */
	mc->mc_ki[mc->mc_top] = oldki;
	return rc;
}
9458 
/** Complete a delete operation started by #mdb_cursor_del().
 * Removes the node under the cursor, fixes up all other cursors in the
 * txn that point at the affected page, rebalances the tree, and then
 * repositions cursors that now point past the deleted slot.
 * Sets #MDB_TXN_ERROR on failure.
 */
static int
mdb_cursor_del0(MDB_cursor *mc)
{
	int rc;
	MDB_page *mp;
	indx_t ki;
	unsigned int nkeys;
	MDB_cursor *m2, *m3;
	MDB_dbi dbi = mc->mc_dbi;

	/* Remember the deleted slot before the node disappears */
	ki = mc->mc_ki[mc->mc_top];
	mp = mc->mc_pg[mc->mc_top];
	mdb_node_del(mc, mc->mc_db->md_pad);
	mc->mc_db->md_entries--;
	{
		/* Adjust other cursors pointing to mp */
		for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
			m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2;
			if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED))
				continue;
			if (m3 == mc || m3->mc_snum < mc->mc_snum)
				continue;
			if (m3->mc_pg[mc->mc_top] == mp) {
				if (m3->mc_ki[mc->mc_top] == ki) {
					/* Cursor was on the deleted node itself */
					m3->mc_flags |= C_DEL;
					if (mc->mc_db->md_flags & MDB_DUPSORT) {
						/* Sub-cursor referred into dataset which is gone */
						m3->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF);
					}
					continue;
				} else if (m3->mc_ki[mc->mc_top] > ki) {
					/* Slots after the deleted one shifted down by one */
					m3->mc_ki[mc->mc_top]--;
				}
				XCURSOR_REFRESH(m3, mc->mc_top, mp);
			}
		}
	}
	rc = mdb_rebalance(mc);
	if (rc)
		goto fail;

	/* DB is totally empty now, just bail out.
	 * Other cursors adjustments were already done
	 * by mdb_rebalance and aren't needed here.
	 */
	if (!mc->mc_snum) {
		mc->mc_flags |= C_EOF;
		return rc;
	}

	/* Rebalance may have moved the cursor to a different page */
	mp = mc->mc_pg[mc->mc_top];
	nkeys = NUMKEYS(mp);

	/* Adjust other cursors pointing to mp */
	for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) {
		m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2;
		if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
			continue;
		if (m3->mc_snum < mc->mc_snum)
			continue;
		if (m3->mc_pg[mc->mc_top] == mp) {
			if (m3->mc_ki[mc->mc_top] >= mc->mc_ki[mc->mc_top]) {
			/* if m3 points past last node in page, find next sibling */
				if (m3->mc_ki[mc->mc_top] >= nkeys) {
					rc = mdb_cursor_sibling(m3, 1);
					if (rc == MDB_NOTFOUND) {
						/* No right sibling: cursor is at end of data */
						m3->mc_flags |= C_EOF;
						rc = MDB_SUCCESS;
						continue;
					}
					if (rc)
						goto fail;
				}
				if (m3->mc_xcursor && !(m3->mc_flags & C_EOF)) {
					MDB_node *node = NODEPTR(m3->mc_pg[m3->mc_top], m3->mc_ki[m3->mc_top]);
					/* If this node has dupdata, it may need to be reinited
					 * because its data has moved.
					 * If the xcursor was not initd it must be reinited.
					 * Else if node points to a subDB, nothing is needed.
					 * Else (xcursor was initd, not a subDB) needs mc_pg[0] reset.
					 */
					if (node->mn_flags & F_DUPDATA) {
						if (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) {
							if (!(node->mn_flags & F_SUBDATA))
								m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node);
						} else {
							mdb_xcursor_init1(m3, node);
							rc = mdb_cursor_first(&m3->mc_xcursor->mx_cursor, NULL, NULL);
							if (rc)
								goto fail;
						}
					}
					m3->mc_xcursor->mx_cursor.mc_flags |= C_DEL;
				}
			}
		}
	}
	mc->mc_flags |= C_DEL;

fail:
	if (rc)
		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
9564 
9565 int
mdb_del(MDB_txn * txn,MDB_dbi dbi,MDB_val * key,MDB_val * data)9566 mdb_del(MDB_txn *txn, MDB_dbi dbi,
9567     MDB_val *key, MDB_val *data)
9568 {
9569 	if (!key || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
9570 		return EINVAL;
9571 
9572 	if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
9573 		return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
9574 
9575 	if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) {
9576 		/* must ignore any data */
9577 		data = NULL;
9578 	}
9579 
9580 	return mdb_del0(txn, dbi, key, data, 0);
9581 }
9582 
9583 static int
mdb_del0(MDB_txn * txn,MDB_dbi dbi,MDB_val * key,MDB_val * data,unsigned flags)9584 mdb_del0(MDB_txn *txn, MDB_dbi dbi,
9585 	MDB_val *key, MDB_val *data, unsigned flags)
9586 {
9587 	MDB_cursor mc;
9588 	MDB_xcursor mx;
9589 	MDB_cursor_op op;
9590 	MDB_val rdata, *xdata;
9591 	int		 rc, exact = 0;
9592 	DKBUF;
9593 
9594 	DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key)));
9595 
9596 	mdb_cursor_init(&mc, txn, dbi, &mx);
9597 
9598 	if (data) {
9599 		op = MDB_GET_BOTH;
9600 		rdata = *data;
9601 		xdata = &rdata;
9602 	} else {
9603 		op = MDB_SET;
9604 		xdata = NULL;
9605 		flags |= MDB_NODUPDATA;
9606 	}
9607 	rc = mdb_cursor_set(&mc, key, xdata, op, &exact);
9608 	if (rc == 0) {
9609 		/* let mdb_page_split know about this cursor if needed:
9610 		 * delete will trigger a rebalance; if it needs to move
9611 		 * a node from one page to another, it will have to
9612 		 * update the parent's separator key(s). If the new sepkey
9613 		 * is larger than the current one, the parent page may
9614 		 * run out of space, triggering a split. We need this
9615 		 * cursor to be consistent until the end of the rebalance.
9616 		 */
9617 		mc.mc_next = txn->mt_cursors[dbi];
9618 		txn->mt_cursors[dbi] = &mc;
9619 		rc = mdb_cursor_del(&mc, flags);
9620 		txn->mt_cursors[dbi] = mc.mc_next;
9621 	}
9622 	return rc;
9623 }
9624 
/** Split a page and insert a new node.
 * Set #MDB_TXN_ERROR on failure.
 * @param[in,out] mc Cursor pointing to the page and desired insertion index.
 * The cursor will be updated to point to the actual page and index where
 * the node got inserted after the split.
 * @param[in] newkey The key for the newly inserted node.
 * @param[in] newdata The data for the newly inserted node.
 * @param[in] newpgno The page number, if the new node is a branch node.
 * @param[in] nflags The #NODE_ADD_FLAGS for the new node.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno,
	unsigned int nflags)
{
	unsigned int flags;
	int		 rc = MDB_SUCCESS, new_root = 0, did_split = 0;
	indx_t		 newindx;
	pgno_t		 pgno = 0;
	int	 i, j, split_indx, nkeys, pmax;
	MDB_env 	*env = mc->mc_txn->mt_env;
	MDB_node	*node;
	MDB_val	 sepkey, rkey, xdata, *rdata = &xdata;
	MDB_page	*copy = NULL;
	MDB_page	*mp, *rp, *pp;
	int ptop;
	MDB_cursor	mn;
	DKBUF;

	mp = mc->mc_pg[mc->mc_top];		/* page being split */
	newindx = mc->mc_ki[mc->mc_top];	/* desired insertion index */
	nkeys = NUMKEYS(mp);

	DPRINTF(("-----> splitting %s page %"Yu" and adding [%s] at index %i/%i",
	    IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno,
	    DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys));

	/* Create a right sibling. */
	if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp)))
		return rc;
	rp->mp_pad = mp->mp_pad;
	DPRINTF(("new right sibling: page %"Yu, rp->mp_pgno));

	/* Usually when splitting the root page, the cursor
	 * height is 1. But when called from mdb_update_key,
	 * the cursor height may be greater because it walks
	 * up the stack while finding the branch slot to update.
	 */
	if (mc->mc_top < 1) {
		/* Splitting the root: allocate a new root branch page */
		if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp)))
			goto done;
		/* shift current top to make room for new parent */
		for (i=mc->mc_snum; i>0; i--) {
			mc->mc_pg[i] = mc->mc_pg[i-1];
			mc->mc_ki[i] = mc->mc_ki[i-1];
		}
		mc->mc_pg[0] = pp;
		mc->mc_ki[0] = 0;
		mc->mc_db->md_root = pp->mp_pgno;
		DPRINTF(("root split! new root = %"Yu, pp->mp_pgno));
		new_root = mc->mc_db->md_depth++;

		/* Add left (implicit) pointer. */
		if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) {
			/* undo the pre-push */
			mc->mc_pg[0] = mc->mc_pg[1];
			mc->mc_ki[0] = mc->mc_ki[1];
			mc->mc_db->md_root = mp->mp_pgno;
			mc->mc_db->md_depth--;
			goto done;
		}
		mc->mc_snum++;
		mc->mc_top++;
		ptop = 0;
	} else {
		ptop = mc->mc_top-1;
		DPRINTF(("parent branch page is %"Yu, mc->mc_pg[ptop]->mp_pgno));
	}

	/* mn shadows mc but points at the new right sibling */
	mdb_cursor_copy(mc, &mn);
	mn.mc_xcursor = NULL;
	mn.mc_pg[mn.mc_top] = rp;
	mn.mc_ki[ptop] = mc->mc_ki[ptop]+1;

	if (nflags & MDB_APPEND) {
		/* Append mode: new node goes alone onto the new page */
		mn.mc_ki[mn.mc_top] = 0;
		sepkey = *newkey;
		split_indx = newindx;
		nkeys = 0;
	} else {

		split_indx = (nkeys+1) / 2;

		if (IS_LEAF2(rp)) {
			/* LEAF2 (fixed-size key) pages: keys are a plain array,
			 * so the split is simple memory moves.
			 */
			char *split, *ins;
			int x;
			unsigned int lsize, rsize, ksize;
			/* Move half of the keys to the right sibling */
			x = mc->mc_ki[mc->mc_top] - split_indx;
			ksize = mc->mc_db->md_pad;
			split = LEAF2KEY(mp, split_indx, ksize);
			rsize = (nkeys - split_indx) * ksize;
			lsize = (nkeys - split_indx) * sizeof(indx_t);
			mp->mp_lower -= lsize;
			rp->mp_lower += lsize;
			mp->mp_upper += rsize - lsize;
			rp->mp_upper -= rsize - lsize;
			sepkey.mv_size = ksize;
			if (newindx == split_indx) {
				sepkey.mv_data = newkey->mv_data;
			} else {
				sepkey.mv_data = split;
			}
			if (x<0) {
				/* New key lands on the left (original) page */
				ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize);
				memcpy(rp->mp_ptrs, split, rsize);
				sepkey.mv_data = rp->mp_ptrs;
				memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize);
				memcpy(ins, newkey->mv_data, ksize);
				mp->mp_lower += sizeof(indx_t);
				mp->mp_upper -= ksize - sizeof(indx_t);
			} else {
				/* New key lands on the right (new) page */
				if (x)
					memcpy(rp->mp_ptrs, split, x * ksize);
				ins = LEAF2KEY(rp, x, ksize);
				memcpy(ins, newkey->mv_data, ksize);
				memcpy(ins+ksize, split + x * ksize, rsize - x * ksize);
				rp->mp_lower += sizeof(indx_t);
				rp->mp_upper -= ksize - sizeof(indx_t);
				mc->mc_ki[mc->mc_top] = x;
			}
		} else {
			int psize, nsize, k;
			/* Maximum free space in an empty page */
			pmax = env->me_psize - PAGEHDRSZ;
			if (IS_LEAF(mp))
				nsize = mdb_leaf_size(env, newkey, newdata);
			else
				nsize = mdb_branch_size(env, newkey);
			nsize = EVEN(nsize);

			/* grab a page to hold a temporary copy */
			copy = mdb_page_malloc(mc->mc_txn, 1);
			if (copy == NULL) {
				rc = ENOMEM;
				goto done;
			}
			copy->mp_pgno  = mp->mp_pgno;
			copy->mp_flags = mp->mp_flags;
			copy->mp_lower = (PAGEHDRSZ-PAGEBASE);
			copy->mp_upper = env->me_psize - PAGEBASE;

			/* prepare to insert: build the would-be node-pointer list
			 * with a placeholder slot at newindx
			 */
			for (i=0, j=0; i<nkeys; i++) {
				if (i == newindx) {
					copy->mp_ptrs[j++] = 0;
				}
				copy->mp_ptrs[j++] = mp->mp_ptrs[i];
			}

			/* When items are relatively large the split point needs
			 * to be checked, because being off-by-one will make the
			 * difference between success or failure in mdb_node_add.
			 *
			 * It's also relevant if a page happens to be laid out
			 * such that one half of its nodes are all "small" and
			 * the other half of its nodes are "large." If the new
			 * item is also "large" and falls on the half with
			 * "large" nodes, it also may not fit.
			 *
			 * As a final tweak, if the new item goes on the last
			 * spot on the page (and thus, onto the new page), bias
			 * the split so the new page is emptier than the old page.
			 * This yields better packing during sequential inserts.
			 */
			if (nkeys < 32 || nsize > pmax/16 || newindx >= nkeys) {
				/* Find split point */
				psize = 0;
				if (newindx <= split_indx || newindx >= nkeys) {
					/* Walk forward from the start of the page */
					i = 0; j = 1;
					k = newindx >= nkeys ? nkeys : split_indx+1+IS_LEAF(mp);
				} else {
					/* Walk backward from the end of the page */
					i = nkeys; j = -1;
					k = split_indx-1;
				}
				for (; i!=k; i+=j) {
					if (i == newindx) {
						psize += nsize;
						node = NULL;
					} else {
						node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
						psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t);
						if (IS_LEAF(mp)) {
							if (F_ISSET(node->mn_flags, F_BIGDATA))
								psize += sizeof(pgno_t);
							else
								psize += NODEDSZ(node);
						}
						psize = EVEN(psize);
					}
					if (psize > pmax || i == k-j) {
						split_indx = i + (j<0);
						break;
					}
				}
			}
			if (split_indx == newindx) {
				sepkey.mv_size = newkey->mv_size;
				sepkey.mv_data = newkey->mv_data;
			} else {
				node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE);
				sepkey.mv_size = node->mn_ksize;
				sepkey.mv_data = NODEKEY(node);
			}
		}
	}

	DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey)));

	/* Copy separator key to the parent.
	 */
	if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) {
		/* Parent is full too: split it recursively */
		int snum = mc->mc_snum;
		mn.mc_snum--;
		mn.mc_top--;
		did_split = 1;
		/* We want other splits to find mn when doing fixups */
		WITH_CURSOR_TRACKING(mn,
			rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0));
		if (rc)
			goto done;

		/* root split? */
		if (mc->mc_snum > snum) {
			ptop++;
		}
		/* Right page might now have changed parent.
		 * Check if left page also changed parent.
		 */
		if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
		    mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
			for (i=0; i<ptop; i++) {
				mc->mc_pg[i] = mn.mc_pg[i];
				mc->mc_ki[i] = mn.mc_ki[i];
			}
			mc->mc_pg[ptop] = mn.mc_pg[ptop];
			if (mn.mc_ki[ptop]) {
				mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1;
			} else {
				/* find right page's left sibling */
				mc->mc_ki[ptop] = mn.mc_ki[ptop];
				rc = mdb_cursor_sibling(mc, 0);
			}
		}
	} else {
		/* Parent has room: just add the separator key there */
		mn.mc_top--;
		rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0);
		mn.mc_top++;
	}
	if (rc != MDB_SUCCESS) {
		if (rc == MDB_NOTFOUND) /* improper mdb_cursor_sibling() result */
			rc = MDB_PROBLEM;
		goto done;
	}
	if (nflags & MDB_APPEND) {
		/* Append mode: the new node is the sole entry on rp */
		mc->mc_pg[mc->mc_top] = rp;
		mc->mc_ki[mc->mc_top] = 0;
		rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags);
		if (rc)
			goto done;
		for (i=0; i<mc->mc_top; i++)
			mc->mc_ki[i] = mn.mc_ki[i];
	} else if (!IS_LEAF2(mp)) {
		/* Move nodes: redistribute from the temporary copy onto
		 * rp (right half) and then back onto mp (left half),
		 * inserting the new node at its slot along the way.
		 */
		mc->mc_pg[mc->mc_top] = rp;
		i = split_indx;
		j = 0;
		do {
			if (i == newindx) {
				rkey.mv_data = newkey->mv_data;
				rkey.mv_size = newkey->mv_size;
				if (IS_LEAF(mp)) {
					rdata = newdata;
				} else
					pgno = newpgno;
				flags = nflags;
				/* Update index for the new key. */
				mc->mc_ki[mc->mc_top] = j;
			} else {
				node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
				rkey.mv_data = NODEKEY(node);
				rkey.mv_size = node->mn_ksize;
				if (IS_LEAF(mp)) {
					xdata.mv_data = NODEDATA(node);
					xdata.mv_size = NODEDSZ(node);
					rdata = &xdata;
				} else
					pgno = NODEPGNO(node);
				flags = node->mn_flags;
			}

			if (!IS_LEAF(mp) && j == 0) {
				/* First branch index doesn't need key data. */
				rkey.mv_size = 0;
			}

			rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags);
			if (rc)
				goto done;
			if (i == nkeys) {
				/* Wrapped past the end: switch to filling the
				 * (scratch) left page from the start.
				 */
				i = 0;
				j = 0;
				mc->mc_pg[mc->mc_top] = copy;
			} else {
				i++;
				j++;
			}
		} while (i != split_indx);

		/* Copy the rebuilt left half from the scratch page back to mp */
		nkeys = NUMKEYS(copy);
		for (i=0; i<nkeys; i++)
			mp->mp_ptrs[i] = copy->mp_ptrs[i];
		mp->mp_lower = copy->mp_lower;
		mp->mp_upper = copy->mp_upper;
		memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1),
			env->me_psize - copy->mp_upper - PAGEBASE);

		/* reset back to original page */
		if (newindx < split_indx) {
			mc->mc_pg[mc->mc_top] = mp;
		} else {
			mc->mc_pg[mc->mc_top] = rp;
			mc->mc_ki[ptop]++;
			/* Make sure mc_ki is still valid.
			 */
			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
				mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
				for (i=0; i<=ptop; i++) {
					mc->mc_pg[i] = mn.mc_pg[i];
					mc->mc_ki[i] = mn.mc_ki[i];
				}
			}
		}
		if (nflags & MDB_RESERVE) {
			/* Hand back a pointer to the reserved data space */
			node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
			if (!(node->mn_flags & F_BIGDATA))
				newdata->mv_data = NODEDATA(node);
		}
	} else {
		/* LEAF2 pages were already redistributed above; just fix
		 * the cursor if the insertion went to the right page.
		 */
		if (newindx >= split_indx) {
			mc->mc_pg[mc->mc_top] = rp;
			mc->mc_ki[ptop]++;
			/* Make sure mc_ki is still valid.
			 */
			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
				mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
				for (i=0; i<=ptop; i++) {
					mc->mc_pg[i] = mn.mc_pg[i];
					mc->mc_ki[i] = mn.mc_ki[i];
				}
			}
		}
	}

	{
		/* Adjust other cursors pointing to mp */
		MDB_cursor *m2, *m3;
		MDB_dbi dbi = mc->mc_dbi;
		nkeys = NUMKEYS(mp);

		for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
			if (mc->mc_flags & C_SUB)
				m3 = &m2->mc_xcursor->mx_cursor;
			else
				m3 = m2;
			if (m3 == mc)
				continue;
			if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
				continue;
			if (new_root) {
				int k;
				/* sub cursors may be on different DB */
				if (m3->mc_pg[0] != mp)
					continue;
				/* root split */
				for (k=new_root; k>=0; k--) {
					m3->mc_ki[k+1] = m3->mc_ki[k];
					m3->mc_pg[k+1] = m3->mc_pg[k];
				}
				if (m3->mc_ki[0] >= nkeys) {
					m3->mc_ki[0] = 1;
				} else {
					m3->mc_ki[0] = 0;
				}
				m3->mc_pg[0] = mc->mc_pg[0];
				m3->mc_snum++;
				m3->mc_top++;
			}
			if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) {
				/* Cursor was on the split page: maybe shift it to rp */
				if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE))
					m3->mc_ki[mc->mc_top]++;
				if (m3->mc_ki[mc->mc_top] >= nkeys) {
					m3->mc_pg[mc->mc_top] = rp;
					m3->mc_ki[mc->mc_top] -= nkeys;
					for (i=0; i<mc->mc_top; i++) {
						m3->mc_ki[i] = mn.mc_ki[i];
						m3->mc_pg[i] = mn.mc_pg[i];
					}
				}
			} else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] &&
				m3->mc_ki[ptop] >= mc->mc_ki[ptop]) {
				m3->mc_ki[ptop]++;
			}
			if (IS_LEAF(mp))
				XCURSOR_REFRESH(m3, mc->mc_top, m3->mc_pg[mc->mc_top]);
		}
	}
	DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp)));

done:
	if (copy)					/* tmp page */
		mdb_page_free(env, copy);
	if (rc)
		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
	return rc;
}
10051 
10052 int
mdb_put(MDB_txn * txn,MDB_dbi dbi,MDB_val * key,MDB_val * data,unsigned int flags)10053 mdb_put(MDB_txn *txn, MDB_dbi dbi,
10054     MDB_val *key, MDB_val *data, unsigned int flags)
10055 {
10056 	MDB_cursor mc;
10057 	MDB_xcursor mx;
10058 	int rc;
10059 
10060 	if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
10061 		return EINVAL;
10062 
10063 	if (flags & ~(MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP))
10064 		return EINVAL;
10065 
10066 	if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED))
10067 		return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN;
10068 
10069 	mdb_cursor_init(&mc, txn, dbi, &mx);
10070 	mc.mc_next = txn->mt_cursors[dbi];
10071 	txn->mt_cursors[dbi] = &mc;
10072 	rc = mdb_cursor_put(&mc, key, data, flags);
10073 	txn->mt_cursors[dbi] = mc.mc_next;
10074 	return rc;
10075 }
10076 
#ifndef MDB_WBUF
#define MDB_WBUF	(1024*1024)	/**< Size of one compacting-copy write buffer */
#endif
#define MDB_EOF		0x10	/**< #mdb_env_copyfd1() is done reading */

	/** State needed for a double-buffering compacting copy.
	 * Shared between the reader (provider) and the dedicated writer
	 * thread (#mdb_env_copythr); handoff is coordinated through
	 * #mc_mutex/#mc_cond and the #mc_new counter.
	 */
typedef struct mdb_copy {
	MDB_env *mc_env;		/**< Environment being copied */
	MDB_txn *mc_txn;		/**< Read txn the copy operates in */
	pthread_mutex_t mc_mutex;	/**< Guards #mc_new and buffer handoff */
	pthread_cond_t mc_cond;	/**< Condition variable for #mc_new */
	char *mc_wbuf[2];		/**< The two write buffers */
	char *mc_over[2];		/**< Overflow-page tails to write after each buffer */
	int mc_wlen[2];			/**< Bytes pending in each write buffer */
	int mc_olen[2];			/**< Bytes pending in each overflow tail */
	pgno_t mc_next_pgno;	/**< Next page number to assign in the output file */
	HANDLE mc_fd;			/**< Destination file descriptor */
	int mc_toggle;			/**< Buffer number in provider */
	int mc_new;				/**< (0-2 buffers to write) | (#MDB_EOF at end) */
	/** Error code.  Never cleared if set.  Both threads can set nonzero
	 *	to fail the copy.  Not mutex-protected, LMDB expects atomic int.
	 */
	volatile int mc_error;
} mdb_copy;
10101 
	/** Dedicated writer thread for compacting copy.
	 * Waits for the provider to hand over filled buffers (via
	 * #mc_new / #mc_cond), writes each buffer and any overflow-page
	 * tail to #mc_fd, and returns the emptied buffer. Exits when the
	 * provider signals #MDB_EOF with no buffers pending. Any write
	 * failure is recorded in #mc_error for the provider to observe.
	 */
static THREAD_RET ESECT CALL_CONV
mdb_env_copythr(void *arg)
{
	mdb_copy *my = arg;
	char *ptr;
	int toggle = 0, wsize, rc;
#ifdef _WIN32
	DWORD len;
#define DO_WRITE(rc, fd, ptr, w2, len)	rc = WriteFile(fd, ptr, w2, &len, NULL)
#else
	int len;
#define DO_WRITE(rc, fd, ptr, w2, len)	len = write(fd, ptr, w2); rc = (len >= 0)
#ifdef SIGPIPE
	/* Block SIGPIPE so a broken-pipe write surfaces as EPIPE from
	 * write() instead of killing the process.
	 */
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGPIPE);
	if ((rc = pthread_sigmask(SIG_BLOCK, &set, NULL)) != 0)
		my->mc_error = rc;
#endif
#endif

	pthread_mutex_lock(&my->mc_mutex);
	for(;;) {
		/* Wait for a buffer (or EOF) from the provider */
		while (!my->mc_new)
			pthread_cond_wait(&my->mc_cond, &my->mc_mutex);
		if (my->mc_new == 0 + MDB_EOF) /* 0 buffers, just EOF */
			break;
		wsize = my->mc_wlen[toggle];
		ptr = my->mc_wbuf[toggle];
again:
		rc = MDB_SUCCESS;
		/* Write the buffer, retrying on partial writes */
		while (wsize > 0 && !my->mc_error) {
			DO_WRITE(rc, my->mc_fd, ptr, wsize, len);
			if (!rc) {
				rc = ErrCode();
#if defined(SIGPIPE) && !defined(_WIN32)
				if (rc == EPIPE) {
					/* Collect the pending SIGPIPE, otherwise at least OS X
					 * gives it to the process on thread-exit (ITS#8504).
					 */
					int tmp;
					sigwait(&set, &tmp);
				}
#endif
				break;
			} else if (len > 0) {
				/* Partial write: advance and continue */
				rc = MDB_SUCCESS;
				ptr += len;
				wsize -= len;
				continue;
			} else {
				/* Zero-length write with no error: treat as I/O failure */
				rc = EIO;
				break;
			}
		}
		if (rc) {
			my->mc_error = rc;
		}
		/* If there's an overflow page tail, write it too */
		if (my->mc_olen[toggle]) {
			wsize = my->mc_olen[toggle];
			ptr = my->mc_over[toggle];
			my->mc_olen[toggle] = 0;
			goto again;
		}
		my->mc_wlen[toggle] = 0;
		toggle ^= 1;
		/* Return the empty buffer to provider */
		my->mc_new--;
		pthread_cond_signal(&my->mc_cond);
	}
	pthread_mutex_unlock(&my->mc_mutex);
	return (THREAD_RET)0;
#undef DO_WRITE
}
10178 
	/** Give buffer and/or #MDB_EOF to writer thread, await unused buffer.
	 *
	 * @param[in] my control structure.
	 * @param[in] adjust (1 to hand off 1 buffer) | (MDB_EOF when ending).
	 * @return the copy's error code (#mc_error), nonzero if either
	 * thread has failed the copy.
	 */
static int ESECT
mdb_env_cthr_toggle(mdb_copy *my, int adjust)
{
	pthread_mutex_lock(&my->mc_mutex);
	/* Publish the new buffer count / EOF flag and wake the writer */
	my->mc_new += adjust;
	pthread_cond_signal(&my->mc_cond);
	while (my->mc_new & 2)		/* both buffers in use */
		pthread_cond_wait(&my->mc_cond, &my->mc_mutex);
	pthread_mutex_unlock(&my->mc_mutex);

	/* Switch the provider to the other buffer */
	my->mc_toggle ^= (adjust & 1);
	/* Both threads reset mc_wlen, to be safe from threading errors */
	my->mc_wlen[my->mc_toggle] = 0;
	return my->mc_error;
}
10199 
	/** Depth-first tree traversal for compacting copy.
	 * @param[in] my control structure.
	 * @param[in,out] pg database root.
	 * @param[in] flags includes #F_DUPDATA if it is a sorted-duplicate sub-DB.
	 */
static int ESECT
mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags)
{
	MDB_cursor mc = {0};
	MDB_node *ni;
	MDB_page *mo, *mp, *leaf;
	char *buf, *ptr;
	int rc, toggle;
	unsigned int i;

	/* Empty DB, nothing to do */
	if (*pg == P_INVALID)
		return MDB_SUCCESS;

	/* Hand-built cursor: one page (the root) on the stack to start */
	mc.mc_snum = 1;
	mc.mc_txn = my->mc_txn;
	mc.mc_flags = my->mc_txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP);

	rc = mdb_page_get(&mc, *pg, &mc.mc_pg[0], NULL);
	if (rc)
		return rc;
	/* Descend to the leftmost leaf; fills in the rest of the stack */
	rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST);
	if (rc)
		return rc;

	/* Make cursor pages writable */
	buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum);
	if (buf == NULL)
		return ENOMEM;

	/* Copy the branch pages on the stack into private writable space
	 * so child pgnos can be rewritten as pages are renumbered.
	 */
	for (i=0; i<mc.mc_top; i++) {
		mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize);
		mc.mc_pg[i] = (MDB_page *)ptr;
		ptr += my->mc_env->me_psize;
	}

	/* This is writable space for a leaf page. Usually not needed. */
	leaf = (MDB_page *)ptr;

	toggle = my->mc_toggle;
	while (mc.mc_snum > 0) {
		unsigned n;
		mp = mc.mc_pg[mc.mc_top];
		n = NUMKEYS(mp);

		if (IS_LEAF(mp)) {
			/* LEAF2 pages and DUPSORT sub-DB leaves hold no overflow
			 * or sub-DB references, so there is nothing to rewrite.
			 */
			if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) {
				for (i=0; i<n; i++) {
					ni = NODEPTR(mp, i);
					if (ni->mn_flags & F_BIGDATA) {
						MDB_page *omp;
						pgno_t pg;

						/* Need writable leaf */
						if (mp != leaf) {
							mc.mc_pg[mc.mc_top] = leaf;
							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
							mp = leaf;
							ni = NODEPTR(mp, i);
						}

						/* Rewrite the node's overflow pgno to the new
						 * location, then stream the overflow chain
						 * into the write buffer.
						 */
						memcpy(&pg, NODEDATA(ni), sizeof(pg));
						memcpy(NODEDATA(ni), &my->mc_next_pgno, sizeof(pgno_t));
						rc = mdb_page_get(&mc, pg, &omp, NULL);
						if (rc)
							goto done;
						if (my->mc_wlen[toggle] >= MDB_WBUF) {
							rc = mdb_env_cthr_toggle(my, 1);
							if (rc)
								goto done;
							toggle = my->mc_toggle;
						}
						mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
						memcpy(mo, omp, my->mc_env->me_psize);
						mo->mp_pgno = my->mc_next_pgno;
						my->mc_next_pgno += omp->mp_pages;
						my->mc_wlen[toggle] += my->mc_env->me_psize;
						if (omp->mp_pages > 1) {
							/* Multi-page overflow: hand the remaining
							 * pages to the writer via mc_over/mc_olen.
							 */
							my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1);
							my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize;
							rc = mdb_env_cthr_toggle(my, 1);
							if (rc)
								goto done;
							toggle = my->mc_toggle;
						}
					} else if (ni->mn_flags & F_SUBDATA) {
						MDB_db db;

						/* Need writable leaf */
						if (mp != leaf) {
							mc.mc_pg[mc.mc_top] = leaf;
							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
							mp = leaf;
							ni = NODEPTR(mp, i);
						}

						/* Recurse into the sub-DB, then store its
						 * relocated root back into this node.
						 */
						memcpy(&db, NODEDATA(ni), sizeof(db));
						my->mc_toggle = toggle;
						rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA);
						if (rc)
							goto done;
						toggle = my->mc_toggle;
						memcpy(NODEDATA(ni), &db, sizeof(db));
					}
				}
			}
		} else {
			/* Branch page: advance to the next child, descending to
			 * its leftmost leaf when the child is itself a branch.
			 */
			mc.mc_ki[mc.mc_top]++;
			if (mc.mc_ki[mc.mc_top] < n) {
				pgno_t pg;
again:
				ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]);
				pg = NODEPGNO(ni);
				rc = mdb_page_get(&mc, pg, &mp, NULL);
				if (rc)
					goto done;
				mc.mc_top++;
				mc.mc_snum++;
				mc.mc_ki[mc.mc_top] = 0;
				if (IS_BRANCH(mp)) {
					/* Whenever we advance to a sibling branch page,
					 * we must proceed all the way down to its first leaf.
					 */
					mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize);
					goto again;
				} else
					mc.mc_pg[mc.mc_top] = mp;
				continue;
			}
		}
		/* Current page (and its subtree) is finished: emit it */
		if (my->mc_wlen[toggle] >= MDB_WBUF) {
			rc = mdb_env_cthr_toggle(my, 1);
			if (rc)
				goto done;
			toggle = my->mc_toggle;
		}
		mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
		mdb_page_copy(mo, mp, my->mc_env->me_psize);
		mo->mp_pgno = my->mc_next_pgno++;
		my->mc_wlen[toggle] += my->mc_env->me_psize;
		if (mc.mc_top) {
			/* Update parent if there is one */
			ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]);
			SETPGNO(ni, mo->mp_pgno);
			mdb_cursor_pop(&mc);
		} else {
			/* Otherwise we're done */
			*pg = mo->mp_pgno;
			break;
		}
	}
done:
	free(buf);
	return rc;
}
10360 
	/** Copy environment with compaction.
	 * Spawns #mdb_env_copythr() as the writer, builds fresh meta pages,
	 * then walks the tree via #mdb_env_cwalk() feeding a double buffer.
	 */
static int ESECT
mdb_env_copyfd1(MDB_env *env, HANDLE fd)
{
	MDB_meta *mm;
	MDB_page *mp;
	mdb_copy my = {0};
	MDB_txn *txn = NULL;
	pthread_t thr;
	pgno_t root, new_root;
	int rc = MDB_SUCCESS;

#ifdef _WIN32
	if (!(my.mc_mutex = CreateMutex(NULL, FALSE, NULL)) ||
		!(my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL))) {
		rc = ErrCode();
		goto done;
	}
	my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize);
	if (my.mc_wbuf[0] == NULL) {
		/* _aligned_malloc() sets errno, but we use Windows error codes */
		rc = ERROR_NOT_ENOUGH_MEMORY;
		goto done;
	}
#else
	if ((rc = pthread_mutex_init(&my.mc_mutex, NULL)) != 0)
		return rc;
	if ((rc = pthread_cond_init(&my.mc_cond, NULL)) != 0)
		goto done2;
#ifdef HAVE_MEMALIGN
	my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2);
	if (my.mc_wbuf[0] == NULL) {
		rc = errno;
		goto done;
	}
#else
	{
		void *p;
		if ((rc = posix_memalign(&p, env->me_os_psize, MDB_WBUF*2)) != 0)
			goto done;
		my.mc_wbuf[0] = p;
	}
#endif
#endif
	/* One allocation holds both halves of the double buffer */
	memset(my.mc_wbuf[0], 0, MDB_WBUF*2);
	my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF;
	my.mc_next_pgno = NUM_METAS;
	my.mc_env = env;
	my.mc_fd = fd;
	/* Writer thread drains the buffers while this thread fills them */
	rc = THREAD_CREATE(thr, mdb_env_copythr, &my);
	if (rc)
		goto done;

	rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
	if (rc)
		goto finish;

	/* Build fresh meta pages (pages 0 and 1) for the compacted copy */
	mp = (MDB_page *)my.mc_wbuf[0];
	memset(mp, 0, NUM_METAS * env->me_psize);
	mp->mp_pgno = 0;
	mp->mp_flags = P_META;
	mm = (MDB_meta *)METADATA(mp);
	mdb_env_init_meta0(env, mm);
	mm->mm_address = env->me_metas[0]->mm_address;

	mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize);
	mp->mp_pgno = 1;
	mp->mp_flags = P_META;
	*(MDB_meta *)METADATA(mp) = *mm;
	mm = (MDB_meta *)METADATA(mp);

	/* Set metapage 1 with current main DB */
	root = new_root = txn->mt_dbs[MAIN_DBI].md_root;
	if (root != P_INVALID) {
		/* Count free pages + freeDB pages.  Subtract from last_pg
		 * to find the new last_pg, which also becomes the new root.
		 */
		MDB_ID freecount = 0;
		MDB_cursor mc;
		MDB_val key, data;
		mdb_cursor_init(&mc, txn, FREE_DBI, NULL);
		while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0)
			freecount += *(MDB_ID *)data.mv_data;
		if (rc != MDB_NOTFOUND)
			goto finish;
		freecount += txn->mt_dbs[FREE_DBI].md_branch_pages +
			txn->mt_dbs[FREE_DBI].md_leaf_pages +
			txn->mt_dbs[FREE_DBI].md_overflow_pages;

		new_root = txn->mt_next_pgno - 1 - freecount;
		mm->mm_last_pg = new_root;
		mm->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI];
		mm->mm_dbs[MAIN_DBI].md_root = new_root;
	} else {
		/* When the DB is empty, handle it specially to
		 * fix any breakage like page leaks from ITS#8174.
		 */
		mm->mm_dbs[MAIN_DBI].md_flags = txn->mt_dbs[MAIN_DBI].md_flags;
	}
	if (root != P_INVALID || mm->mm_dbs[MAIN_DBI].md_flags) {
		mm->mm_txnid = 1;		/* use metapage 1 */
	}

	my.mc_wlen[0] = env->me_psize * NUM_METAS;
	my.mc_txn = txn;
	rc = mdb_env_cwalk(&my, &root, 0);
	if (rc == MDB_SUCCESS && root != new_root) {
		rc = MDB_INCOMPATIBLE;	/* page leak or corrupt DB */
	}

finish:
	if (rc)
		my.mc_error = rc;
	/* Hand off the final buffer and signal end-of-stream to the writer */
	mdb_env_cthr_toggle(&my, 1 | MDB_EOF);
	rc = THREAD_FINISH(thr);
	mdb_txn_abort(txn);

done:
#ifdef _WIN32
	if (my.mc_wbuf[0]) _aligned_free(my.mc_wbuf[0]);
	if (my.mc_cond)  CloseHandle(my.mc_cond);
	if (my.mc_mutex) CloseHandle(my.mc_mutex);
#else
	free(my.mc_wbuf[0]);
	pthread_cond_destroy(&my.mc_cond);
done2:
	pthread_mutex_destroy(&my.mc_mutex);
#endif
	/* Prefer the local error; otherwise report the writer thread's */
	return rc ? rc : my.mc_error;
}
10491 
	/** Copy environment as-is.
	 * Writes the meta pages under the writer mutex, then streams the
	 * data pages without further locking.
	 */
static int ESECT
mdb_env_copyfd0(MDB_env *env, HANDLE fd)
{
	MDB_txn *txn = NULL;
	mdb_mutexref_t wmutex = NULL;
	int rc;
	mdb_size_t wsize, w3;
	char *ptr;
#ifdef _WIN32
	DWORD len, w2;
#define DO_WRITE(rc, fd, ptr, w2, len)	rc = WriteFile(fd, ptr, w2, &len, NULL)
#else
	ssize_t len;
	size_t w2;
#define DO_WRITE(rc, fd, ptr, w2, len)	len = write(fd, ptr, w2); rc = (len >= 0)
#endif

	/* Do the lock/unlock of the reader mutex before starting the
	 * write txn.  Otherwise other read txns could block writers.
	 */
	rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
	if (rc)
		return rc;

	if (env->me_txns) {
		/* We must start the actual read txn after blocking writers */
		mdb_txn_end(txn, MDB_END_RESET_TMP);

		/* Temporarily block writers until we snapshot the meta pages */
		wmutex = env->me_wmutex;
		if (LOCK_MUTEX(rc, env, wmutex))
			goto leave;

		rc = mdb_txn_renew0(txn);
		if (rc) {
			UNLOCK_MUTEX(wmutex);
			goto leave;
		}
	}

	/* Write the meta pages straight from the live map */
	wsize = env->me_psize * NUM_METAS;
	ptr = env->me_map;
	w2 = wsize;
	while (w2 > 0) {
		DO_WRITE(rc, fd, ptr, w2, len);
		if (!rc) {
			rc = ErrCode();
			break;
		} else if (len > 0) {
			/* Partial write: advance and retry the remainder */
			rc = MDB_SUCCESS;
			ptr += len;
			w2 -= len;
			continue;
		} else {
			/* Non-blocking or async handles are not supported */
			rc = EIO;
			break;
		}
	}
	if (wmutex)
		UNLOCK_MUTEX(wmutex);

	if (rc)
		goto leave;

	/* Used size per the txn, clamped to the file's actual size */
	w3 = txn->mt_next_pgno * env->me_psize;
	{
		mdb_size_t fsize = 0;
		if ((rc = mdb_fsize(env->me_fd, &fsize)))
			goto leave;
		if (w3 > fsize)
			w3 = fsize;
	}
	/* Stream the data pages in MAX_WRITE-sized chunks */
	wsize = w3 - wsize;
	while (wsize > 0) {
		if (wsize > MAX_WRITE)
			w2 = MAX_WRITE;
		else
			w2 = wsize;
		DO_WRITE(rc, fd, ptr, w2, len);
		if (!rc) {
			rc = ErrCode();
			break;
		} else if (len > 0) {
			rc = MDB_SUCCESS;
			ptr += len;
			wsize -= len;
			continue;
		} else {
			rc = EIO;
			break;
		}
	}

leave:
	mdb_txn_abort(txn);
	return rc;
}
10591 
10592 int ESECT
mdb_env_copyfd2(MDB_env * env,HANDLE fd,unsigned int flags)10593 mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags)
10594 {
10595 	if (flags & MDB_CP_COMPACT)
10596 		return mdb_env_copyfd1(env, fd);
10597 	else
10598 		return mdb_env_copyfd0(env, fd);
10599 }
10600 
10601 int ESECT
mdb_env_copyfd(MDB_env * env,HANDLE fd)10602 mdb_env_copyfd(MDB_env *env, HANDLE fd)
10603 {
10604 	return mdb_env_copyfd2(env, fd, 0);
10605 }
10606 
10607 int ESECT
mdb_env_copy2(MDB_env * env,const char * path,unsigned int flags)10608 mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags)
10609 {
10610 	int rc;
10611 	MDB_name fname;
10612 	HANDLE newfd = INVALID_HANDLE_VALUE;
10613 
10614 	rc = mdb_fname_init(path, env->me_flags | MDB_NOLOCK, &fname);
10615 	if (rc == MDB_SUCCESS) {
10616 		rc = mdb_fopen(env, &fname, MDB_O_COPY, 0666, &newfd);
10617 		mdb_fname_destroy(fname);
10618 	}
10619 	if (rc == MDB_SUCCESS) {
10620 		rc = mdb_env_copyfd2(env, newfd, flags);
10621 		if (close(newfd) < 0 && rc == MDB_SUCCESS)
10622 			rc = ErrCode();
10623 	}
10624 	return rc;
10625 }
10626 
10627 int ESECT
mdb_env_copy(MDB_env * env,const char * path)10628 mdb_env_copy(MDB_env *env, const char *path)
10629 {
10630 	return mdb_env_copy2(env, path, 0);
10631 }
10632 
10633 int ESECT
mdb_env_set_flags(MDB_env * env,unsigned int flag,int onoff)10634 mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff)
10635 {
10636 	if (flag & ~CHANGEABLE)
10637 		return EINVAL;
10638 	if (onoff)
10639 		env->me_flags |= flag;
10640 	else
10641 		env->me_flags &= ~flag;
10642 	return MDB_SUCCESS;
10643 }
10644 
10645 int ESECT
mdb_env_get_flags(MDB_env * env,unsigned int * arg)10646 mdb_env_get_flags(MDB_env *env, unsigned int *arg)
10647 {
10648 	if (!env || !arg)
10649 		return EINVAL;
10650 
10651 	*arg = env->me_flags & (CHANGEABLE|CHANGELESS);
10652 	return MDB_SUCCESS;
10653 }
10654 
10655 int ESECT
mdb_env_set_userctx(MDB_env * env,void * ctx)10656 mdb_env_set_userctx(MDB_env *env, void *ctx)
10657 {
10658 	if (!env)
10659 		return EINVAL;
10660 	env->me_userctx = ctx;
10661 	return MDB_SUCCESS;
10662 }
10663 
10664 void * ESECT
mdb_env_get_userctx(MDB_env * env)10665 mdb_env_get_userctx(MDB_env *env)
10666 {
10667 	return env ? env->me_userctx : NULL;
10668 }
10669 
10670 int ESECT
mdb_env_set_assert(MDB_env * env,MDB_assert_func * func)10671 mdb_env_set_assert(MDB_env *env, MDB_assert_func *func)
10672 {
10673 	if (!env)
10674 		return EINVAL;
10675 #ifndef NDEBUG
10676 	env->me_assert_func = func;
10677 #endif
10678 	return MDB_SUCCESS;
10679 }
10680 
10681 int ESECT
mdb_env_get_path(MDB_env * env,const char ** arg)10682 mdb_env_get_path(MDB_env *env, const char **arg)
10683 {
10684 	if (!env || !arg)
10685 		return EINVAL;
10686 
10687 	*arg = env->me_path;
10688 	return MDB_SUCCESS;
10689 }
10690 
10691 int ESECT
mdb_env_get_fd(MDB_env * env,mdb_filehandle_t * arg)10692 mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg)
10693 {
10694 	if (!env || !arg)
10695 		return EINVAL;
10696 
10697 	*arg = env->me_fd;
10698 	return MDB_SUCCESS;
10699 }
10700 
10701 /** Common code for #mdb_stat() and #mdb_env_stat().
10702  * @param[in] env the environment to operate in.
10703  * @param[in] db the #MDB_db record containing the stats to return.
10704  * @param[out] arg the address of an #MDB_stat structure to receive the stats.
10705  * @return 0, this function always succeeds.
10706  */
10707 static int ESECT
mdb_stat0(MDB_env * env,MDB_db * db,MDB_stat * arg)10708 mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg)
10709 {
10710 	arg->ms_psize = env->me_psize;
10711 	arg->ms_depth = db->md_depth;
10712 	arg->ms_branch_pages = db->md_branch_pages;
10713 	arg->ms_leaf_pages = db->md_leaf_pages;
10714 	arg->ms_overflow_pages = db->md_overflow_pages;
10715 	arg->ms_entries = db->md_entries;
10716 
10717 	return MDB_SUCCESS;
10718 }
10719 
10720 int ESECT
mdb_env_stat(MDB_env * env,MDB_stat * arg)10721 mdb_env_stat(MDB_env *env, MDB_stat *arg)
10722 {
10723 	MDB_meta *meta;
10724 
10725 	if (env == NULL || arg == NULL)
10726 		return EINVAL;
10727 
10728 	meta = mdb_env_pick_meta(env);
10729 
10730 	return mdb_stat0(env, &meta->mm_dbs[MAIN_DBI], arg);
10731 }
10732 
10733 int ESECT
mdb_env_info(MDB_env * env,MDB_envinfo * arg)10734 mdb_env_info(MDB_env *env, MDB_envinfo *arg)
10735 {
10736 	MDB_meta *meta;
10737 
10738 	if (env == NULL || arg == NULL)
10739 		return EINVAL;
10740 
10741 	meta = mdb_env_pick_meta(env);
10742 	arg->me_mapaddr = meta->mm_address;
10743 	arg->me_last_pgno = meta->mm_last_pg;
10744 	arg->me_last_txnid = meta->mm_txnid;
10745 
10746 	arg->me_mapsize = env->me_mapsize;
10747 	arg->me_maxreaders = env->me_maxreaders;
10748 	arg->me_numreaders = env->me_txns ? env->me_txns->mti_numreaders : 0;
10749 	return MDB_SUCCESS;
10750 }
10751 
10752 /** Set the default comparison functions for a database.
10753  * Called immediately after a database is opened to set the defaults.
10754  * The user can then override them with #mdb_set_compare() or
10755  * #mdb_set_dupsort().
10756  * @param[in] txn A transaction handle returned by #mdb_txn_begin()
10757  * @param[in] dbi A database handle returned by #mdb_dbi_open()
10758  */
10759 static void
mdb_default_cmp(MDB_txn * txn,MDB_dbi dbi)10760 mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi)
10761 {
10762 	uint16_t f = txn->mt_dbs[dbi].md_flags;
10763 
10764 	txn->mt_dbxs[dbi].md_cmp =
10765 		(f & MDB_REVERSEKEY) ? mdb_cmp_memnr :
10766 		(f & MDB_INTEGERKEY) ? mdb_cmp_cint  : mdb_cmp_memn;
10767 
10768 	txn->mt_dbxs[dbi].md_dcmp =
10769 		!(f & MDB_DUPSORT) ? 0 :
10770 		((f & MDB_INTEGERDUP)
10771 		 ? ((f & MDB_DUPFIXED)   ? mdb_cmp_int   : mdb_cmp_cint)
10772 		 : ((f & MDB_REVERSEDUP) ? mdb_cmp_memnr : mdb_cmp_memn));
10773 }
10774 
/** Open the main DB or a named sub-DB and register it in this txn.
 * Returns an already-open slot when the name matches, otherwise looks
 * the name up in the main DB (creating it under #MDB_CREATE).
 */
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi)
{
	MDB_val key, data;
	MDB_dbi i;
	MDB_cursor mc;
	MDB_db dummy;
	int rc, dbflag, exact;
	unsigned int unused = 0, seq;
	char *namedup;
	size_t len;

	if (flags & ~VALID_FLAGS)
		return EINVAL;
	if (txn->mt_flags & MDB_TXN_BLOCKED)
		return MDB_BAD_TXN;

	/* main DB? */
	if (!name) {
		*dbi = MAIN_DBI;
		if (flags & PERSISTENT_FLAGS) {
			uint16_t f2 = flags & PERSISTENT_FLAGS;
			/* make sure flag changes get committed */
			if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) {
				txn->mt_dbs[MAIN_DBI].md_flags |= f2;
				txn->mt_flags |= MDB_TXN_DIRTY;
			}
		}
		mdb_default_cmp(txn, MAIN_DBI);
		return MDB_SUCCESS;
	}

	/* Named lookups go through the main DB: it needs comparators */
	if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) {
		mdb_default_cmp(txn, MAIN_DBI);
	}

	/* Is the DB already open? */
	len = strlen(name);
	for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
		if (!txn->mt_dbxs[i].md_name.mv_size) {
			/* Remember this free slot */
			if (!unused) unused = i;
			continue;
		}
		if (len == txn->mt_dbxs[i].md_name.mv_size &&
			!strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) {
			*dbi = i;
			return MDB_SUCCESS;
		}
	}

	/* If no free slot and max hit, fail */
	if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs)
		return MDB_DBS_FULL;

	/* Cannot mix named databases with some mainDB flags */
	if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY))
		return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND;

	/* Find the DB info */
	dbflag = DB_NEW|DB_VALID|DB_USRVALID;
	exact = 0;
	key.mv_size = len;
	key.mv_data = (void *)name;
	mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
	rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact);
	if (rc == MDB_SUCCESS) {
		/* make sure this is actually a DB */
		MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]);
		if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA)
			return MDB_INCOMPATIBLE;
	} else {
		/* Not found: only proceed if the caller asked to create */
		if (rc != MDB_NOTFOUND || !(flags & MDB_CREATE))
			return rc;
		if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
			return EACCES;
	}

	/* Done here so we cannot fail after creating a new DB */
	if ((namedup = strdup(name)) == NULL)
		return ENOMEM;

	if (rc) {
		/* MDB_NOTFOUND and MDB_CREATE: Create new DB */
		data.mv_size = sizeof(MDB_db);
		data.mv_data = &dummy;
		memset(&dummy, 0, sizeof(dummy));
		dummy.md_root = P_INVALID;
		dummy.md_flags = flags & PERSISTENT_FLAGS;
		WITH_CURSOR_TRACKING(mc,
			rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA));
		dbflag |= DB_DIRTY;
	}

	if (rc) {
		free(namedup);
	} else {
		/* Got info, register DBI in this txn */
		unsigned int slot = unused ? unused : txn->mt_numdbs;
		txn->mt_dbxs[slot].md_name.mv_data = namedup;
		txn->mt_dbxs[slot].md_name.mv_size = len;
		txn->mt_dbxs[slot].md_rel = NULL;
		txn->mt_dbflags[slot] = dbflag;
		/* txn-> and env-> are the same in read txns, use
		 * tmp variable to avoid undefined assignment
		 */
		seq = ++txn->mt_env->me_dbiseqs[slot];
		txn->mt_dbiseqs[slot] = seq;

		memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db));
		*dbi = slot;
		mdb_default_cmp(txn, slot);
		if (!unused) {
			txn->mt_numdbs++;
		}
	}

	return rc;
}
10893 
10894 int ESECT
mdb_stat(MDB_txn * txn,MDB_dbi dbi,MDB_stat * arg)10895 mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg)
10896 {
10897 	if (!arg || !TXN_DBI_EXIST(txn, dbi, DB_VALID))
10898 		return EINVAL;
10899 
10900 	if (txn->mt_flags & MDB_TXN_BLOCKED)
10901 		return MDB_BAD_TXN;
10902 
10903 	if (txn->mt_dbflags[dbi] & DB_STALE) {
10904 		MDB_cursor mc;
10905 		MDB_xcursor mx;
10906 		/* Stale, must read the DB's root. cursor_init does it for us. */
10907 		mdb_cursor_init(&mc, txn, dbi, &mx);
10908 	}
10909 	return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg);
10910 }
10911 
/** Release a DBI slot so it can be reused.
 * Silently ignores the core DBs and out-of-range handles; bumping
 * me_dbiseqs invalidates copies of this DBI cached in transactions.
 */
void mdb_dbi_close(MDB_env *env, MDB_dbi dbi)
{
	char *name;

	if (dbi < CORE_DBS || dbi >= env->me_maxdbs)
		return;
	name = env->me_dbxs[dbi].md_name.mv_data;
	if (name == NULL)
		return;		/* no name stored: slot already closed */

	env->me_dbxs[dbi].md_name.mv_data = NULL;
	env->me_dbxs[dbi].md_name.mv_size = 0;
	env->me_dbflags[dbi] = 0;
	env->me_dbiseqs[dbi]++;
	free(name);
}
10927 
/** Report the persistent (on-disk) flag bits of a database. */
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags)
{
	/* We could return the flags for the FREE_DBI too but what's the point? */
	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;
	*flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS;
	return MDB_SUCCESS;
}
10936 
/** Add all the DB's pages to the free list.
 * @param[in] mc Cursor on the DB to free.
 * @param[in] subs non-Zero to check for sub-DBs in this DB.
 * @return 0 on success, non-zero on failure.
 */
static int
mdb_drop0(MDB_cursor *mc, int subs)
{
	int rc;

	/* Position on the leftmost leaf; MDB_NOTFOUND means an empty DB */
	rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
	if (rc == MDB_SUCCESS) {
		MDB_txn *txn = mc->mc_txn;
		MDB_node *ni;
		MDB_cursor mx;
		unsigned int i;

		/* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves.
		 * This also avoids any P_LEAF2 pages, which have no nodes.
		 * Also if the DB doesn't have sub-DBs and has no overflow
		 * pages, omit scanning leaves.
		 */
		if ((mc->mc_flags & C_SUB) ||
			(!subs && !mc->mc_db->md_overflow_pages))
			mdb_cursor_pop(mc);

		/* Keep a copy of the original page stack; it is used to
		 * restore the stack when backing up a level (label pop:).
		 */
		mdb_cursor_copy(mc, &mx);
#ifdef MDB_VL32
		/* bump refcount for mx's pages */
		for (i=0; i<mc->mc_snum; i++)
			mdb_page_get(&mx, mc->mc_pg[i]->mp_pgno, &mx.mc_pg[i], NULL);
#endif
		while (mc->mc_snum > 0) {
			MDB_page *mp = mc->mc_pg[mc->mc_top];
			unsigned n = NUMKEYS(mp);
			if (IS_LEAF(mp)) {
				/* Leaf: free overflow chains, recurse into sub-DBs */
				for (i=0; i<n; i++) {
					ni = NODEPTR(mp, i);
					if (ni->mn_flags & F_BIGDATA) {
						MDB_page *omp;
						pgno_t pg;
						memcpy(&pg, NODEDATA(ni), sizeof(pg));
						rc = mdb_page_get(mc, pg, &omp, NULL);
						if (rc != 0)
							goto done;
						mdb_cassert(mc, IS_OVERFLOW(omp));
						rc = mdb_midl_append_range(&txn->mt_free_pgs,
							pg, omp->mp_pages);
						if (rc)
							goto done;
						mc->mc_db->md_overflow_pages -= omp->mp_pages;
						/* All overflow pages found: stop scanning nodes */
						if (!mc->mc_db->md_overflow_pages && !subs)
							break;
					} else if (subs && (ni->mn_flags & F_SUBDATA)) {
						mdb_xcursor_init1(mc, ni);
						rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
						if (rc)
							goto done;
					}
				}
				if (!subs && !mc->mc_db->md_overflow_pages)
					goto pop;
			} else {
				/* Branch: free every child page in one batch */
				if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0)
					goto done;
				for (i=0; i<n; i++) {
					pgno_t pg;
					ni = NODEPTR(mp, i);
					pg = NODEPGNO(ni);
					/* free it */
					mdb_midl_xappend(txn->mt_free_pgs, pg);
				}
			}
			if (!mc->mc_top)
				break;
			mc->mc_ki[mc->mc_top] = i;
			rc = mdb_cursor_sibling(mc, 1);
			if (rc) {
				if (rc != MDB_NOTFOUND)
					goto done;
				/* no more siblings, go back to beginning
				 * of previous level.
				 */
pop:
				mdb_cursor_pop(mc);
				mc->mc_ki[0] = 0;
				for (i=1; i<mc->mc_snum; i++) {
					mc->mc_ki[i] = 0;
					mc->mc_pg[i] = mx.mc_pg[i];
				}
			}
		}
		/* free it */
		rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root);
done:
		if (rc)
			txn->mt_flags |= MDB_TXN_ERROR;
		/* drop refcount for mx's pages */
		MDB_CURSOR_UNREF(&mx, 0);
	} else if (rc == MDB_NOTFOUND) {
		rc = MDB_SUCCESS;
	}
	/* Cursor no longer points at a valid position */
	mc->mc_flags &= ~C_INITIALIZED;
	return rc;
}
11042 
/** Empty or delete a database.
 * del == 0: empty the DB but keep it open; del == 1: also remove the
 * sub-DB record from the main DB and close the handle.
 */
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del)
{
	MDB_cursor *mc, *m2;
	int rc;

	/* del must be exactly 0 or 1 */
	if ((unsigned)del > 1 || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;

	if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY))
		return EACCES;

	if (TXN_DBI_CHANGED(txn, dbi))
		return MDB_BAD_DBI;

	rc = mdb_cursor_open(txn, dbi, &mc);
	if (rc)
		return rc;

	/* Move all of the DB's pages to the txn's free list */
	rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT);
	/* Invalidate the dropped DB's cursors */
	for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next)
		m2->mc_flags &= ~(C_INITIALIZED|C_EOF);
	if (rc)
		goto leave;

	/* Can't delete the main DB */
	if (del && dbi >= CORE_DBS) {
		/* Remove the sub-DB record from the main DB, close the DBI */
		rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA);
		if (!rc) {
			txn->mt_dbflags[dbi] = DB_STALE;
			mdb_dbi_close(txn->mt_env, dbi);
		} else {
			txn->mt_flags |= MDB_TXN_ERROR;
		}
	} else {
		/* reset the DB record, mark it dirty */
		txn->mt_dbflags[dbi] |= DB_DIRTY;
		txn->mt_dbs[dbi].md_depth = 0;
		txn->mt_dbs[dbi].md_branch_pages = 0;
		txn->mt_dbs[dbi].md_leaf_pages = 0;
		txn->mt_dbs[dbi].md_overflow_pages = 0;
		txn->mt_dbs[dbi].md_entries = 0;
		txn->mt_dbs[dbi].md_root = P_INVALID;

		txn->mt_flags |= MDB_TXN_DIRTY;
	}
leave:
	mdb_cursor_close(mc);
	return rc;
}
11093 
/** Override the key comparator chosen by mdb_default_cmp(). */
int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
{
	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;
	txn->mt_dbxs[dbi].md_cmp = cmp;
	return MDB_SUCCESS;
}
11102 
/** Override the duplicate-data comparator chosen by mdb_default_cmp(). */
int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp)
{
	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;
	txn->mt_dbxs[dbi].md_dcmp = cmp;
	return MDB_SUCCESS;
}
11111 
/** Install a relocation callback for this database. */
int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel)
{
	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;
	txn->mt_dbxs[dbi].md_rel = rel;
	return MDB_SUCCESS;
}
11120 
/** Install the context pointer passed to this database's rel function. */
int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx)
{
	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
		return EINVAL;
	txn->mt_dbxs[dbi].md_relctx = ctx;
	return MDB_SUCCESS;
}
11129 
int ESECT
mdb_env_get_maxkeysize(MDB_env *env)
{
	/* Key-size limit per the ENV_MAXKEY() macro (definition not
	 * visible in this chunk -- presumably compile-time constant).
	 */
	return ENV_MAXKEY(env);
}
11135 
int ESECT
mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx)
{
	/* Dump the reader lock table by invoking func() once per line. */
	unsigned int i, rdrs;
	MDB_reader *mr;
	char buf[64];
	int rc = 0, first = 1;

	if (!env || !func)
		return -1;
	if (!env->me_txns) {
		/* No reader lock table present in this environment */
		return func("(no reader locks)\n", ctx);
	}
	rdrs = env->me_txns->mti_numreaders;
	mr = env->me_txns->mti_readers;
	for (i=0; i<rdrs; i++) {
		/* Slots with a zero pid are unused */
		if (mr[i].mr_pid) {
			txnid_t	txnid = mr[i].mr_txnid;
			/* (txnid_t)-1 marks a reader without a snapshot: print "-" */
			sprintf(buf, txnid == (txnid_t)-1 ?
				"%10d %"Z"x -\n" : "%10d %"Z"x %"Yu"\n",
				(int)mr[i].mr_pid, (size_t)mr[i].mr_tid, txnid);
			if (first) {
				first = 0;
				/* Emit the header once, before the first entry */
				rc = func("    pid     thread     txnid\n", ctx);
				if (rc < 0)
					break;
			}
			rc = func(buf, ctx);
			if (rc < 0)
				break;
		}
	}
	if (first) {
		rc = func("(no active readers)\n", ctx);
	}
	return rc;
}
11173 
11174 /** Insert pid into list if not already present.
11175  * return -1 if already present.
11176  */
11177 static int ESECT
mdb_pid_insert(MDB_PID_T * ids,MDB_PID_T pid)11178 mdb_pid_insert(MDB_PID_T *ids, MDB_PID_T pid)
11179 {
11180 	/* binary search of pid in list */
11181 	unsigned base = 0;
11182 	unsigned cursor = 1;
11183 	int val = 0;
11184 	unsigned n = ids[0];
11185 
11186 	while( 0 < n ) {
11187 		unsigned pivot = n >> 1;
11188 		cursor = base + pivot + 1;
11189 		val = pid - ids[cursor];
11190 
11191 		if( val < 0 ) {
11192 			n = pivot;
11193 
11194 		} else if ( val > 0 ) {
11195 			base = cursor;
11196 			n -= pivot + 1;
11197 
11198 		} else {
11199 			/* found, so it's a duplicate */
11200 			return -1;
11201 		}
11202 	}
11203 
11204 	if( val > 0 ) {
11205 		++cursor;
11206 	}
11207 	ids[0]++;
11208 	for (n = ids[0]; n > cursor; n--)
11209 		ids[n] = ids[n-1];
11210 	ids[n] = pid;
11211 	return 0;
11212 }
11213 
11214 int ESECT
mdb_reader_check(MDB_env * env,int * dead)11215 mdb_reader_check(MDB_env *env, int *dead)
11216 {
11217 	if (!env)
11218 		return EINVAL;
11219 	if (dead)
11220 		*dead = 0;
11221 	return env->me_txns ? mdb_reader_check0(env, 0, dead) : MDB_SUCCESS;
11222 }
11223 
/** As #mdb_reader_check(). \b rlocked is set if caller locked #me_rmutex. */
static int ESECT
mdb_reader_check0(MDB_env *env, int rlocked, int *dead)
{
	/* Take the reader mutex ourselves only if the caller didn't */
	mdb_mutexref_t rmutex = rlocked ? NULL : env->me_rmutex;
	unsigned int i, j, rdrs;
	MDB_reader *mr;
	MDB_PID_T *pids, pid;
	int rc = MDB_SUCCESS, count = 0;

	rdrs = env->me_txns->mti_numreaders;
	/* pids[0] is a count; the rest is a sorted list of pids seen so far */
	pids = malloc((rdrs+1) * sizeof(MDB_PID_T));
	if (!pids)
		return ENOMEM;
	pids[0] = 0;
	mr = env->me_txns->mti_readers;
	for (i=0; i<rdrs; i++) {
		pid = mr[i].mr_pid;
		/* Skip empty slots and our own process */
		if (pid && pid != env->me_pid) {
			/* mdb_pid_insert() ensures each pid is checked only once */
			if (mdb_pid_insert(pids, pid) == 0) {
				if (!mdb_reader_pid(env, Pidcheck, pid)) {
					/* Stale reader found */
					j = i;
					if (rmutex) {
						if ((rc = LOCK_MUTEX0(rmutex)) != 0) {
							if ((rc = mdb_mutex_failed(env, rmutex, rc)))
								break;
							rdrs = 0; /* the above checked all readers */
						} else {
							/* Recheck, a new process may have reused pid */
							if (mdb_reader_pid(env, Pidcheck, pid))
								j = rdrs;
						}
					}
					/* Clear every slot still owned by the dead pid */
					for (; j<rdrs; j++)
							if (mr[j].mr_pid == pid) {
								DPRINTF(("clear stale reader pid %u txn %"Yd,
									(unsigned) pid, mr[j].mr_txnid));
								mr[j].mr_pid = 0;
								count++;
							}
					if (rmutex)
						UNLOCK_MUTEX(rmutex);
				}
			}
		}
	}
	free(pids);
	if (dead)
		*dead = count;
	return rc;
}
11276 
11277 #ifdef MDB_ROBUST_SUPPORTED
/** Handle #LOCK_MUTEX0() failure.
 * Try to repair the lock file if the mutex owner died.
 * @param[in] env	the environment handle
 * @param[in] mutex	LOCK_MUTEX0() mutex
 * @param[in] rc	LOCK_MUTEX0() error (nonzero)
 * @return 0 on success with the mutex locked, or an error code on failure.
 */
static int ESECT
mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc)
{
	int rlocked, rc2;
	MDB_meta *meta;

	if (rc == MDB_OWNERDEAD) {
		/* We own the mutex. Clean up after dead previous owner. */
		rc = MDB_SUCCESS;
		rlocked = (mutex == env->me_rmutex);
		if (!rlocked) {
			/* The dead owner held the WRITE mutex.
			 * Keep mti_txnid updated, otherwise next writer can
			 * overwrite data which latest meta page refers to.
			 */
			meta = mdb_env_pick_meta(env);
			env->me_txns->mti_txnid = meta->mm_txnid;
			/* env is hosed if the dead thread was ours */
			if (env->me_txn) {
				env->me_flags |= MDB_FATAL_ERROR;
				env->me_txn = NULL;
				rc = MDB_PANIC;
			}
		}
		DPRINTF(("%cmutex owner died, %s", (rlocked ? 'r' : 'w'),
			(rc ? "this process' env is hosed" : "recovering")));
		/* Clear stale reader slots, then mark the robust mutex usable
		 * again for other processes.
		 */
		rc2 = mdb_reader_check0(env, rlocked, NULL);
		if (rc2 == 0)
			rc2 = mdb_mutex_consistent(mutex);
		/* On any failure release the mutex we inherited; on success we
		 * return 0 with the mutex still held, as the caller expects.
		 */
		if (rc || (rc = rc2)) {
			DPRINTF(("LOCK_MUTEX recovery failed, %s", mdb_strerror(rc)));
			UNLOCK_MUTEX(mutex);
		}
	} else {
#ifdef _WIN32
		rc = ErrCode();
#endif
		DPRINTF(("LOCK_MUTEX failed, %s", mdb_strerror(rc)));
	}

	return rc;
}
11326 #endif	/* MDB_ROBUST_SUPPORTED */
11327 
11328 #if defined(_WIN32)
11329 /** Convert \b src to new wchar_t[] string with room for \b xtra extra chars */
11330 static int ESECT
utf8_to_utf16(const char * src,MDB_name * dst,int xtra)11331 utf8_to_utf16(const char *src, MDB_name *dst, int xtra)
11332 {
11333 	int rc, need = 0;
11334 	wchar_t *result = NULL;
11335 	for (;;) {					/* malloc result, then fill it in */
11336 		need = MultiByteToWideChar(CP_UTF8, 0, src, -1, result, need);
11337 		if (!need) {
11338 			rc = ErrCode();
11339 			free(result);
11340 			return rc;
11341 		}
11342 		if (!result) {
11343 			result = malloc(sizeof(wchar_t) * (need + xtra));
11344 			if (!result)
11345 				return ENOMEM;
11346 			continue;
11347 		}
11348 		dst->mn_alloced = 1;
11349 		dst->mn_len = need - 1;
11350 		dst->mn_val = result;
11351 		return MDB_SUCCESS;
11352 	}
11353 }
11354 #endif /* defined(_WIN32) */
11355 /** @} */
11356