/* secmem.c  -	memory allocation from a secure heap
 * Copyright (C) 1998, 1999, 2000, 2001, 2002,
 *               2003, 2007 Free Software Foundation, Inc.
 * Copyright (C) 2013, 2016 g10 Code GmbH
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <unistd.h>
#include <stddef.h>

#if defined(HAVE_MLOCK) || defined(HAVE_MMAP)
#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#ifdef USE_CAPABILITIES
#include <sys/capability.h>
#endif
#endif

#include "g10lib.h"
#include "secmem.h"

#if defined (MAP_ANON) && ! defined (MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define MINIMUM_POOL_SIZE 16384
#define STANDARD_POOL_SIZE 32768
#define DEFAULT_PAGE_SIZE 4096

typedef struct memblock
{
  unsigned size;		/* Size of the memory available to the
				   user.  */
  int flags;			/* See below.  */
  PROPERLY_ALIGNED_TYPE aligned;
} memblock_t;

/* This flag specifies that the memory block is in use.  */
#define MB_FLAG_ACTIVE (1 << 0)

/* An object describing a memory pool.  */
typedef struct pooldesc_s
{
  /* A link to the next pool.  This is used to connect the overflow
   * pools.  */
  struct pooldesc_s * volatile next;

  /* A memory buffer used as allocation pool.  */
  void *mem;

  /* The allocated size of MEM. */
  size_t size;

  /* Flag indicating that this memory pool is ready for use.  May be
   * checked in an atexit function.  */
  volatile int okay;

  /* Flag indicating whether MEM is mmapped.  */
  volatile int is_mmapped;

  /* The number of allocated bytes and the number of used blocks in
   * this pool.  */
  unsigned int cur_alloced, cur_blocks;
} pooldesc_t;


/* The pool of secure memory.  This is the head of a linked list with
 * the first element being the standard mlock-ed pool and the
 * following elements being the overflow pools. */
static pooldesc_t mainpool;


/* A couple of flags with some being set early.  */
static int disable_secmem;
static int show_warning;
static int not_locked;
static int no_warning;
static int suspend_warning;
static int no_mlock;
static int no_priv_drop;
static unsigned int auto_expand;


/* Lock protecting accesses to the memory pools.  */
GPGRT_LOCK_DEFINE (secmem_lock);

/* Convenient macros.  */
#define SECMEM_LOCK   gpgrt_lock_lock   (&secmem_lock)
#define SECMEM_UNLOCK gpgrt_lock_unlock (&secmem_lock)

/* The size of the memblock structure; this does not include the
   memory that is available to the user.  */
#define BLOCK_HEAD_SIZE \
  offsetof (memblock_t, aligned)

/* Convert an address into the corresponding memory block structure.  */
#define ADDR_TO_BLOCK(addr) \
  (memblock_t *) (void *) ((char *) addr - BLOCK_HEAD_SIZE)
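
/* Illustrative sketch (not part of the build): the relation between a
 * user pointer and its block header.  PTR is a hypothetical pointer
 * obtained from _gcry_secmem_malloc, defined further below.
 *
 *   void *ptr = _gcry_secmem_malloc (64, 0);
 *   memblock_t *mb = ADDR_TO_BLOCK (ptr);
 *
 * MB->SIZE then holds the usable size (the request rounded up to a
 * multiple of 32) and the block occupies BLOCK_HEAD_SIZE + MB->SIZE
 * bytes of the pool.  */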

/* Prototypes. */
static void secmem_dump_stats_internal (int extended);


/*
 * Functions
 */

/* Memory barrier */
static inline void
memory_barrier (void)
{
#ifdef HAVE_SYNC_SYNCHRONIZE
#ifdef HAVE_GCC_ASM_VOLATILE_MEMORY
  asm volatile ("":::"memory");
#endif
  /* Use GCC / clang intrinsic for memory barrier. */
  __sync_synchronize();
#else
  /* Slow portable alternative: implement the memory barrier by using
     a mutex. */
  gpgrt_lock_t tmp;
  memset (&tmp, 0, sizeof(tmp));
  gpgrt_lock_init (&tmp);
  gpgrt_lock_lock (&tmp);
  gpgrt_lock_unlock (&tmp);
  gpgrt_lock_destroy (&tmp);
#endif
}


/* Check whether P points into POOL.  */
static inline int
ptr_into_pool_p (pooldesc_t *pool, const void *p)
{
  /* We need to convert the pointers to integer addresses; comparing
     unrelated pointers would be undefined behaviour per C99 6.5.8.
     See also
     http://lists.gnupg.org/pipermail/gcrypt-devel/2007-February/001102.html
  */
  uintptr_t p_addr    = (uintptr_t)p;
  uintptr_t pool_addr = (uintptr_t)pool->mem;

  return p_addr >= pool_addr && p_addr <  pool_addr + pool->size;
}

/* Update the stats.  */
static void
stats_update (pooldesc_t *pool, size_t add, size_t sub)
{
  if (add)
    {
      pool->cur_alloced += add;
      pool->cur_blocks++;
    }
  if (sub)
    {
      pool->cur_alloced -= sub;
      pool->cur_blocks--;
    }
}

/* Return the block following MB, or NULL if MB is the last block.  */
static memblock_t *
mb_get_next (pooldesc_t *pool, memblock_t *mb)
{
  memblock_t *mb_next;

  mb_next = (memblock_t *) (void *) ((char *) mb + BLOCK_HEAD_SIZE + mb->size);

  if (! ptr_into_pool_p (pool, mb_next))
    mb_next = NULL;

  return mb_next;
}

/* Return the block preceding MB, or NULL if MB is the first
   block.  */
static memblock_t *
mb_get_prev (pooldesc_t *pool, memblock_t *mb)
{
  memblock_t *mb_prev, *mb_next;

  if (mb == pool->mem)
    mb_prev = NULL;
  else
    {
      mb_prev = (memblock_t *) pool->mem;
      while (1)
	{
	  mb_next = mb_get_next (pool, mb_prev);
	  if (mb_next == mb)
	    break;
	  else
	    mb_prev = mb_next;
	}
    }

  return mb_prev;
}

/* If the preceding and/or the following block of MB exist and are
   not active, merge them with MB to form a bigger block.  */
static void
mb_merge (pooldesc_t *pool, memblock_t *mb)
{
  memblock_t *mb_prev, *mb_next;

  mb_prev = mb_get_prev (pool, mb);
  mb_next = mb_get_next (pool, mb);

  if (mb_prev && (! (mb_prev->flags & MB_FLAG_ACTIVE)))
    {
      mb_prev->size += BLOCK_HEAD_SIZE + mb->size;
      mb = mb_prev;
    }
  if (mb_next && (! (mb_next->flags & MB_FLAG_ACTIVE)))
    mb->size += BLOCK_HEAD_SIZE + mb_next->size;
}
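
/* Worked example (illustrative): assume BLOCK_HEAD_SIZE is 16 and
 * three consecutive blocks [free, size 32][just freed, size 32]
 * [free, size 32].  The merge first grows the preceding block by
 * 16 + 32 to 80 bytes and then absorbs the following block, yielding
 * a single free block of 80 + 16 + 32 = 128 bytes.  */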

/* Return a new block, which can hold SIZE bytes.  */
static memblock_t *
mb_get_new (pooldesc_t *pool, memblock_t *block, size_t size)
{
  memblock_t *mb, *mb_split;

  for (mb = block; ptr_into_pool_p (pool, mb); mb = mb_get_next (pool, mb))
    if (! (mb->flags & MB_FLAG_ACTIVE) && mb->size >= size)
      {
	/* Found a free block.  */
	mb->flags |= MB_FLAG_ACTIVE;

	if (mb->size - size > BLOCK_HEAD_SIZE)
	  {
	    /* Split block.  */

	    mb_split = (memblock_t *) (void *) (((char *) mb) + BLOCK_HEAD_SIZE
						+ size);
	    mb_split->size = mb->size - size - BLOCK_HEAD_SIZE;
	    mb_split->flags = 0;

	    mb->size = size;

	    mb_merge (pool, mb_split);

	  }

	break;
      }

  if (! ptr_into_pool_p (pool, mb))
    {
      gpg_err_set_errno (ENOMEM);
      mb = NULL;
    }

  return mb;
}
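
/* Worked example (illustrative): with BLOCK_HEAD_SIZE 16, a request
 * for 32 bytes served from a free block of size 128 splits it,
 * because 128 - 32 > 16: the allocated block keeps size 32 and a new
 * free block of 128 - 32 - 16 = 80 bytes is created behind it (and
 * merged with a free neighbour, if any).  A request for 120 bytes
 * would hand out the whole block unsplit, since 128 - 120 <= 16.  */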

/* Print a warning message.  */
static void
print_warn (void)
{
  if (!no_warning)
    log_info (_("Warning: using insecure memory!\n"));
}


/* Lock the memory pages of pool P of size N into core and drop
 * privileges.  */
static void
lock_pool_pages (void *p, size_t n)
{
#if defined(USE_CAPABILITIES) && defined(HAVE_MLOCK)
  int err;

  {
    cap_t cap;

    if (!no_priv_drop)
      {
        cap = cap_from_text ("cap_ipc_lock+ep");
        cap_set_proc (cap);
        cap_free (cap);
      }
    err = no_mlock? 0 : mlock (p, n);
    if (err && errno)
      err = errno;
    if (!no_priv_drop)
      {
        cap = cap_from_text ("cap_ipc_lock+p");
        cap_set_proc (cap);
        cap_free (cap);
      }
  }

  if (err)
    {
      if (err != EPERM
#ifdef EAGAIN	/* BSD and also Linux may return EAGAIN */
	  && err != EAGAIN
#endif
#ifdef ENOSYS	/* Some SCOs return this (function not implemented) */
	  && err != ENOSYS
#endif
#ifdef ENOMEM  /* Linux might return this. */
            && err != ENOMEM
#endif
	  )
	log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
      not_locked = 1;
    }

#elif defined(HAVE_MLOCK)
  uid_t uid;
  int err;

  uid = getuid ();

#ifdef HAVE_BROKEN_MLOCK
  /* Under HP/UX mlock segfaults if called by non-root.  Note that we
     have not checked whether mlock really works under AIX, where we
     also detected a broken mlock.  Note further that using plock()
     is not a good idea under AIX. */
  if (uid)
    {
      errno = EPERM;
      err = errno;
    }
  else
    {
      err = no_mlock? 0 : mlock (p, n);
      if (err && errno)
	err = errno;
    }
#else /* !HAVE_BROKEN_MLOCK */
  err = no_mlock? 0 : mlock (p, n);
  if (err && errno)
    err = errno;
#endif /* !HAVE_BROKEN_MLOCK */

  /* Test whether we are running setuid(0).  */
  if (uid && ! geteuid ())
    {
      /* Yes, we are.  */
      if (!no_priv_drop)
        {
          /* Check that we really dropped the privs.
           * Note: setuid(0) should always fail. */
          if (setuid (uid) || getuid () != geteuid () || !setuid (0))
            log_fatal ("failed to reset uid: %s\n", strerror (errno));
        }
    }

  if (err)
    {
      if (err != EPERM
#ifdef EAGAIN	/* BSD and also Linux may return this. */
	  && err != EAGAIN
#endif
#ifdef ENOSYS	/* Some SCOs return this (function not implemented). */
	  && err != ENOSYS
#endif
#ifdef ENOMEM  /* Linux might return this. */
            && err != ENOMEM
#endif
	  )
	log_error ("can't lock memory: %s\n", strerror (err));
      show_warning = 1;
      not_locked = 1;
    }

#elif defined ( __QNX__ )
  /* QNX does not page at all, so the whole secure memory stuff does
   * not make much sense.  However it is still of use because it
   * wipes out the memory on a free().
   * Therefore it is sufficient to suppress the warning.  */
  (void)p;
  (void)n;
#elif defined (HAVE_DOSISH_SYSTEM) || defined (__CYGWIN__)
    /* It does not make sense to print such a warning, given the fact that
     * this whole Windows !@#$% and their user base are inherently insecure. */
  (void)p;
  (void)n;
#elif defined (__riscos__)
    /* No virtual memory on RISC OS, so no pages are swapped to disc,
     * besides we don't have mmap, so we don't use it! ;-)
     * But don't complain, as explained above.  */
  (void)p;
  (void)n;
#else
  (void)p;
  (void)n;
  if (!no_mlock)
    log_info ("Please note that you don't have secure memory on this system\n");
#endif
}

/* Initialize POOL.  */
static void
init_pool (pooldesc_t *pool, size_t n)
{
  memblock_t *mb;

  pool->size = n;

  if (disable_secmem)
    log_bug ("secure memory is disabled");


#if HAVE_MMAP
  {
    size_t pgsize;
    long int pgsize_val;

# if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE)
    pgsize_val = sysconf (_SC_PAGESIZE);
# elif defined(HAVE_GETPAGESIZE)
    pgsize_val = getpagesize ();
# else
    pgsize_val = -1;
# endif
    pgsize = (pgsize_val > 0)? pgsize_val : DEFAULT_PAGE_SIZE;

    /* Round the pool size up to the next page boundary.  */
    pool->size = (pool->size + pgsize - 1) & ~(pgsize - 1);
# ifdef MAP_ANONYMOUS
    pool->mem = mmap (0, pool->size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
# else /* map /dev/zero instead */
    {
      int fd;

      fd = open ("/dev/zero", O_RDWR);
      if (fd == -1)
        {
          log_error ("can't open /dev/zero: %s\n", strerror (errno));
          pool->mem = (void *) -1;
        }
      else
        {
          pool->mem = mmap (0, pool->size,
                           (PROT_READ | PROT_WRITE), MAP_PRIVATE, fd, 0);
          close (fd);
        }
    }
# endif
    if (pool->mem == (void *) -1)
      log_info ("can't mmap pool of %u bytes: %s - using malloc\n",
                (unsigned) pool->size, strerror (errno));
    else
      {
        pool->is_mmapped = 1;
        pool->okay = 1;
      }
  }
#endif /*HAVE_MMAP*/

  if (!pool->okay)
    {
      pool->mem = malloc (pool->size);
      if (!pool->mem)
	log_fatal ("can't allocate memory pool of %u bytes\n",
		   (unsigned) pool->size);
      else
	pool->okay = 1;
    }

  /* Initialize first memory block.  */
  mb = (memblock_t *) pool->mem;
  mb->size = pool->size - BLOCK_HEAD_SIZE;
  mb->flags = 0;
}
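
/* Worked example (illustrative): with a page size of 4096 a requested
 * pool size of 33000 is rounded up to (33000 + 4095) & ~4095 = 36864,
 * i.e. nine full pages; an already page aligned request such as 32768
 * is left unchanged.  */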


/* Enable overflow pool allocation in all cases.  CHUNKSIZE is a hint
 * on how large to allocate overflow pools.  */
void
_gcry_secmem_set_auto_expand (unsigned int chunksize)
{
  /* Round up to a multiple of the STANDARD_POOL_SIZE.  */
  chunksize = ((chunksize + (2*STANDARD_POOL_SIZE) - 1)
               / STANDARD_POOL_SIZE ) * STANDARD_POOL_SIZE;
  if (chunksize < STANDARD_POOL_SIZE) /* In case of overflow.  */
    chunksize = STANDARD_POOL_SIZE;

  SECMEM_LOCK;
  auto_expand = chunksize;
  SECMEM_UNLOCK;
}
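
/* Worked example (illustrative): with STANDARD_POOL_SIZE at 32768 a
 * CHUNKSIZE of 40000 becomes ((40000 + 65535) / 32768) * 32768 =
 * 98304; the hint is rounded up to the next multiple of the standard
 * pool size plus one extra pool.  */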


void
_gcry_secmem_set_flags (unsigned flags)
{
  int was_susp;

  SECMEM_LOCK;

  was_susp = suspend_warning;
  no_warning = flags & GCRY_SECMEM_FLAG_NO_WARNING;
  suspend_warning = flags & GCRY_SECMEM_FLAG_SUSPEND_WARNING;
  no_mlock      = flags & GCRY_SECMEM_FLAG_NO_MLOCK;
  no_priv_drop = flags & GCRY_SECMEM_FLAG_NO_PRIV_DROP;

  /* And now issue the warning if it is no longer suspended.  */
  if (was_susp && !suspend_warning && show_warning)
    {
      show_warning = 0;
      print_warn ();
    }

  SECMEM_UNLOCK;
}

unsigned int
_gcry_secmem_get_flags (void)
{
  unsigned flags;

  SECMEM_LOCK;

  flags = no_warning ? GCRY_SECMEM_FLAG_NO_WARNING : 0;
  flags |= suspend_warning ? GCRY_SECMEM_FLAG_SUSPEND_WARNING : 0;
  flags |= not_locked ? GCRY_SECMEM_FLAG_NOT_LOCKED : 0;
  flags |= no_mlock ? GCRY_SECMEM_FLAG_NO_MLOCK : 0;
  flags |= no_priv_drop ? GCRY_SECMEM_FLAG_NO_PRIV_DROP : 0;

  SECMEM_UNLOCK;

  return flags;
}


/* This function initializes the main memory pool MAINPOOL.  It is
 * expected to be called with the secmem lock held.  */
static void
_gcry_secmem_init_internal (size_t n)
{
  pooldesc_t *pool;

  pool = &mainpool;
  if (!n)
    {
#ifdef USE_CAPABILITIES
      /* drop all capabilities */
      if (!no_priv_drop)
        {
          cap_t cap;

          cap = cap_from_text ("all-eip");
          cap_set_proc (cap);
          cap_free (cap);
        }

#elif !defined(HAVE_DOSISH_SYSTEM)
      uid_t uid;

      disable_secmem = 1;
      uid = getuid ();
      if (uid != geteuid ())
	{
	  if (setuid (uid) || getuid () != geteuid () || !setuid (0))
	    log_fatal ("failed to drop setuid\n");
	}
#endif
    }
  else
    {
      if (n < MINIMUM_POOL_SIZE)
	n = MINIMUM_POOL_SIZE;
      if (! pool->okay)
	{
	  init_pool (pool, n);
	  lock_pool_pages (pool->mem, n);
	}
      else
	log_error ("Oops, secure memory pool already initialized\n");
    }
}


/* Initialize the secure memory system.  If running with the necessary
   privileges, the secure memory pool will be locked into the core in
   order to prevent page-outs of the data.  Furthermore allocated
   secure memory will be wiped out when released.  */
void
_gcry_secmem_init (size_t n)
{
  SECMEM_LOCK;

  _gcry_secmem_init_internal (n);

  SECMEM_UNLOCK;
}


gcry_err_code_t
_gcry_secmem_module_init ()
{
  /* No longer needed.  */
  return 0;
}


static void *
_gcry_secmem_malloc_internal (size_t size, int xhint)
{
  pooldesc_t *pool;
  memblock_t *mb;

  pool = &mainpool;

  if (!pool->okay)
    {
      /* Try to initialize the pool if the user forgot about it.  */
      _gcry_secmem_init_internal (STANDARD_POOL_SIZE);
      if (!pool->okay)
        {
          log_info (_("operation is not possible without "
                      "initialized secure memory\n"));
          gpg_err_set_errno (ENOMEM);
          return NULL;
        }
    }
  if (not_locked && fips_mode ())
    {
      log_info (_("secure memory pool is not locked while in FIPS mode\n"));
      gpg_err_set_errno (ENOMEM);
      return NULL;
    }
  if (show_warning && !suspend_warning)
    {
      show_warning = 0;
      print_warn ();
    }

  /* Blocks are always a multiple of 32. */
  size = ((size + 31) / 32) * 32;

  mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
  if (mb)
    {
      stats_update (pool, mb->size, 0);
      return &mb->aligned.c;
    }

  /* If we are called from xmalloc style functions, resort to the
   * overflow pools to return memory.  We don't do this in FIPS mode,
   * though.  If the auto-expand option is active we do the expanding
   * also for the standard malloc functions.
   *
   * The idea of using them by default only for the xmalloc functions
   * is so that a user can control whether memory will be allocated in
   * the initially created mlock protected secmem area or may also be
   * allocated from the overflow pools.  */
  if ((xhint || auto_expand) && !fips_mode ())
    {
      /* Check whether we can allocate from the overflow pools.  */
      for (pool = pool->next; pool; pool = pool->next)
        {
          mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
          if (mb)
            {
              stats_update (pool, mb->size, 0);
              return &mb->aligned.c;
            }
        }
      /* Allocate a new overflow pool.  We put a new pool right after
       * the mainpool so that the next allocation will happen in that
       * pool and not in one of the older pools.  When this new pool
       * gets full we will try to find space in the older pools.  */
      pool = calloc (1, sizeof *pool);
      if (!pool)
        return NULL;  /* Not enough memory for a new pool descriptor.  */
      pool->size = auto_expand? auto_expand : STANDARD_POOL_SIZE;
      pool->mem = malloc (pool->size);
      if (!pool->mem)
        {
          free (pool);
          return NULL; /* Not enough memory available for a new pool.  */
        }
      /* Initialize first memory block.  */
      mb = (memblock_t *) pool->mem;
      mb->size = pool->size - BLOCK_HEAD_SIZE;
      mb->flags = 0;

      pool->okay = 1;

      /* Take care: in _gcry_private_is_secure we do not lock and thus
       * we assume that the second assignment below is atomic.  The
       * memory barrier prevents reordering of the stores to the new
       * pool structure after the MAINPOOL.NEXT assignment and prevents
       * _gcry_private_is_secure from seeing non-initialized POOL->NEXT
       * pointers.  */
      pool->next = mainpool.next;
      memory_barrier();
      mainpool.next = pool;

      /* After the first time we allocated an overflow pool, print a
       * warning.  */
      if (!pool->next)
        print_warn ();

      /* Allocate.  */
      mb = mb_get_new (pool, (memblock_t *) pool->mem, size);
      if (mb)
        {
          stats_update (pool, mb->size, 0);
          return &mb->aligned.c;
        }
    }

  return NULL;
}


/* Allocate a block of SIZE bytes from the secure memory.  With XHINT
 * set, assume that the caller is an xmalloc style function.  */
void *
_gcry_secmem_malloc (size_t size, int xhint)
{
  void *p;

  SECMEM_LOCK;
  p = _gcry_secmem_malloc_internal (size, xhint);
  SECMEM_UNLOCK;

  return p;
}
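
/* Usage sketch (illustrative only, excluded from the build): the
 * typical life cycle of a secure allocation as seen by callers of
 * this module.  The pool and block sizes are arbitrary example
 * values.  */
#if 0
static void
example_secmem_usage (void)
{
  char *key;

  _gcry_secmem_init (32768);            /* Create and mlock the pool.  */
  key = _gcry_secmem_malloc (64, 0);    /* Rounded up to a multiple of 32.  */
  if (key)
    {
      /* ... use the buffer for key material ... */
      _gcry_secmem_free (key);          /* Wiped before being released.  */
    }
  _gcry_secmem_term ();                 /* Wipe and release all pools.  */
}
#endif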

static int
_gcry_secmem_free_internal (void *a)
{
  pooldesc_t *pool;
  memblock_t *mb;
  int size;

  for (pool = &mainpool; pool; pool = pool->next)
    if (pool->okay && ptr_into_pool_p (pool, a))
      break;
  if (!pool)
    return 0; /* A does not belong to us.  */

  mb = ADDR_TO_BLOCK (a);
  size = mb->size;

  /* This does not make much sense: probably this memory is held in the
   * cache. We do it anyway: */
#define MB_WIPE_OUT(byte) \
  wipememory2 (((char *) mb + BLOCK_HEAD_SIZE), (byte), size);

  MB_WIPE_OUT (0xff);
  MB_WIPE_OUT (0xaa);
  MB_WIPE_OUT (0x55);
  MB_WIPE_OUT (0x00);

  /* Update stats.  */
  stats_update (pool, 0, size);

  mb->flags &= ~MB_FLAG_ACTIVE;

  mb_merge (pool, mb);

  return 1; /* Freed.  */
}


/* Wipe out and release memory.  Returns true if this function
 * actually released A.  */
int
_gcry_secmem_free (void *a)
{
  int mine;

  if (!a)
    return 1; /* Tell caller that we handled it.  */

  SECMEM_LOCK;
  mine = _gcry_secmem_free_internal (a);
  SECMEM_UNLOCK;
  return mine;
}


static void *
_gcry_secmem_realloc_internal (void *p, size_t newsize, int xhint)
{
  memblock_t *mb;
  size_t size;
  void *a;

  /* Step back from the user pointer to the block header; the cast
     expression computes the offset of ALIGNED.C within memblock_t.  */
  mb = (memblock_t *) (void *) ((char *) p
				- ((size_t) &((memblock_t *) 0)->aligned.c));
  size = mb->size;
  if (newsize < size)
    {
      /* It is easier to not shrink the memory.  */
      a = p;
    }
  else
    {
      a = _gcry_secmem_malloc_internal (newsize, xhint);
      if (a)
	{
	  memcpy (a, p, size);
	  memset ((char *) a + size, 0, newsize - size);
	  _gcry_secmem_free_internal (p);
	}
    }

  return a;
}


/* Realloc memory.  With XHINT set, assume that the caller is an
 * xmalloc style function.  */
void *
_gcry_secmem_realloc (void *p, size_t newsize, int xhint)
{
  void *a;

  SECMEM_LOCK;
  a = _gcry_secmem_realloc_internal (p, newsize, xhint);
  SECMEM_UNLOCK;

  return a;
}
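
/* Behaviour sketch (illustrative): shrinking is served in place while
 * growing allocates a new block, copies, zero-fills the tail and
 * wipes the old block.  P is a hypothetical pointer to a 64 byte
 * allocation:
 *
 *   p = _gcry_secmem_realloc (p, 32, 0);     P is returned unchanged
 *   p = _gcry_secmem_realloc (p, 4096, 0);   new block; the old one is
 *                                            wiped and freed
 */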


/* Return true if P points into the secure memory areas.  */
int
_gcry_private_is_secure (const void *p)
{
  pooldesc_t *pool;

  /* We do not lock here because once a pool is allocated it will not
   * be removed anymore (except by gcry_secmem_term).  Further, as the
   * assignment of POOL->NEXT in a new pool structure is visible in
   * this thread before the assignment of MAINPOOL.NEXT, the pool list
   * can be iterated locklessly.  This visibility is ensured by the
   * memory barrier between the POOL->NEXT and MAINPOOL.NEXT
   * assignments in _gcry_secmem_malloc_internal. */
  for (pool = &mainpool; pool; pool = pool->next)
    if (pool->okay && ptr_into_pool_p (pool, p))
      return 1;

  return 0;
}


/****************
 * Warning:  This code might be called by an interrupt handler
 *	     and frankly, there should really be such a handler,
 *	     to make sure that the memory is wiped out.
 *	     We hope that the OS wipes out mlocked memory after
 *	     receiving a SIGKILL - it really should do so, otherwise
 *	     there is no chance to get the secure memory cleaned.
 */
void
_gcry_secmem_term ()
{
  pooldesc_t *pool, *next;

  for (pool = &mainpool; pool; pool = next)
    {
      next = pool->next;
      if (!pool->okay)
        continue;

      wipememory2 (pool->mem, 0xff, pool->size);
      wipememory2 (pool->mem, 0xaa, pool->size);
      wipememory2 (pool->mem, 0x55, pool->size);
      wipememory2 (pool->mem, 0x00, pool->size);
      if (0)
        ;
#if HAVE_MMAP
      else if (pool->is_mmapped)
        munmap (pool->mem, pool->size);
#endif
      else
        free (pool->mem);
      pool->mem = NULL;
      pool->okay = 0;
      pool->size = 0;
      if (pool != &mainpool)
        free (pool);
    }
  mainpool.next = NULL;
  not_locked = 0;
}


/* Print stats of the secmem allocator.  With EXTENDED passed as true
 * a detailed listing is printed (used for testing).  */
void
_gcry_secmem_dump_stats (int extended)
{
  SECMEM_LOCK;
  secmem_dump_stats_internal (extended);
  SECMEM_UNLOCK;
}


static void
secmem_dump_stats_internal (int extended)
{
  pooldesc_t *pool;
  memblock_t *mb;
  int i, poolno;

  for (pool = &mainpool, poolno = 0; pool; pool = pool->next, poolno++)
    {
      if (!extended)
        {
          if (pool->okay)
            log_info ("%-13s %u/%lu bytes in %u blocks\n",
                      pool == &mainpool? "secmem usage:":"",
                      pool->cur_alloced, (unsigned long)pool->size,
                      pool->cur_blocks);
        }
      else
        {
          for (i = 0, mb = (memblock_t *) pool->mem;
               ptr_into_pool_p (pool, mb);
               mb = mb_get_next (pool, mb), i++)
            log_info ("SECMEM: pool %d %s block %i size %i\n",
                      poolno,
                      (mb->flags & MB_FLAG_ACTIVE) ? "used" : "free",
                      i,
                      mb->size);
        }
    }
}