1 #ifndef __STDC_WANT_LIB_EXT1__
2 # define __STDC_WANT_LIB_EXT1__ 1
3 #endif
4 #include <assert.h>
5 #include <errno.h>
6 #include <limits.h>
7 #include <signal.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <string.h>
12 
13 #ifdef HAVE_SYS_MMAN_H
14 # include <sys/mman.h>
15 #endif
16 
17 #ifdef _WIN32
18 # include <windows.h>
19 # include <wincrypt.h>
20 #else
21 # include <unistd.h>
22 #endif
23 
24 #ifndef HAVE_C_VARARRAYS
25 # ifdef HAVE_ALLOCA_H
26 #  include <alloca.h>
27 # elif !defined(alloca)
28 #  if defined(__clang__) || defined(__GNUC__)
29 #   define alloca __builtin_alloca
30 #  elif defined _AIX
31 #   define alloca __alloca
32 #  elif defined _MSC_VER
33 #   include <malloc.h>
34 #   define alloca _alloca
35 #  else
36 #   include <stddef.h>
37 #   ifdef  __cplusplus
38 extern "C"
39 #   endif
40 void *alloca (size_t);
41 #  endif
42 # endif
43 #endif
44 
45 #include "core.h"
46 #include "randombytes.h"
47 #include "utils.h"
48 
49 #ifndef ENOSYS
50 # define ENOSYS ENXIO
51 #endif
52 
53 #if defined(_WIN32) && \
54     (!defined(WINAPI_FAMILY) || WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)
55 # define WINAPI_DESKTOP
56 #endif
57 
58 #define CANARY_SIZE 16U
59 #define GARBAGE_VALUE 0xdb
60 
61 #ifndef MAP_NOCORE
62 # define MAP_NOCORE 0
63 #endif
64 #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
65 # define MAP_ANON MAP_ANONYMOUS
66 #endif
67 #if defined(WINAPI_DESKTOP) || (defined(MAP_ANON) && defined(HAVE_MMAP)) || \
68     defined(HAVE_POSIX_MEMALIGN)
69 # define HAVE_ALIGNED_MALLOC
70 #endif
71 #if defined(HAVE_MPROTECT) && \
72     !(defined(PROT_NONE) && defined(PROT_READ) && defined(PROT_WRITE))
73 # undef HAVE_MPROTECT
74 #endif
75 #if defined(HAVE_ALIGNED_MALLOC) && \
76     (defined(WINAPI_DESKTOP) || defined(HAVE_MPROTECT))
77 # define HAVE_PAGE_PROTECTION
78 #endif
79 #if !defined(MADV_DODUMP) && defined(MADV_CORE)
80 # define MADV_DODUMP   MADV_CORE
81 # define MADV_DONTDUMP MADV_NOCORE
82 #endif
83 
/* System page size, probed once in _sodium_alloc_init() (only used by
 * guarded-allocation builds, i.e. when HAVE_ALIGNED_MALLOC is set). */
static size_t        page_size;
/* Random canary placed next to guarded allocations to detect overflows;
 * filled with random bytes in _sodium_alloc_init(). */
static unsigned char canary[CANARY_SIZE];
86 
/* LCOV_EXCL_START */
#ifdef HAVE_WEAK_SYMBOLS
/* Weak, empty function taking the zeroed buffer as an argument: because
 * a weak definition may be overridden at link time, LTO cannot prove the
 * call is a no-op, so the memset() preceding the call in sodium_memzero()
 * cannot be optimized away. */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const  pnt,
                                            const size_t len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memzero_lto(void *const  pnt,
                                            const size_t len)
{
    (void) pnt; /* LCOV_EXCL_LINE */
    (void) len; /* LCOV_EXCL_LINE */
}
#endif
/* LCOV_EXCL_STOP */
101 
/* Zero `len` bytes at `pnt` in a way the compiler may not optimize out,
 * so secrets do not linger in memory after they are no longer needed.
 * Selects the strongest primitive the platform offers; the final
 * fallback writes byte-by-byte through a volatile pointer. */
void
sodium_memzero(void * const pnt, const size_t len)
{
#ifdef _WIN32
    SecureZeroMemory(pnt, len);
#elif defined(HAVE_MEMSET_S)
    /* C11 Annex K memset_s() may not be elided; a nonzero return is a
     * contract violation, treated as API misuse (aborts) */
    if (len > 0U && memset_s(pnt, (rsize_t) len, 0, (rsize_t) len) != 0) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
#elif defined(HAVE_EXPLICIT_BZERO)
    explicit_bzero(pnt, len);
#elif defined(HAVE_EXPLICIT_MEMSET)
    explicit_memset(pnt, 0, len);
#elif HAVE_WEAK_SYMBOLS
    if (len > 0U) {
        memset(pnt, 0, len);
        /* call into a weak symbol so LTO cannot drop the memset() above */
        _sodium_dummy_symbol_to_prevent_memzero_lto(pnt, len);
    }
# ifdef HAVE_INLINE_ASM
    /* compiler barrier: the empty asm is assumed to read memory via pnt */
    __asm__ __volatile__ ("" : : "r"(pnt) : "memory");
# endif
#else
    /* last resort: volatile stores cannot legally be removed */
    volatile unsigned char *volatile pnt_ =
        (volatile unsigned char *volatile) pnt;
    size_t i = (size_t) 0U;

    while (i < len) {
        pnt_[i++] = 0U;
    }
#endif
}
133 
/* Scrub `len` bytes of the current stack area, wiping spilled secrets
 * left behind by previously-called functions.  Uses a VLA when the
 * compiler supports them, alloca() otherwise; silently a no-op when
 * neither mechanism is available. */
void
sodium_stackzero(const size_t len)
{
#ifdef HAVE_C_VARARRAYS
    unsigned char fodder[len];
    sodium_memzero(fodder, len);
#elif HAVE_ALLOCA
    sodium_memzero(alloca(len), len);
#endif
}
144 
#ifdef HAVE_WEAK_SYMBOLS
/* Weak no-op taking both buffers: an LTO optimization barrier so the
 * byte-wise loop in sodium_memcmp() cannot be transformed into a
 * variable-time comparison. */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t         len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_memcmp_lto(const unsigned char *b1,
                                           const unsigned char *b2,
                                           const size_t         len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif
160 
/* Constant-time equality check of two `len`-byte buffers.
 * Returns 0 if equal, -1 otherwise (NOT a lexicographic order).
 * Every byte is always inspected, so timing does not leak the position
 * of the first difference. */
int
sodium_memcmp(const void *const b1_, const void *const b2_, size_t len)
{
#ifdef HAVE_WEAK_SYMBOLS
    /* the weak-symbol call below serves as the optimization barrier */
    const unsigned char *b1 = (const unsigned char *) b1_;
    const unsigned char *b2 = (const unsigned char *) b2_;
#else
    /* no weak symbols: force every read through volatile pointers instead */
    const volatile unsigned char *volatile b1 =
        (const volatile unsigned char *volatile) b1_;
    const volatile unsigned char *volatile b2 =
        (const volatile unsigned char *volatile) b2_;
#endif
    size_t                 i;
    volatile unsigned char d = 0U; /* accumulates all byte differences */

#if HAVE_WEAK_SYMBOLS
    _sodium_dummy_symbol_to_prevent_memcmp_lto(b1, b2, len);
#endif
    for (i = 0U; i < len; i++) {
        d |= b1[i] ^ b2[i];
    }
    /* branch-free: d == 0 -> 0, d != 0 -> -1 */
    return (1 & ((d - 1) >> 8)) - 1;
}
184 
#ifdef HAVE_WEAK_SYMBOLS
/* Weak no-op taking both buffers: an LTO optimization barrier so the
 * constant-time loop in sodium_compare() cannot be rewritten into a
 * data-dependent early-exit comparison. */
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t         len);
__attribute__((weak)) void
_sodium_dummy_symbol_to_prevent_compare_lto(const unsigned char *b1,
                                            const unsigned char *b2,
                                            const size_t         len)
{
    (void) b1;
    (void) b2;
    (void) len;
}
#endif
200 
/* Constant-time three-way comparison of two numbers stored as
 * little-endian byte arrays of the same length `len`.
 * Returns -1 if b1_ < b2_, 0 if equal, 1 if b1_ > b2_.
 * Scans from the most significant byte down, always touching every
 * byte so timing does not depend on the contents. */
int
sodium_compare(const unsigned char *b1_, const unsigned char *b2_, size_t len)
{
#ifdef HAVE_WEAK_SYMBOLS
    /* the weak-symbol call below serves as the optimization barrier */
    const unsigned char *b1 = b1_;
    const unsigned char *b2 = b2_;
#else
    /* no weak symbols: force every read through volatile pointers instead */
    const volatile unsigned char *volatile b1 =
        (const volatile unsigned char *volatile) b1_;
    const volatile unsigned char *volatile b2 =
        (const volatile unsigned char *volatile) b2_;
#endif
    size_t                 i;
    volatile unsigned char gt = 0U; /* set once b1 is proven greater */
    volatile unsigned char eq = 1U; /* stays 1 while high bytes are equal */
    uint16_t               x1, x2;

#if HAVE_WEAK_SYMBOLS
    _sodium_dummy_symbol_to_prevent_compare_lto(b1, b2, len);
#endif
    i = len;
    while (i != 0U) {
        i--;
        x1 = b1[i];
        x2 = b2[i];
        /* (x2 - x1) >> 8 yields 1 in bit 0 iff x1 > x2 (borrow out);
         * only counted while all more-significant bytes were equal */
        gt |= ((x2 - x1) >> 8) & eq;
        /* ((x2 ^ x1) - 1) >> 8 yields 1 in bit 0 iff x1 == x2 */
        eq &= ((x2 ^ x1) - 1) >> 8;
    }
    /* gt=1 -> 1; gt=0,eq=1 -> 0; gt=0,eq=0 -> -1 */
    return (int) (gt + gt + eq) - 1;
}
231 
/* Constant-time check that `nlen` bytes at `n` are all zero.
 * Returns 1 if the buffer is entirely zero, 0 otherwise; every byte is
 * always read so timing does not depend on the contents. */
int
sodium_is_zero(const unsigned char *n, const size_t nlen)
{
    volatile unsigned char acc = 0U;
    size_t                 j = nlen;

    /* OR every byte into the volatile accumulator */
    while (j != 0U) {
        j--;
        acc |= n[j];
    }
    /* branch-free: (acc - 1) >> 8 has bit 0 set iff acc == 0 */
    return 1 & ((acc - 1) >> 8);
}
243 
/* Increment, in place, a number stored as an `nlen`-byte little-endian
 * array (typically a nonce or counter), wrapping modulo 2^(8*nlen).
 * Runs in constant time for a given length.  On x86_64, the common
 * sizes 8, 12 and 24 bytes take inline-asm fast paths built on
 * add-with-carry. */
void
sodium_increment(unsigned char *n, const size_t nlen)
{
    size_t        i = 0U;
    uint_fast16_t c = 1U; /* incoming carry: we are adding 1 */

#ifdef HAVE_AMD64_ASM
    uint64_t t64, t64_2;
    uint32_t t32;

    if (nlen == 12U) {
        /* 96-bit increment: set carry, then 64-bit + 32-bit adc */
        __asm__ __volatile__(
            "xorq %[t64], %[t64] \n"
            "xorl %[t32], %[t32] \n"
            "stc \n"
            "adcq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 24U) {
        /* 192-bit increment: add 1 to the low limb, propagate carries */
        __asm__ __volatile__(
            "movq $1, %[t64] \n"
            "xorq %[t64_2], %[t64_2] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_2], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2)
            : [out] "D"(n)
            : "memory", "flags", "cc");
        return;
    } else if (nlen == 8U) {
        /* 64-bit increment in a single instruction */
        __asm__ __volatile__("incq (%[out]) \n"
                             :
                             : [out] "D"(n)
                             : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: ripple the carry through each byte */
    for (; i < nlen; i++) {
        c += (uint_fast16_t) n[i];
        n[i] = (unsigned char) c;
        c >>= 8;
    }
}
290 
/* Compute a += b where both are `len`-byte little-endian numbers,
 * wrapping modulo 2^(8*len).  Runs in constant time for a given length.
 * On x86_64, the common sizes 8, 12 and 24 bytes take inline-asm fast
 * paths built on add-with-carry. */
void
sodium_add(unsigned char *a, const unsigned char *b, const size_t len)
{
    size_t        i;
    uint_fast16_t c = 0U; /* carry between byte positions */

#ifdef HAVE_AMD64_ASM
    uint64_t t64, t64_2, t64_3;
    uint32_t t32;

    if (len == 12U) {
        /* 96-bit addition: 64-bit add, then 32-bit add-with-carry */
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movl 8(%[in]), %[t32] \n"
            "addq %[t64], (%[out]) \n"
            "adcl %[t32], 8(%[out]) \n"
            : [t64] "=&r"(t64), [t32] "=&r"(t32)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 24U) {
        /* 192-bit addition across three 64-bit limbs */
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "movq 8(%[in]), %[t64_2] \n"
            "movq 16(%[in]), %[t64_3] \n"
            "addq %[t64], (%[out]) \n"
            "adcq %[t64_2], 8(%[out]) \n"
            "adcq %[t64_3], 16(%[out]) \n"
            : [t64] "=&r"(t64), [t64_2] "=&r"(t64_2), [t64_3] "=&r"(t64_3)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    } else if (len == 8U) {
        /* single 64-bit addition */
        __asm__ __volatile__(
            "movq (%[in]), %[t64] \n"
            "addq %[t64], (%[out]) \n"
            : [t64] "=&r"(t64)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: byte-wise addition with carry propagation */
    for (i = 0U; i < len; i++) {
        c += (uint_fast16_t) a[i] + (uint_fast16_t) b[i];
        a[i] = (unsigned char) c;
        c >>= 8;
    }
}
339 
/* Compute a -= b where both are `len`-byte little-endian numbers,
 * wrapping modulo 2^(8*len).  Runs in constant time for a given length.
 * On x86_64, the 64-byte size (e.g. 512-bit values) takes an inline-asm
 * fast path built on subtract-with-borrow. */
void
sodium_sub(unsigned char *a, const unsigned char *b, const size_t len)
{
    uint_fast16_t c = 0U; /* borrow between byte positions */
    size_t        i;

#ifdef HAVE_AMD64_ASM
    uint64_t t64_1, t64_2, t64_3, t64_4;
    uint64_t t64_5, t64_6, t64_7, t64_8;
    uint32_t t32;

    if (len == 64U) {
        /* 512-bit subtraction across eight 64-bit limbs */
        __asm__ __volatile__(
            "movq   (%[in]), %[t64_1] \n"
            "movq  8(%[in]), %[t64_2] \n"
            "movq 16(%[in]), %[t64_3] \n"
            "movq 24(%[in]), %[t64_4] \n"
            "movq 32(%[in]), %[t64_5] \n"
            "movq 40(%[in]), %[t64_6] \n"
            "movq 48(%[in]), %[t64_7] \n"
            "movq 56(%[in]), %[t64_8] \n"
            "subq %[t64_1],   (%[out]) \n"
            "sbbq %[t64_2],  8(%[out]) \n"
            "sbbq %[t64_3], 16(%[out]) \n"
            "sbbq %[t64_4], 24(%[out]) \n"
            "sbbq %[t64_5], 32(%[out]) \n"
            "sbbq %[t64_6], 40(%[out]) \n"
            "sbbq %[t64_7], 48(%[out]) \n"
            "sbbq %[t64_8], 56(%[out]) \n"
            : [t64_1] "=&r"(t64_1), [t64_2] "=&r"(t64_2), [t64_3] "=&r"(t64_3), [t64_4] "=&r"(t64_4),
              [t64_5] "=&r"(t64_5), [t64_6] "=&r"(t64_6), [t64_7] "=&r"(t64_7), [t64_8] "=&r"(t64_8)
            : [in] "S"(b), [out] "D"(a)
            : "memory", "flags", "cc");
        return;
    }
#endif
    /* generic path: byte-wise subtraction with borrow propagation */
    for (i = 0U; i < len; i++) {
        c = (uint_fast16_t) a[i] - (uint_fast16_t) b[i] - c;
        a[i] = (unsigned char) c;
        c = (c >> 8) & 1U;
    }
}
382 
/* One-time initialization for the guarded allocator: probe the system
 * page size (only needed when guarded allocations are available) and
 * fill the global canary with random bytes.  Always returns 0. */
int
_sodium_alloc_init(void)
{
#ifdef HAVE_ALIGNED_MALLOC
# if defined(_SC_PAGESIZE)
    long page_size_ = sysconf(_SC_PAGESIZE);
    if (page_size_ > 0L) {
        page_size = (size_t) page_size_;
    }
# elif defined(WINAPI_DESKTOP)
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    page_size = (size_t) si.dwPageSize;
# endif
    /* a page must be able to hold the canary and a stored size_t */
    if (page_size < CANARY_SIZE || page_size < sizeof(size_t)) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
#endif
    randombytes_buf(canary, sizeof canary);

    return 0;
}
405 
/* Lock `len` bytes at `addr` into physical memory so they cannot be
 * swapped to disk and, where supported, exclude them from core dumps.
 * Returns 0 on success, -1 with errno set otherwise (ENOSYS when the
 * platform offers no locking primitive). */
int
sodium_mlock(void *const addr, const size_t len)
{
#if defined(MADV_DONTDUMP) && defined(HAVE_MADVISE)
    /* best effort: keep this region out of core dumps */
    (void) madvise(addr, len, MADV_DONTDUMP);
#endif
#ifdef HAVE_MLOCK
    return mlock(addr, len);
#elif defined(WINAPI_DESKTOP)
    return -(VirtualLock(addr, len) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
421 
/* Wipe then unlock a region previously locked with sodium_mlock().
 * Zeroing happens first so the pages can never reach swap with secret
 * contents.  Returns 0 on success, -1 with errno set otherwise. */
int
sodium_munlock(void *const addr, const size_t len)
{
    sodium_memzero(addr, len);
#if defined(MADV_DODUMP) && defined(HAVE_MADVISE)
    /* best effort: allow the (now wiped) region back into core dumps */
    (void) madvise(addr, len, MADV_DODUMP);
#endif
#ifdef HAVE_MLOCK
    return munlock(addr, len);
#elif defined(WINAPI_DESKTOP)
    return -(VirtualUnlock(addr, len) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
438 
/* Make a page range inaccessible (no read, no write).
 * Returns 0 on success, -1 with errno set otherwise. */
static int
_mprotect_noaccess(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_NONE);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_NOACCESS, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
452 
/* Make a page range read-only.
 * Returns 0 on success, -1 with errno set otherwise. */
static int
_mprotect_readonly(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_READ);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_READONLY, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
466 
/* Make a page range readable and writable.
 * Returns 0 on success, -1 with errno set otherwise. */
static int
_mprotect_readwrite(void *ptr, size_t size)
{
#ifdef HAVE_MPROTECT
    return mprotect(ptr, size, PROT_READ | PROT_WRITE);
#elif defined(WINAPI_DESKTOP)
    DWORD old;
    return -(VirtualProtect(ptr, size, PAGE_READWRITE, &old) == 0);
#else
    errno = ENOSYS;
    return -1;
#endif
}
480 
481 #ifdef HAVE_ALIGNED_MALLOC
482 
/* Called when a canary check detects memory corruption: deliberately
 * kill the process, mimicking a segmentation fault where possible, so
 * that no higher-level error handler can continue running with
 * corrupted memory. */
__attribute__((noreturn)) static void
_out_of_bounds(void)
{
# ifndef __wasm__
#  ifdef SIGSEGV
    raise(SIGSEGV);
#  elif defined(SIGKILL)
    raise(SIGKILL);
#  endif
# endif
    abort(); /* not something we want any higher-level API to catch */
} /* LCOV_EXCL_LINE */
495 
496 static inline size_t
_page_round(const size_t size)497 _page_round(const size_t size)
498 {
499     const size_t page_mask = page_size - 1U;
500 
501     return (size + page_mask) & ~page_mask;
502 }
503 
/* Allocate `size` bytes aligned to a page boundary, using whichever
 * primitive the platform provides (mmap, posix_memalign or
 * VirtualAlloc).  Returns NULL on failure. */
static __attribute__((malloc)) unsigned char *
_alloc_aligned(const size_t size)
{
    void *ptr;

# if defined(MAP_ANON) && defined(HAVE_MMAP)
    /* MAP_NOCORE (where available) keeps the pages out of core dumps */
    if ((ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_PRIVATE | MAP_NOCORE, -1, 0)) ==
        MAP_FAILED) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&ptr, page_size, size) != 0) {
        ptr = NULL; /* LCOV_EXCL_LINE */
    }               /* LCOV_EXCL_LINE */
# elif defined(WINAPI_DESKTOP)
    ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
# else
#  error Bug
# endif
    return (unsigned char *) ptr;
}
526 
/* Release memory obtained from _alloc_aligned(), using the primitive
 * matching the allocation path selected at compile time. */
static void
_free_aligned(unsigned char *const ptr, const size_t size)
{
# if defined(MAP_ANON) && defined(HAVE_MMAP)
    (void) munmap(ptr, size);
# elif defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
# elif defined(WINAPI_DESKTOP)
    VirtualFree(ptr, 0U, MEM_RELEASE);
# else
#  error Bug
#endif
}
540 
/* Map a user pointer returned by sodium_malloc() back to the start of
 * its page-aligned unprotected region: step back over the canary, then
 * round down to the page boundary.  Aborts via sodium_misuse() if the
 * resulting address is implausibly low (clearly not one of ours). */
static unsigned char *
_unprotected_ptr_from_user_ptr(void *const ptr)
{
    uintptr_t      unprotected_ptr_u;
    unsigned char *canary_ptr;
    size_t         page_mask;

    canary_ptr = ((unsigned char *) ptr) - sizeof canary;
    page_mask = page_size - 1U;
    unprotected_ptr_u = ((uintptr_t) canary_ptr & (uintptr_t) ~page_mask);
    if (unprotected_ptr_u <= page_size * 2U) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    return (unsigned char *) unprotected_ptr_u;
}
556 
557 #endif /* HAVE_ALIGNED_MALLOC */
558 
#ifndef HAVE_ALIGNED_MALLOC
/* No page-aligned allocation support on this platform: fall back to
 * plain malloc(), bumping zero-sized requests to one byte so a unique
 * pointer is always returned. */
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    return malloc(size > (size_t) 0U ? size : (size_t) 1U);
}
#else
/* Guarded allocation.  Region layout, one page-aligned area per line:
 *
 *   [ size page (read-only, stores unprotected_size) ]
 *   [ guard page (PROT_NONE)                         ]
 *   [ unprotected region: ... canary | user data     ]
 *   [ guard page (PROT_NONE)                         ]
 *
 * The user area is placed at the END of the unprotected region so that
 * even a one-byte overrun hits the trailing guard page; the random
 * canary immediately before it catches underruns on free.  On targets
 * without page protection, a second canary after the region substitutes
 * for the trailing guard page. */
static __attribute__((malloc)) void *
_sodium_malloc(const size_t size)
{
    void          *user_ptr;
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         size_with_canary;
    size_t         total_size;
    size_t         unprotected_size;

    /* reject sizes whose total (with 4 extra pages) could overflow */
    if (size >= (size_t) SIZE_MAX - page_size * 4U) {
        errno = ENOMEM;
        return NULL;
    }
    if (page_size <= sizeof canary || page_size < sizeof unprotected_size) {
        sodium_misuse(); /* LCOV_EXCL_LINE */
    }
    size_with_canary = (sizeof canary) + size;
    unprotected_size = _page_round(size_with_canary);
    total_size       = page_size + page_size + unprotected_size + page_size;
    if ((base_ptr = _alloc_aligned(total_size)) == NULL) {
        return NULL; /* LCOV_EXCL_LINE */
    }
    unprotected_ptr = base_ptr + page_size * 2U;
    _mprotect_noaccess(base_ptr + page_size, page_size);
# ifndef HAVE_PAGE_PROTECTION
    /* no guard pages available: place a canary after the region instead */
    memcpy(unprotected_ptr + unprotected_size, canary, sizeof canary);
# endif
    _mprotect_noaccess(unprotected_ptr + unprotected_size, page_size);
    sodium_mlock(unprotected_ptr, unprotected_size);
    canary_ptr =
        unprotected_ptr + _page_round(size_with_canary) - size_with_canary;
    user_ptr = canary_ptr + sizeof canary;
    memcpy(canary_ptr, canary, sizeof canary);
    /* record the region size in the first page, then lock it read-only */
    memcpy(base_ptr, &unprotected_size, sizeof unprotected_size);
    _mprotect_readonly(base_ptr, page_size);
    assert(_unprotected_ptr_from_user_ptr(user_ptr) == unprotected_ptr);

    return user_ptr;
}
#endif /* !HAVE_ALIGNED_MALLOC */
608 
609 __attribute__((malloc)) void *
sodium_malloc(const size_t size)610 sodium_malloc(const size_t size)
611 {
612     void *ptr;
613 
614     if ((ptr = _sodium_malloc(size)) == NULL) {
615         return NULL;
616     }
617     memset(ptr, (int) GARBAGE_VALUE, size);
618 
619     return ptr;
620 }
621 
/* Allocate guarded memory for an array of `count` elements of `size`
 * bytes each, rejecting products that could overflow size_t.
 * Returns NULL with errno = ENOMEM on overflow or allocation failure. */
__attribute__((malloc)) void *
sodium_allocarray(size_t count, size_t size)
{
    const int would_overflow =
        count > (size_t) 0U && size >= (size_t) SIZE_MAX / count;

    if (would_overflow) {
        errno = ENOMEM;
        return NULL;
    }
    return sodium_malloc(count * size);
}
631 
#ifndef HAVE_ALIGNED_MALLOC
/* Fallback build: guarded allocations are plain malloc(), so freeing
 * is plain free(). */
void
sodium_free(void *ptr)
{
    free(ptr);
}
#else
/* Free a guarded allocation: recover the region layout from the user
 * pointer, restore write access, verify the canary(ies) — aborting the
 * process on corruption — then wipe, unlock and release the mapping.
 * free(NULL) semantics: a NULL ptr is a no-op. */
void
sodium_free(void *ptr)
{
    unsigned char *base_ptr;
    unsigned char *canary_ptr;
    unsigned char *unprotected_ptr;
    size_t         total_size;
    size_t         unprotected_size;

    if (ptr == NULL) {
        return;
    }
    canary_ptr      = ((unsigned char *) ptr) - sizeof canary;
    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr        = unprotected_ptr - page_size * 2U;
    /* the region size was stored in the (read-only) first page */
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);
    total_size = page_size + page_size + unprotected_size + page_size;
    _mprotect_readwrite(base_ptr, total_size);
    if (sodium_memcmp(canary_ptr, canary, sizeof canary) != 0) {
        _out_of_bounds();
    }
# ifndef HAVE_PAGE_PROTECTION
    /* no guard pages: check the trailing canary written at allocation */
    if (sodium_memcmp(unprotected_ptr + unprotected_size, canary,
                      sizeof canary) != 0) {
        _out_of_bounds();
    }
# endif
    sodium_munlock(unprotected_ptr, unprotected_size);
    _free_aligned(base_ptr, total_size);
}
#endif /* HAVE_ALIGNED_MALLOC */
670 
#ifndef HAVE_PAGE_PROTECTION
/* Page protection is unavailable on this target: report ENOSYS for all
 * sodium_mprotect_*() calls. */
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    (void) ptr;
    (void) cb;
    errno = ENOSYS;
    return -1;
}
#else
/* Apply protection callback `cb` to the whole unprotected region of the
 * guarded allocation that `ptr` belongs to.  The region base and size
 * are recovered from the allocation's layout (see _sodium_malloc). */
static int
_sodium_mprotect(void *ptr, int (*cb)(void *ptr, size_t size))
{
    unsigned char *base_ptr;
    unsigned char *unprotected_ptr;
    size_t         unprotected_size;

    unprotected_ptr = _unprotected_ptr_from_user_ptr(ptr);
    base_ptr        = unprotected_ptr - page_size * 2U;
    memcpy(&unprotected_size, base_ptr, sizeof unprotected_size);

    return cb(unprotected_ptr, unprotected_size);
}
#endif
695 
/* Make the guarded allocation containing `ptr` inaccessible.
 * Returns 0 on success, -1 with errno set otherwise. */
int
sodium_mprotect_noaccess(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_noaccess);
}
701 
/* Make the guarded allocation containing `ptr` read-only.
 * Returns 0 on success, -1 with errno set otherwise. */
int
sodium_mprotect_readonly(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readonly);
}
707 
/* Make the guarded allocation containing `ptr` readable and writable.
 * Returns 0 on success, -1 with errno set otherwise. */
int
sodium_mprotect_readwrite(void *ptr)
{
    return _sodium_mprotect(ptr, _mprotect_readwrite);
}
713 
/* Append ISO/IEC 7816-4 padding to buf: a 0x80 marker byte followed by
 * zeros, extending the data to the next multiple of `blocksize`.
 * Written without data-dependent branches.  On success stores the
 * padded length in *padded_buflen_p (when non-NULL) and returns 0;
 * returns -1 when blocksize is 0 or the padded data would not fit in
 * max_buflen.  Aborts via sodium_misuse() on size_t overflow. */
int
sodium_pad(size_t *padded_buflen_p, unsigned char *buf,
           size_t unpadded_buflen, size_t blocksize, size_t max_buflen)
{
    unsigned char          *tail;
    size_t                  i;
    size_t                  xpadlen;     /* zero bytes following the 0x80 marker */
    size_t                  xpadded_len; /* index of the last byte to write */
    volatile unsigned char  mask;
    unsigned char           barrier_mask;

    if (blocksize <= 0U) {
        return -1;
    }
    /* distance to the next blocksize multiple, minus the marker byte;
     * power-of-two block sizes use a mask instead of a division */
    xpadlen = blocksize - 1U;
    if ((blocksize & (blocksize - 1U)) == 0U) {
        xpadlen -= unpadded_buflen & (blocksize - 1U);
    } else {
        xpadlen -= unpadded_buflen % blocksize;
    }
    if ((size_t) SIZE_MAX - unpadded_buflen <= xpadlen) {
        sodium_misuse();
    }
    xpadded_len = unpadded_buflen + xpadlen;
    if (xpadded_len >= max_buflen) {
        return -1;
    }
    tail = &buf[xpadded_len];
    if (padded_buflen_p != NULL) {
        *padded_buflen_p = xpadded_len + 1U;
    }
    /* walk the final block backwards, writing 0x80 at the marker
     * position and zeros after it, without branching on offsets */
    mask = 0U;
    for (i = 0; i < blocksize; i++) {
        /* barrier_mask is 0xff exactly when i == xpadlen, 0 otherwise */
        barrier_mask = (unsigned char) (((i ^ xpadlen) - 1U)
           >> ((sizeof(size_t) - 1) * CHAR_BIT));
        *(tail - i) = ((*(tail - i)) & mask) | (0x80 & barrier_mask);
        mask |= barrier_mask;
    }
    return 0;
}
754 
/* Remove ISO/IEC 7816-4 padding, scanning the last `blocksize` bytes
 * without data-dependent branches.  A valid padding is a 0x80 marker
 * followed only by zeros up to the end of the buffer.  On success
 * stores the unpadded length in *unpadded_buflen_p and returns 0;
 * returns -1 when the padding is invalid, padded_buflen < blocksize,
 * or blocksize is 0. */
int
sodium_unpad(size_t *unpadded_buflen_p, const unsigned char *buf,
             size_t padded_buflen, size_t blocksize)
{
    const unsigned char *tail;
    unsigned char        acc = 0U;   /* OR of all bytes seen so far */
    unsigned char        c;
    unsigned char        valid = 0U; /* set once the 0x80 marker is found */
    volatile size_t      pad_len = 0U;
    size_t               i;
    size_t               is_barrier;

    if (padded_buflen < blocksize || blocksize <= 0U) {
        return -1;
    }
    tail = &buf[padded_buflen - 1U];

    for (i = 0U; i < blocksize; i++) {
        c = *(tail - i);
        /* is_barrier is 1 iff every byte closer to the end was zero
         * (acc == 0), no marker was recorded yet (pad_len == 0), and
         * the current byte is 0x80 */
        is_barrier =
            (( (acc - 1U) & (pad_len - 1U) & ((c ^ 0x80) - 1U) ) >> 8) & 1U;
        acc |= c;
        /* record offset i as the pad length when the marker is here */
        pad_len |= i & (1U + ~is_barrier);
        valid |= (unsigned char) is_barrier;
    }
    *unpadded_buflen_p = padded_buflen - 1U - pad_len;

    /* 0 when a marker was found, -1 otherwise */
    return (int) (valid - 1U);
}
784