1 /* $NetBSD: memlock.c,v 1.1.1.2 2009/12/02 00:26:25 haad Exp $ */
2
3 /*
4 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6 *
7 * This file is part of LVM2.
8 *
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
12 *
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18 #include "lib.h"
19 #include "memlock.h"
20 #include "defaults.h"
21 #include "config.h"
22 #include "toolcontext.h"
23
24 #include <limits.h>
25 #include <fcntl.h>
26 #include <unistd.h>
27 #include <sys/mman.h>
28 #include <sys/time.h>
29 #include <sys/resource.h>
30
31 #ifndef DEVMAPPER_SUPPORT
32
/* No-op: memory locking is compiled out without device-mapper support. */
void memlock_inc(void)
{
}
/* No-op: memory locking is compiled out without device-mapper support. */
void memlock_dec(void)
{
}
/* Without device-mapper support memory is never locked. */
int memlock(void)
{
	return 0;
}
/* No-op: nothing to configure without device-mapper support. */
void memlock_init(struct cmd_context *cmd)
{
}
49
50 #else /* DEVMAPPER_SUPPORT */
51
/* Sizes (bytes) of the regions pre-touched while memory is locked. */
static size_t _size_stack;		/* reserved stack, from config (set in memlock_init) */
static size_t _size_malloc_tmp;		/* reserved heap, from config (set in memlock_init) */
static size_t _size_malloc = 2000000;	/* heap block kept allocated while locked */

static void *_malloc_mem = NULL;	/* the long-lived reserve allocation */
static int _memlock_count = 0;		/* nesting count of normal memlock_inc/dec */
static int _memlock_count_daemon = 0;	/* nesting count of daemon lock references */
static int _priority;			/* process priority saved by _lock_mem() */
static int _default_priority;		/* priority to run at while locked, from config */
61
/*
 * Dirty one long per page across [mem, mem + size) so the kernel backs
 * the region with resident pages.  With mlockall(MCL_CURRENT|MCL_FUTURE)
 * in effect (where available), the touched pages then stay resident.
 *
 * Note: uses char * arithmetic — arithmetic on void * is a GNU
 * extension, not ISO C.
 */
static void _touch_memory(void *mem, size_t size)
{
	size_t pagesize = lvm_getpagesize();
	char *pos = mem;
	char *end;

	/* Too small to hold even one long: computing 'end' would be invalid. */
	if (size < sizeof(long))
		return;

	end = pos + size - sizeof(long);

	while (pos < end) {
		*(long *) pos = 1;
		pos += pagesize;
	}
}
73
/*
 * Pre-fault the memory we want resident while locked:
 *  - grow the stack by _size_stack bytes via alloca() and touch it, so
 *    later stack growth does not fault while memory is locked;
 *  - touch a temporary heap block of _size_malloc_tmp bytes, then free
 *    it — presumably so the allocator retains that much mapped space
 *    for later use (TODO confirm allocator behaviour);
 *  - keep a _size_malloc-byte block allocated in _malloc_mem until
 *    _release_memory() frees it.
 * The temporary block is freed only after the persistent one has been
 * allocated; keep this ordering.
 */
static void _allocate_memory(void)
{
	void *stack_mem, *temp_malloc_mem;

	if ((stack_mem = alloca(_size_stack)))
		_touch_memory(stack_mem, _size_stack);

	if ((temp_malloc_mem = malloc(_size_malloc_tmp)))
		_touch_memory(temp_malloc_mem, _size_malloc_tmp);

	if ((_malloc_mem = malloc(_size_malloc)))
		_touch_memory(_malloc_mem, _size_malloc);

	free(temp_malloc_mem);
}
89
_release_memory(void)90 static void _release_memory(void)
91 {
92 free(_malloc_mem);
93 }
94
95 #undef MCL_CURRENT /* XXX: please implement m{,un}lockall */
96
/*
 * Stop memory getting swapped out: lock all current and future pages
 * (where mlockall is available — see the #undef above), pre-fault the
 * reserved regions, then switch to the configured process priority,
 * remembering the old one for _unlock_mem().
 */
static void _lock_mem(void)
{
#ifdef MCL_CURRENT
	if (mlockall(MCL_CURRENT | MCL_FUTURE))
		log_sys_error("mlockall", "");
	else
		log_very_verbose("Locking memory");
#endif
	_allocate_memory();

	/*
	 * getpriority() may legitimately return -1, so errno must be
	 * cleared before the call and checked afterwards to distinguish
	 * a real failure from a -1 priority value.
	 */
	errno = 0;
	if (((_priority = getpriority(PRIO_PROCESS, 0)) == -1) && errno)
		log_sys_error("getpriority", "");
	else
		if (setpriority(PRIO_PROCESS, 0, _default_priority))
			log_error("setpriority %d failed: %s",
				  _default_priority, strerror(errno));
}
116
_unlock_mem(void)117 static void _unlock_mem(void)
118 {
119 #ifdef MCL_CURRENT
120 if (munlockall())
121 log_sys_error("munlockall", "");
122 else
123 log_very_verbose("Unlocking memory");
124 #endif
125 _release_memory();
126 if (setpriority(PRIO_PROCESS, 0, _priority))
127 log_error("setpriority %u failed: %s", _priority,
128 strerror(errno));
129 }
130
_lock_mem_if_needed(void)131 static void _lock_mem_if_needed(void) {
132 if ((_memlock_count + _memlock_count_daemon) == 1)
133 _lock_mem();
134 }
135
_unlock_mem_if_possible(void)136 static void _unlock_mem_if_possible(void) {
137 if ((_memlock_count + _memlock_count_daemon) == 0)
138 _unlock_mem();
139 }
140
memlock_inc(void)141 void memlock_inc(void)
142 {
143 ++_memlock_count;
144 _lock_mem_if_needed();
145 log_debug("memlock_count inc to %d", _memlock_count);
146 }
147
memlock_dec(void)148 void memlock_dec(void)
149 {
150 if (!_memlock_count)
151 log_error("Internal error: _memlock_count has dropped below 0.");
152 --_memlock_count;
153 _unlock_mem_if_possible();
154 log_debug("memlock_count dec to %d", _memlock_count);
155 }
156
157 /*
158 * The memlock_*_daemon functions will force the mlockall() call that we need
159 * to stay in memory, but they will have no effect on device scans (unlike
160 * normal memlock_inc and memlock_dec). Memory is kept locked as long as either
161 * of memlock or memlock_daemon is in effect.
162 */
163
memlock_inc_daemon(void)164 void memlock_inc_daemon(void)
165 {
166 ++_memlock_count_daemon;
167 _lock_mem_if_needed();
168 log_debug("memlock_count_daemon inc to %d", _memlock_count_daemon);
169 }
170
memlock_dec_daemon(void)171 void memlock_dec_daemon(void)
172 {
173 if (!_memlock_count_daemon)
174 log_error("Internal error: _memlock_count_daemon has dropped below 0.");
175 --_memlock_count_daemon;
176 _unlock_mem_if_possible();
177 log_debug("memlock_count_daemon dec to %d", _memlock_count_daemon);
178 }
179
180 /*
181 * This disregards the daemon (dmeventd) locks, since we use memlock() to check
182 * whether it is safe to run a device scan, which would normally coincide with
183 * !memlock() -- but the daemon global memory lock breaks this assumption, so
184 * we do not take those into account here.
185 */
memlock(void)186 int memlock(void)
187 {
188 return _memlock_count;
189 }
190
memlock_init(struct cmd_context * cmd)191 void memlock_init(struct cmd_context *cmd)
192 {
193 _size_stack = find_config_tree_int(cmd,
194 "activation/reserved_stack",
195 DEFAULT_RESERVED_STACK) * 1024;
196 _size_malloc_tmp = find_config_tree_int(cmd,
197 "activation/reserved_memory",
198 DEFAULT_RESERVED_MEMORY) * 1024;
199 _default_priority = find_config_tree_int(cmd,
200 "activation/process_priority",
201 DEFAULT_PROCESS_PRIORITY);
202 }
203
204 #endif
205