/*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <lha_internal.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stddef.h>
/* The BSDs do not use malloc.h directly; they use stdlib.h instead. */
#ifndef BSD
#ifdef HAVE_MALLOC_H
#	include <malloc.h>
#endif
#endif
#include <unistd.h>
#ifdef _POSIX_MEMLOCK
#	include <sys/mman.h>
#	include <sys/time.h>
#	include <sys/resource.h>
#endif
#ifdef _POSIX_PRIORITY_SCHEDULING
#	include <sched.h>
#endif
#include <string.h>
#include <clplumbing/cl_log.h>
#include <clplumbing/realtime.h>
#include <clplumbing/uids.h>
#include <time.h>
#include <errno.h>

static gboolean	cl_realtimepermitted = TRUE;
static void cl_rtmalloc_setup(void);

#define HOGRET	0xff
/*
 * Slightly wacko recursive function to touch the requested amount
 * of stack so we have it pre-allocated inside our realtime code,
 * as per the suggestion in mlockall(2).
 */
#ifdef _POSIX_MEMLOCK
static unsigned char
cl_stack_hogger(unsigned char * inbuf, int kbytes)
{
	unsigned char	buf[1024];

	if (inbuf == NULL) {
		memset(buf, HOGRET, sizeof(buf));
	}else{
		memcpy(buf, inbuf, sizeof(buf));
	}

	if (kbytes > 0) {
		return cl_stack_hogger(buf, kbytes-1);
	}else{
		return buf[sizeof(buf)-1];
	}
/* #else
	return HOGRET;
*/
}
#endif
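
/*
 * Each recursion level above consumes at least a 1KB stack frame, so
 * cl_stack_hogger(NULL, N) touches (and thus maps) roughly N KB of
 * stack before mlockall() pins it.  Copying the buffer in and
 * returning its last byte keeps the frames live, which presumably
 * discourages the compiler from optimizing the recursion away.
 */
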
/*
 * We do things this way to hopefully defeat "smart" malloc code which
 * handles large mallocs as special cases using mmap().
 */
static void
cl_malloc_hogger(int kbytes)
{
	long	size		= kbytes * 1024;
	int	chunksize	= 1024;
	long	nchunks		= size / chunksize;
	int	chunkbytes 	= nchunks * sizeof(void *);
	void**	chunks;
	int	j;

#ifdef HAVE_MALLOPT
#	ifdef M_MMAP_MAX
	/* Keep malloc from using mmap */
	mallopt(M_MMAP_MAX, 0);
#	endif
#	ifdef M_TRIM_THRESHOLD
	/* Keep malloc from giving memory back to the system */
	mallopt(M_TRIM_THRESHOLD, -1);
#	endif
#endif
	chunks=malloc(chunkbytes);
	if (chunks == NULL) {
		cl_log(LOG_INFO, "Could not preallocate (%d) bytes"
		,	chunkbytes);
		return;
	}
	memset(chunks, 0, chunkbytes);

	for (j=0; j < nchunks; ++j) {
		chunks[j] = malloc(chunksize);
		if (chunks[j] == NULL) {
			cl_log(LOG_INFO, "Could not preallocate (%d) bytes"
			,	chunksize);
		}else{
			memset(chunks[j], 0, chunksize);
		}
	}
	for (j=0; j < nchunks; ++j) {
		if (chunks[j]) {
			free(chunks[j]);
			chunks[j] = NULL;
		}
	}
	free(chunks);
	chunks = NULL;
}
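
/*
 * Freeing the chunks afterwards returns them to malloc's free lists,
 * not to the kernel: with M_MMAP_MAX set to 0 and M_TRIM_THRESHOLD set
 * to -1, the grown heap stays mapped, so the mlockall(MCL_CURRENT)
 * call below can lock it in place.
 */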

/*
 *	Make us behave like a soft real-time process.
 *	We need scheduling priority and to be locked in memory.
 *	If you ask us nicely, we'll even grow the stack and heap
 *	for you before locking you into memory ;-).
 */
void
cl_make_realtime(int spolicy, int priority, int stackgrowK, int heapgrowK)
{
#ifdef DEFAULT_REALTIME_POLICY
	struct sched_param	sp;
	int			staticp;
#endif

	if (heapgrowK > 0) {
		cl_malloc_hogger(heapgrowK);
	}

#ifdef _POSIX_MEMLOCK
	if (stackgrowK > 0) {
		unsigned char ret;
		if ((ret=cl_stack_hogger(NULL, stackgrowK)) != HOGRET) {
			cl_log(LOG_INFO, "Stack hogger failed 0x%x"
			,	ret);
		}
	}
#endif
	cl_rtmalloc_setup();

	if (!cl_realtimepermitted) {
		cl_log(LOG_INFO
		,	"Request to set pid %ld to realtime ignored."
		,	(long)getpid());
		return;
	}

#ifdef DEFAULT_REALTIME_POLICY
	if (spolicy < 0) {
		spolicy = DEFAULT_REALTIME_POLICY;
	}

	if (priority <= 0) {
		priority = sched_get_priority_min(spolicy);
	}

	if (priority > sched_get_priority_max(spolicy)) {
		priority = sched_get_priority_max(spolicy);
	}

	if ((staticp=sched_getscheduler(0)) < 0) {
		cl_perror("Unable to get scheduler parameters.");
	}else{
		memset(&sp, 0, sizeof(sp));
		sp.sched_priority = priority;

		if (sched_setscheduler(0, spolicy, &sp) < 0) {
			cl_perror("Unable to set scheduler parameters.");
		}
	}
#endif

#if defined _POSIX_MEMLOCK
#	ifdef RLIMIT_MEMLOCK
#	define	THRESHOLD(lim)	((lim)/2)
	{
		unsigned long		growsize = ((stackgrowK+heapgrowK)*1024);
		struct rlimit		memlocklim;

		getrlimit(RLIMIT_MEMLOCK, &memlocklim);	/* Allow for future added fields */
		memlocklim.rlim_max = RLIM_INFINITY;
		memlocklim.rlim_cur = RLIM_INFINITY;
		/* Try to remove memory locking limits -- if we can */
		if (setrlimit(RLIMIT_MEMLOCK, &memlocklim) < 0) {
			/* Didn't work - get what we can */
			getrlimit(RLIMIT_MEMLOCK, &memlocklim);
			memlocklim.rlim_cur = memlocklim.rlim_max;
			setrlimit(RLIMIT_MEMLOCK, &memlocklim);
		}

		/* Could we get 'enough' ? */
		/* (this is a guess - might not be right if we're not root) */
		if (getrlimit(RLIMIT_MEMLOCK, &memlocklim) >= 0
		&&	memlocklim.rlim_cur != RLIM_INFINITY
		&&	(growsize >= THRESHOLD(memlocklim.rlim_cur))) {
			cl_log(LOG_ERR
			,	"Cannot lock ourselves into memory:  System limits"
				" on locked-in memory are too small.");
			return;
		}
	}
#	endif	/*RLIMIT_MEMLOCK*/
	if (mlockall(MCL_CURRENT|MCL_FUTURE) >= 0) {
		if (ANYDEBUG) {
			cl_log(LOG_DEBUG, "pid %d locked in memory.", (int) getpid());
		}

	} else if (errno == ENOSYS) {
		const char *err = strerror(errno);
		cl_log(LOG_WARNING, "Unable to lock pid %d in memory: %s",
		       (int) getpid(), err);

	} else {
		cl_perror("Unable to lock pid %d in memory", (int) getpid());
	}
#endif
}
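
/*
 * A minimal usage sketch (the daemon body and growth figures are
 * hypothetical, not part of this library): a service wanting soft
 * real-time behavior calls this once, early in main(), passing
 * negative policy/priority to accept the compiled-in defaults:
 *
 *	int
 *	main(int argc, char ** argv)
 *	{
 *		cl_make_realtime(-1, -1, 20, 20);
 *		run_event_loop();	// hypothetical daemon body
 *		cl_make_normaltime();
 *		return 0;
 *	}
 */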

void
cl_make_normaltime(void)
{
#ifdef DEFAULT_REALTIME_POLICY
	struct sched_param	sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = sched_get_priority_min(SCHED_OTHER);
	if (sched_setscheduler(0, SCHED_OTHER, &sp) < 0) {
		cl_perror("Unable to (re)set scheduler parameters.");
	}
#endif
#ifdef _POSIX_MEMLOCK
	/* Not strictly necessary. */
	munlockall();
#endif
}

void
cl_disable_realtime(void)
{
	cl_realtimepermitted = FALSE;
}

void
cl_enable_realtime(void)
{
	cl_realtimepermitted = TRUE;
}

/* Give up the CPU for a little bit */
/* This is similar to sched_yield() but allows lower prio processes to run */
int
cl_shortsleep(void)
{
	static const struct timespec	req = {0,2000001L};

	return nanosleep(&req, NULL);
}
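
/*
 * A sketch of the intended use (shutdown_requested is a hypothetical
 * flag): a high-priority process polling for a state change should nap
 * briefly rather than call sched_yield(), because under a realtime
 * policy yielding never lets lower-priority processes run:
 *
 *	while (!shutdown_requested) {
 *		cl_shortsleep();	// ~2ms nap; lower-prio work proceeds
 *	}
 */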

static int		post_rt_morecore_count = 0;
static unsigned long	init_malloc_arena = 0L;

#ifdef HAVE_MALLINFO
#	define	MALLOC_TOTALSIZE()	(((unsigned long)mallinfo().arena)+((unsigned long)mallinfo().hblkhd))
#else
#	define	MALLOC_TOTALSIZE()	(0L)
#endif
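
/*
 * mallinfo() is a nonstandard allocator interface (hence the configure
 * check): .arena counts bytes obtained via sbrk() and .hblkhd bytes in
 * mmap()ed blocks, so their sum approximates malloc's total footprint.
 * The macro calls mallinfo() twice; that is cheap, but the two
 * snapshots are not taken atomically.
 */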

/* Return the number of times we went after more core */
int
cl_nonrealtime_malloc_count(void)
{
	return post_rt_morecore_count;
}

unsigned long
cl_nonrealtime_malloc_size(void)
{
	return (MALLOC_TOTALSIZE() - init_malloc_arena);
}
/* Log the number of times we went after more core */
void
cl_realtime_malloc_check(void)
{
	static	int		lastcount = 0;
	static unsigned long	oldarena = 0UL;

	if (oldarena == 0UL) {
		oldarena = init_malloc_arena;
	}

	if (post_rt_morecore_count > lastcount) {
		if (MALLOC_TOTALSIZE() > oldarena) {
			cl_log(LOG_WARNING,
			       "Performed %d more non-realtime malloc calls.",
			       post_rt_morecore_count - lastcount);

			cl_log(LOG_INFO,
			       "Total non-realtime malloc bytes: %lu",
			       MALLOC_TOTALSIZE() - init_malloc_arena);
			oldarena = MALLOC_TOTALSIZE();
		}

		lastcount = post_rt_morecore_count;
	}
}
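
/*
 * A plausible way to drive the check above (the callback is
 * hypothetical; g_timeout_add() is GLib's): poll about once a second
 * from the main loop so that any malloc() which fetched more core
 * after we went realtime is logged promptly:
 *
 *	static gboolean
 *	malloc_check_cb(gpointer unused)
 *	{
 *		(void)unused;
 *		cl_realtime_malloc_check();
 *		return TRUE;	// keep the timer running
 *	}
 *	...
 *	g_timeout_add(1000, malloc_check_cb, NULL);
 */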

#ifdef HAVE___DEFAULT_MORECORE

static void	(*our_save_morecore_hook)(void) = NULL;
static void	cl_rtmalloc_morecore_fun(void);

static void
cl_rtmalloc_morecore_fun(void)
{
	post_rt_morecore_count++;
	if (our_save_morecore_hook) {
		our_save_morecore_hook();
	}
}
#endif
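
/*
 * __after_morecore_hook is a glibc extension (removed in glibc 2.34)
 * invoked each time the allocator extends the heap via sbrk(); the
 * function above bumps our counter and then chains to whatever hook
 * was installed before us.  Note that the configure guard checks for
 * the related glibc symbol __default_morecore rather than for the
 * hook itself.
 */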

static void
cl_rtmalloc_setup(void)
{
	static gboolean	inityet = FALSE;
	if (!inityet) {
		init_malloc_arena = MALLOC_TOTALSIZE();
#ifdef HAVE___DEFAULT_MORECORE
		our_save_morecore_hook = __after_morecore_hook;
		__after_morecore_hook = cl_rtmalloc_morecore_fun;
#endif
		inityet = TRUE;
	}
}