1 //fast_mblock.c
2
3 #include <errno.h>
4 #include <sys/resource.h>
5 #include <pthread.h>
6 #include <assert.h>
7 #include "logger.h"
8 #include "shared_func.h"
9 #include "pthread_func.h"
10 #include "sched_thread.h"
11 #include "fast_mblock.h"
12
/* Global registry of every fast_mblock_man instance, used by the
 * stat/reporting functions below. */
struct _fast_mblock_manager
{
    bool initialized;               //set true by fast_mblock_manager_init()
    int count;                      //number of mblocks currently registered
    struct fast_mblock_man head;    //dummy head of circular doubly-linked list
    pthread_mutex_t lock;           //protects the list and count
};

/* Circular doubly-linked list helpers: an unlinked/empty node points
 * back to itself. */
#define INIT_HEAD(head) (head)->next = (head)->prev = head
#define IS_EMPTY(head) ((head)->next == head)

static struct _fast_mblock_manager mblock_manager = {false, 0};
25
fast_mblock_manager_init()26 int fast_mblock_manager_init()
27 {
28 int result;
29 if ((result=init_pthread_lock(&(mblock_manager.lock))) != 0)
30 {
31 logError("file: "__FILE__", line: %d, " \
32 "init_pthread_lock fail, errno: %d, error info: %s", \
33 __LINE__, result, STRERROR(result));
34 return result;
35 }
36 INIT_HEAD(&mblock_manager.head);
37 mblock_manager.initialized = true;
38
39 return 0;
40 }
41
cmp_mblock_info(struct fast_mblock_man * mb1,struct fast_mblock_man * mb2)42 static int cmp_mblock_info(struct fast_mblock_man *mb1, struct fast_mblock_man *mb2)
43 {
44 int result;
45 result = strcmp(mb1->info.name, mb2->info.name);
46 if (result != 0)
47 {
48 return result;
49 }
50
51 return mb1->info.element_size - mb2->info.element_size;
52 }
53
/*
 * Register an mblock with the global manager, keeping the list sorted by
 * (name, element_size) as defined by cmp_mblock_info().
 * Silently does nothing when the manager has not been initialized.
 */
static void add_to_mblock_list(struct fast_mblock_man *mblock)
{
    struct fast_mblock_man *current;

    if (!mblock_manager.initialized)
    {
        return;
    }

    //give anonymous mblocks a synthetic name derived from their element size
    if (*mblock->info.name == '\0')
    {
        snprintf(mblock->info.name, sizeof(mblock->info.name),
                "size-%d", mblock->info.element_size);
    }

    pthread_mutex_lock(&(mblock_manager.lock));

    //find the first entry that sorts at or after the new mblock
    current = mblock_manager.head.next;
    while (current != &mblock_manager.head)
    {
        if (cmp_mblock_info(mblock, current) <= 0)
        {
            break;
        }
        current = current->next;
    }

    //splice mblock in immediately before current (possibly the head,
    //which appends to the tail of the circular list)
    mblock->next = current;
    mblock->prev = current->prev;
    current->prev->next = mblock;
    current->prev = mblock;
    mblock_manager.count++;

    pthread_mutex_unlock(&(mblock_manager.lock));
}
86
/*
 * Unlink an mblock from the global manager list.
 * No-op when the manager is uninitialized or the mblock is not linked
 * (an unlinked mblock points back to itself, see INIT_HEAD below).
 */
static void delete_from_mblock_list(struct fast_mblock_man *mblock)
{
    if (!mblock_manager.initialized || IS_EMPTY(mblock))
    {
        return;
    }

    pthread_mutex_lock(&(mblock_manager.lock));
    mblock->prev->next = mblock->next;
    mblock->next->prev = mblock->prev;
    mblock_manager.count--;
    pthread_mutex_unlock(&(mblock_manager.lock));

    //self-link so a repeated delete is caught by the IS_EMPTY() guard above
    INIT_HEAD(mblock);
}
102
/*
 * Accumulate one mblock's counters into the stat entry pStat.
 * copy_name selects whether this is the first mblock of a merge group:
 * when true, the identifying fields (name, trunk_size, element_size) are
 * copied as well; when false only the counters are summed.
 */
#define STAT_DUP(pStat, current, copy_name) \
    do { \
        if (copy_name) { \
            strcpy(pStat->name, current->info.name); \
            pStat->trunk_size = current->info.trunk_size;  \
            pStat->element_size = current->info.element_size;  \
        } \
        pStat->element_total_count += current->info.element_total_count; \
        pStat->element_used_count += current->info.element_used_count;   \
        pStat->trunk_total_count += current->info.trunk_total_count;     \
        pStat->trunk_used_count += current->info.trunk_used_count;       \
        pStat->instance_count += current->info.instance_count;           \
        /* logInfo("name: %s, element_size: %d, total_count: %d, used_count: %d", */ \
        /* pStat->name, pStat->element_size, pStat->element_total_count, pStat->element_used_count); */\
    } while (0)
118
/*
 * Collect aggregated statistics for all registered mblocks.
 * The manager list is kept sorted by (name, element_size), so runs of
 * adjacent entries comparing equal are merged into one stat entry
 * (instance_count and the counters accumulate across the run).
 *
 * stats: output array (zeroed here); size: its capacity in entries;
 * count: out-param set to the number of entries actually written.
 * Returns 0 on success, EFAULT when the manager is uninitialized, or
 * EOVERFLOW when the stats array is too small (partial data written).
 */
int fast_mblock_manager_stat(struct fast_mblock_info *stats,
        const int size, int *count)
{
    int result;
    struct fast_mblock_man *current;
    struct fast_mblock_info *pStat;

    if (!mblock_manager.initialized)
    {
        *count = 0;
        return EFAULT;
    }

    if (size <= 0)
    {
        *count = 0;
        return EOVERFLOW;
    }

    result = 0;
    pStat = stats;
    memset(stats, 0, sizeof(struct fast_mblock_info) * size);
    pthread_mutex_lock(&(mblock_manager.lock));
    current = mblock_manager.head.next;
    while (current != &mblock_manager.head)
    {
        //each iteration flushes current->prev, so the first node (whose
        //prev is the list head) is skipped here and handled on the next pass
        if (current->prev != &mblock_manager.head)
        {
            if (cmp_mblock_info(current, current->prev) != 0)
            {
                //prev ends a merge group: finalize it into a new stat entry
                if (size <= (int)(pStat - stats))
                {
                    result = EOVERFLOW;
                    break;
                }
                STAT_DUP(pStat, current->prev, true);
                pStat++;
            }
            else
            {
                //same (name, element_size): accumulate counters only
                STAT_DUP(pStat, current->prev, false);
            }
        }
        current = current->next;
    }

    //flush the final merge group (the last list node has no successor to
    //trigger the comparison above)
    if (!IS_EMPTY(&mblock_manager.head))
    {
        if (size <= (int)(pStat - stats))
        {
            result = EOVERFLOW;
        }
        else
        {
            STAT_DUP(pStat, current->prev, true);
            pStat++;
        }
    }
    pthread_mutex_unlock(&(mblock_manager.lock));

    *count = (int)(pStat - stats);
    return result;
}
182
183 //desc order
fast_mblock_info_cmp_by_alloc_bytes(const void * p1,const void * p2)184 static int fast_mblock_info_cmp_by_alloc_bytes(const void *p1, const void *p2)
185 {
186 struct fast_mblock_info *pStat1;
187 struct fast_mblock_info *pStat2;
188
189 pStat1 = (struct fast_mblock_info *)p1;
190 pStat2 = (struct fast_mblock_info *)p2;
191 return pStat2->trunk_size * pStat2->trunk_total_count -
192 pStat1->trunk_size * pStat1->trunk_total_count;
193 }
194
195 //desc order
fast_mblock_info_cmp_by_element_size(const void * p1,const void * p2)196 static int fast_mblock_info_cmp_by_element_size(const void *p1, const void *p2)
197 {
198 struct fast_mblock_info *pStat1;
199 struct fast_mblock_info *pStat2;
200
201 pStat1 = (struct fast_mblock_info *)p1;
202 pStat2 = (struct fast_mblock_info *)p2;
203 return pStat2->element_size - pStat1->element_size;
204 }
205
fast_mblock_manager_stat_print_ex(const bool hide_empty,const int order_by)206 int fast_mblock_manager_stat_print_ex(const bool hide_empty, const int order_by)
207 {
208 int result;
209 int count;
210 int alloc_size;
211 struct fast_mblock_info *stats;
212 struct fast_mblock_info *pStat;
213 struct fast_mblock_info *stat_end;
214
215 stats = NULL;
216 count = 0;
217 alloc_size = 64;
218 result = EOVERFLOW;
219 while (result == EOVERFLOW)
220 {
221 alloc_size *= 2;
222 stats = realloc(stats, sizeof(struct fast_mblock_info) * alloc_size);
223 if (stats == NULL)
224 {
225 return ENOMEM;
226 }
227 result = fast_mblock_manager_stat(stats,
228 alloc_size, &count);
229 }
230
231 if (result == 0)
232 {
233 int64_t alloc_mem;
234 int64_t used_mem;
235 int64_t amem;
236 char alloc_mem_str[32];
237 char used_mem_str[32];
238
239 if (order_by == FAST_MBLOCK_ORDER_BY_ALLOC_BYTES)
240 {
241 qsort(stats, count, sizeof(struct fast_mblock_info),
242 fast_mblock_info_cmp_by_alloc_bytes);
243 }
244 else
245 {
246 qsort(stats, count, sizeof(struct fast_mblock_info),
247 fast_mblock_info_cmp_by_element_size);
248 }
249
250 alloc_mem = 0;
251 used_mem = 0;
252 logInfo("%20s %12s %8s %12s %10s %10s %14s %12s %12s", "name", "element_size",
253 "instance", "alloc_bytes", "trunc_alloc", "trunk_used",
254 "element_alloc", "element_used", "used_ratio");
255 stat_end = stats + count;
256 for (pStat=stats; pStat<stat_end; pStat++)
257 {
258 if (pStat->trunk_total_count > 0)
259 {
260 amem = pStat->trunk_size * pStat->trunk_total_count;
261 alloc_mem += amem;
262 used_mem += GET_BLOCK_SIZE(*pStat) * pStat->element_used_count;
263 }
264 else
265 {
266 amem = 0;
267 if (hide_empty)
268 {
269 continue;
270 }
271 }
272
273 logInfo("%20s %12d %8d %12"PRId64" %10d %10d %14d %12d %11.2f%%", pStat->name,
274 pStat->element_size, pStat->instance_count, amem,
275 pStat->trunk_total_count, pStat->trunk_used_count,
276 pStat->element_total_count, pStat->element_used_count,
277 pStat->element_total_count > 0 ? 100.00 * (double)
278 pStat->element_used_count / (double)
279 pStat->element_total_count : 0.00);
280 }
281
282 if (alloc_mem < 1024)
283 {
284 sprintf(alloc_mem_str, "%"PRId64" bytes", alloc_mem);
285 sprintf(used_mem_str, "%"PRId64" bytes", used_mem);
286 }
287 else if (alloc_mem < 1024 * 1024)
288 {
289 sprintf(alloc_mem_str, "%.3f KB", (double)alloc_mem / 1024);
290 sprintf(used_mem_str, "%.3f KB", (double)used_mem / 1024);
291 }
292 else if (alloc_mem < 1024 * 1024 * 1024)
293 {
294 sprintf(alloc_mem_str, "%.3f MB", (double)alloc_mem / (1024 * 1024));
295 sprintf(used_mem_str, "%.3f MB", (double)used_mem / (1024 * 1024));
296 }
297 else
298 {
299 sprintf(alloc_mem_str, "%.3f GB", (double)alloc_mem / (1024 * 1024 * 1024));
300 sprintf(used_mem_str, "%.3f GB", (double)used_mem / (1024 * 1024 * 1024));
301 }
302
303 logInfo("mblock entry count: %d, alloc memory: %s, used memory: %s, used ratio: %.2f%%",
304 count, alloc_mem_str, used_mem_str,
305 alloc_mem > 0 ? 100.00 * (double)used_mem / alloc_mem : 0.00);
306 }
307
308 if (stats != NULL) free(stats);
309 return 0;
310 }
311
/*
 * Convenience wrapper around fast_mblock_init_ex2() with no name and no
 * malloc-trunk callbacks. See fast_mblock_init_ex2() for parameter and
 * return semantics.
 */
int fast_mblock_init_ex(struct fast_mblock_man *mblock,
        const int element_size, const int alloc_elements_once,
        fast_mblock_alloc_init_func init_func, const bool need_lock)
{
    return fast_mblock_init_ex2(mblock, NULL, element_size,
            alloc_elements_once, init_func, need_lock, NULL, NULL, NULL);
}
319
/*
 * Initialize an mblock allocator.
 * name: optional label used in stat reports (NULL => auto-named from the
 *       element size when registered).
 * element_size: payload bytes per element (> 0); aligned via MEM_ALIGN.
 * alloc_elements_once: elements per trunk; <= 0 selects a default sized
 *       so that one trunk is roughly 1MB.
 * init_func: optional per-element constructor run when a trunk is carved.
 * need_lock: protect alloc/free with an internal mutex.
 * malloc_trunk_check/notify/args: optional callbacks fired around trunk
 *       allocation and release.
 * Returns 0 on success, EINVAL for a bad element size, or the
 * init_pthread_lock() error code.
 */
int fast_mblock_init_ex2(struct fast_mblock_man *mblock, const char *name,
        const int element_size, const int alloc_elements_once,
        fast_mblock_alloc_init_func init_func, const bool need_lock,
        fast_mblock_malloc_trunk_check_func malloc_trunk_check,
        fast_mblock_malloc_trunk_notify_func malloc_trunk_notify,
        void *malloc_trunk_args)
{
    int result;
    int block_size;

    if (element_size <= 0)
    {
        logError("file: "__FILE__", line: %d, "
                "invalid block size: %d",
                __LINE__, element_size);
        return EINVAL;
    }

    mblock->info.element_size = MEM_ALIGN(element_size);
    block_size = fast_mblock_get_block_size(mblock);
    if (alloc_elements_once > 0)
    {
        mblock->alloc_elements_once = alloc_elements_once;
    }
    else
    {
        mblock->alloc_elements_once = (1024 * 1024) / block_size;
        if (mblock->alloc_elements_once <= 0)
        {
            //bug fix: a block larger than 1MB made the division yield 0,
            //producing a trunk with no elements and an out-of-bounds write
            //in fast_mblock_prealloc(); allocate at least one element
            mblock->alloc_elements_once = 1;
        }
    }

    if (need_lock && (result=init_pthread_lock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, "
                "init_pthread_lock fail, errno: %d, error info: %s",
                __LINE__, result, STRERROR(result));
        return result;
    }

    mblock->alloc_init_func = init_func;
    INIT_HEAD(&mblock->trunks.head);
    mblock->info.trunk_total_count = 0;
    mblock->info.trunk_used_count = 0;
    mblock->free_chain_head = NULL;
    mblock->delay_free_chain.head = NULL;
    mblock->delay_free_chain.tail = NULL;
    mblock->info.element_total_count = 0;
    mblock->info.element_used_count = 0;
    mblock->info.instance_count = 1;
    //each trunk = bookkeeping header + alloc_elements_once blocks
    mblock->info.trunk_size = sizeof(struct fast_mblock_malloc) + block_size *
            mblock->alloc_elements_once;
    mblock->need_lock = need_lock;
    mblock->malloc_trunk_callback.check_func = malloc_trunk_check;
    mblock->malloc_trunk_callback.notify_func = malloc_trunk_notify;
    mblock->malloc_trunk_callback.args = malloc_trunk_args;

    if (name != NULL)
    {
        snprintf(mblock->info.name, sizeof(mblock->info.name), "%s", name);
    }
    else
    {
        *mblock->info.name = '\0';  //add_to_mblock_list() synthesizes a name
    }
    add_to_mblock_list(mblock);

    return 0;
}
386
/*
 * Allocate one new trunk, carve it into free nodes and install them as the
 * free chain. Called with the mblock lock held (when need_lock is set).
 *
 * NOTE(review): free_chain_head is overwritten unconditionally below, so
 * this assumes the free chain is empty when called -- the alloc path only
 * calls it in that state; any pre-existing free nodes would be dropped.
 * Also assumes trunk_size >= sizeof(struct fast_mblock_malloc) +
 * block_size, i.e. at least one element per trunk -- confirm with the
 * init path.
 *
 * Returns 0 on success, ENOMEM when the check callback vetoes the
 * allocation or malloc fails, or the init_func error code.
 */
static int fast_mblock_prealloc(struct fast_mblock_man *mblock)
{
    struct fast_mblock_node *pNode;
    struct fast_mblock_malloc *pMallocNode;
    char *pNew;
    char *pTrunkStart;
    char *p;
    char *pLast;
    int result;
    int block_size;

    block_size = fast_mblock_get_block_size(mblock);
    //give the owner a chance to veto growth (e.g. a memory budget)
    if (mblock->malloc_trunk_callback.check_func != NULL &&
        mblock->malloc_trunk_callback.check_func(
            mblock->info.trunk_size,
            mblock->malloc_trunk_callback.args) != 0)
    {
        return ENOMEM;
    }

    pNew = (char *)malloc(mblock->info.trunk_size);
    if (pNew == NULL)
    {
        logError("file: "__FILE__", line: %d, " \
                "malloc %d bytes fail, " \
                "errno: %d, error info: %s", \
                __LINE__, mblock->info.trunk_size,
                errno, STRERROR(errno));
        return errno != 0 ? errno : ENOMEM;
    }
    memset(pNew, 0, mblock->info.trunk_size);

    //the trunk header sits at the start of the allocation; the element
    //nodes follow immediately after it
    pMallocNode = (struct fast_mblock_malloc *)pNew;

    pTrunkStart = pNew + sizeof(struct fast_mblock_malloc);
    pLast = pNew + (mblock->info.trunk_size - block_size);
    for (p=pTrunkStart; p<=pLast; p += block_size)
    {
        pNode = (struct fast_mblock_node *)p;

        if (mblock->alloc_init_func != NULL)
        {
            if ((result=mblock->alloc_init_func(pNode->data)) != 0)
            {
                free(pNew);
                return result;
            }
        }
        //offset lets FAST_MBLOCK_GET_TRUNK map a node back to its trunk
        pNode->offset = (int)(p - pNew);
        pNode->next = (struct fast_mblock_node *)(p + block_size);
    }

    //terminate the chain and publish it as the free list
    ((struct fast_mblock_node *)pLast)->next = NULL;
    mblock->free_chain_head = (struct fast_mblock_node *)pTrunkStart;

    //append the trunk to the circular trunk list
    pMallocNode->ref_count = 0;
    pMallocNode->prev = mblock->trunks.head.prev;
    pMallocNode->next = &mblock->trunks.head;
    mblock->trunks.head.prev->next = pMallocNode;
    mblock->trunks.head.prev = pMallocNode;

    mblock->info.trunk_total_count++;
    mblock->info.element_total_count += mblock->alloc_elements_once;

    if (mblock->malloc_trunk_callback.notify_func != NULL)
    {
        mblock->malloc_trunk_callback.notify_func(mblock->info.trunk_size,
                mblock->malloc_trunk_callback.args);
    }

    return 0;
}
459
fast_mblock_remove_trunk(struct fast_mblock_man * mblock,struct fast_mblock_malloc * pMallocNode)460 static inline void fast_mblock_remove_trunk(struct fast_mblock_man *mblock,
461 struct fast_mblock_malloc *pMallocNode)
462 {
463 pMallocNode->prev->next = pMallocNode->next;
464 pMallocNode->next->prev = pMallocNode->prev;
465 mblock->info.trunk_total_count--;
466 mblock->info.element_total_count -= mblock->alloc_elements_once;
467
468 if (mblock->malloc_trunk_callback.notify_func != NULL)
469 {
470 mblock->malloc_trunk_callback.notify_func(-1 * mblock->info.trunk_size,
471 mblock->malloc_trunk_callback.args);
472 }
473 }
474
/* Map an element node back to its owning trunk header using the offset
 * recorded when the trunk was carved in fast_mblock_prealloc(). */
#define FAST_MBLOCK_GET_TRUNK(pNode) \
    (struct fast_mblock_malloc *)((char *)pNode - pNode->offset)
477
fast_mblock_ref_counter_op(struct fast_mblock_man * mblock,struct fast_mblock_node * pNode,const bool is_inc)478 static inline void fast_mblock_ref_counter_op(struct fast_mblock_man *mblock,
479 struct fast_mblock_node *pNode, const bool is_inc)
480 {
481 struct fast_mblock_malloc *pMallocNode;
482
483 pMallocNode = FAST_MBLOCK_GET_TRUNK(pNode);
484 if (is_inc)
485 {
486 if (pMallocNode->ref_count == 0)
487 {
488 mblock->info.trunk_used_count++;
489 }
490 pMallocNode->ref_count++;
491 }
492 else
493 {
494 pMallocNode->ref_count--;
495 if (pMallocNode->ref_count == 0)
496 {
497 mblock->info.trunk_used_count--;
498 }
499 }
500 }
501
/* Readability wrappers over fast_mblock_ref_counter_op(). */
#define fast_mblock_ref_counter_inc(mblock, pNode) \
    fast_mblock_ref_counter_op(mblock, pNode, true)

#define fast_mblock_ref_counter_dec(mblock, pNode) \
    fast_mblock_ref_counter_op(mblock, pNode, false)
507
/*
 * Free every trunk of the mblock, reset its counters, destroy its mutex
 * (when need_lock) and unregister it from the global manager list.
 *
 * NOTE(review): the empty-trunk-list early return below skips
 * pthread_mutex_destroy(), so a lock initialized by
 * fast_mblock_init_ex2() is not destroyed on that path. This does make a
 * second destroy call harmless (the list is already empty) -- confirm
 * whether the skipped destroy is intentional before changing it.
 */
void fast_mblock_destroy(struct fast_mblock_man *mblock)
{
    struct fast_mblock_malloc *pMallocNode;
    struct fast_mblock_malloc *pMallocTmp;

    if (IS_EMPTY(&mblock->trunks.head))
    {
        delete_from_mblock_list(mblock);
        return;
    }

    //walk the circular trunk list, freeing each trunk allocation
    pMallocNode = mblock->trunks.head.next;
    while (pMallocNode != &mblock->trunks.head)
    {
        pMallocTmp = pMallocNode;
        pMallocNode = pMallocNode->next;

        free(pMallocTmp);
    }

    INIT_HEAD(&mblock->trunks.head);
    mblock->info.trunk_total_count = 0;
    mblock->info.trunk_used_count = 0;
    mblock->free_chain_head = NULL;
    mblock->info.element_used_count = 0;
    mblock->info.element_total_count = 0;

    if (mblock->need_lock) pthread_mutex_destroy(&(mblock->lock));
    delete_from_mblock_list(mblock);
}
538
/*
 * Allocate one element node. Sources, in order of preference:
 *  1. the free chain;
 *  2. the delay-free chain, when its head node's recycle time has passed
 *     (counters are NOT touched here -- fast_mblock_delay_free() never
 *     decremented them, so the node is still accounted as "used");
 *  3. a freshly preallocated trunk.
 * Returns the node, or NULL on lock failure / allocation failure.
 */
struct fast_mblock_node *fast_mblock_alloc(struct fast_mblock_man *mblock)
{
    struct fast_mblock_node *pNode;
    int result;

    if (mblock->need_lock && (result=pthread_mutex_lock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, " \
                "call pthread_mutex_lock fail, " \
                "errno: %d, error info: %s", \
                __LINE__, result, STRERROR(result));
        return NULL;
    }

    if (mblock->free_chain_head != NULL)
    {
        //fast path: pop the head of the free chain
        pNode = mblock->free_chain_head;
        mblock->free_chain_head = pNode->next;
        mblock->info.element_used_count++;

        fast_mblock_ref_counter_inc(mblock, pNode);
    }
    else
    {
        if (mblock->delay_free_chain.head != NULL &&
            mblock->delay_free_chain.head->recycle_timestamp <= get_current_time())
        {
            //recycle an expired delay-freed node; counters stay as-is
            pNode = mblock->delay_free_chain.head;
            mblock->delay_free_chain.head = pNode->next;
            if (mblock->delay_free_chain.tail == pNode)
            {
                mblock->delay_free_chain.tail = NULL;
            }
        }
        else if ((result=fast_mblock_prealloc(mblock)) == 0)
        {
            //a new trunk was carved; pop from the fresh free chain
            pNode = mblock->free_chain_head;
            mblock->free_chain_head = pNode->next;
            mblock->info.element_used_count++;
            fast_mblock_ref_counter_inc(mblock, pNode);
        }
        else
        {
            pNode = NULL;
        }
    }

    if (mblock->need_lock && (result=pthread_mutex_unlock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, " \
                "call pthread_mutex_unlock fail, " \
                "errno: %d, error info: %s", \
                __LINE__, result, STRERROR(result));
    }

    return pNode;
}
596
fast_mblock_free(struct fast_mblock_man * mblock,struct fast_mblock_node * pNode)597 int fast_mblock_free(struct fast_mblock_man *mblock, \
598 struct fast_mblock_node *pNode)
599 {
600 int result;
601
602 if (mblock->need_lock && (result=pthread_mutex_lock(&(mblock->lock))) != 0)
603 {
604 logError("file: "__FILE__", line: %d, " \
605 "call pthread_mutex_lock fail, " \
606 "errno: %d, error info: %s", \
607 __LINE__, result, STRERROR(result));
608 return result;
609 }
610
611 pNode->next = mblock->free_chain_head;
612 mblock->free_chain_head = pNode;
613 mblock->info.element_used_count--;
614 fast_mblock_ref_counter_dec(mblock, pNode);
615
616 if (mblock->need_lock && (result=pthread_mutex_unlock(&(mblock->lock))) != 0)
617 {
618 logError("file: "__FILE__", line: %d, " \
619 "call pthread_mutex_unlock fail, " \
620 "errno: %d, error info: %s", \
621 __LINE__, result, STRERROR(result));
622 }
623
624 return 0;
625 }
626
/*
 * Queue a node for deferred recycling: fast_mblock_alloc() may hand it
 * out again once recycle_timestamp (now + deley) has passed.
 * Note: unlike fast_mblock_free(), element_used_count and the trunk ref
 * count are NOT decremented -- the node is considered still in use until
 * re-allocated. ("deley" is a long-standing typo for "delay", kept to
 * match the public header.)
 * Returns 0 on success or the pthread_mutex_lock error code.
 */
int fast_mblock_delay_free(struct fast_mblock_man *mblock,
        struct fast_mblock_node *pNode, const int deley)
{
    int result;

    if (mblock->need_lock && (result=pthread_mutex_lock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, " \
                "call pthread_mutex_lock fail, " \
                "errno: %d, error info: %s", \
                __LINE__, result, STRERROR(result));
        return result;
    }

    //append to the tail of the delay-free FIFO
    pNode->recycle_timestamp = get_current_time() + deley;
    if (mblock->delay_free_chain.head == NULL)
    {
        mblock->delay_free_chain.head = pNode;
    }
    else
    {
        mblock->delay_free_chain.tail->next = pNode;
    }
    mblock->delay_free_chain.tail = pNode;
    pNode->next = NULL;

    if (mblock->need_lock && (result=pthread_mutex_unlock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, " \
                "call pthread_mutex_unlock fail, " \
                "errno: %d, error info: %s", \
                __LINE__, result, STRERROR(result));
    }

    return 0;
}
663
fast_mblock_chain_count(struct fast_mblock_man * mblock,struct fast_mblock_node * head)664 static int fast_mblock_chain_count(struct fast_mblock_man *mblock,
665 struct fast_mblock_node *head)
666 {
667 struct fast_mblock_node *pNode;
668 int count;
669 int result;
670
671 if (mblock->need_lock && (result=pthread_mutex_lock(&(mblock->lock))) != 0)
672 {
673 logError("file: "__FILE__", line: %d, " \
674 "call pthread_mutex_lock fail, " \
675 "errno: %d, error info: %s", \
676 __LINE__, result, STRERROR(result));
677 return -1;
678 }
679
680 count = 0;
681 pNode = head;
682 while (pNode != NULL)
683 {
684 pNode = pNode->next;
685 count++;
686 }
687
688 if (mblock->need_lock && (result=pthread_mutex_unlock(&(mblock->lock))) != 0)
689 {
690 logError("file: "__FILE__", line: %d, " \
691 "call pthread_mutex_unlock fail, " \
692 "errno: %d, error info: %s", \
693 __LINE__, result, STRERROR(result));
694 }
695
696 return count;
697 }
698
/* Number of nodes currently on the free chain (-1 on lock failure). */
int fast_mblock_free_count(struct fast_mblock_man *mblock)
{
    return fast_mblock_chain_count(mblock, mblock->free_chain_head);
}
703
/* Number of nodes currently on the delay-free chain (-1 on lock failure). */
int fast_mblock_delay_free_count(struct fast_mblock_man *mblock)
{
    return fast_mblock_chain_count(mblock, mblock->delay_free_chain.head);
}
708
/*
 * Core of trunk reclamation. Called with the mblock lock held.
 *
 * Walks the free chain once, partitioning nodes into two sets:
 *  - nodes whose trunk still has live elements (ref_count > 0), or whose
 *    fully-free trunk is being kept because the reclaim target was
 *    already met (ref_count == 0 && lookup_done) -- these are relinked
 *    into a rebuilt free chain;
 *  - nodes of fully-free trunks (ref_count == 0, not yet done) -- the
 *    trunk is unlinked, marked reclaimed by setting ref_count = -1
 *    (subsequent nodes of the same trunk then match ref_count < 0 and
 *    are simply skipped), and pushed onto the output freelist.
 *
 * reclaim_target: stop after this many trunks (0 = reclaim all free).
 * reclaim_count: out-param, trunks actually reclaimed.
 * ppFreelist: out-param, singly-linked list of trunks for the caller to
 *             free OUTSIDE the lock.
 * Returns 0 when at least one trunk was reclaimed, ENOENT otherwise.
 */
static int fast_mblock_do_reclaim(struct fast_mblock_man *mblock,
        const int reclaim_target, int *reclaim_count,
        struct fast_mblock_malloc **ppFreelist)
{
    struct fast_mblock_node *pPrevious;
    struct fast_mblock_node *pCurrent;
    struct fast_mblock_malloc *pMallocNode;
    struct fast_mblock_malloc *freelist;
    bool lookup_done;   //true once reclaim_target trunks have been taken

    lookup_done = false;
    *reclaim_count = 0;
    freelist = NULL;
    pPrevious = NULL;
    //detach the free chain; kept nodes are relinked as we go
    pCurrent = mblock->free_chain_head;
    mblock->free_chain_head = NULL;
    while (pCurrent != NULL)
    {
        pMallocNode = FAST_MBLOCK_GET_TRUNK(pCurrent);
        if (pMallocNode->ref_count > 0 ||
            (pMallocNode->ref_count == 0 && lookup_done))
        { //keep in free chain

            if (pPrevious != NULL)
            {
                pPrevious->next = pCurrent;
            }
            else
            {
                mblock->free_chain_head = pCurrent;
            }

            pPrevious = pCurrent;
            pCurrent = pCurrent->next;
            if (pCurrent == NULL)
            {
                goto OUTER;
            }
            pMallocNode = FAST_MBLOCK_GET_TRUNK(pCurrent);

            //consume the rest of the current "keep" run
            while (pMallocNode->ref_count > 0 ||
                (pMallocNode->ref_count == 0 && lookup_done))
            {
                pPrevious = pCurrent;
                pCurrent = pCurrent->next;
                if (pCurrent == NULL)
                {
                    goto OUTER;
                }
                pMallocNode = FAST_MBLOCK_GET_TRUNK(pCurrent);
            }
        }

        //consume a "reclaim" run: first node of a free trunk triggers the
        //unlink; its siblings (ref_count already -1) are skipped
        while (pMallocNode->ref_count < 0 ||
            (pMallocNode->ref_count == 0 && !lookup_done))
        {
            if (pMallocNode->ref_count == 0) //trigger by the first node
            {
                fast_mblock_remove_trunk(mblock, pMallocNode);
                pMallocNode->ref_count = -1;

                pMallocNode->next = freelist;
                freelist = pMallocNode;
                (*reclaim_count)++;
                if (reclaim_target > 0 && *reclaim_count == reclaim_target)
                {
                    lookup_done = true;
                }
            }

            pCurrent = pCurrent->next;
            if (pCurrent == NULL)
            {
                goto OUTER;
            }
            pMallocNode = FAST_MBLOCK_GET_TRUNK(pCurrent);
        }
    }


OUTER:
    //terminate the rebuilt free chain
    if (pPrevious != NULL)
    {
        pPrevious->next = NULL;
    }


    {
        //lock already held: temporarily clear need_lock so the debug
        //helper's internal locking doesn't deadlock
        bool old_need_lock;
        old_need_lock = mblock->need_lock;
        mblock->need_lock = false;
        logDebug("file: "__FILE__", line: %d, "
                "reclaim trunks for %p, reclaimed trunks: %d, "
                "free node count: %d", __LINE__,
                mblock, *reclaim_count, fast_mblock_free_count(mblock));
        mblock->need_lock = old_need_lock;
    }

    *ppFreelist = freelist;
    return (freelist != NULL ? 0 : ENOENT);
}
810
fast_mblock_free_trunks(struct fast_mblock_man * mblock,struct fast_mblock_malloc * freelist)811 void fast_mblock_free_trunks(struct fast_mblock_man *mblock,
812 struct fast_mblock_malloc *freelist)
813 {
814 struct fast_mblock_malloc *pDeleted;
815 int count;
816 count = 0;
817 while (freelist != NULL)
818 {
819 pDeleted = freelist;
820 freelist = freelist->next;
821 free(pDeleted);
822 count++;
823 }
824 logDebug("file: "__FILE__", line: %d, "
825 "free_trunks for %p, free trunks: %d", __LINE__,
826 mblock, count);
827 }
828
/*
 * Reclaim fully-free trunks from the mblock.
 * reclaim_target: number of trunks to reclaim (0 = all free trunks;
 *   negative is invalid).
 * reclaim_count: out-param, trunks actually reclaimed.
 * free_trunks_func: optional callback to release the reclaimed trunks;
 *   defaults to fast_mblock_free_trunks(). Runs outside the lock.
 * Returns 0 on success, EINVAL for bad input / nothing free, E2BIG when
 * fewer free trunks exist than requested, ENOENT when nothing could be
 * reclaimed, or a lock error code.
 *
 * Bug fix: the unlock call previously reassigned `result`, clobbering the
 * E2BIG / do_reclaim status -- a failed reclaim was reported as success.
 * The unlock status now uses its own variable.
 */
int fast_mblock_reclaim(struct fast_mblock_man *mblock,
        const int reclaim_target, int *reclaim_count,
        fast_mblock_free_trunks_func free_trunks_func)
{
    int result;
    int unlock_result;
    struct fast_mblock_malloc *freelist;

    if (reclaim_target < 0 || mblock->info.trunk_total_count -
            mblock->info.trunk_used_count <= 0)
    {
        *reclaim_count = 0;
        return EINVAL;
    }

    if (mblock->need_lock && (result=pthread_mutex_lock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, "
                "call pthread_mutex_lock fail, "
                "errno: %d, error info: %s",
                __LINE__, result, STRERROR(result));
        *reclaim_count = 0;
        return result;
    }

    if (reclaim_target > 0 && mblock->info.trunk_total_count -
            mblock->info.trunk_used_count < reclaim_target)
    {
        //not enough fully-free trunks to satisfy the request
        *reclaim_count = 0;
        result = E2BIG;
        freelist = NULL;
    }
    else
    {
        result = fast_mblock_do_reclaim(mblock, reclaim_target,
                reclaim_count, &freelist);
    }

    if (mblock->need_lock &&
        (unlock_result=pthread_mutex_unlock(&(mblock->lock))) != 0)
    {
        logError("file: "__FILE__", line: %d, "
                "call pthread_mutex_unlock fail, "
                "errno: %d, error info: %s",
                __LINE__, unlock_result, STRERROR(unlock_result));
    }

    //free the reclaimed trunks outside the lock
    if (result == 0)
    {
        if (free_trunks_func != NULL)
        {
            free_trunks_func(mblock, freelist);
        }
        else
        {
            fast_mblock_free_trunks(mblock, freelist);
        }
    }

    return result;
}
888
889