/** Zone memory management. -*- Mode: ObjC -*-
   Copyright (C) 1997,1998 Free Software Foundation, Inc.

   Written by: Yoo C. Chung <wacko@laplace.snu.ac.kr>
   Date: January 1997
   Rewrite by: Richard Frith-Macdonald <richard@brainstorm.co.uk>

   This file is part of the GNUstep Base Library.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110 USA.

   <title>NSZone class reference</title>
   $Date$ $Revision$
*/

/*  Design goals:

    - Allocation and deallocation should be reasonably efficient.
    - We want to catch code that writes outside its permitted area.

 */


/* Actual design:

   - The default zone uses malloc() and friends.  We assume that
   they're thread safe and that they return NULL if we're out of
   memory (glibc malloc does this, what about other mallocs? FIXME).

   - The OpenStep spec says that when a zone is recycled, any memory in
   use is returned to the default zone.
   Since, in general, we have no control over the system malloc, we can't
   possibly do this.  Instead, we move the recycled zone to a list of
   'dead' zones, and as soon as all memory used in it is released, we
   destroy it and remove it from that list.  In the meantime, we release
   any blocks of memory we can (ie those that don't contain unfreed chunks).

   - For freeable zones, a small linear buffer is used for
   deallocating and allocating.  Anything that can't go into the
   buffer then uses a more general purpose segregated fit algorithm
   after flushing the buffer.

   - For memory chunks in freeable zones, the pointer to the chunk is
   preceded by a chunk header which contains the size of the chunk
   (plus a couple of flags) and a pointer to the end of the memory
   requested.  This adds 8 bytes for freeable zones, which is usually
   what we need for alignment purposes anyway (assuming we're on a
   32 bit machine).  The granularity for allocation of chunks is quite
   large - a chunk must be big enough to hold the chunk header plus a
   couple of pointers and an unsigned size value.
   The actual memory allocated will be the size of the chunk header plus
   the size of memory requested plus one (a guard byte), all rounded up
   to a multiple of the granularity.

   - For nonfreeable zones, a worst-fit-like strategy is used.  This is
   OK since we don't have to worry about memory fragmentation. */
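
/* Illustrative sketch (not part of the implementation): how the zone
   API implemented in this file is typically used from client code.
   All names are the public NSZone functions defined below.

     NSZone *z = NSCreateZone(16384, 4096, YES);   // freeable zone
     NSSetZoneName(z, @"scratch");
     char *buf = (char*)NSZoneMalloc(z, 100);      // fmalloc() path
     buf = (char*)NSZoneRealloc(z, buf, 200);      // frealloc() path
     NSZoneFree(z, buf);                           // chunk goes to buffer
     NSRecycleZone(z);  // destroyed once all its memory is released

   Passing NO for canFree selects the nonfreeable (nmalloc/nfree)
   implementation instead. */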

/* Other information:

   - This uses some GCC specific extensions.  But since the library is
   supposed to compile on GCC 2.7.2.1 (patched) or higher, and the
   only other Objective-C compiler I know of (other than NeXT's, which
   is based on GCC as far as I know) is the StepStone compiler (and I
   haven't the foggiest idea why anyone would prefer that to GCC ;),
   it should be OK.

   - These functions should be thread safe, but I haven't really
   tested them extensively in multithreaded cases. */


/* Define to turn off NSAssertions. */
#define NS_BLOCK_ASSERTIONS 1

#define IN_NSZONE_M 1

#import "common.h"
#include <stddef.h>
#import "Foundation/NSException.h"
#import "Foundation/NSLock.h"
#import "GSPrivate.h"
#import "GSPThread.h"

static pthread_mutex_t  zoneLock = PTHREAD_MUTEX_INITIALIZER;

/**
 * Try to get more memory - the normal process has failed.
 * If we can't do anything, just return a null pointer.
 * Try to do some logging if possible.
 */
void *
GSOutOfMemory(NSUInteger size, BOOL retry)
{
  fprintf(stderr, "GSOutOfMemory ... wanting %"PRIuPTR" bytes.\n", size);
  return 0;
}

/* Zone functions for the default zone. */
static void* default_malloc (NSZone *zone, size_t size);
static void* default_realloc (NSZone *zone, void *ptr, size_t size);
static void default_free (NSZone *zone, void *ptr);
static void default_recycle (NSZone *zone);
static BOOL default_check (NSZone *zone);
static BOOL default_lookup (NSZone *zone, void *ptr);
static struct NSZoneStats default_stats (NSZone *zone);

static void*
default_malloc (NSZone *zone, size_t size)
{
  void *mem;

  mem = malloc(size);
  if (mem != NULL)
    {
      return mem;
    }
  [NSException raise: NSMallocException
              format: @"Default zone has run out of memory"];
  return 0;
}

static void*
default_realloc (NSZone *zone, void *ptr, size_t size)
{
  void *mem;

  mem = realloc(ptr, size);
  if (mem != NULL)
    {
      return mem;
    }
  [NSException raise: NSMallocException
              format: @"Default zone has run out of memory"];
  return 0;
}

static void
default_free (NSZone *zone, void *ptr)
{
  free(ptr);
}

static void
default_recycle (NSZone *zone)
{
  /* Recycle the default zone?  Thou hast got to be kiddin'. */
  [NSException raise: NSGenericException
              format: @"Trying to recycle default zone"];
}

static BOOL
default_check (NSZone *zone)
{
  /* We can't check memory managed by malloc(). */
  [NSException raise: NSGenericException
	      format: @"No checking for default zone"];
  return NO;
}

static BOOL
default_lookup (NSZone *zone, void *ptr)
{
  /* Assume all memory is in default zone. */
  return YES;
}

static struct NSZoneStats
default_stats (NSZone *zone)
{
  struct NSZoneStats dummy = {0,0,0,0,0};

  /* We can't obtain statistics from the memory managed by malloc(). */
  [NSException raise: NSGenericException
	      format: @"No statistics for default zone"];
  return dummy;
}

static NSZone default_zone =
{
  default_malloc, default_realloc, default_free, default_recycle,
  default_check, default_lookup, default_stats, 0, @"default", 0
};

/*
 * For backward compatibility.
 */
NSZone	*__nszone_private_hidden_default_zone = &default_zone;



void
NSSetZoneName (NSZone *zone, NSString *name)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  pthread_mutex_lock(&zoneLock);
  name = [name copy];
  if (zone->name != nil)
    [zone->name release];
  zone->name = name;
  pthread_mutex_unlock(&zoneLock);
}

NSString*
NSZoneName (NSZone *zone)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  return zone->name;
}

/* Alignment */
#ifdef ALIGN
#undef ALIGN
#endif
#define ALIGN ((__alignof__(double) < 8) ? 8 : __alignof__(double))
#define MINGRAN 256 /* Minimum granularity. */
#define DEFBLOCK 16384 /* Default granularity. */
#define BUFFER 4 /* Buffer size.  FIXME: Is this a reasonable optimum? */
#define MAX_SEG 16 /* Segregated list size. */
#define FBSZ sizeof(ff_block)
#define NBSZ sizeof(nf_chunk)

/* Information bits in size. */
#define INUSE 0x01 /* Current chunk in use. */
#define PREVUSE 0x02 /* Previous chunk in use. */
#define LIVE 0x04

/* Bits to mask off to get size. */
#define SIZE_BITS (INUSE | PREVUSE | LIVE)

#define	NF_HEAD sizeof(nf_block)

typedef struct _ffree_free_link ff_link;
typedef struct _nfree_block_struct nf_block;
typedef struct _ffree_block_struct ff_block;
typedef struct _ffree_zone_struct ffree_zone;
typedef struct _nfree_zone_struct nfree_zone;


/* Header for blocks in nonfreeable zones. */
struct _nfree_block_unpadded
{
  struct _nfree_block_struct *next;
  size_t size; // Size of block
  size_t top; // Position of next memory chunk to allocate
};
#define	NFBPAD	sizeof(struct _nfree_block_unpadded)

struct _nfree_block_struct
{
  struct _nfree_block_struct *next;
  size_t size; // Size of block
  size_t top; // Position of next memory chunk to allocate
  char	padding[ALIGN - ((NFBPAD % ALIGN) ? (NFBPAD % ALIGN) : ALIGN)];
};

struct _ffree_block_unpadded {
  size_t	size;
  struct _ffree_block_struct *next;
};
#define	FFCPAD	sizeof(struct _ffree_block_unpadded)

/* Header for blocks and chunks in freeable zones. */
struct _ffree_block_struct
{
  size_t size;
  struct _ffree_block_struct *next;
  char	padding[ALIGN - ((FFCPAD % ALIGN) ? (FFCPAD % ALIGN) : ALIGN)];
};

struct _ffree_free_link_unpadded
{
  size_t	size;
  ff_link	*prev;
  ff_link	*next;
  size_t	back;	/* Back link at end of 'dead' block.	*/
};
#define	FFDPAD	sizeof(struct _ffree_free_link_unpadded)

struct _ffree_free_link
{
  size_t	size;
  ff_link	*prev;
  ff_link	*next;
  size_t	back;
  char	padding[ALIGN - ((FFDPAD % ALIGN) ? (FFDPAD % ALIGN) : ALIGN)];
};

/* NSZone structure for freeable zones. */
struct _ffree_zone_struct
{
  NSZone common;
  pthread_mutex_t lock;
  ff_block *blocks; // Linked list of blocks
  ff_link *segheadlist[MAX_SEG]; // Segregated list, holds heads
  ff_link *segtaillist[MAX_SEG]; // Segregated list, holds tails
  size_t bufsize; // Buffer size
  size_t size_buf[BUFFER]; // Buffer holding sizes
  ff_block *ptr_buf[BUFFER]; // Buffer holding pointers to chunks
};

/* Rounds up N to nearest multiple of BASE. */
static inline size_t
roundupto (size_t n, size_t base)
{
  size_t a = (n/base)*base;

  return (n-a)? (a+base): n;
}
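
/* Worked example (assuming a 64-bit build, where FBSZ is 16 and
   MINCHUNK, defined below, is 32): a request for 100 bytes in
   fmalloc() asks for roundupto(100 + FBSZ + 1, MINCHUNK)
   == roundupto(117, 32) == 128 bytes - the chunk header plus the
   requested memory plus a guard byte, rounded up to the allocation
   granularity as described in the design notes above. */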

/*
 *	Minimum chunk size for freeable zones.
 *	Need room for basic chunk header, next and prev pointers for
 *	free-list, and a reverse pointer (size_t) to go at the end of the
 *	chunk while it is waiting to be consolidated with other chunks.
 */
#define MINCHUNK sizeof(ff_link)

#define CLTOSZ(n) ((n)*MINCHUNK) /* Converts classes to sizes. */

static inline void*
chunkToPointer(ff_block *chunk)
{
  return (void*)(&chunk[1]);
}

static inline ff_block*
pointerToChunk(void* ptr)
{
  return &(((ff_block*)ptr)[-1]);
}
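
/* Illustrative layout of an allocated chunk (the two functions above
   are inverses):

     | ff_block header (FBSZ bytes) | user memory ... | guard byte |
     ^ chunk                        ^ chunkToPointer(chunk)

   The header's 'next' field is pointed (in fmalloc/frealloc) at the
   guard byte just past the memory the caller asked for. */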

static inline size_t
chunkIsLive(ff_block* ptr)
{
  return ptr->size & LIVE;
}

static inline size_t
chunkIsInUse(ff_block* ptr)
{
  return ptr->size & INUSE;
}

static inline size_t
chunkIsPrevInUse(ff_block* ptr)
{
  return ptr->size & PREVUSE;
}

static inline size_t
chunkSize(ff_block* ptr)
{
  return ptr->size & ~SIZE_BITS;
}

static inline size_t
chunkClrLive(ff_block* ptr)
{
  return ptr->size &= ~LIVE;
}

static inline void
chunkClrPrevInUse(ff_block* ptr)
{
  ptr->size &= ~PREVUSE;
}

static inline void
chunkSetInUse(ff_block* ptr)
{
  ptr->size |= INUSE;
}

static inline size_t
chunkSetLive(ff_block* ptr)
{
  return ptr->size |= LIVE;
}

static inline void
chunkSetPrevInUse(ff_block* ptr)
{
  ptr->size |= PREVUSE;
}

static inline void
chunkSetSize(ff_block* ptr, size_t size)
{
  ptr->size = size;
}

static inline ff_block*
chunkNext(ff_block *ptr)
{
  return (ff_block*) ((void*)ptr+chunkSize(ptr));
}

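/* Store the size of a free chunk in the word immediately preceding the
   following chunk's header (a 'back link'), so that chunkPrev() can
   later walk backwards from that following chunk when coalescing in
   flush_buf().  Only valid for chunks that are neither in use nor
   live. */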
static inline void
chunkMakeLink(ff_block *ptr)
{
  NSAssert(!chunkIsInUse(ptr), NSInternalInconsistencyException);
  NSAssert(!chunkIsLive(ptr), NSInternalInconsistencyException);
  (&(chunkNext(ptr)->size))[-1] = chunkSize(ptr);
}

static inline ff_block*
chunkChop(ff_block *ptr, size_t size)
{
  ff_block	*remainder;
  size_t	left = chunkSize(ptr)-size;

  NSAssert((chunkSize(ptr) % MINCHUNK) == 0, NSInternalInconsistencyException);
  NSAssert(chunkSize(ptr) > size, NSInternalInconsistencyException);
  remainder = (ff_block*)((void*)ptr+size);
  chunkSetSize(remainder, left | PREVUSE);
  chunkMakeLink(remainder);
  chunkSetSize(ptr, size | chunkIsPrevInUse(ptr) | INUSE);
  return remainder;
}

static inline ff_block*
chunkPrev(ff_block *ptr)
{
  size_t	offset;
  ff_block	*prev;

  NSAssert(!chunkIsPrevInUse(ptr), NSInternalInconsistencyException);
  offset = (&(ptr->size))[-1];
  NSAssert(offset > 0 && (offset % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  prev = (ff_block*)((void*)ptr-offset);
  NSAssert(chunkSize(prev) == offset, NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(prev), NSInternalInconsistencyException);
  return prev;
}

/* NSZone structure for nonfreeable zones. */
struct _nfree_zone_struct
{
  NSZone common;
  pthread_mutex_t lock;
  /* Linked list of blocks in decreasing order of free space,
     except maybe for the first block. */
  nf_block *blocks;
  size_t use;
};

/* Memory management functions for freeable zones. */
static void* fmalloc (NSZone *zone, size_t size);
static void* frealloc (NSZone *zone, void *ptr, size_t size);
static void ffree (NSZone *zone, void *ptr);
static void frecycle (NSZone *zone);
static BOOL fcheck (NSZone *zone);
static BOOL flookup (NSZone *zone, void *ptr);
static struct NSZoneStats fstats (NSZone *zone);

static inline size_t segindex (size_t size);
static ff_block* get_chunk (ffree_zone *zone, size_t size);
static void take_chunk (ffree_zone *zone, ff_block *chunk);
static void put_chunk (ffree_zone *zone, ff_block *chunk);
static inline void add_buf (ffree_zone *zone, ff_block *chunk);
static void flush_buf (ffree_zone *zone);

/* Memory management functions for nonfreeable zones. */
static void* nmalloc (NSZone *zone, size_t size);
static void nrecycle (NSZone *zone);
static void* nrealloc (NSZone *zone, void *ptr, size_t size);
static void nfree (NSZone *zone, void *ptr);
static BOOL ncheck (NSZone *zone);
static BOOL nlookup (NSZone *zone, void *ptr);
static struct NSZoneStats nstats (NSZone *zone);

/* Memory management functions for recycled zones. */
static void* rmalloc (NSZone *zone, size_t size);
static void rrecycle (NSZone *zone);
static void* rrealloc (NSZone *zone, void *ptr, size_t size);
static void rffree (NSZone *zone, void *ptr);
static void rnfree (NSZone *zone, void *ptr);

/*
 *	Lists of zones to be used to determine if a pointer is in a zone.
 */
static NSZone	*zone_list = 0;

static inline void
destroy_zone(NSZone* zone)
{
  if (zone)
    {
      if (zone_list == zone)
        {
          zone_list = zone->next;
        }
      else
        {
          NSZone *ptr = zone_list;

          while (ptr != NULL && ptr->next != zone)
            {
              ptr = ptr->next;
            }
          if (ptr)
            {
              ptr->next = zone->next;
            }
        }
      free((void*)zone);
    }
}

/* Search the buffer to see if there are any memory chunks large enough
   to satisfy the request using first fit.  If the memory chunk found has
   a size exactly equal to the one requested, remove it from the buffer
   and return it.  If not, cut off a chunk of the requested size from it
   and return that.  If there is no chunk large enough in the buffer,
   get a chunk from the general purpose allocator that uses segregated
   fit.  Since a chunk in the buffer is not freed in the general purpose
   allocator, the headers are as if it is still in use. */
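/* Illustrative outcomes (with hypothetical sizes): if the buffer holds
   chunks of 64 and 256 bytes, a request rounded to 64 is an exact fit
   and is returned directly; a request rounded to 128 chops 128 bytes
   off the 256-byte chunk, leaving a 128-byte chunk in the buffer; a
   request rounded to 512 fits nothing, so the buffer is flushed and
   get_chunk() is asked for 512 bytes. */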
static void*
fmalloc (NSZone *zone, size_t size)
{
  size_t i = 0;
  size_t chunksize = roundupto(size+FBSZ+1, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  size_t bufsize;
  size_t *size_buf = zptr->size_buf;
  ff_block **ptr_buf = zptr->ptr_buf;
  ff_block *chunkhead;
  void *result;

  pthread_mutex_lock(&(zptr->lock));
  bufsize = zptr->bufsize;
  while ((i < bufsize) && (chunksize > size_buf[i]))
    i++;
  if (i < bufsize)
    /* Use memory chunk in buffer. */
    {
      if (size_buf[i] == chunksize)
        /* Exact fit. */
        {
          zptr->bufsize--;
          bufsize = zptr->bufsize;
          chunkhead = ptr_buf[i];
          size_buf[i] = size_buf[bufsize];
          ptr_buf[i] = ptr_buf[bufsize];

          NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
          NSAssert((chunkSize(chunkhead) % MINCHUNK) == 0,
	    NSInternalInconsistencyException);
        }
      else
        {
          /*
	   *	Break off chunk leaving remainder marked as in use since it
	   *	stays in this buffer rather than on a free-list.
	   */
          chunkhead = ptr_buf[i];
          size_buf[i] -= chunksize;
          ptr_buf[i] = chunkChop(chunkhead, chunksize);
	  chunkSetInUse(ptr_buf[i]);
        }
    }
  else
    /* Get memory from segregated fit allocator. */
    {
      flush_buf(zptr);
      chunkhead = get_chunk(zptr, chunksize);
      if (chunkhead == NULL)
        {
          pthread_mutex_unlock(&(zptr->lock));
          if (zone->name != nil)
            [NSException raise: NSMallocException
                        format: @"Zone %@ has run out of memory", zone->name];
          else
            [NSException raise: NSMallocException
                        format: @"Out of memory"];
        }

      NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunkNext(chunkhead)),
	NSInternalInconsistencyException);
      NSAssert((chunkSize(chunkhead) % MINCHUNK) == 0,
	NSInternalInconsistencyException);
    }
  chunkhead->next = (ff_block*)(chunkToPointer(chunkhead)+size);
  *((char*)chunkhead->next) = (char)42;
  chunkSetLive(chunkhead);
  result = chunkToPointer(chunkhead);
  pthread_mutex_unlock(&(zptr->lock));
  return result;
}

/* If PTR == NULL, then it's the same as ordinary memory allocation.
   If a smaller size than it originally had is requested, shrink the
   chunk.  If a larger size is requested, check if there is enough
   space after it.  If there isn't enough space, get a new chunk and
   move it there, releasing the original.  The space before the chunk
   should also be checked, but I'll leave this to a later date. */
static void*
frealloc (NSZone *zone, void *ptr, size_t size)
{
  size_t realsize;
  size_t chunksize = roundupto(size+FBSZ+1, MINCHUNK);
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *chunkhead, *slack;
  void *result;

  NSAssert(ptr == NULL || NSZoneFromPointer(ptr) == zone,
    NSInternalInconsistencyException);
  if (ptr == NULL)
    return fmalloc(zone, size);
  chunkhead = pointerToChunk(ptr);
  pthread_mutex_lock(&(zptr->lock));
  realsize = chunkSize(chunkhead);

  NSAssert(chunkIsInUse(chunkhead), NSInternalInconsistencyException);
  NSAssert((realsize % MINCHUNK) == 0, NSInternalInconsistencyException);

  chunkClrLive(chunkhead);
  if (chunksize < realsize)
    {
      /*
       *	Chop tail off existing memory chunk and tell the next chunk
       *	after it that it is no longer in use.  Then put it in the
       *	buffer to be added to the free list later (we can't add it
       *	immediately 'cos we might invalidate the rule that there
       *	must not be two adjacent unused chunks).
       */
      slack = chunkChop(chunkhead, chunksize);
      chunkSetInUse(slack);
      add_buf(zptr, slack);
    }
  else if (chunksize > realsize)
    {
      size_t nextsize;
      ff_block *nextchunk, *farchunk;

      nextchunk = chunkNext(chunkhead);
      nextsize = chunkSize(nextchunk);

      NSAssert((nextsize % MINCHUNK) == 0, NSInternalInconsistencyException);

      if (!chunkIsInUse(nextchunk) && (nextsize+realsize >= chunksize))
        /* Expand to next chunk. */
        {
          take_chunk(zptr, nextchunk);
          if (nextsize+realsize == chunksize)
            {
              farchunk = chunkNext(nextchunk);
              chunkSetPrevInUse(farchunk);
            }
          else
            {
	      chunkSetSize(chunkhead, nextsize+realsize);
	      slack = chunkChop(chunkhead, chunksize);
              put_chunk(zptr, slack);
            }
	  chunkSetSize(chunkhead, chunksize |
		chunkIsPrevInUse(chunkhead) | INUSE);
        }
      else
        /* Get new chunk and copy. */
        {
          ff_block *newchunk;

          newchunk = get_chunk(zptr, chunksize);
          if (newchunk == NULL)
            {
              pthread_mutex_unlock(&(zptr->lock));
              if (zone->name != nil)
                [NSException raise: NSMallocException
                            format: @"Zone %@ has run out of memory",
                             zone->name];
              else
                [NSException raise: NSMallocException
                            format: @"Out of memory"];
            }
          memcpy((void*)(&newchunk[1]), (void*)(&chunkhead[1]), realsize-FBSZ);
          add_buf(zptr, chunkhead);
          chunkhead = newchunk;
        }
    }
  chunkhead->next = (ff_block*)(chunkToPointer(chunkhead)+size);
  *((char*)chunkhead->next) = (char)42;
  chunkSetLive(chunkhead);
  result = chunkToPointer(chunkhead);
  pthread_mutex_unlock(&(zptr->lock));
  return result;
}

/* Frees memory chunk by simply adding it to the buffer. */
static void
ffree (NSZone *zone, void *ptr)
{
  ff_block *chunk;
  NSAssert(NSZoneFromPointer(ptr) == zone, NSInternalInconsistencyException);
  pthread_mutex_lock(&(((ffree_zone*)zone)->lock));
  chunk = pointerToChunk(ptr);
  if (chunkIsLive(chunk) == 0)
    [NSException raise: NSMallocException
	        format: @"Attempt to free freed memory"];
  NSAssert(*((char*)chunk->next) == (char)42, NSInternalInconsistencyException);
  add_buf((ffree_zone*)zone, chunk);
  pthread_mutex_unlock(&(((ffree_zone*)zone)->lock));
}

static BOOL
frecycle1(NSZone *zone)
{
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;
  ff_block *nextblock;

  pthread_mutex_lock(&(zptr->lock));
  flush_buf(zptr);
  block = zptr->blocks;
  while (block != NULL)
    {
      ff_block	*tmp = &block[1];
      nextblock = block->next;
      if (chunkIsInUse(tmp) == 0 && chunkNext(tmp) == chunkNext(block))
	{
	  if (zptr->blocks == block)
	    zptr->blocks = block->next;
	  else
	    {
	      tmp = zptr->blocks;
	      while (tmp->next != block)
		tmp = tmp->next;
	      tmp->next = block->next;
	    }
          free((void*)block);
	}
      block = nextblock;
    }
  pthread_mutex_unlock(&(zptr->lock));
  if (zptr->blocks == 0)
    {
      pthread_mutex_destroy(&(zptr->lock));
      return YES;
    }
  return NO;
}

/* Recycle the zone. */
static void
frecycle (NSZone *zone)
{
  pthread_mutex_lock(&zoneLock);
  if (zone->name != nil)
    {
      NSString *name = zone->name;
      zone->name = nil;
      [name release];
    }
  if (frecycle1(zone) == YES)
    destroy_zone(zone);
  else
    {
      zone->malloc = rmalloc;
      zone->realloc = rrealloc;
      zone->free = rffree;
      zone->recycle = rrecycle;
    }
  pthread_mutex_unlock(&zoneLock);
}

static void
rffree (NSZone *zone, void *ptr)
{
  ffree(zone, ptr);
  pthread_mutex_lock(&zoneLock);
  if (frecycle1(zone))
    destroy_zone(zone);
  pthread_mutex_unlock(&zoneLock);
}


/* Check integrity of a freeable zone.  Doesn't have to be
   particularly efficient. */
static BOOL
fcheck (NSZone *zone)
{
  size_t i;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;

  pthread_mutex_lock(&(zptr->lock));
  /* Check integrity of each block the zone owns. */
  block = zptr->blocks;
  while (block != NULL)
    {
      ff_block *blockstart = &block[1];
      ff_block *blockend = chunkNext(block);
      ff_block *nextchunk = blockstart;

      if (blockend->next != block)
	goto inconsistent;
      if (!chunkIsPrevInUse(blockstart))
	goto inconsistent;

      while (nextchunk < blockend)
        {
          ff_block *chunk = nextchunk;
          size_t chunksize;

	  chunksize = chunkSize(chunk);
	  if ((chunksize % ALIGN) != 0)
	    goto inconsistent;
	  nextchunk = chunkNext(chunk);

          if (chunkIsInUse(chunk))
            /* Check whether this is a valid used chunk. */
            {
              if (!chunkIsPrevInUse(nextchunk))
                goto inconsistent;
	      if (chunkIsLive(chunk))
		{
	          if (chunk->next < &chunk[1] || chunk->next > nextchunk)
                    goto inconsistent;
	          if (*(char*)chunk->next != (char)42)
                    goto inconsistent;
		}
            }
          else
            /* Check whether this is a valid free chunk. */
            {
              if (chunkIsPrevInUse(nextchunk))
                goto inconsistent;
              if (!chunkIsInUse(nextchunk))
                goto inconsistent;
	      if (chunkIsLive(chunk))
                goto inconsistent;
            }
	  if (chunk != blockstart && chunkIsPrevInUse(chunk) == 0)
	    {
	      ff_block *prev = chunkPrev(chunk);

	      if (chunkNext(prev) != chunk)
		goto inconsistent;
	    }
        }
      /* Check whether the block ends properly. */
      if (nextchunk != blockend)
	goto inconsistent;
      if (chunkSize(blockend) != 0)
        goto inconsistent;
      if (chunkIsInUse(blockend) == 0)
        goto inconsistent;

      block = block->next;
    }
  /* Check the integrity of the segregated list. */
  for (i = 0; i < MAX_SEG; i++)
    {
      ff_link	*chunk = zptr->segheadlist[i];

      while (chunk != NULL)
        {
          ff_link *nextchunk;

          nextchunk = chunk->next;
          /* Isn't this one ugly if statement? */
          if (chunkIsInUse((ff_block*)chunk)
              || (segindex(chunkSize((ff_block*)chunk)) != i)
              || ((nextchunk != NULL) && (chunk != nextchunk->prev))
              || ((nextchunk == NULL) && (chunk != zptr->segtaillist[i])))
            goto inconsistent;
          chunk = nextchunk;
        }
    }
  /* Check the buffer. */
  if (zptr->bufsize > BUFFER)
    goto inconsistent;
  for (i = 0; i < zptr->bufsize; i++)
    {
      ff_block *chunk = zptr->ptr_buf[i];
      if ((zptr->size_buf[i] != chunkSize(chunk)) || !chunkIsInUse(chunk))
        goto inconsistent;
    }
  pthread_mutex_unlock(&(zptr->lock));
  return YES;

inconsistent: // Jump here if an inconsistency was found.
  pthread_mutex_unlock(&(zptr->lock));
  return NO;
}

static BOOL
flookup (NSZone *zone, void *ptr)
{
  ffree_zone	*zptr = (ffree_zone*)zone;
  ff_block	*block;
  BOOL		found = NO;

  pthread_mutex_lock(&(zptr->lock));
  for (block = zptr->blocks; block != NULL; block = block->next)
    {
      if (ptr >= (void*)block && ptr < (void*)chunkNext(block))
	{
	  found = YES;
	  break;
	}
    }
  pthread_mutex_unlock(&(zptr->lock));
  return found;
}

/* Obtain statistics about the zone.  Doesn't have to be particularly
   efficient. */
static struct NSZoneStats
fstats (NSZone *zone)
{
  size_t i;
  struct NSZoneStats stats;
  ffree_zone *zptr = (ffree_zone*)zone;
  ff_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  pthread_mutex_lock(&(zptr->lock));
  block = zptr->blocks;
  /* Go through each block. */
  while (block != NULL)
    {
      ff_block *blockend = chunkNext(block);
      ff_block *chunk = &block[1];

      stats.bytes_total += chunkSize(block);
      while (chunk < blockend)
        {
          size_t chunksize = chunkSize(chunk);

          if (chunkIsInUse(chunk))
            {
              stats.chunks_used++;
              stats.bytes_used += chunksize;
            }
          else
            {
              stats.chunks_free++;
              stats.bytes_free += chunksize;
            }
          chunk = chunkNext(chunk);
        }
      block = block->next;
    }
  /* Go through buffer. */
  for (i = 0; i < zptr->bufsize; i++)
    {
      stats.chunks_used--;
      stats.chunks_free++;
      stats.bytes_used -= zptr->size_buf[i];
      stats.bytes_free += zptr->size_buf[i];
    }
  pthread_mutex_unlock(&(zptr->lock));
  /* Remove overhead. */
  stats.bytes_used -= FBSZ*stats.chunks_used;
  return stats;
}

/* Calculate which segregation class a certain size should be in.
   FIXME: Optimize code and find a better size distribution. */
static inline size_t
segindex (size_t size)
{
  NSAssert(size%MINCHUNK == 0, NSInternalInconsistencyException);

  if (size < CLTOSZ(8))
    return size/MINCHUNK;
  else if (size < CLTOSZ(16))
    return 7;
  else if (size < CLTOSZ(32))
    return 8;
  else if (size < CLTOSZ(64))
    return 9;
  else if (size < CLTOSZ(128))
    return 10;
  else if (size < CLTOSZ(256))
    return 11;
  else if (size < CLTOSZ(512))
    return 12;
  else if (size < CLTOSZ(1024))
    return 13;
  else if (size < CLTOSZ(2048))
    return 14;
  else
    return 15;
}
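
/* Worked example (assuming a 64-bit build where MINCHUNK is 32, so
   CLTOSZ(8) is 256): a 96-byte chunk maps to class 96/MINCHUNK == 3;
   a 300-byte chunk falls in [256, 512) and maps to class 7; anything
   of CLTOSZ(2048) == 64KB or more lands in the last class, 15.
   Classes 0..7 step by MINCHUNK; above that each class spans a
   doubling in size. */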

/* Look through the segregated list with first fit to find a memory
   chunk.  If one is not found, get more memory. */
static ff_block*
get_chunk (ffree_zone *zone, size_t size)
{
  size_t class = segindex(size);
  ff_block *chunk;
  ff_link *link = zone->segheadlist[class];

  NSAssert(size%MINCHUNK == 0, NSInternalInconsistencyException);

  while ((link != NULL) && (chunkSize((ff_block*)link) < size))
    link = link->next;
  if (link == NULL)
    /* Get more memory. */
    {
      class++;
      while ((class < MAX_SEG) && (zone->segheadlist[class] == NULL))
        class++;
      if (class == MAX_SEG)
        /* Absolutely no memory in segregated list. */
        {
          size_t blocksize;
          ff_block *block;

          blocksize = roundupto(size, zone->common.gran);
          block = malloc(blocksize+2*FBSZ);
          if (block == NULL)
            return NULL;

	  /*
	   *	Set up the new block header and add to blocks list.
	   */
          block->size = blocksize+FBSZ;	/* Point to block trailer.	*/
          block->next = zone->blocks;
          zone->blocks = block;
	  /*
	   *	Set up the block trailer.
	   */
          chunk = chunkNext(block);
	  chunk->next = block;		/* Point back to block head.	*/
	  /*
	   *	Now set up block contents.
	   */
          if (size < blocksize)
            {
	      chunkSetSize(chunk, INUSE);	/* Trailer size is zero.	*/
              chunk = &block[1];
	      chunkSetSize(chunk, size | PREVUSE | INUSE);
	      chunk = chunkNext(chunk);
	      chunkSetSize(chunk, (block->size-FBSZ-size) | PREVUSE);
              put_chunk(zone, chunk);
              chunk = &block[1];
            }
          else
	    {
	      chunkSetSize(chunk, PREVUSE | INUSE);
              chunk = &block[1];
	      chunkSetSize(chunk, size | PREVUSE | INUSE);
	    }
        }
      else
        {
          ff_block *slack;

          NSAssert(class < MAX_SEG, NSInternalInconsistencyException);

          chunk = (ff_block*)zone->segheadlist[class];

          NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
          NSAssert(size < chunkSize(chunk), NSInternalInconsistencyException);
          NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
	    NSInternalInconsistencyException);

          take_chunk(zone, chunk);
	  slack = chunkChop(chunk, size);
          put_chunk(zone, slack);
        }
    }
  else
    {
      size_t chunksize;

      chunk = (ff_block*)link;
      chunksize = chunkSize(chunk);

      NSAssert((chunksize % MINCHUNK) == 0, NSInternalInconsistencyException);
      NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(chunkNext(chunk)),
	NSInternalInconsistencyException);

      take_chunk(zone, chunk);
      if (chunksize > size)
        {
          ff_block *slack;

          slack = chunkChop(chunk, size);
          put_chunk(zone, slack);
        }
      else
        {
          ff_block *nextchunk = chunkNext(chunk);

          NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
          NSAssert(!chunkIsPrevInUse(nextchunk),
	    NSInternalInconsistencyException);
          NSAssert(chunksize == size, NSInternalInconsistencyException);
	  chunkSetInUse(chunk);
	  chunkSetPrevInUse(nextchunk);
        }
    }
  NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsPrevInUse(chunkNext(chunk)),
    NSInternalInconsistencyException);
  return chunk;
}

/* Take the given chunk out of the free list.  No headers are set. */
static void
take_chunk (ffree_zone *zone, ff_block *chunk)
{
  size_t size = chunkSize(chunk);
  size_t class = segindex(size);
  ff_link *otherlink;
  ff_link *links = (ff_link*)chunk;

  NSAssert((size % MINCHUNK) == 0, NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);

  if (links->prev == NULL)
    zone->segheadlist[class] = links->next;
  else
    {
      otherlink = links->prev;
      otherlink->next = links->next;
    }
  if (links->next == NULL)
    zone->segtaillist[class] = links->prev;
  else
    {
      otherlink = links->next;
      otherlink->prev = links->prev;
    }
}

/*
 *	Add the given chunk to the segregated list.  The header to the
 *	chunk must be set appropriately, but the tailer is set here.
 *	NB.  The chunk must NOT be in use, and the adjacent chunks within
 *	its memory block MUST be in use - the memory coalescing done in
 *	flush_buf() depends on this rule.
 */
static void
put_chunk (ffree_zone *zone, ff_block *chunk)
{
  size_t size = chunkSize(chunk);
  size_t class = segindex(size);
  ff_link *links = (ff_link*)chunk;

  NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
  NSAssert(chunkIsInUse(chunkNext(chunk)), NSInternalInconsistencyException);

  chunkMakeLink(chunk);
  if (zone->segtaillist[class] == NULL)
    {
      NSAssert(zone->segheadlist[class] == NULL,
	NSInternalInconsistencyException);

      zone->segheadlist[class] = zone->segtaillist[class] = links;
      links->prev = links->next = NULL;
    }
  else
    {
      ff_link *prevlink = zone->segtaillist[class];

      NSAssert(zone->segheadlist[class] != NULL,
	NSInternalInconsistencyException);

      links->next = NULL;
      links->prev = prevlink;
      prevlink->next = links;
      zone->segtaillist[class] = links;
    }
}

/* Add the given pointer to the buffer.  If the buffer becomes full,
   flush it.  The given pointer must always be one that points to used
   memory (i.e. chunks with headers that declare them as used). */
static inline void
add_buf (ffree_zone *zone, ff_block *chunk)
{
  size_t bufsize = zone->bufsize;

  NSAssert(bufsize < BUFFER, NSInternalInconsistencyException);
  NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);
  NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
    NSInternalInconsistencyException);
  NSAssert(chunkSize(chunk) >= MINCHUNK, NSInternalInconsistencyException);

  zone->bufsize++;
  zone->size_buf[bufsize] = chunkSize(chunk);
  zone->ptr_buf[bufsize] = chunk;
  chunkClrLive(chunk);
  if (bufsize == BUFFER-1)
    flush_buf(zone);
}

/* Flush buffers.  All coalescing is done here. */
static void
flush_buf (ffree_zone *zone)
{
  size_t i, size;
  size_t bufsize = zone->bufsize;
  ff_block *chunk, *nextchunk;
  size_t *size_buf = zone->size_buf;
  ff_block **ptr_buf = zone->ptr_buf;

  NSAssert(bufsize <= BUFFER, NSInternalInconsistencyException);

  for (i = 0; i < bufsize; i++)
    {
      size = size_buf[i];
      chunk = ptr_buf[i];

      NSAssert(chunkSize(chunk) == size, NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(chunk), NSInternalInconsistencyException);

      nextchunk = chunkNext(chunk);
      if (!chunkIsPrevInUse(chunk))
        /* Coalesce with previous chunk. */
        {
	  chunk = chunkPrev(chunk);
	  NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
	  NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
          size += chunkSize(chunk);
          take_chunk(zone, chunk);
        }
      if (!chunkIsInUse(nextchunk))
        /* Coalesce with next chunk. */
        {
          size_t nextsize = chunkSize(nextchunk);

	  NSAssert(chunkIsPrevInUse(nextchunk),
	    NSInternalInconsistencyException);
          NSAssert((nextsize % MINCHUNK) == 0,
	    NSInternalInconsistencyException);
          size += nextsize;
          take_chunk(zone, nextchunk);
	  nextchunk = chunkNext(nextchunk);
        }
      chunkSetSize(chunk, size | PREVUSE);
      put_chunk(zone, chunk);
      chunkClrPrevInUse(nextchunk);
      NSAssert(chunkNext(chunk) == nextchunk, NSInternalInconsistencyException);
      NSAssert(chunkPrev(nextchunk) == chunk, NSInternalInconsistencyException);
      NSAssert((chunkSize(chunk) % MINCHUNK) == 0,
	NSInternalInconsistencyException);
      NSAssert(!chunkIsInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsPrevInUse(chunk), NSInternalInconsistencyException);
      NSAssert(chunkIsInUse(nextchunk), NSInternalInconsistencyException);
      NSAssert(!chunkIsPrevInUse(nextchunk), NSInternalInconsistencyException);
    }
  zone->bufsize = 0;
}

/* If the first block in block list has enough space, use that space.
   Otherwise, sort the block list in decreasing free space order (only
   the first block needs to be put in its appropriate place since
   the rest of the list is already sorted).  Then check if the first
   block has enough space for the request.  If it does, use it.  If it
   doesn't, get more memory from the default zone, since none of the
   other blocks in the block list could have enough memory. */
static void*
nmalloc (NSZone *zone, size_t size)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  size_t chunksize = roundupto(size, ALIGN);
  size_t freesize;
  void *chunkhead;
  nf_block *block;
  size_t top;

  pthread_mutex_lock(&(zptr->lock));
  block = zptr->blocks;
  top = block->top;
  freesize = block->size-top;
  if (freesize >= chunksize)
    {
      chunkhead = (void*)(block)+top;
      block->top += chunksize;
    }
  else
    {
      nf_block *preblock;

      /* First, get the block list in decreasing free size order. */
      preblock = NULL;
      while ((block->next != NULL)
        && (freesize < block->next->size-block->next->top))
        {
          preblock = block;
          block = block->next;
        }
      if (preblock != NULL)
        {
          preblock->next = zptr->blocks;
          zptr->blocks = zptr->blocks->next;
          preblock->next->next = block;
        }
      if (zptr->blocks->size-zptr->blocks->top < chunksize)
        /* Get new block. */
        {
          size_t blocksize = roundupto(chunksize+NF_HEAD, zone->gran);

          block = malloc(blocksize);
          if (block == NULL)
            {
              pthread_mutex_unlock(&(zptr->lock));
              if (zone->name != nil)
                [NSException raise: NSMallocException
                            format: @"Zone %@ has run out of memory",
                             zone->name];
              else
                [NSException raise: NSMallocException
                            format: @"Out of memory"];
            }
          block->next = zptr->blocks;
          block->size = blocksize;
          block->top = NF_HEAD;
          zptr->blocks = block;
        }
      chunkhead = (void*)block+block->top;
      block->top += chunksize;
    }
  zptr->use++;
  pthread_mutex_unlock(&(zptr->lock));
  return chunkhead;
}

/* If all memory in the zone has been freed, return its blocks to the
   default zone and destroy the mutex.  (The zone name is released by
   the caller, nrecycle().) */
static BOOL
nrecycle1 (NSZone *zone)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  pthread_mutex_lock(&(zptr->lock));
  if (zptr->use == 0)
    {
      nf_block *nextblock;
      nf_block *block = zptr->blocks;

      while (block != NULL)
	{
	  nextblock = block->next;
	  free(block);
	  block = nextblock;
	}
      zptr->blocks = 0;
    }
  pthread_mutex_unlock(&(zptr->lock));
  if (zptr->blocks == 0)
    {
      pthread_mutex_destroy(&(zptr->lock));
      return YES;
    }
  return NO;
}

/* Recycle the zone. */
static void
nrecycle (NSZone *zone)
{
  pthread_mutex_lock(&zoneLock);
  if (zone->name != nil)
    {
      NSString *name = zone->name;
      zone->name = nil;
      [name release];
    }
  if (nrecycle1(zone) == YES)
    destroy_zone(zone);
  else
    {
      zone->malloc = rmalloc;
      zone->realloc = rrealloc;
      zone->free = rnfree;
      zone->recycle = rrecycle;
    }
  pthread_mutex_unlock(&zoneLock);
}

static void*
nrealloc (NSZone *zone, void *ptr, size_t size)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  void *tmp = nmalloc(zone, size);

  if (ptr != 0)
    {
      pthread_mutex_lock(&(zptr->lock));
      if (tmp)
	{
	  nf_block *block;
	  size_t old = 0;

	  for (block = zptr->blocks; block != NULL; block = block->next) {
	    if (ptr >= (void*)block && ptr < ((void*)block)+block->size) {
		old = ((void*)block)+block->size - ptr;
		break;
	    }
	  }
	  if (old > 0)
	    {
	      if (size < old)
		old = size;
	      memcpy(tmp, ptr, old);
	    }
	}
      zptr->use--;
      pthread_mutex_unlock(&(zptr->lock));
    }
  return tmp;
}

/*
 *	The OpenStep spec says we don't release memory - but we have to do
 *	some minimal bookkeeping so that, when the zone is recycled, we can
 *	determine if all the allocated memory has been freed.  Until it is
 *	all freed, we can't actually destroy the zone!
 */
static void
nfree (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  pthread_mutex_lock(&(zptr->lock));
  zptr->use--;
  pthread_mutex_unlock(&(zptr->lock));
}

static void
rnfree (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;

  nfree(zone, ptr);
  if (zptr->use == 0)
    {
      pthread_mutex_lock(&zoneLock);
      nrecycle1(zone);
      destroy_zone(zone);
      pthread_mutex_unlock(&zoneLock);
    }
}

/* Check integrity of a nonfreeable zone.  Doesn't have to be
   particularly efficient. */
static BOOL
ncheck (NSZone *zone)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;

  pthread_mutex_lock(&(zptr->lock));
  block = zptr->blocks;
  while (block != NULL)
    {
      if (block->size < block->top)
        {
          pthread_mutex_unlock(&(zptr->lock));
          return NO;
        }
      block = block->next;
    }
  /* FIXME: Do more checking? */
  pthread_mutex_unlock(&(zptr->lock));
  return YES;
}

static BOOL
nlookup (NSZone *zone, void *ptr)
{
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;
  BOOL found = NO;

  pthread_mutex_lock(&(zptr->lock));
  for (block = zptr->blocks; block != NULL; block = block->next)
    {
      if (ptr >= (void*)block && ptr < ((void*)block)+block->size)
	{
	  found = YES;
	  break;
	}
    }
  pthread_mutex_unlock(&(zptr->lock));
  return found;
}

/* Return statistics for a nonfreeable zone.  Doesn't have to be
   particularly efficient. */
static struct NSZoneStats
nstats (NSZone *zone)
{
  struct NSZoneStats stats;
  nfree_zone *zptr = (nfree_zone*)zone;
  nf_block *block;

  stats.bytes_total = 0;
  stats.chunks_used = 0;
  stats.bytes_used = 0;
  stats.chunks_free = 0;
  stats.bytes_free = 0;
  pthread_mutex_lock(&(zptr->lock));
  block = zptr->blocks;
  while (block != NULL)
    {
      size_t *chunk;

      stats.bytes_total += block->size;
      chunk = (void*)block+NF_HEAD;
      while ((void*)chunk < (void*)block+block->top)
        {
          stats.chunks_used++;
          stats.bytes_used += *chunk;
          chunk = (void*)chunk+(*chunk);
        }
      if (block->size != block->top)
        {
          stats.chunks_free++;
          stats.bytes_free += block->size-block->top;
        }
      block = block->next;
    }
  pthread_mutex_unlock(&(zptr->lock));
  return stats;
}


static void*
rmalloc (NSZone *zone, size_t size)
{
  [NSException raise: NSMallocException
	      format: @"Attempt to malloc memory in recycled zone"];
  return 0;
}

static void
rrecycle (NSZone *zone)
{
  [NSException raise: NSMallocException
	      format: @"Attempt to recycle a recycled zone"];
}

static void*
rrealloc (NSZone *zone, void *ptr, size_t size)
{
  [NSException raise: NSMallocException
	      format: @"Attempt to realloc memory in recycled zone"];
  return 0;
}

static void rnfree (NSZone *zone, void *ptr);

GS_DECLARE NSZone*
NSZoneFromPointer(void *ptr)
{
  NSZone	*zone;

  if (ptr == 0) return 0;
  if (zone_list == 0) return &default_zone;

  /*
   *	See if we can find the zone in our list of all zones.
   */
  pthread_mutex_lock(&zoneLock);
  for (zone = zone_list; zone != 0; zone = zone->next)
    {
      if ((zone->lookup)(zone, ptr) == YES)
	{
	  break;
	}
    }
  pthread_mutex_unlock(&zoneLock);
  return (zone == 0) ? &default_zone : zone;
}
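
/* Illustrative sketch: NSZoneFromPointer(ptr) walks the zone list,
   asking each zone's lookup function whether it owns ptr, e.g.

     NSZone *z = NSZoneFromPointer(buf);   // zone that allocated buf

   and falls back to the default zone when no zone claims the pointer
   (consistent with default_lookup(), which claims everything). */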

NSZone*
NSCreateZone (NSUInteger start, NSUInteger gran, BOOL canFree)
{
  size_t i, startsize, granularity;
  NSZone *newZone;

  if (start > 0)
    startsize = roundupto(start, roundupto(MINGRAN, MINCHUNK));
  else
    startsize = roundupto(MINGRAN, MINCHUNK);
  if (gran > 0)
    granularity = roundupto(gran, roundupto(MINGRAN, MINCHUNK));
  else
    granularity = roundupto(MINGRAN, MINCHUNK);
  if (canFree)
    {
      ffree_zone *zone;
      ff_block *block;
      ff_block *chunk;
      ff_block *tailer;

      zone = malloc(sizeof(ffree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                    format: @"No memory to create zone"];
      zone->common.malloc = fmalloc;
      zone->common.realloc = frealloc;
      zone->common.free = ffree;
      zone->common.recycle = frecycle;
      zone->common.check = fcheck;
      zone->common.lookup = flookup;
      zone->common.stats = fstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      GS_INIT_RECURSIVE_MUTEX(zone->lock);
      for (i = 0; i < MAX_SEG; i++)
        {
          zone->segheadlist[i] = NULL;
          zone->segtaillist[i] = NULL;
        }
      zone->bufsize = 0;
      zone->blocks = malloc(startsize + 2*FBSZ);
      if (zone->blocks == NULL)
        {
          pthread_mutex_destroy(&(zone->lock));
          free(zone);
          [NSException raise: NSMallocException
                      format: @"No memory to create zone"];
        }
      /*
       *	Set up block header.
       */
      block = zone->blocks;
      block->next = NULL;		/* Point to next block.		*/
      block->size = startsize+FBSZ;	/* Point to first chunk.	*/
      /*
       *	Set up block trailer.
       */
      tailer = chunkNext(block);
      chunkSetSize(tailer, PREVUSE|INUSE);
      tailer->next = block;		/* Point back to block start.	*/
      /*
       *	Set up the block as a single chunk and put it in the
       *	buffer for quick allocation.
       */
      chunk = &block[1];
      chunkSetSize(chunk, (block->size-FBSZ) | PREVUSE|INUSE);
      add_buf(zone, chunk);

      newZone = (NSZone*)zone;
    }
  else
    {
      nf_block *block;
      nfree_zone *zone;

      zone = malloc(sizeof(nfree_zone));
      if (zone == NULL)
        [NSException raise: NSMallocException
                    format: @"No memory to create zone"];
      zone->common.malloc = nmalloc;
      zone->common.realloc = nrealloc;
      zone->common.free = nfree;
      zone->common.recycle = nrecycle;
      zone->common.check = ncheck;
      zone->common.lookup = nlookup;
      zone->common.stats = nstats;
      zone->common.gran = granularity;
      zone->common.name = nil;
      GS_INIT_RECURSIVE_MUTEX(zone->lock);
      zone->blocks = malloc(startsize);
      zone->use = 0;
      if (zone->blocks == NULL)
        {
          pthread_mutex_destroy(&(zone->lock));
          free(zone);
          [NSException raise: NSMallocException
                      format: @"No memory to create zone"];
        }

      block = zone->blocks;
      block->next = NULL;
      block->size = startsize;
      block->top = NF_HEAD;
      newZone = (NSZone*)zone;
    }

  pthread_mutex_lock(&zoneLock);
  newZone->next = zone_list;
  zone_list = newZone;
  pthread_mutex_unlock(&zoneLock);

  return newZone;
}

void*
NSZoneCalloc (NSZone *zone, NSUInteger elems, NSUInteger bytes)
{
  void *mem;

  if (0 == zone || NSDefaultMallocZone() == zone)
    {
      mem = calloc(elems, bytes);
      if (mem != NULL)
        {
          return mem;
        }
      [NSException raise: NSMallocException
                  format: @"Default zone has run out of memory"];
    }
  return memset(NSZoneMalloc(zone, elems*bytes), 0, elems*bytes);
}

void *
NSAllocateCollectable(NSUInteger size, NSUInteger options)
{
  return NSZoneCalloc(NSDefaultMallocZone(), 1, size);
}

void *
NSReallocateCollectable(void *ptr, NSUInteger size, NSUInteger options)
{
  return NSZoneRealloc(0, ptr, size);
}

NSZone*
NSDefaultMallocZone (void)
{
  return &default_zone;
}

NSZone*
GSAtomicMallocZone (void)
{
  return &default_zone;
}

void
GSMakeWeakPointer(Class theClass, const char *iVarName)
{
  return;
}

BOOL
GSAssignZeroingWeakPointer(void **destination, void *source)
{
  if (destination == 0)
    {
      return NO;
    }
  *destination = source;
  return YES;
}

void*
NSZoneMalloc (NSZone *zone, NSUInteger size)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  return (zone->malloc)(zone, size);
}

void*
NSZoneRealloc (NSZone *zone, void *ptr, NSUInteger size)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  return (zone->realloc)(zone, ptr, size);
}

void
NSRecycleZone (NSZone *zone)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  (zone->recycle)(zone);
}

void
NSZoneFree (NSZone *zone, void *ptr)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  (zone->free)(zone, ptr);
}

BOOL
NSZoneCheck (NSZone *zone)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  return (zone->check)(zone);
}

struct NSZoneStats
NSZoneStats (NSZone *zone)
{
  if (!zone)
    zone = NSDefaultMallocZone();
  return (zone->stats)(zone);
}

BOOL
GSPrivateIsCollectable(const void *ptr)
{
  return NO;
}
