1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 
/*
 * Generational GC handle manager.  Internal Implementation Header.
 *
 * Shared defines and declarations for handle table implementation.
 *
 */
13 
14 #include "common.h"
15 
16 #include "handletable.h"
17 
18 /*--------------------------------------------------------------------------*/
19 
20 //<TODO>@TODO: find a home for this in a project-level header file</TODO>
21 #define BITS_PER_BYTE               (8)
22 /*--------------------------------------------------------------------------*/
23 
24 
25 
26 /****************************************************************************
27  *
28  * MAJOR TABLE DEFINITIONS THAT CHANGE DEPENDING ON THE WEATHER
29  *
30  ****************************************************************************/
31 
32 // 64k reserved per segment with 4k as header.
33 #define HANDLE_SEGMENT_SIZE     (0x10000)   // MUST be a power of 2 (and currently must be 64K due to VirtualAlloc semantics)
34 #define HANDLE_HEADER_SIZE      (0x1000)    // SHOULD be <= OS page size
35 
36 #define HANDLE_SEGMENT_ALIGNMENT     HANDLE_SEGMENT_SIZE
37 
38 
39 #if !BIGENDIAN
40 
41     // little-endian write barrier mask manipulation
42     #define GEN_CLUMP_0_MASK        (0x000000FF)
43     #define NEXT_CLUMP_IN_MASK(dw)  (dw >> BITS_PER_BYTE)
44 
45 #else
46 
47     // big-endian write barrier mask manipulation
48     #define GEN_CLUMP_0_MASK        (0xFF000000)
49     #define NEXT_CLUMP_IN_MASK(dw)  (dw << BITS_PER_BYTE)
50 
51 #endif
52 
53 
// if the above numbers change then these will likely change as well
55 #define HANDLE_HANDLES_PER_CLUMP    (16)        // segment write-barrier granularity
56 #define HANDLE_HANDLES_PER_BLOCK    (64)        // segment suballocation granularity
57 #define HANDLE_OPTIMIZE_FOR_64_HANDLE_BLOCKS    // flag for certain optimizations
58 
59 // maximum number of internally supported handle types
60 #define HANDLE_MAX_INTERNAL_TYPES   (12)                             // should be a multiple of 4
61 
62 // number of types allowed for public callers
63 #define HANDLE_MAX_PUBLIC_TYPES     (HANDLE_MAX_INTERNAL_TYPES - 1) // reserve one internal type
64 
65 // internal block types
66 #define HNDTYPE_INTERNAL_DATABLOCK  (HANDLE_MAX_INTERNAL_TYPES - 1) // reserve last type for data blocks
67 
68 // max number of generations to support statistics on
69 #define MAXSTATGEN                  (5)
70 
71 /*--------------------------------------------------------------------------*/
72 
73 
74 
75 /****************************************************************************
76  *
77  * MORE DEFINITIONS
78  *
79  ****************************************************************************/
80 
81 // fast handle-to-segment mapping
82 #define HANDLE_SEGMENT_CONTENT_MASK     (HANDLE_SEGMENT_SIZE - 1)
83 #define HANDLE_SEGMENT_ALIGN_MASK       (~HANDLE_SEGMENT_CONTENT_MASK)
84 
85 // table layout metrics
86 #define HANDLE_SIZE                     sizeof(_UNCHECKED_OBJECTREF)
87 #define HANDLE_HANDLES_PER_SEGMENT      ((HANDLE_SEGMENT_SIZE - HANDLE_HEADER_SIZE) / HANDLE_SIZE)
88 #define HANDLE_BLOCKS_PER_SEGMENT       (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_BLOCK)
89 #define HANDLE_CLUMPS_PER_SEGMENT       (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_CLUMP)
90 #define HANDLE_CLUMPS_PER_BLOCK         (HANDLE_HANDLES_PER_BLOCK / HANDLE_HANDLES_PER_CLUMP)
91 #define HANDLE_BYTES_PER_BLOCK          (HANDLE_HANDLES_PER_BLOCK * HANDLE_SIZE)
92 #define HANDLE_HANDLES_PER_MASK         (sizeof(uint32_t) * BITS_PER_BYTE)
93 #define HANDLE_MASKS_PER_SEGMENT        (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_MASK)
94 #define HANDLE_MASKS_PER_BLOCK          (HANDLE_HANDLES_PER_BLOCK / HANDLE_HANDLES_PER_MASK)
95 #define HANDLE_CLUMPS_PER_MASK          (HANDLE_HANDLES_PER_MASK / HANDLE_HANDLES_PER_CLUMP)
96 
97 // We use this relation to check for free mask per block.
98 C_ASSERT (HANDLE_HANDLES_PER_MASK * 2 == HANDLE_HANDLES_PER_BLOCK);
99 
100 
101 // cache layout metrics
102 #define HANDLE_CACHE_TYPE_SIZE          128 // 128 == 63 handles per bank
103 #define HANDLES_PER_CACHE_BANK          ((HANDLE_CACHE_TYPE_SIZE / 2) - 1)
104 
105 // cache policy defines
106 #define REBALANCE_TOLERANCE             (HANDLES_PER_CACHE_BANK / 3)
107 #define REBALANCE_LOWATER_MARK          (HANDLES_PER_CACHE_BANK - REBALANCE_TOLERANCE)
108 #define REBALANCE_HIWATER_MARK          (HANDLES_PER_CACHE_BANK + REBALANCE_TOLERANCE)
109 
110 // bulk alloc policy defines
111 #define SMALL_ALLOC_COUNT               (HANDLES_PER_CACHE_BANK / 10)
112 
113 // misc constants
114 #define MASK_FULL                       (0)
115 #define MASK_EMPTY                      (0xFFFFFFFF)
116 #define MASK_LOBYTE                     (0x000000FF)
117 #define TYPE_INVALID                    ((uint8_t)0xFF)
118 #define BLOCK_INVALID                   ((uint8_t)0xFF)
119 
120 /*--------------------------------------------------------------------------*/
121 
122 
123 
124 /****************************************************************************
125  *
126  * CORE TABLE LAYOUT STRUCTURES
127  *
128  ****************************************************************************/
129 
130 /*
131  * we need byte packing for the handle table layout to work
132  */
133 #pragma pack(push,1)
134 
135 
136 /*
137  * Table Segment Header
138  *
139  * Defines the layout for a segment's header data.
140  */
struct _TableSegmentHeader
{
    /*
     * Write Barrier Generation Numbers
     *
     * Each slot holds four bytes.  Each byte corresponds to a clump of handles.
     * The value of the byte corresponds to the lowest possible generation that a
     * handle in that clump could point into.
     *
     * WARNING: Although this array is logically organized as a uint8_t[], it is sometimes
     *  accessed as uint32_t[] when processing bytes in parallel.  Code which treats the
     *  array as an array of ULONG32s must handle big/little endian issues itself.
     */
    uint8_t rgGeneration[HANDLE_BLOCKS_PER_SEGMENT * sizeof(uint32_t) / sizeof(uint8_t)];

    /*
     * Block Allocation Chains
     *
     * Each slot indexes the next block in an allocation chain.
     */
    uint8_t rgAllocation[HANDLE_BLOCKS_PER_SEGMENT];

    /*
     * Block Free Masks
     *
     * Masks - 1 bit for every handle in the segment.
     * (A set bit marks a free handle - see MASK_EMPTY / MASK_FULL.)
     */
    uint32_t rgFreeMask[HANDLE_MASKS_PER_SEGMENT];

    /*
     * Block Handle Types
     *
     * Each slot holds the handle type of the associated block.
     * (TYPE_INVALID / BLOCK_INVALID mark unused entries.)
     */
    uint8_t rgBlockType[HANDLE_BLOCKS_PER_SEGMENT];

    /*
     * Block User Data Map
     *
     * Each slot holds the index of a user data block (if any) for the associated block.
     */
    uint8_t rgUserData[HANDLE_BLOCKS_PER_SEGMENT];

    /*
     * Block Lock Count
     *
     * Each slot holds a lock count for its associated block.
     * Locked blocks are not freed, even when empty.
     * (Manipulated via BlockLock / BlockUnlock / BlockIsLocked below.)
     */
    uint8_t rgLocks[HANDLE_BLOCKS_PER_SEGMENT];

    /*
     * Allocation Chain Tails
     *
     * Each slot holds the tail block index for an allocation chain.
     * One entry per internal handle type.
     */
    uint8_t rgTail[HANDLE_MAX_INTERNAL_TYPES];

    /*
     * Allocation Chain Hints
     *
     * Each slot holds a hint block index for an allocation chain.
     * One entry per internal handle type.
     */
    uint8_t rgHint[HANDLE_MAX_INTERNAL_TYPES];

    /*
     * Free Count
     *
     * Each slot holds the number of free handles in an allocation chain.
     * One entry per internal handle type.
     */
    uint32_t rgFreeCount[HANDLE_MAX_INTERNAL_TYPES];

    /*
     * Next Segment
     *
     * Points to the next segment in the chain (if we ran out of space in this one).
     */
#ifdef DACCESS_COMPILE
    TADDR pNextSegment;
#else
    struct TableSegment *pNextSegment;
#endif // DACCESS_COMPILE

    /*
     * Handle Table
     *
     * Points to owning handle table for this table segment.
     */
    PTR_HandleTable pHandleTable;

    /*
     * Flags
     */
    uint8_t fResortChains      : 1;    // allocation chains need sorting
    uint8_t fNeedsScavenging   : 1;    // free blocks need scavenging
    uint8_t _fUnused           : 6;    // unused

    /*
     * Free List Head
     *
     * Index of the first free block in the segment.
     */
    uint8_t bFreeList;

    /*
     * Empty Line
     *
     * Index of the first KNOWN block of the last group of unused blocks in the segment.
     */
    uint8_t bEmptyLine;

    /*
     * Commit Line
     *
     * Index of the first uncommitted block in the segment.
     */
    uint8_t bCommitLine;

    /*
     * Decommit Line
     *
     * Index of the first block in the highest committed page of the segment.
     */
    uint8_t bDecommitLine;

    /*
     * Sequence
     *
     * Indicates the segment sequence number.
     */
    uint8_t bSequence;
};
273 
274 typedef DPTR(struct _TableSegmentHeader) PTR__TableSegmentHeader;
275 typedef DPTR(uintptr_t) PTR_uintptr_t;
276 
277 // The handle table is large and may not be entirely mapped. That's one reason for splitting out the table
278 // segment and the header as two separate classes. In DAC builds, we generally need only a single element from
279 // the table segment, so we can use the DAC to retrieve just the information we require.
280 /*
281  * Table Segment
282  *
283  * Defines the layout for a handle table segment.
284  */
struct TableSegment : public _TableSegmentHeader
{
    /*
     * Filler
     *
     * Pads the inherited header out to exactly HANDLE_HEADER_SIZE bytes so
     * that rgValue begins immediately after the header area of the segment.
     */
    uint8_t rgUnused[HANDLE_HEADER_SIZE - sizeof(_TableSegmentHeader)];

    /*
     * Handles
     *
     * The handle slots themselves, one _UNCHECKED_OBJECTREF per handle.
     */
    _UNCHECKED_OBJECTREF rgValue[HANDLE_HANDLES_PER_SEGMENT];

#ifdef DACCESS_COMPILE
    // Reports the segment's size to the DAC; the table may not be entirely
    // mapped in DAC builds (see the comment preceding this struct).
    static uint32_t DacSize(TADDR addr);
#endif
};
301 
302 typedef SPTR(struct TableSegment) PTR_TableSegment;
303 
304 /*
305  * restore default packing
306  */
307 #pragma pack(pop)
308 
309 
310 /*
311  * Handle Type Cache
312  *
313  * Defines the layout of a per-type handle cache.
314  */
struct HandleTypeCache
{
    /*
     * reserve bank
     *
     * Handles available to satisfy allocation requests
     * (see TableAllocSingleHandleFromCache).
     */
    OBJECTHANDLE rgReserveBank[HANDLES_PER_CACHE_BANK];

    /*
     * index of next available handle slot in the reserve bank
     */
    int32_t lReserveIndex;


    /*---------------------------------------------------------------------------------
     * N.B. this structure is split up this way so that when HANDLES_PER_CACHE_BANK is
     * large enough, lReserveIndex and lFreeIndex will reside in different cache lines
     *--------------------------------------------------------------------------------*/

    /*
     * free bank
     *
     * Handles returned by the free path, held for later recycling
     * (see TableFreeSingleHandleToCache).
     */
    OBJECTHANDLE rgFreeBank[HANDLES_PER_CACHE_BANK];

    /*
     * index of next empty slot in the free bank
     */
    int32_t lFreeIndex;
};
343 
344 
345 /*---------------------------------------------------------------------------*/
346 
347 
348 
349 /****************************************************************************
350  *
351  * SCANNING PROTOTYPES
352  *
353  ****************************************************************************/
354 
355 /*
356  * ScanCallbackInfo
357  *
358  * Carries parameters for per-segment and per-block scanning callbacks.
359  *
360  */
struct ScanCallbackInfo
{
    PTR_TableSegment pCurrentSegment;   // segment we are presently scanning, if any
    uint32_t         uFlags;            // HNDGCF_* flags
    BOOL             fEnumUserData;     // whether user data is being enumerated as well
    HANDLESCANPROC   pfnScan;           // per-handle scan callback
    uintptr_t        param1;            // callback param 1 (opaque, passed through to pfnScan)
    uintptr_t        param2;            // callback param 2 (opaque, passed through to pfnScan)
    uint32_t         dwAgeMask;         // generation mask for ephemeral GCs (see BuildAgeMask)

#ifdef _DEBUG
    // debug-only counters accumulated over the course of a scan
    uint32_t DEBUG_BlocksScanned;
    uint32_t DEBUG_BlocksScannedNonTrivially;
    uint32_t DEBUG_HandleSlotsScanned;
    uint32_t DEBUG_HandlesActuallyScanned;
#endif
};
378 
379 
380 /*
381  * BLOCKSCANPROC
382  *
383  * Prototype for callbacks that implement per-block scanning logic.
384  *
385  */
386 typedef void (CALLBACK *BLOCKSCANPROC)(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
387 
388 
389 /*
390  * SEGMENTITERATOR
391  *
392  * Prototype for callbacks that implement per-segment scanning logic.
393  *
394  */
395 typedef PTR_TableSegment (CALLBACK *SEGMENTITERATOR)(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder);
396 
397 
398 /*
399  * TABLESCANPROC
400  *
401  * Prototype for TableScanHandles and xxxTableScanHandlesAsync.
402  *
403  */
404 typedef void (CALLBACK *TABLESCANPROC)(PTR_HandleTable pTable,
405                                        const uint32_t *puType, uint32_t uTypeCount,
406                                        SEGMENTITERATOR pfnSegmentIterator,
407                                        BLOCKSCANPROC pfnBlockHandler,
408                                        ScanCallbackInfo *pInfo,
409                                        CrstHolderWithState *pCrstHolder);
410 
411 /*--------------------------------------------------------------------------*/
412 
413 
414 
415 /****************************************************************************
416  *
417  * ADDITIONAL TABLE STRUCTURES
418  *
419  ****************************************************************************/
420 
421 /*
422  * AsyncScanInfo
423  *
424  * Tracks the state of an async scan for a handle table.
425  *
426  */
struct AsyncScanInfo
{
    /*
     * Underlying Callback Info
     *
     * Specifies callback info for the underlying block handler.
     */
    struct ScanCallbackInfo *pCallbackInfo;

    /*
     * Underlying Segment Iterator
     *
     * Specifies the segment iterator to be used during async scanning.
     */
    SEGMENTITERATOR   pfnSegmentIterator;

    /*
     * Underlying Block Handler
     *
     * Specifies the block handler to be used during async scanning.
     */
    BLOCKSCANPROC     pfnBlockHandler;

    /*
     * Scan Queue
     *
     * Head of the list of nodes to be processed asynchronously.
     */
    struct ScanQNode *pScanQueue;

    /*
     * Queue Tail
     *
     * Specifies the tail node in the queue, or NULL if the queue is empty.
     */
    struct ScanQNode *pQueueTail;
};
464 
465 
466 /*
467  * Handle Table
468  *
469  * Defines the layout of a handle table object.
470  */
471 #ifdef _MSC_VER
472 #pragma warning(push)
473 #pragma warning(disable : 4200 )  // zero-sized array
474 #endif
struct HandleTable
{
    /*
     * flags describing handle attributes
     *
     * N.B. this is at offset 0 due to frequent access by cache free codepath
     * (consulted via TypeHasUserData, among others)
     */
    uint32_t rgTypeFlags[HANDLE_MAX_INTERNAL_TYPES];

    /*
     * lock for this table
     */
    CrstStatic Lock;

    /*
     * number of types this table supports
     */
    uint32_t uTypeCount;

    /*
     * number of handles owned by this table that are marked as "used"
     * (this includes the handles residing in rgMainCache and rgQuickCache)
     */
    uint32_t dwCount;

    /*
     * head of segment list for this table
     */
    PTR_TableSegment pSegmentList;

    /*
     * information on current async scan (if any)
     */
    AsyncScanInfo *pAsyncScanInfo;

    /*
     * per-table user info
     */
    uint32_t uTableIndex;

    /*
     * per-table AppDomain info
     */
    ADIndex uADIndex;

    /*
     * one-level per-type 'quick' handle cache
     */
    OBJECTHANDLE rgQuickCache[HANDLE_MAX_INTERNAL_TYPES];   // interlocked ops used here

    /*
     * debug-only statistics
     */
#ifdef _DEBUG
    int     _DEBUG_iMaxGen;
    int64_t _DEBUG_TotalBlocksScanned            [MAXSTATGEN];
    int64_t _DEBUG_TotalBlocksScannedNonTrivially[MAXSTATGEN];
    int64_t _DEBUG_TotalHandleSlotsScanned       [MAXSTATGEN];
    int64_t _DEBUG_TotalHandlesActuallyScanned   [MAXSTATGEN];
#endif

    /*
     * primary per-type handle cache
     *
     * Variable-length tail array (hence the disabled C4200 warning above);
     * presumably sized to uTypeCount entries by the table allocator —
     * confirm against the allocation code.
     */
    HandleTypeCache rgMainCache[0];                         // interlocked ops used here
};
541 
542 #ifdef _MSC_VER
543 #pragma warning(pop)
544 #endif
545 
546 /*--------------------------------------------------------------------------*/
547 
548 
549 
550 /****************************************************************************
551  *
552  * HELPERS
553  *
554  ****************************************************************************/
555 
556 /*
557  * A 32/64 comparison callback
558  *<TODO>
559  * @TODO: move/merge into common util file
560  *</TODO>
561  */
562 typedef int (*PFNCOMPARE)(uintptr_t p, uintptr_t q);
563 
564 
565 /*
566  * A 32/64 neutral quicksort
567  *<TODO>
568  * @TODO: move/merge into common util file
569  *</TODO>
570  */
571 void QuickSort(uintptr_t *pData, int left, int right, PFNCOMPARE pfnCompare);
572 
573 
574 /*
575  * CompareHandlesByFreeOrder
576  *
577  * Returns:
578  *  <0 - handle P should be freed before handle Q
 *  =0 - handles are equivalent for free order purposes
580  *  >0 - handle Q should be freed before handle P
581  *
582  */
583 int CompareHandlesByFreeOrder(uintptr_t p, uintptr_t q);
584 
585 /*--------------------------------------------------------------------------*/
586 
587 
588 
589 /****************************************************************************
590  *
591  * CORE TABLE MANAGEMENT
592  *
593  ****************************************************************************/
594 
595 /*
596  * TypeHasUserData
597  *
598  * Determines whether a given handle type has user data.
599  *
600  */
TypeHasUserData(HandleTable * pTable,uint32_t uType)601 __inline BOOL TypeHasUserData(HandleTable *pTable, uint32_t uType)
602 {
603     LIMITED_METHOD_CONTRACT;
604 
605     // sanity
606     _ASSERTE(uType < HANDLE_MAX_INTERNAL_TYPES);
607 
608     // consult the type flags
609     return (pTable->rgTypeFlags[uType] & HNDF_EXTRAINFO);
610 }
611 
612 
613 /*
614  * TableCanFreeSegmentNow
615  *
616  * Determines if it is OK to free the specified segment at this time.
617  *
618  */
619 BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment);
620 
621 
622 /*
623  * BlockIsLocked
624  *
625  * Determines if the lock count for the specified block is currently non-zero.
626  *
627  */
BlockIsLocked(TableSegment * pSegment,uint32_t uBlock)628 __inline BOOL BlockIsLocked(TableSegment *pSegment, uint32_t uBlock)
629 {
630     LIMITED_METHOD_CONTRACT;
631 
632     // sanity
633     _ASSERTE(uBlock < HANDLE_BLOCKS_PER_SEGMENT);
634 
635     // fetch the lock count and compare it to zero
636     return (pSegment->rgLocks[uBlock] != 0);
637 }
638 
639 
640 /*
641  * BlockLock
642  *
643  * Increases the lock count for a block.
644  *
645  */
BlockLock(TableSegment * pSegment,uint32_t uBlock)646 __inline void BlockLock(TableSegment *pSegment, uint32_t uBlock)
647 {
648     LIMITED_METHOD_CONTRACT;
649 
650     // fetch the old lock count
651     uint8_t bLocks = pSegment->rgLocks[uBlock];
652 
653     // assert if we are about to trash the count
654     _ASSERTE(bLocks < 0xFF);
655 
656     // store the incremented lock count
657     pSegment->rgLocks[uBlock] = bLocks + 1;
658 }
659 
660 
661 /*
662  * BlockUnlock
663  *
664  * Decreases the lock count for a block.
665  *
666  */
BlockUnlock(TableSegment * pSegment,uint32_t uBlock)667 __inline void BlockUnlock(TableSegment *pSegment, uint32_t uBlock)
668 {
669     LIMITED_METHOD_CONTRACT;
670 
671     // fetch the old lock count
672     uint8_t bLocks = pSegment->rgLocks[uBlock];
673 
674     // assert if we are about to trash the count
675     _ASSERTE(bLocks > 0);
676 
677     // store the decremented lock count
678     pSegment->rgLocks[uBlock] = bLocks - 1;
679 }
680 
681 
682 /*
683  * BlockFetchUserDataPointer
684  *
685  * Gets the user data pointer for the first handle in a block.
686  *
687  */
688 PTR_uintptr_t BlockFetchUserDataPointer(PTR__TableSegmentHeader pSegment, uint32_t uBlock, BOOL fAssertOnError);
689 
690 
691 /*
692  * HandleValidateAndFetchUserDataPointer
693  *
694  * Gets the user data pointer for a handle.
695  * ASSERTs and returns NULL if handle is not of the expected type.
696  *
697  */
698 uintptr_t *HandleValidateAndFetchUserDataPointer(OBJECTHANDLE handle, uint32_t uTypeExpected);
699 
700 
701 /*
702  * HandleQuickFetchUserDataPointer
703  *
704  * Gets the user data pointer for a handle.
705  * Less validation is performed.
706  *
707  */
708 PTR_uintptr_t HandleQuickFetchUserDataPointer(OBJECTHANDLE handle);
709 
710 
711 /*
712  * HandleQuickSetUserData
713  *
714  * Stores user data with a handle.
715  * Less validation is performed.
716  *
717  */
718 void HandleQuickSetUserData(OBJECTHANDLE handle, uintptr_t lUserData);
719 
720 
721 /*
722  * HandleFetchType
723  *
724  * Computes the type index for a given handle.
725  *
726  */
727 uint32_t HandleFetchType(OBJECTHANDLE handle);
728 
729 
730 /*
731  * HandleFetchHandleTable
732  *
733  * Returns the containing handle table of a given handle.
734  *
735  */
736 PTR_HandleTable HandleFetchHandleTable(OBJECTHANDLE handle);
737 
738 
739 /*
740  * SegmentAlloc
741  *
742  * Allocates a new segment.
743  *
744  */
745 TableSegment *SegmentAlloc(HandleTable *pTable);
746 
747 
748 /*
749  * SegmentFree
750  *
751  * Frees the specified segment.
752  *
753  */
754 void SegmentFree(TableSegment *pSegment);
755 
756 /*
757  * TableHandleAsyncPinHandles
758  *
759  * Mark ready for all non-pending OverlappedData that get moved to default domain.
760  *
761  */
762 BOOL TableHandleAsyncPinHandles(HandleTable *pTable);
763 
764 /*
765  * TableRelocateAsyncPinHandles
766  *
767  * Replaces async pin handles with ones in default domain.
768  *
769  */
770 void TableRelocateAsyncPinHandles(HandleTable *pTable, HandleTable *pTargetTable);
771 
772 /*
773  * Check if a handle is part of a HandleTable
774  */
775 BOOL TableContainHandle(HandleTable *pTable, OBJECTHANDLE handle);
776 
777 /*
778  * SegmentRemoveFreeBlocks
779  *
780  * Removes a block from a block list in a segment.  The block is returned to
781  * the segment's free list.
782  *
783  */
784 void SegmentRemoveFreeBlocks(TableSegment *pSegment, uint32_t uType);
785 
786 
787 /*
788  * SegmentResortChains
789  *
790  * Sorts the block chains for optimal scanning order.
791  * Sorts the free list to combat fragmentation.
792  *
793  */
794 void SegmentResortChains(TableSegment *pSegment);
795 
796 
797 /*
798  * DoesSegmentNeedsToTrimExcessPages
799  *
800  * Checks to see if any pages can be decommitted from the segment.
801  *
802  */
803 BOOL DoesSegmentNeedsToTrimExcessPages(TableSegment *pSegment);
804 
805 /*
806  * SegmentTrimExcessPages
807  *
808  * Checks to see if any pages can be decommitted from the segment.
 * If there are any unused pages, it decommits them.
810  *
811  */
812 void SegmentTrimExcessPages(TableSegment *pSegment);
813 
814 
815 /*
816  * TableAllocBulkHandles
817  *
 * Attempts to allocate the requested number of handles of the specified type.
819  *
820  * Returns the number of handles that were actually allocated.  This is always
821  * the same as the number of handles requested except in out-of-memory conditions,
822  * in which case it is the number of handles that were successfully allocated.
823  *
824  */
825 uint32_t TableAllocBulkHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount);
826 
827 
828 /*
829  * TableFreeBulkPreparedHandles
830  *
831  * Frees an array of handles of the specified type.
832  *
833  * This routine is optimized for a sorted array of handles but will accept any order.
834  *
835  */
836 void TableFreeBulkPreparedHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount);
837 
838 
839 /*
840  * TableFreeBulkUnpreparedHandles
841  *
842  * Frees an array of handles of the specified type by preparing them and calling TableFreeBulkPreparedHandles.
843  *
844  */
845 void TableFreeBulkUnpreparedHandles(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandles, uint32_t uCount);
846 
847 /*--------------------------------------------------------------------------*/
848 
849 
850 
851 /****************************************************************************
852  *
853  * HANDLE CACHE
854  *
855  ****************************************************************************/
856 
857 /*
858  * TableAllocSingleHandleFromCache
859  *
860  * Gets a single handle of the specified type from the handle table by
861  * trying to fetch it from the reserve cache for that handle type.  If the
862  * reserve cache is empty, this routine calls TableCacheMissOnAlloc.
863  *
864  */
865 OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType);
866 
867 
868 /*
869  * TableFreeSingleHandleToCache
870  *
871  * Returns a single handle of the specified type to the handle table
872  * by trying to store it in the free cache for that handle type.  If the
873  * free cache is full, this routine calls TableCacheMissOnFree.
874  *
875  */
876 void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHANDLE handle);
877 
878 
879 /*
880  * TableAllocHandlesFromCache
881  *
882  * Allocates multiple handles of the specified type by repeatedly
883  * calling TableAllocSingleHandleFromCache.
884  *
885  */
886 uint32_t TableAllocHandlesFromCache(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount);
887 
888 
889 /*
890  * TableFreeHandlesToCache
891  *
892  * Frees multiple handles of the specified type by repeatedly
893  * calling TableFreeSingleHandleToCache.
894  *
895  */
896 void TableFreeHandlesToCache(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandleBase, uint32_t uCount);
897 
898 /*--------------------------------------------------------------------------*/
899 
900 
901 
902 /****************************************************************************
903  *
904  * TABLE SCANNING
905  *
906  ****************************************************************************/
907 
908 /*
909  * TableScanHandles
910  *
911  * Implements the core handle scanning loop for a table.
912  *
913  */
914 void CALLBACK TableScanHandles(PTR_HandleTable pTable,
915                                const uint32_t *puType,
916                                uint32_t uTypeCount,
917                                SEGMENTITERATOR pfnSegmentIterator,
918                                BLOCKSCANPROC pfnBlockHandler,
919                                ScanCallbackInfo *pInfo,
920                                CrstHolderWithState *pCrstHolder);
921 
922 
923 /*
924  * xxxTableScanHandlesAsync
925  *
926  * Implements asynchronous handle scanning for a table.
927  *
928  */
929 void CALLBACK xxxTableScanHandlesAsync(PTR_HandleTable pTable,
930                                        const uint32_t *puType,
931                                        uint32_t uTypeCount,
932                                        SEGMENTITERATOR pfnSegmentIterator,
933                                        BLOCKSCANPROC pfnBlockHandler,
934                                        ScanCallbackInfo *pInfo,
935                                        CrstHolderWithState *pCrstHolder);
936 
937 
938 /*
939  * TypesRequireUserDataScanning
940  *
941  * Determines whether the set of types listed should get user data during scans
942  *
943  * if ALL types passed have user data then this function will enable user data support
944  * otherwise it will disable user data support
945  *
946  * IN OTHER WORDS, SCANNING WITH A MIX OF USER-DATA AND NON-USER-DATA TYPES IS NOT SUPPORTED
947  *
948  */
949 BOOL TypesRequireUserDataScanning(HandleTable *pTable, const uint32_t *types, uint32_t typeCount);
950 
951 
952 /*
953  * BuildAgeMask
954  *
955  * Builds an age mask to be used when examining/updating the write barrier.
956  *
957  */
958 uint32_t BuildAgeMask(uint32_t uGen, uint32_t uMaxGen);
959 
960 
961 /*
962  * QuickSegmentIterator
963  *
964  * Returns the next segment to be scanned in a scanning loop.
965  *
966  */
967 PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0);
968 
969 
970 /*
971  * StandardSegmentIterator
972  *
973  * Returns the next segment to be scanned in a scanning loop.
974  *
975  * This iterator performs some maintenance on the segments,
976  * primarily making sure the block chains are sorted so that
977  * g0 scans are more likely to operate on contiguous blocks.
978  *
979  */
980 PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0);
981 
982 
983 /*
984  * FullSegmentIterator
985  *
986  * Returns the next segment to be scanned in a scanning loop.
987  *
988  * This iterator performs full maintenance on the segments,
989  * including freeing those it notices are empty along the way.
990  *
991  */
992 PTR_TableSegment CALLBACK FullSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0);
993 
994 
995 /*
996  * BlockScanBlocksWithoutUserData
997  *
998  * Calls the specified callback for each handle, optionally aging the corresponding generation clumps.
999  * NEVER propagates per-handle user data to the callback.
1000  *
1001  */
1002 void CALLBACK BlockScanBlocksWithoutUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1003 
1004 
1005 /*
1006  * BlockScanBlocksWithUserData
1007  *
1008  * Calls the specified callback for each handle, optionally aging the corresponding generation clumps.
1009  * ALWAYS propagates per-handle user data to the callback.
1010  *
1011  */
1012 void CALLBACK BlockScanBlocksWithUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1013 
1014 
1015 /*
1016  * BlockScanBlocksEphemeral
1017  *
1018  * Calls the specified callback for each handle from the specified generation.
1019  * Propagates per-handle user data to the callback if present.
1020  *
1021  */
1022 void CALLBACK BlockScanBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1023 
1024 
1025 /*
1026  * BlockAgeBlocks
1027  *
1028  * Ages all clumps in a range of consecutive blocks.
1029  *
1030  */
1031 void CALLBACK BlockAgeBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1032 
1033 
1034 /*
1035  * BlockAgeBlocksEphemeral
1036  *
1037  * Ages all clumps within the specified generation.
1038  *
1039  */
1040 void CALLBACK BlockAgeBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1041 
1042 
1043 /*
1044  * BlockResetAgeMapForBlocks
1045  *
1046  * Clears the age maps for a range of blocks.
1047  *
1048  */
1049 void CALLBACK BlockResetAgeMapForBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1050 
1051 
1052 /*
1053  * BlockVerifyAgeMapForBlocks
1054  *
1055  * Verifies the age maps for a range of blocks, and also validates the objects pointed to.
1056  *
1057  */
1058 void CALLBACK BlockVerifyAgeMapForBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo);
1059 
1060 
1061 /*
1062  * xxxAsyncSegmentIterator
1063  *
 * Implements the per-segment iteration logic used during asynchronous scans.
1065  *
1066  */
1067 PTR_TableSegment CALLBACK xxxAsyncSegmentIterator(PTR_HandleTable pTable, TableSegment *pPrevSegment, CrstHolderWithState *pCrstHolder);
1068 
1069 /*--------------------------------------------------------------------------*/
1070