/*-------------------------------------------------------------------------
 *
 * hashsort.c
 *		Sort tuples for insertion into a new hash index.
 *
 * When building a very large hash index, we pre-sort the tuples by bucket
 * number to improve locality of access to the index, and thereby avoid
 * thrashing.  We use tuplesort.c to sort the given index tuples into order.
 *
 * Note: if the number of rows in the table has been underestimated,
 * bucket splits may occur during the index build.  In that case we'd
 * be inserting into two or more buckets for each possible masked-off
 * hash code value.  That's no big problem though, since we'll still have
 * plenty of locality of access.
 *
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/hash/hashsort.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/hash.h"
#include "miscadmin.h"
#include "utils/tuplesort.h"

/*
 * Status record for spooling/sorting phase.
 */
struct HSpool
{
	Tuplesortstate *sortstate;	/* state data for tuplesort.c */
	Relation	index;			/* index being built */
	uint32		hash_mask;		/* bitmask for hash codes */
};

44 /*
45 * create and initialize a spool structure
46 */
47 HSpool *
_h_spoolinit(Relation heap,Relation index,uint32 num_buckets)48 _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
49 {
50 HSpool *hspool = (HSpool *) palloc0(sizeof(HSpool));
51
52 hspool->index = index;
53
54 /*
55 * Determine the bitmask for hash code values. Since there are currently
56 * num_buckets buckets in the index, the appropriate mask can be computed
57 * as follows.
58 *
59 * Note: at present, the passed-in num_buckets is always a power of 2, so
60 * we could just compute num_buckets - 1. We prefer not to assume that
61 * here, though.
62 */
63 hspool->hash_mask = (((uint32) 1) << _hash_log2(num_buckets)) - 1;
64
65 /*
66 * We size the sort area as maintenance_work_mem rather than work_mem to
67 * speed index creation. This should be OK since a single backend can't
68 * run multiple index creations in parallel.
69 */
70 hspool->sortstate = tuplesort_begin_index_hash(heap,
71 index,
72 hspool->hash_mask,
73 maintenance_work_mem,
74 false);
75
76 return hspool;
77 }
78
/*
 * clean up a spool structure and its substructures.
 */
void
_h_spooldestroy(HSpool *hspool)
{
	/* Release the tuplesort state (and any temp files) before the spool */
	tuplesort_end(hspool->sortstate);
	pfree(hspool);
}

/*
 * spool an index entry into the sort file.
 */
void
_h_spool(HSpool *hspool, ItemPointer self, Datum *values, bool *isnull)
{
	/* Form an index tuple from values/isnull and feed it to the sort */
	tuplesort_putindextuplevalues(hspool->sortstate, hspool->index,
								  self, values, isnull);
}

99 /*
100 * given a spool loaded by successive calls to _h_spool,
101 * create an entire index.
102 */
103 void
_h_indexbuild(HSpool * hspool)104 _h_indexbuild(HSpool *hspool)
105 {
106 IndexTuple itup;
107 bool should_free;
108 #ifdef USE_ASSERT_CHECKING
109 uint32 hashkey = 0;
110 #endif
111
112 tuplesort_performsort(hspool->sortstate);
113
114 while ((itup = tuplesort_getindextuple(hspool->sortstate,
115 true, &should_free)) != NULL)
116 {
117 /*
118 * Technically, it isn't critical that hash keys be found in sorted
119 * order, since this sorting is only used to increase locality of
120 * access as a performance optimization. It still seems like a good
121 * idea to test tuplesort.c's handling of hash index tuple sorts
122 * through an assertion, though.
123 */
124 #ifdef USE_ASSERT_CHECKING
125 uint32 lasthashkey = hashkey;
126
127 hashkey = _hash_get_indextuple_hashkey(itup) & hspool->hash_mask;
128 Assert(hashkey >= lasthashkey);
129 #endif
130
131 _hash_doinsert(hspool->index, itup);
132 if (should_free)
133 pfree(itup);
134 }
135 }
136